From bdd88e1ed7c3d3c703f477e574a0db376104e0b6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 11 Feb 2021 21:22:45 -0800 Subject: [PATCH 0001/1976] YOLOv5 Segmentation Dataloader Updates (#2188) * Update C3 module * update datasets * update attempt_download() * parameterize eps * comments * gs-multiple * max_nms implemented * Create one_cycle() function * GitHub API rate limit fix * ComputeLoss() * astuple * epochs * commit=tag == tags[-1] * Update cudnn.benchmark * mosaic9 * institute cache versioning * only display on existing cache * reverse cache exists booleans --- data/scripts/get_coco.sh | 2 +- utils/datasets.py | 134 ++++++++++++++++++++++----------------- utils/general.py | 36 ++++++++++- utils/loss.py | 2 +- 4 files changed, 113 insertions(+), 61 deletions(-) diff --git a/data/scripts/get_coco.sh b/data/scripts/get_coco.sh index 02634c000dfe..bbb1e9291d5b 100755 --- a/data/scripts/get_coco.sh +++ b/data/scripts/get_coco.sh @@ -10,7 +10,7 @@ # Download/unzip labels d='../' # unzip directory url=https://github.com/ultralytics/yolov5/releases/download/v1.0/ -f='coco2017labels.zip' # 68 MB +f='coco2017labels.zip' # or 'coco2017labels-segments.zip', 68 MB echo 'Downloading' $url$f ' ...' 
curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & # download, unzip, remove in background diff --git a/utils/datasets.py b/utils/datasets.py index 05c8fdbf4c4f..29a8812a20a2 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -20,7 +20,8 @@ from torch.utils.data import Dataset from tqdm import tqdm -from utils.general import xyxy2xywh, xywh2xyxy, xywhn2xyxy, clean_str +from utils.general import xyxy2xywh, xywh2xyxy, xywhn2xyxy, xyn2xy, segment2box, segments2boxes, resample_segments, \ + clean_str from utils.torch_utils import torch_distributed_zero_first # Parameters @@ -374,21 +375,23 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r self.label_files = img2label_paths(self.img_files) # labels cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') # cached labels if cache_path.is_file(): - cache = torch.load(cache_path) # load - if cache['hash'] != get_hash(self.label_files + self.img_files) or 'results' not in cache: # changed - cache = self.cache_labels(cache_path, prefix) # re-cache + cache, exists = torch.load(cache_path), True # load + if cache['hash'] != get_hash(self.label_files + self.img_files) or 'version' not in cache: # changed + cache, exists = self.cache_labels(cache_path, prefix), False # re-cache else: - cache = self.cache_labels(cache_path, prefix) # cache + cache, exists = self.cache_labels(cache_path, prefix), False # cache # Display cache - [nf, nm, ne, nc, n] = cache.pop('results') # found, missing, empty, corrupted, total - desc = f"Scanning '{cache_path}' for images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted" - tqdm(None, desc=prefix + desc, total=n, initial=n) + nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupted, total + if exists: + d = f"Scanning '{cache_path}' for images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted" + tqdm(None, desc=prefix + d, total=n, initial=n) # display cache results assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {help_url}' # Read cache cache.pop('hash') # remove hash - labels, shapes = zip(*cache.values()) + cache.pop('version') # remove version + labels, shapes, self.segments = zip(*cache.values()) self.labels = list(labels) self.shapes = np.array(shapes, dtype=np.float64) self.img_files = list(cache.keys()) # update @@ -451,6 +454,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): im = Image.open(im_file) im.verify() # PIL verify shape = exif_size(im) # image size + segments = [] # instance segments assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels' assert im.format.lower() in img_formats, f'invalid image format {im.format}' @@ -458,7 +462,12 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): if os.path.isfile(lb_file): nf += 1 # label found with open(lb_file, 'r') as f: - l = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels + l = [x.split() for x in f.read().strip().splitlines()] + if any([len(x) > 8 for x in l]): # is segment + classes = np.array([x[0] for x in l], dtype=np.float32) + segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in l] # (cls, xy1...) 
+ l = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh) + l = np.array(l, dtype=np.float32) if len(l): assert l.shape[1] == 5, 'labels require 5 columns each' assert (l >= 0).all(), 'negative labels' @@ -470,7 +479,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): else: nm += 1 # label missing l = np.zeros((0, 5), dtype=np.float32) - x[im_file] = [l, shape] + x[im_file] = [l, shape, segments] except Exception as e: nc += 1 print(f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}') @@ -482,7 +491,8 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): print(f'{prefix}WARNING: No labels found in {path}. See {help_url}') x['hash'] = get_hash(self.label_files + self.img_files) - x['results'] = [nf, nm, ne, nc, i + 1] + x['results'] = nf, nm, ne, nc, i + 1 + x['version'] = 0.1 # cache version torch.save(x, path) # save for next time logging.info(f'{prefix}New cache created: {path}') return x @@ -652,7 +662,7 @@ def hist_equalize(img, clahe=True, bgr=False): def load_mosaic(self, index): # loads images in a 4-mosaic - labels4 = [] + labels4, segments4 = [], [] s = self.img_size yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y indices = [index] + [self.indices[random.randint(0, self.n - 1)] for _ in range(3)] # 3 additional image indices @@ -680,19 +690,21 @@ def load_mosaic(self, index): padh = y1a - y1b # Labels - labels = self.labels[index].copy() + labels, segments = self.labels[index].copy(), self.segments[index].copy() if labels.size: labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format + segments = [xyn2xy(x, w, h, padw, padh) for x in segments] labels4.append(labels) + segments4.extend(segments) # Concat/clip labels - if len(labels4): - labels4 = np.concatenate(labels4, 0) - np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:]) # use with random_perspective - # img4, labels4 = replicate(img4, labels4) # replicate + labels4 = np.concatenate(labels4, 0) + for x in (labels4[:, 1:], *segments4): + np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() + # img4, labels4 = replicate(img4, labels4) # replicate # Augment - img4, labels4 = random_perspective(img4, labels4, + img4, labels4 = random_perspective(img4, labels4, segments4, degrees=self.hyp['degrees'], translate=self.hyp['translate'], scale=self.hyp['scale'], @@ -706,7 +718,7 @@ def load_mosaic(self, index): def load_mosaic9(self, index): # loads images in a 9-mosaic - labels9 = [] + labels9, segments9 = [], [] s = self.img_size indices = [index] + [self.indices[random.randint(0, self.n - 1)] for _ in range(8)] # 8 additional image indices for i, index in enumerate(indices): @@ -739,30 +751,34 @@ def load_mosaic9(self, index): x1, y1, x2, y2 = [max(x, 0) for x in c] # allocate coords # Labels - labels = self.labels[index].copy() + labels, segments = self.labels[index].copy(), self.segments[index].copy() if labels.size: labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format + segments = [xyn2xy(x, w, h, padx, pady) for x in segments] labels9.append(labels) + segments9.extend(segments) # Image img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax] hp, wp = h, w # height, width previous # Offset - yc, xc = [int(random.uniform(0, s)) for x in self.mosaic_border] # mosaic center x, y + yc, xc = [int(random.uniform(0, s)) for _ in self.mosaic_border] # mosaic center x, y img9 = img9[yc:yc 
+ 2 * s, xc:xc + 2 * s] # Concat/clip labels - if len(labels9): - labels9 = np.concatenate(labels9, 0) - labels9[:, [1, 3]] -= xc - labels9[:, [2, 4]] -= yc + labels9 = np.concatenate(labels9, 0) + labels9[:, [1, 3]] -= xc + labels9[:, [2, 4]] -= yc + c = np.array([xc, yc]) # centers + segments9 = [x - c for x in segments9] - np.clip(labels9[:, 1:], 0, 2 * s, out=labels9[:, 1:]) # use with random_perspective - # img9, labels9 = replicate(img9, labels9) # replicate + for x in (labels9[:, 1:], *segments9): + np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() + # img9, labels9 = replicate(img9, labels9) # replicate # Augment - img9, labels9 = random_perspective(img9, labels9, + img9, labels9 = random_perspective(img9, labels9, segments9, degrees=self.hyp['degrees'], translate=self.hyp['translate'], scale=self.hyp['scale'], @@ -823,7 +839,8 @@ def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scale return img, ratio, (dw, dh) -def random_perspective(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, border=(0, 0)): +def random_perspective(img, targets=(), segments=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, + border=(0, 0)): # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10)) # targets = [cls, xyxy] @@ -875,37 +892,38 @@ def random_perspective(img, targets=(), degrees=10, translate=.1, scale=.1, shea # Transform label coordinates n = len(targets) if n: - # warp points - xy = np.ones((n * 4, 3)) - xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1 - xy = xy @ M.T # transform - if perspective: - xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8) # rescale - else: # affine - xy = xy[:, :2].reshape(n, 8) - - # create new boxes - x = xy[:, [0, 2, 4, 6]] - y = xy[:, [1, 3, 5, 7]] - xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T - - # # apply angle-based reduction of bounding boxes - # radians = a * math.pi / 180 - # reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5 - # x = (xy[:, 2] + xy[:, 0]) / 2 - # y = (xy[:, 3] + xy[:, 1]) / 2 - # w = (xy[:, 2] - xy[:, 0]) * reduction - # h = (xy[:, 3] - xy[:, 1]) * reduction - # xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T - - # clip boxes - xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width) - xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height) + use_segments = any(x.any() for x in segments) + new = np.zeros((n, 4)) + if use_segments: # warp segments + segments = resample_segments(segments) # upsample + for i, segment in enumerate(segments): + xy = np.ones((len(segment), 3)) + xy[:, :2] = segment + xy = xy @ M.T # transform + xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or affine + + # clip + new[i] = segment2box(xy, width, height) + + else: # warp boxes + xy = np.ones((n * 4, 3)) + xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1 + xy = xy @ M.T # transform + xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine + + # create new boxes + x = xy[:, [0, 2, 4, 6]] + y = xy[:, [1, 3, 5, 7]] + new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T + + # clip + new[:, [0, 2]] = new[:, [0, 2]].clip(0, width) + new[:, [1, 3]] = new[:, [1, 3]].clip(0, height) # filter candidates - i = box_candidates(box1=targets[:, 1:5].T * s, box2=xy.T) + i = 
box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10) targets = targets[i] - targets[:, 1:5] = xy[i] + targets[:, 1:5] = new[i] return img, targets diff --git a/utils/general.py b/utils/general.py index f979a05c6e49..24807483f5f4 100755 --- a/utils/general.py +++ b/utils/general.py @@ -225,7 +225,7 @@ def xywh2xyxy(x): return y -def xywhn2xyxy(x, w=640, h=640, padw=32, padh=32): +def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0): # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw # top left x @@ -235,6 +235,40 @@ def xywhn2xyxy(x, w=640, h=640, padw=32, padh=32): return y +def xyn2xy(x, w=640, h=640, padw=0, padh=0): + # Convert normalized segments into pixel segments, shape (n,2) + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = w * x[:, 0] + padw # top left x + y[:, 1] = h * x[:, 1] + padh # top left y + return y + + +def segment2box(segment, width=640, height=640): + # Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy) + x, y = segment.T # segment xy + inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height) + x, y, = x[inside], y[inside] + return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4)) # cls, xyxy + + +def segments2boxes(segments): + # Convert segment labels to box labels, i.e. (cls, xy1, xy2, ...) to (cls, xywh) + boxes = [] + for s in segments: + x, y = s.T # segment xy + boxes.append([x.min(), y.min(), x.max(), y.max()]) # cls, xyxy + return xyxy2xywh(np.array(boxes)) # cls, xywh + + +def resample_segments(segments, n=1000): + # Up-sample an (n,2) segment + for i, s in enumerate(segments): + x = np.linspace(0, len(s) - 1, n) + xp = np.arange(len(s)) + segments[i] = np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)]).reshape(2, -1).T # segment xy + return segments + + def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None): # Rescale coords (xyxy) from img1_shape to img0_shape if ratio_pad is None: # calculate from img0_shape diff --git a/utils/loss.py b/utils/loss.py index 2490d4bb7cfc..481d25e207f2 100644 --- a/utils/loss.py +++ b/utils/loss.py @@ -105,7 +105,7 @@ def __init__(self, model, autobalance=False): BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) det = model.module.model[-1] if is_parallel(model) else model.model[-1] # Detect() module - self.balance = {3: [3.67, 1.0, 0.43], 4: [4.0, 1.0, 0.25, 0.06], 5: [4.0, 1.0, 0.25, 0.06, .02]}[det.nl] + self.balance = {3: [4.0, 1.0, 0.4], 4: [4.0, 1.0, 0.25, 0.06], 5: [4.0, 1.0, 0.25, 0.06, .02]}[det.nl] self.ssi = (det.stride == 16).nonzero(as_tuple=False).item() # stride 16 index self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, model.gr, h, autobalance for k in 'na', 'nc', 'nl', 'anchors': From 17ac94b7968703e708bfeb7274de755c4b2f1f43 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 11 Feb 2021 22:39:37 -0800 Subject: [PATCH 0002/1976] Created using Colaboratory --- tutorial.ipynb | 182 ++++++++++++++++++++++++------------------------- 1 file changed, 90 insertions(+), 92 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 3f7133f4f7d7..7587d9f536fe 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -16,7 +16,7 @@ "accelerator": "GPU", "widgets": { "application/vnd.jupyter.widget-state+json": { - "811fd52fef65422c8267bafcde8a2c3d": { + 
"1f8e9b8ebded4175b2eaa9f75c3ceb00": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "state": { @@ -28,15 +28,15 @@ "_view_count": null, "_view_module_version": "1.5.0", "box_style": "", - "layout": "IPY_MODEL_8f41b90117224eef9133a9c3a103dbba", + "layout": "IPY_MODEL_0a1246a73077468ab80e979cc0576cd2", "_model_module": "@jupyter-widgets/controls", "children": [ - "IPY_MODEL_ca2fb37af6ed43d4a74cdc9f2ac5c4a5", - "IPY_MODEL_29419ae5ebb9403ea73f7e5a68037bdd" + "IPY_MODEL_d327cde5a85a4a51bb8b1b3e9cf06c97", + "IPY_MODEL_d5ef1cb2cbed4b87b3c5d292ff2b0da6" ] } }, - "8f41b90117224eef9133a9c3a103dbba": { + "0a1246a73077468ab80e979cc0576cd2": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -87,12 +87,12 @@ "left": null } }, - "ca2fb37af6ed43d4a74cdc9f2ac5c4a5": { + "d327cde5a85a4a51bb8b1b3e9cf06c97": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "state": { "_view_name": "ProgressView", - "style": "IPY_MODEL_6511b4dfb10b48d1bc98bcfb3987bfa0", + "style": "IPY_MODEL_8d5dff8bca14435a88fa1814533acd85", "_dom_classes": [], "description": "100%", "_model_name": "FloatProgressModel", @@ -107,30 +107,30 @@ "min": 0, "description_tooltip": null, "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_64f0badf1a8f489885aa984dd62d37dc" + "layout": "IPY_MODEL_3d5136c19e7645ca9bc8f51ceffb2be1" } }, - "29419ae5ebb9403ea73f7e5a68037bdd": { + "d5ef1cb2cbed4b87b3c5d292ff2b0da6": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "state": { "_view_name": "HTMLView", - "style": "IPY_MODEL_f569911c5cfc4d81bb1bdfa83447afc8", + "style": "IPY_MODEL_2919396dbd4b4c8e821d12bd28665d8a", "_dom_classes": [], "description": "", "_model_name": "HTMLModel", "placeholder": "​", "_view_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", - "value": " 781M/781M [00:23<00:00, 34.2MB/s]", + "value": " 781M/781M [00:12<00:00, 65.5MB/s]", "_view_count": null, "_view_module_version": "1.5.0", "description_tooltip": null, "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_84943ade566440aaa2dcf3b3b27e7074" + "layout": "IPY_MODEL_6feb16f2b2fa4021b1a271e1dd442d04" } }, - "6511b4dfb10b48d1bc98bcfb3987bfa0": { + "8d5dff8bca14435a88fa1814533acd85": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "state": { @@ -145,7 +145,7 @@ "_model_module": "@jupyter-widgets/controls" } }, - "64f0badf1a8f489885aa984dd62d37dc": { + "3d5136c19e7645ca9bc8f51ceffb2be1": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -196,7 +196,7 @@ "left": null } }, - "f569911c5cfc4d81bb1bdfa83447afc8": { + "2919396dbd4b4c8e821d12bd28665d8a": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "state": { @@ -210,7 +210,7 @@ "_model_module": "@jupyter-widgets/controls" } }, - "84943ade566440aaa2dcf3b3b27e7074": { + "6feb16f2b2fa4021b1a271e1dd442d04": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -261,7 +261,7 @@ "left": null } }, - "8501ed1563e4452eac9df6b7a66e8f8c": { + "e6459e0bcee449b090fc9807672725bc": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "state": { @@ -273,15 +273,15 @@ "_view_count": null, "_view_module_version": "1.5.0", "box_style": "", - "layout": "IPY_MODEL_d2bb96801e1f46f4a58e02534f7026ff", + "layout": "IPY_MODEL_c341e1d3bf3b40d1821ce392eb966c68", "_model_module": "@jupyter-widgets/controls", "children": [ - 
"IPY_MODEL_468a796ef06b4a24bcba6fbd4a0a8db5", - "IPY_MODEL_42ad5c1ea7be4835bffebf90642178f1" + "IPY_MODEL_660afee173694231a6dce3cd94df6cae", + "IPY_MODEL_261218485cef48df961519dde5edfcbe" ] } }, - "d2bb96801e1f46f4a58e02534f7026ff": { + "c341e1d3bf3b40d1821ce392eb966c68": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -332,12 +332,12 @@ "left": null } }, - "468a796ef06b4a24bcba6fbd4a0a8db5": { + "660afee173694231a6dce3cd94df6cae": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "state": { "_view_name": "ProgressView", - "style": "IPY_MODEL_c58b5536d98f4814831934e9c30c4d78", + "style": "IPY_MODEL_32736d503c06497abfae8c0421918255", "_dom_classes": [], "description": "100%", "_model_name": "FloatProgressModel", @@ -352,30 +352,30 @@ "min": 0, "description_tooltip": null, "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_505597101151486ea29e9ab754544d27" + "layout": "IPY_MODEL_e257738711f54d5280c8393d9d3dce1c" } }, - "42ad5c1ea7be4835bffebf90642178f1": { + "261218485cef48df961519dde5edfcbe": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "state": { "_view_name": "HTMLView", - "style": "IPY_MODEL_de6e7b4b4a1c408c9f89d89b07a13bcd", + "style": "IPY_MODEL_beb7a6fe34b840899bb79c062681696f", "_dom_classes": [], "description": "", "_model_name": "HTMLModel", "placeholder": "​", "_view_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", - "value": " 21.1M/21.1M [00:01<00:00, 18.2MB/s]", + "value": " 21.1M/21.1M [00:00<00:00, 33.5MB/s]", "_view_count": null, "_view_module_version": "1.5.0", "description_tooltip": null, "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_f5cc9c7d4c274b2d81327ba3163c43fd" + "layout": "IPY_MODEL_e639132395d64d70b99d8b72c32f8fbb" } }, - "c58b5536d98f4814831934e9c30c4d78": { + "32736d503c06497abfae8c0421918255": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "state": { @@ -390,7 +390,7 @@ "_model_module": "@jupyter-widgets/controls" } }, - "505597101151486ea29e9ab754544d27": { + "e257738711f54d5280c8393d9d3dce1c": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -441,7 +441,7 @@ "left": null } }, - "de6e7b4b4a1c408c9f89d89b07a13bcd": { + "beb7a6fe34b840899bb79c062681696f": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "state": { @@ -455,7 +455,7 @@ "_model_module": "@jupyter-widgets/controls" } }, - "f5cc9c7d4c274b2d81327ba3163c43fd": { + "e639132395d64d70b99d8b72c32f8fbb": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -550,7 +550,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "c6ad57c2-40b7-4764-b07d-19ee2ceaabaf" + "outputId": "ae8805a9-ce15-4e1c-f6b4-baa1c1033f56" }, "source": [ "!git clone https://github.com/ultralytics/yolov5 # clone repo\n", @@ -563,12 +563,12 @@ "clear_output()\n", "print('Setup complete. Using torch %s %s' % (torch.__version__, torch.cuda.get_device_properties(0) if torch.cuda.is_available() else 'CPU'))" ], - "execution_count": null, + "execution_count": 1, "outputs": [ { "output_type": "stream", "text": [ - "Setup complete. Using torch 1.7.0+cu101 _CudaDeviceProperties(name='Tesla V100-SXM2-16GB', major=7, minor=0, total_memory=16130MB, multi_processor_count=80)\n" + "Setup complete. 
Using torch 1.7.0+cu101 _CudaDeviceProperties(name='Tesla V100-SXM2-16GB', major=7, minor=0, total_memory=16160MB, multi_processor_count=80)\n" ], "name": "stdout" } @@ -672,30 +672,30 @@ "base_uri": "https://localhost:8080/", "height": 65, "referenced_widgets": [ - "811fd52fef65422c8267bafcde8a2c3d", - "8f41b90117224eef9133a9c3a103dbba", - "ca2fb37af6ed43d4a74cdc9f2ac5c4a5", - "29419ae5ebb9403ea73f7e5a68037bdd", - "6511b4dfb10b48d1bc98bcfb3987bfa0", - "64f0badf1a8f489885aa984dd62d37dc", - "f569911c5cfc4d81bb1bdfa83447afc8", - "84943ade566440aaa2dcf3b3b27e7074" + "1f8e9b8ebded4175b2eaa9f75c3ceb00", + "0a1246a73077468ab80e979cc0576cd2", + "d327cde5a85a4a51bb8b1b3e9cf06c97", + "d5ef1cb2cbed4b87b3c5d292ff2b0da6", + "8d5dff8bca14435a88fa1814533acd85", + "3d5136c19e7645ca9bc8f51ceffb2be1", + "2919396dbd4b4c8e821d12bd28665d8a", + "6feb16f2b2fa4021b1a271e1dd442d04" ] }, - "outputId": "59a7a546-8492-492e-861d-70a2c85a6794" + "outputId": "d6ace7c6-1be5-41ff-d607-1c716b88d298" }, "source": [ "# Download COCO val2017\n", "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017val.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../ && rm tmp.zip" ], - "execution_count": null, + "execution_count": 2, "outputs": [ { "output_type": "display_data", "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "811fd52fef65422c8267bafcde8a2c3d", + "model_id": "1f8e9b8ebded4175b2eaa9f75c3ceb00", "version_minor": 0, "version_major": 2 }, @@ -723,46 +723,45 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "427c211e-e283-4e87-f7b3-7b8dfb11a4a5" + "outputId": "cc25f70c-0a11-44f6-cc44-e92c5083488c" }, "source": [ "# Run YOLOv5x on COCO val2017\n", "!python test.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65" ], - "execution_count": null, + "execution_count": 3, "outputs": [ { "output_type": "stream", "text": [ "Namespace(augment=False, batch_size=32, conf_thres=0.001, data='./data/coco.yaml', device='', exist_ok=False, img_size=640, iou_thres=0.65, name='exp', project='runs/test', save_conf=False, save_hybrid=False, save_json=True, save_txt=False, single_cls=False, task='val', verbose=False, weights=['yolov5x.pt'])\n", - "YOLOv5 v4.0-21-gb26a2f6 torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16130.5MB)\n", + "YOLOv5 v4.0-75-gbdd88e1 torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", "\n", "Downloading https://github.com/ultralytics/yolov5/releases/download/v4.0/yolov5x.pt to yolov5x.pt...\n", - "100% 168M/168M [00:05<00:00, 31.9MB/s]\n", + "100% 168M/168M [00:04<00:00, 39.7MB/s]\n", "\n", "Fusing layers... \n", "Model Summary: 476 layers, 87730285 parameters, 0 gradients, 218.8 GFLOPS\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco/labels/val2017' for images and labels... 4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:01<00:00, 2791.81it/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: ../coco/labels/val2017.cache\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco/labels/val2017.cache' for images and labels... 4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:00<00:00, 13332180.55it/s]\n", - " Class Images Targets P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:30<00:00, 1.73it/s]\n", - " all 5e+03 3.63e+04 0.419 0.765 0.68 0.486\n", - "Speed: 5.2/2.0/7.2 ms inference/NMS/total per 640x640 image at batch-size 32\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco/val2017' for images and labels... 
4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:01<00:00, 2824.78it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: ../coco/val2017.cache\n", + " Class Images Targets P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:33<00:00, 1.68it/s]\n", + " all 5e+03 3.63e+04 0.749 0.619 0.68 0.486\n", + "Speed: 5.2/2.0/7.3 ms inference/NMS/total per 640x640 image at batch-size 32\n", "\n", "Evaluating pycocotools mAP... saving runs/test/exp/yolov5x_predictions.json...\n", "loading annotations into memory...\n", - "Done (t=0.41s)\n", + "Done (t=0.44s)\n", "creating index...\n", "index created!\n", "Loading and preparing results...\n", - "DONE (t=5.26s)\n", + "DONE (t=4.47s)\n", "creating index...\n", "index created!\n", "Running per image evaluation...\n", "Evaluate annotation type *bbox*\n", - "DONE (t=93.97s).\n", + "DONE (t=94.87s).\n", "Accumulating evaluation results...\n", - "DONE (t=15.06s).\n", + "DONE (t=15.96s).\n", " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.501\n", " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.687\n", " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.544\n", @@ -837,30 +836,30 @@ "base_uri": "https://localhost:8080/", "height": 65, "referenced_widgets": [ - "8501ed1563e4452eac9df6b7a66e8f8c", - "d2bb96801e1f46f4a58e02534f7026ff", - "468a796ef06b4a24bcba6fbd4a0a8db5", - "42ad5c1ea7be4835bffebf90642178f1", - "c58b5536d98f4814831934e9c30c4d78", - "505597101151486ea29e9ab754544d27", - "de6e7b4b4a1c408c9f89d89b07a13bcd", - "f5cc9c7d4c274b2d81327ba3163c43fd" + "e6459e0bcee449b090fc9807672725bc", + "c341e1d3bf3b40d1821ce392eb966c68", + "660afee173694231a6dce3cd94df6cae", + "261218485cef48df961519dde5edfcbe", + "32736d503c06497abfae8c0421918255", + "e257738711f54d5280c8393d9d3dce1c", + "beb7a6fe34b840899bb79c062681696f", + "e639132395d64d70b99d8b72c32f8fbb" ] }, - "outputId": "c68a3db4-1314-46b4-9e52-83532eb65749" + "outputId": "e8b7d5b3-a71e-4446-eec2-ad13419cf700" }, "source": [ "# Download COCO128\n", "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../ && rm tmp.zip" ], - "execution_count": null, + "execution_count": 4, "outputs": [ { "output_type": "display_data", "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "8501ed1563e4452eac9df6b7a66e8f8c", + "model_id": "e6459e0bcee449b090fc9807672725bc", "version_minor": 0, "version_major": 2 }, @@ -925,27 +924,27 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "6af7116a-01ab-4b94-e5d7-b37c17dc95de" + "outputId": "38e51b29-2df4-4f00-cde8-5f6e4a34da9e" }, "source": [ "# Train YOLOv5s on COCO128 for 3 epochs\n", "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --nosave --cache" ], - "execution_count": null, + "execution_count": 5, "outputs": [ { "output_type": "stream", "text": [ "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 v4.0-21-gb26a2f6 torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16130.5MB)\n", + "YOLOv5 v4.0-75-gbdd88e1 torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", "\n", - "Namespace(adam=False, batch_size=16, bucket='', cache_images=True, cfg='', data='./data/coco128.yaml', device='', epochs=3, evolve=False, exist_ok=False, global_rank=-1, hyp='data/hyp.scratch.yaml', image_weights=False, img_size=[640, 640], local_rank=-1, log_artifacts=False, log_imgs=16, multi_scale=False, 
name='exp', noautoanchor=False, nosave=True, notest=False, project='runs/train', quad=False, rect=False, resume=False, save_dir='runs/train/exp', single_cls=False, sync_bn=False, total_batch_size=16, weights='yolov5s.pt', workers=8, world_size=1)\n", + "Namespace(adam=False, batch_size=16, bucket='', cache_images=True, cfg='', data='./data/coco128.yaml', device='', epochs=3, evolve=False, exist_ok=False, global_rank=-1, hyp='data/hyp.scratch.yaml', image_weights=False, img_size=[640, 640], linear_lr=False, local_rank=-1, log_artifacts=False, log_imgs=16, multi_scale=False, name='exp', noautoanchor=False, nosave=True, notest=False, project='runs/train', quad=False, rect=False, resume=False, save_dir='runs/train/exp', single_cls=False, sync_bn=False, total_batch_size=16, weights='yolov5s.pt', workers=8, world_size=1)\n", "\u001b[34m\u001b[1mwandb: \u001b[0mInstall Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)\n", "Start Tensorboard with \"tensorboard --logdir runs/train\", view at http://localhost:6006/\n", - "2021-01-17 19:56:03.945851: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.10.1\n", + "2021-02-12 06:38:28.027271: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.10.1\n", "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.2, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0\n", "Downloading https://github.com/ultralytics/yolov5/releases/download/v4.0/yolov5s.pt to yolov5s.pt...\n", - "100% 14.1M/14.1M [00:00<00:00, 15.8MB/s]\n", + "100% 14.1M/14.1M [00:01<00:00, 13.2MB/s]\n", "\n", "\n", " from n params module arguments \n", @@ -979,12 +978,11 @@ "Transferred 362/362 items from yolov5s.pt\n", "Scaled weight_decay = 0.0005\n", "Optimizer groups: 62 .bias, 62 conv.weight, 59 other\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '../coco128/labels/train2017' for images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 2647.74it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '../coco128/labels/train2017' for images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 2566.00it/s]\n", "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: ../coco128/labels/train2017.cache\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '../coco128/labels/train2017.cache' for images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 1503840.09it/s]\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 176.03it/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco128/labels/train2017.cache' for images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 24200.82it/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:01<00:00, 123.25it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 175.07it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco128/labels/train2017.cache' for images and labels... 
128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 764773.38it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 128.17it/s]\n", "Plotting labels... \n", "\n", "\u001b[34m\u001b[1mautoanchor: \u001b[0mAnalyzing anchors... anchors/target = 4.26, Best Possible Recall (BPR) = 0.9946\n", @@ -994,19 +992,19 @@ "Starting training for 3 epochs...\n", "\n", " Epoch gpu_mem box obj cls total targets img_size\n", - " 0/2 3.27G 0.04357 0.06779 0.01869 0.1301 207 640: 100% 8/8 [00:04<00:00, 1.95it/s]\n", - " Class Images Targets P R mAP@.5 mAP@.5:.95: 100% 8/8 [00:05<00:00, 1.36it/s]\n", - " all 128 929 0.392 0.732 0.657 0.428\n", + " 0/2 3.27G 0.04357 0.06781 0.01869 0.1301 207 640: 100% 8/8 [00:03<00:00, 2.03it/s]\n", + " Class Images Targets P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:04<00:00, 1.14s/it]\n", + " all 128 929 0.646 0.627 0.659 0.431\n", "\n", " Epoch gpu_mem box obj cls total targets img_size\n", - " 1/2 7.47G 0.04308 0.06636 0.02083 0.1303 227 640: 100% 8/8 [00:02<00:00, 3.88it/s]\n", - " Class Images Targets P R mAP@.5 mAP@.5:.95: 100% 8/8 [00:01<00:00, 5.07it/s]\n", - " all 128 929 0.387 0.737 0.657 0.432\n", + " 1/2 7.75G 0.04308 0.06654 0.02083 0.1304 227 640: 100% 8/8 [00:01<00:00, 4.11it/s]\n", + " Class Images Targets P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:01<00:00, 2.94it/s]\n", + " all 128 929 0.681 0.607 0.663 0.434\n", "\n", " Epoch gpu_mem box obj cls total targets img_size\n", - " 2/2 7.48G 0.04461 0.06864 0.01866 0.1319 191 640: 100% 8/8 [00:02<00:00, 3.57it/s]\n", - " Class Images Targets P R mAP@.5 mAP@.5:.95: 100% 8/8 [00:02<00:00, 2.82it/s]\n", - " all 128 929 0.385 0.742 0.658 0.431\n", + " 2/2 7.75G 0.04461 0.06896 0.01866 0.1322 191 640: 100% 8/8 [00:02<00:00, 3.94it/s]\n", + " Class Images Targets P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:03<00:00, 1.22it/s]\n", + " all 128 929 0.642 0.632 0.662 0.432\n", "Optimizer stripped from runs/train/exp/weights/last.pt, 14.8MB\n", "3 epochs completed in 0.007 hours.\n", "\n" @@ -1238,4 +1236,4 @@ "outputs": [] } ] -} +} \ No newline at end of file From 3e560e2faeeaba00adbb2c8e72716c0a133dd917 Mon Sep 17 00:00:00 2001 From: Daniel Khromov Date: Sat, 13 Feb 2021 02:37:51 +0300 Subject: [PATCH 0003/1976] YOLOv5 PyTorch Hub results.save() method retains filenames (#2194) * save results with name * debug * save original imgs names * Update common.py Co-authored-by: Glenn Jocher --- models/common.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/models/common.py b/models/common.py index 7cfea01f223e..4f4f331da583 100644 --- a/models/common.py +++ b/models/common.py @@ -196,10 +196,11 @@ def forward(self, imgs, size=640, augment=False, profile=False): # Pre-process n, imgs = (len(imgs), imgs) if isinstance(imgs, list) else (1, [imgs]) # number of images, list of images - shape0, shape1 = [], [] # image and inference shapes + shape0, shape1, files = [], [], [] # image and inference shapes, filenames for i, im in enumerate(imgs): if isinstance(im, str): # filename or uri im = Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im) # open + files.append(Path(im.filename).with_suffix('.jpg').name if isinstance(im, Image.Image) else f'image{i}.jpg') im = np.array(im) # to numpy if im.shape[0] < 5: # image in CHW im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1) @@ -224,18 +225,19 @@ def forward(self, imgs, size=640, augment=False, profile=False): for i in range(n): scale_coords(shape1, y[i][:, :4], shape0[i]) 
- return Detections(imgs, y, self.names) + return Detections(imgs, y, files, self.names) class Detections: # detections class for YOLOv5 inference results - def __init__(self, imgs, pred, names=None): + def __init__(self, imgs, pred, files, names=None): super(Detections, self).__init__() d = pred[0].device # device gn = [torch.tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.], device=d) for im in imgs] # normalizations self.imgs = imgs # list of images as numpy arrays self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls) self.names = names # class names + self.files = files # image filenames self.xyxy = pred # xyxy pixels self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized @@ -258,9 +260,9 @@ def display(self, pprint=False, show=False, save=False, render=False, save_dir=' if pprint: print(str.rstrip(', ')) if show: - img.show(f'image {i}') # show + img.show(self.files[i]) # show if save: - f = Path(save_dir) / f'results{i}.jpg' + f = Path(save_dir) / self.files[i] img.save(f) # save print(f"{'Saving' * (i == 0)} {f},", end='' if i < self.n - 1 else ' done.\n') if render: @@ -272,7 +274,8 @@ def print(self): def show(self): self.display(show=True) # show results - def save(self, save_dir=''): + def save(self, save_dir='results/'): + Path(save_dir).mkdir(exist_ok=True) self.display(save=True, save_dir=save_dir) # save results def render(self): From 3ff783c18f32ec790bba5d7ca2b8d067ecd2160b Mon Sep 17 00:00:00 2001 From: VdLMV Date: Mon, 15 Feb 2021 19:49:22 +0100 Subject: [PATCH 0004/1976] TTA augument boxes one pixel shifted in de-flip ud and lr (#2219) * TTA augument boxes one pixel shifted in de-flip ud and lr * PEP8 reformat Co-authored-by: Jaap van de Loosdrecht Co-authored-by: Glenn Jocher --- models/yolo.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/models/yolo.py b/models/yolo.py index 11e6a65921a4..704d0e6d260d 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -110,9 +110,9 @@ def forward(self, x, augment=False, profile=False): # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1]) # save yi[..., :4] /= si # de-scale if fi == 2: - yi[..., 1] = img_size[0] - yi[..., 1] # de-flip ud + yi[..., 1] = img_size[0] - 1 - yi[..., 1] # de-flip ud elif fi == 3: - yi[..., 0] = img_size[1] - yi[..., 0] # de-flip lr + yi[..., 0] = img_size[1] - 1 - yi[..., 0] # de-flip lr y.append(yi) return torch.cat(y, 1), None # augmented inference, train else: From 7b833e37bf074758c94d66b3bf439582d0a08dfe Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 15 Feb 2021 11:02:20 -0800 Subject: [PATCH 0005/1976] LoadStreams() frame loss bug fix (#2222) --- utils/datasets.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/datasets.py b/utils/datasets.py index 29a8812a20a2..4f2939d4bef2 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -300,7 +300,8 @@ def update(self, index, cap): # _, self.imgs[index] = cap.read() cap.grab() if n == 4: # read every 4th frame - _, self.imgs[index] = cap.retrieve() + success, im = cap.retrieve() + self.imgs[index] = im if success else self.imgs[index] * 0 n = 0 time.sleep(0.01) # wait time From f8464b4f66e627ed2778c9a27dbe4a8642482baf Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 15 Feb 2021 21:21:53 -0800 Subject: [PATCH 0006/1976] Update yolo.py channel array (#2223) --- models/yolo.py | 35 ++++++++++------------------------- 1 file changed, 10 insertions(+), 25 deletions(-) diff --git 
a/models/yolo.py b/models/yolo.py index 704d0e6d260d..41817098ccbc 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -2,7 +2,6 @@ import logging import sys from copy import deepcopy -from pathlib import Path sys.path.append('./') # to run '$ python *.py' files in subdirectories logger = logging.getLogger(__name__) @@ -213,43 +212,27 @@ def parse_model(d, ch): # model_dict, input_channels(3) if m in [Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP, C3]: c1, c2 = ch[f], args[0] - - # Normal - # if i > 0 and args[0] != no: # channel expansion factor - # ex = 1.75 # exponential (default 2.0) - # e = math.log(c2 / ch[1]) / math.log(2) - # c2 = int(ch[1] * ex ** e) - # if m != Focus: - - c2 = make_divisible(c2 * gw, 8) if c2 != no else c2 - - # Experimental - # if i > 0 and args[0] != no: # channel expansion factor - # ex = 1 + gw # exponential (default 2.0) - # ch1 = 32 # ch[1] - # e = math.log(c2 / ch1) / math.log(2) # level 1-n - # c2 = int(ch1 * ex ** e) - # if m != Focus: - # c2 = make_divisible(c2, 8) if c2 != no else c2 + if c2 != no: # if not output + c2 = make_divisible(c2 * gw, 8) args = [c1, c2, *args[1:]] if m in [BottleneckCSP, C3]: - args.insert(2, n) + args.insert(2, n) # number of repeats n = 1 elif m is nn.BatchNorm2d: args = [ch[f]] elif m is Concat: - c2 = sum([ch[x if x < 0 else x + 1] for x in f]) + c2 = sum([ch[x] for x in f]) elif m is Detect: - args.append([ch[x + 1] for x in f]) + args.append([ch[x] for x in f]) if isinstance(args[1], int): # number of anchors args[1] = [list(range(args[1] * 2))] * len(f) elif m is Contract: - c2 = ch[f if f < 0 else f + 1] * args[0] ** 2 + c2 = ch[f] * args[0] ** 2 elif m is Expand: - c2 = ch[f if f < 0 else f + 1] // args[0] ** 2 + c2 = ch[f] // args[0] ** 2 else: - c2 = ch[f if f < 0 else f + 1] + c2 = ch[f] m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args) # module t = str(m)[8:-2].replace('__main__.', '') # module type @@ -258,6 +241,8 @@ def parse_model(d, ch): # model_dict, input_channels(3) logger.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args)) # print save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist layers.append(m_) + if i == 0: + ch = [] ch.append(c2) return nn.Sequential(*layers), sorted(save) From 26c2e54c8f97e66b646f92932eb521901d69f889 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 16 Feb 2021 13:56:47 -0800 Subject: [PATCH 0007/1976] Add check_imshow() (#2231) * Add check_imshow() * Update general.py * Update general.py --- detect.py | 8 ++++---- utils/general.py | 13 +++++++++++++ 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/detect.py b/detect.py index 3f1d6c521b67..22bf21b4c825 100644 --- a/detect.py +++ b/detect.py @@ -9,8 +9,8 @@ from models.experimental import attempt_load from utils.datasets import LoadStreams, LoadImages -from utils.general import check_img_size, check_requirements, non_max_suppression, apply_classifier, scale_coords, \ - xyxy2xywh, strip_optimizer, set_logging, increment_path +from utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier, \ + scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path from utils.plots import plot_one_box from utils.torch_utils import select_device, load_classifier, time_synchronized @@ -45,7 +45,7 @@ def detect(save_img=False): # Set Dataloader vid_path, vid_writer = None, None if webcam: - view_img = True + view_img = check_imshow() 
cudnn.benchmark = True # set True to speed up constant image size inference dataset = LoadStreams(source, img_size=imgsz, stride=stride) else: @@ -118,7 +118,7 @@ def detect(save_img=False): # Stream results if view_img: cv2.imshow(str(p), im0) - cv2.waitKey(1) # 1 millisecond + cv2.waitKey(1) # 1 millisecond # Save results (image with detections) if save_img: diff --git a/utils/general.py b/utils/general.py index 24807483f5f4..2d3e83ede35e 100755 --- a/utils/general.py +++ b/utils/general.py @@ -95,6 +95,19 @@ def check_img_size(img_size, s=32): return new_size +def check_imshow(): + # Check if environment supports image displays + try: + cv2.imshow('test', np.zeros((1, 1, 3))) + cv2.waitKey(1) + cv2.destroyAllWindows() + cv2.waitKey(1) + return True + except Exception as e: + print(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image previews\n{e}') + return False + + def check_file(file): # Search for file if not found if os.path.isfile(file) or file == '': From 5a40ce65ce215a79949b96f4ac2e6f4da90256ad Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 16 Feb 2021 15:27:24 -0800 Subject: [PATCH 0008/1976] Update CI badge (#2230) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 3c14071698c5..233fc17f1c35 100755 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@   -![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg) +CI CPU testing This repository represents Ultralytics open-source research into future object detection methods, and incorporates lessons learned and best practices evolved over thousands of hours of training and evolution on anonymized client datasets. **All code and models are under active development, and are subject to modification or deletion without notice.** Use at your own risk. 
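As a usage illustration for the check_imshow() helper introduced in the patch above: a minimal, self-contained sketch, assuming OpenCV is installed; the guard logic mirrors utils/general.py from the patch, while the window name 'stream' and the dummy frame are illustrative assumptions, not part of the repo.

    import cv2
    import numpy as np

    def check_imshow():
        # Probe whether this environment supports image display (logic as in the patch above)
        try:
            cv2.imshow('test', np.zeros((1, 1, 3)))
            cv2.waitKey(1)
            cv2.destroyAllWindows()
            cv2.waitKey(1)
            return True
        except Exception as e:
            print(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}')
            return False

    view_img = check_imshow()  # returns False in headless environments (e.g. Docker without a display)
    frame = np.zeros((480, 640, 3), dtype=np.uint8)  # dummy frame, illustrative only
    if view_img:
        cv2.imshow('stream', frame)  # 'stream' is an arbitrary window name for this sketch
        cv2.waitKey(1)  # 1 millisecond

Because the probe runs once up front, downstream display calls can be gated on a single boolean instead of wrapping every cv2.imshow() in its own try/except, which is the design detect.py adopts in the patch.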
From d2e754b67bc08d3634df05932cc94d8c9314a7b1 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 16 Feb 2021 15:58:07 -0800 Subject: [PATCH 0009/1976] Add isdocker() (#2232) * Add isdocker() * Update general.py * Update general.py --- utils/general.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/utils/general.py b/utils/general.py index 2d3e83ede35e..64b360fbe7df 100755 --- a/utils/general.py +++ b/utils/general.py @@ -47,6 +47,11 @@ def get_latest_run(search_dir='.'): return max(last_list, key=os.path.getctime) if last_list else '' +def isdocker(): + # Is environment a Docker container + return Path('/workspace').exists() # or Path('/.dockerenv').exists() + + def check_online(): # Check internet connectivity import socket @@ -62,7 +67,7 @@ def check_git_status(): print(colorstr('github: '), end='') try: assert Path('.git').exists(), 'skipping check (not a git repository)' - assert not Path('/workspace').exists(), 'skipping check (Docker image)' # not Path('/.dockerenv').exists() + assert not isdocker(), 'skipping check (Docker image)' assert check_online(), 'skipping check (offline)' cmd = 'git fetch && git config --get remote.origin.url' @@ -98,13 +103,14 @@ def check_img_size(img_size, s=32): def check_imshow(): # Check if environment supports image displays try: + assert not isdocker(), 'cv2.imshow() is disabled in Docker environments' cv2.imshow('test', np.zeros((1, 1, 3))) cv2.waitKey(1) cv2.destroyAllWindows() cv2.waitKey(1) return True except Exception as e: - print(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image previews\n{e}') + print(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}') return False From 9d873077841434d1c6cbd1c4248ca2252820d3ba Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 19 Feb 2021 11:22:42 -0800 Subject: [PATCH 0010/1976] YOLOv5 Hub URL inference bug fix (#2250) * Update common.py * Update common.py * Update common.py --- models/common.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/models/common.py b/models/common.py index 4f4f331da583..f24ea7885668 100644 --- a/models/common.py +++ b/models/common.py @@ -199,7 +199,8 @@ def forward(self, imgs, size=640, augment=False, profile=False): shape0, shape1, files = [], [], [] # image and inference shapes, filenames for i, im in enumerate(imgs): if isinstance(im, str): # filename or uri - im = Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im) # open + im, f = Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im), im # open + im.filename = f # for uri files.append(Path(im.filename).with_suffix('.jpg').name if isinstance(im, Image.Image) else f'image{i}.jpg') im = np.array(im) # to numpy if im.shape[0] < 5: # image in CHW @@ -253,7 +254,7 @@ def display(self, pprint=False, show=False, save=False, render=False, save_dir=' n = (pred[:, -1] == c).sum() # detections per class str += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string if show or save or render: - img = Image.fromarray(img.astype(np.uint8)) if isinstance(img, np.ndarray) else img # from np + img = Image.fromarray(img) if isinstance(img, np.ndarray) else img # from np for *box, conf, cls in pred: # xyxy, confidence, class # str += '%s %.2f, ' % (names[int(cls)], conf) # label ImageDraw.Draw(img).rectangle(box, width=4, outline=colors[int(cls) % 10]) # plot From db28ce61acbeec9eaeb1577ccd417796ca138ee8 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 
19 Feb 2021 12:35:38 -0800 Subject: [PATCH 0011/1976] Improved hubconf.py CI tests (#2251) --- hubconf.py | 9 +++++++-- models/common.py | 2 +- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/hubconf.py b/hubconf.py index 2a34813310e8..47eee4477725 100644 --- a/hubconf.py +++ b/hubconf.py @@ -133,9 +133,14 @@ def custom(path_or_model='path/to/model.pt', autoshape=True): # model = custom(path_or_model='path/to/model.pt') # custom example # Verify inference + import numpy as np from PIL import Image - imgs = [Image.open(x) for x in Path('data/images').glob('*.jpg')] - results = model(imgs) + imgs = [Image.open('data/images/bus.jpg'), # PIL + 'data/images/zidane.jpg', # filename + 'https://github.com/ultralytics/yolov5/raw/master/data/images/bus.jpg', # URI + np.zeros((640, 480, 3))] # numpy + + results = model(imgs) # batched inference results.print() results.save() diff --git a/models/common.py b/models/common.py index f24ea7885668..e8e5ff1eb2c1 100644 --- a/models/common.py +++ b/models/common.py @@ -254,7 +254,7 @@ def display(self, pprint=False, show=False, save=False, render=False, save_dir=' n = (pred[:, -1] == c).sum() # detections per class str += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string if show or save or render: - img = Image.fromarray(img) if isinstance(img, np.ndarray) else img # from np + img = Image.fromarray(img.astype(np.uint8)) if isinstance(img, np.ndarray) else img # from np for *box, conf, cls in pred: # xyxy, confidence, class # str += '%s %.2f, ' % (names[int(cls)], conf) # label ImageDraw.Draw(img).rectangle(box, width=4, outline=colors[int(cls) % 10]) # plot From 5f42643a53125ccc450add998401e3529d9d59d1 Mon Sep 17 00:00:00 2001 From: Yann Defretin Date: Fri, 19 Feb 2021 21:38:05 +0100 Subject: [PATCH 0012/1976] Unified hub and detect.py box and labels plotting (#2243) --- models/common.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/models/common.py b/models/common.py index e8e5ff1eb2c1..efcc6071af63 100644 --- a/models/common.py +++ b/models/common.py @@ -11,7 +11,7 @@ from utils.datasets import letterbox from utils.general import non_max_suppression, make_divisible, scale_coords, xyxy2xywh -from utils.plots import color_list +from utils.plots import color_list, plot_one_box def autopad(k, p=None): # kernel, padding @@ -254,10 +254,10 @@ def display(self, pprint=False, show=False, save=False, render=False, save_dir=' n = (pred[:, -1] == c).sum() # detections per class str += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string if show or save or render: - img = Image.fromarray(img.astype(np.uint8)) if isinstance(img, np.ndarray) else img # from np for *box, conf, cls in pred: # xyxy, confidence, class - # str += '%s %.2f, ' % (names[int(cls)], conf) # label - ImageDraw.Draw(img).rectangle(box, width=4, outline=colors[int(cls) % 10]) # plot + label = f'{self.names[int(cls)]} {conf:.2f}' + plot_one_box(box, img, label=label, color=colors[int(cls) % 10]) + img = Image.fromarray(img.astype(np.uint8)) if isinstance(img, np.ndarray) else img # from np if pprint: print(str.rstrip(', ')) if show: From 47faf95079d004b6114058fc9fa802190cbb95c5 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 19 Feb 2021 15:20:41 -0800 Subject: [PATCH 0013/1976] reset head --- utils/plots.py | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/utils/plots.py b/utils/plots.py index 3ec793528fe5..94f46a9a4026 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -15,7 +15,7 @@ import seaborn 
as sns import torch import yaml -from PIL import Image, ImageDraw +from PIL import Image, ImageDraw, ImageFont from scipy.signal import butter, filtfilt from utils.general import xywh2xyxy, xyxy2xywh @@ -68,6 +68,20 @@ def plot_one_box(x, img, color=None, label=None, line_thickness=None): cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA) +def plot_one_box_PIL(box, img, color=None, label=None, line_thickness=None): + img = Image.fromarray(img) + draw = ImageDraw.Draw(img) + line_thickness = line_thickness or max(int(min(img.size) / 200), 2) + draw.rectangle(box, width=line_thickness, outline=tuple(color)) # plot + if label: + fontsize = max(round(max(img.size) / 40), 12) + font = ImageFont.truetype("Arial.ttf", fontsize) + txt_width, txt_height = font.getsize(label) + draw.rectangle([box[0], box[1] - txt_height + 4, box[0] + txt_width, box[1]], fill=tuple(color)) + draw.text((box[0], box[1] - txt_height + 1), label, fill=(255, 255, 255), font=font) + return np.asarray(img) + + def plot_wh_methods(): # from utils.plots import *; plot_wh_methods() # Compares the two methods for width-height anchor multiplication # https://github.com/ultralytics/yolov3/issues/168 From c09964c27cc275c8e32630715cca5be77078dae2 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 19 Feb 2021 15:39:09 -0800 Subject: [PATCH 0014/1976] Update inference default to multi_label=False (#2252) * Update inference default to multi_label=False * bug fix * Update plots.py * Update plots.py --- models/common.py | 2 +- test.py | 8 ++++---- utils/general.py | 9 +++++---- utils/plots.py | 2 +- 4 files changed, 11 insertions(+), 10 deletions(-) diff --git a/models/common.py b/models/common.py index efcc6071af63..ad35f908d865 100644 --- a/models/common.py +++ b/models/common.py @@ -7,7 +7,7 @@ import requests import torch import torch.nn as nn -from PIL import Image, ImageDraw +from PIL import Image from utils.datasets import letterbox from utils.general import non_max_suppression, make_divisible, scale_coords, xyxy2xywh diff --git a/test.py b/test.py index 738764f15601..c30148dfb2f1 100644 --- a/test.py +++ b/test.py @@ -106,7 +106,7 @@ def test(data, with torch.no_grad(): # Run model t = time_synchronized() - inf_out, train_out = model(img, augment=augment) # inference and training outputs + out, train_out = model(img, augment=augment) # inference and training outputs t0 += time_synchronized() - t # Compute loss @@ -117,11 +117,11 @@ def test(data, targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling t = time_synchronized() - output = non_max_suppression(inf_out, conf_thres=conf_thres, iou_thres=iou_thres, labels=lb) + out = non_max_suppression(out, conf_thres=conf_thres, iou_thres=iou_thres, labels=lb, multi_label=True) t1 += time_synchronized() - t # Statistics per image - for si, pred in enumerate(output): + for si, pred in enumerate(out): labels = targets[targets[:, 0] == si, 1:] nl = len(labels) tcls = labels[:, 0].tolist() if nl else [] # target class @@ -209,7 +209,7 @@ def test(data, f = save_dir / f'test_batch{batch_i}_labels.jpg' # labels Thread(target=plot_images, args=(img, targets, paths, f, names), daemon=True).start() f = save_dir / f'test_batch{batch_i}_pred.jpg' # predictions - Thread(target=plot_images, args=(img, output_to_target(output), paths, f, names), daemon=True).start() + Thread(target=plot_images, args=(img, 
output_to_target(out), paths, f, names), daemon=True).start() # Compute statistics stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy diff --git a/utils/general.py b/utils/general.py index 64b360fbe7df..3b5f4629b00a 100755 --- a/utils/general.py +++ b/utils/general.py @@ -390,11 +390,12 @@ def wh_iou(wh1, wh2): return inter / (wh1.prod(2) + wh2.prod(2) - inter) # iou = inter / (area1 + area2 - inter) -def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, labels=()): - """Performs Non-Maximum Suppression (NMS) on inference results +def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False, + labels=()): + """Runs Non-Maximum Suppression (NMS) on inference results Returns: - detections with shape: nx6 (x1, y1, x2, y2, conf, cls) + list of detections, on (n,6) tensor per image [xyxy, conf, cls] """ nc = prediction.shape[2] - 5 # number of classes @@ -406,7 +407,7 @@ def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=Non max_nms = 30000 # maximum number of boxes into torchvision.ops.nms() time_limit = 10.0 # seconds to quit after redundant = True # require redundant detections - multi_label = nc > 1 # multiple labels per box (adds 0.5ms/img) + multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img) merge = False # use merge-NMS t = time.time() diff --git a/utils/plots.py b/utils/plots.py index 94f46a9a4026..aa9a1cab81f0 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -54,7 +54,7 @@ def butter_lowpass(cutoff, fs, order): return filtfilt(b, a, data) # forward-backward filter -def plot_one_box(x, img, color=None, label=None, line_thickness=None): +def plot_one_box(x, img, color=None, label=None, line_thickness=3): # Plots one bounding box on image img tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness color = color or [random.randint(0, 255) for _ in range(3)] From 6f5d6fcdaa8c1c5b24a06fdf9fd4e12c781fb4f7 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 20 Feb 2021 11:19:01 -0800 Subject: [PATCH 0015/1976] Robust objectness loss balancing (#2256) --- utils/loss.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/loss.py b/utils/loss.py index 481d25e207f2..2302d18de87d 100644 --- a/utils/loss.py +++ b/utils/loss.py @@ -105,8 +105,8 @@ def __init__(self, model, autobalance=False): BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) det = model.module.model[-1] if is_parallel(model) else model.model[-1] # Detect() module - self.balance = {3: [4.0, 1.0, 0.4], 4: [4.0, 1.0, 0.25, 0.06], 5: [4.0, 1.0, 0.25, 0.06, .02]}[det.nl] - self.ssi = (det.stride == 16).nonzero(as_tuple=False).item() # stride 16 index + self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, .02]) # P3-P7 + self.ssi = list(det.stride).index(16) if autobalance else 0 # stride 16 index self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, model.gr, h, autobalance for k in 'na', 'nc', 'nl', 'anchors': setattr(self, k, getattr(det, k)) From 095d2c11d89892cd9c0c4d034cd1c768a0dba11c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 20 Feb 2021 13:21:58 -0800 Subject: [PATCH 0016/1976] Created using Colaboratory --- tutorial.ipynb | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 7587d9f536fe..7fce40c3824e 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -563,7 +563,7 @@ 
"clear_output()\n", "print('Setup complete. Using torch %s %s' % (torch.__version__, torch.cuda.get_device_properties(0) if torch.cuda.is_available() else 'CPU'))" ], - "execution_count": 1, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -689,7 +689,7 @@ "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017val.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../ && rm tmp.zip" ], - "execution_count": 2, + "execution_count": null, "outputs": [ { "output_type": "display_data", @@ -729,7 +729,7 @@ "# Run YOLOv5x on COCO val2017\n", "!python test.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65" ], - "execution_count": 3, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -853,7 +853,7 @@ "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../ && rm tmp.zip" ], - "execution_count": 4, + "execution_count": null, "outputs": [ { "output_type": "display_data", @@ -930,7 +930,7 @@ "# Train YOLOv5s on COCO128 for 3 epochs\n", "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --nosave --cache" ], - "execution_count": 5, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -1222,6 +1222,19 @@ "execution_count": null, "outputs": [] }, + { + "cell_type": "code", + "metadata": { + "id": "RVRSOhEvUdb5" + }, + "source": [ + "# Evolve\n", + "!python train.py --img 640 --batch 64 --epochs 100 --data coco128.yaml --weights yolov5s.pt --cache --noautoanchor --evolve\n", + "!d=runs/train/evolve && cp evolve.* $d && zip -r evolve.zip $d && gsutil mv evolve.zip gs://bucket # upload results (optional)" + ], + "execution_count": null, + "outputs": [] + }, { "cell_type": "code", "metadata": { From e27ca0d8455ad91ec52e4dfd757825e653508bde Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 21 Feb 2021 21:46:42 -0800 Subject: [PATCH 0017/1976] Update minimum stride to 32 (#2266) --- test.py | 5 +++-- train.py | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/test.py b/test.py index c30148dfb2f1..ecd45f5f4943 100644 --- a/test.py +++ b/test.py @@ -52,7 +52,8 @@ def test(data, # Load model model = attempt_load(weights, map_location=device) # load FP32 model - imgsz = check_img_size(imgsz, s=model.stride.max()) # check img_size + gs = max(int(model.stride.max()), 32) # grid size (max stride) + imgsz = check_img_size(imgsz, s=gs) # check img_size # Multi-GPU disabled, incompatible with .half() https://github.com/ultralytics/yolov5/issues/99 # if device.type != 'cpu' and torch.cuda.device_count() > 1: @@ -85,7 +86,7 @@ def test(data, if device.type != 'cpu': model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once path = data['test'] if opt.task == 'test' else data['val'] # path to val/test images - dataloader = create_dataloader(path, imgsz, batch_size, model.stride.max(), opt, pad=0.5, rect=True, + dataloader = create_dataloader(path, imgsz, batch_size, gs, opt, pad=0.5, rect=True, prefix=colorstr('test: ' if opt.task == 'test' else 'val: '))[0] seen = 0 diff --git a/train.py b/train.py index 4065e1f149ef..e19cfa81d8da 100644 --- a/train.py +++ b/train.py @@ -161,7 +161,7 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): del ckpt, state_dict # Image sizes - gs = int(model.stride.max()) # grid size (max stride) + gs = max(int(model.stride.max()), 32) # grid size (max stride) nl = model.model[-1].nl # number 
of detection layers (used for scaling hyp['obj']) imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size] # verify imgsz are gs-multiples From 95aefea49374a1fe867794971c76337526a4d6cb Mon Sep 17 00:00:00 2001 From: Aditya Lohia <64709773+aditya-dl@users.noreply.github.com> Date: Mon, 22 Feb 2021 11:20:44 +0530 Subject: [PATCH 0018/1976] Dynamic ONNX engine generation (#2208) * add: dynamic onnx export * delete: test onnx inference * fix dynamic output axis * Code reduction * fix: dynamic output axes, dynamic input naming * Remove fixed axes Co-authored-by: Shivam Swanrkar Co-authored-by: Glenn Jocher --- models/export.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/models/export.py b/models/export.py index 057658af53dc..cc817871f218 100644 --- a/models/export.py +++ b/models/export.py @@ -22,6 +22,7 @@ parser = argparse.ArgumentParser() parser.add_argument('--weights', type=str, default='./yolov5s.pt', help='weights path') # from yolov5/models/ parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image size') # height, width + parser.add_argument('--dynamic', action='store_true', help='dynamic ONNX axes') parser.add_argument('--batch-size', type=int, default=1, help='batch size') opt = parser.parse_args() opt.img_size *= 2 if len(opt.img_size) == 1 else 1 # expand @@ -70,7 +71,9 @@ print('\nStarting ONNX export with onnx %s...' % onnx.__version__) f = opt.weights.replace('.pt', '.onnx') # filename torch.onnx.export(model, img, f, verbose=False, opset_version=12, input_names=['images'], - output_names=['classes', 'boxes'] if y is None else ['output']) + output_names=['classes', 'boxes'] if y is None else ['output'], + dynamic_axes={'images': {0: 'batch', 2: 'height', 3: 'width'}, # size(1,3,640,640) + 'output': {0: 'batch', 2: 'y', 3: 'x'}} if opt.dynamic else None) # Checks onnx_model = onnx.load(f) # load onnx model From 32dd1614f405d16678fea787137eb9662d7dc1e0 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 22 Feb 2021 18:34:07 -0800 Subject: [PATCH 0019/1976] Update greetings.yml for auto-rebase on PR (#2272) --- .github/workflows/greetings.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index d62cf5c1600d..ee472297107e 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -11,7 +11,7 @@ jobs: repo-token: ${{ secrets.GITHUB_TOKEN }} pr-message: | 👋 Hello @${{ github.actor }}, thank you for submitting a 🚀 PR! 
To allow your work to be integrated as seamlessly as possible, we advise you to: - - ✅ Verify your PR is **up-to-date with origin/master.** If your PR is behind origin/master update by running the following, replacing 'feature' with the name of your local branch: + - ✅ Verify your PR is **up-to-date with origin/master.** If your PR is behind origin/master an automatic [GitHub actions](https://github.com/ultralytics/yolov5/blob/master/.github/workflows/rebase.yml) rebase may be attempted by including the /rebase command in a comment body, or by running the following code, replacing 'feature' with the name of your local branch: ```bash git remote add upstream https://github.com/ultralytics/yolov5.git git fetch upstream From cc79f3a9ea5d927475e7b896b18aa998c6e70795 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 22 Feb 2021 22:50:00 -0800 Subject: [PATCH 0020/1976] Update Dockerfile with apt install zip (#2274) --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 98dfee204770..fe64d6da29f9 100644 --- a/Dockerfile +++ b/Dockerfile @@ -2,7 +2,7 @@ FROM nvcr.io/nvidia/pytorch:20.12-py3 # Install linux packages -RUN apt update && apt install -y screen libgl1-mesa-glx +RUN apt update && apt install -y zip screen libgl1-mesa-glx # Install python dependencies RUN python -m pip install --upgrade pip From 83dc1b4484d8c5fe69c6a6ff50912ca90cace35a Mon Sep 17 00:00:00 2001 From: xiaowo1996 <429740343@qq.com> Date: Wed, 24 Feb 2021 01:38:56 +0800 Subject: [PATCH 0021/1976] FLOPS min stride 32 (#2276) Signed-off-by: xiaowo1996 <429740343@qq.com> --- utils/torch_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 2cb09e71ce71..1b1cc2038c55 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -205,7 +205,7 @@ def model_info(model, verbose=False, img_size=640): try: # FLOPS from thop import profile - stride = int(model.stride.max()) if hasattr(model, 'stride') else 32 + stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32 img = torch.zeros((1, model.yaml.get('ch', 3), stride, stride), device=next(model.parameters()).device) # input flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2 # stride GFLOPS img_size = img_size if isinstance(img_size, list) else [img_size, img_size] # expand if int/float From 7a6870b81f31db40b06d2e899801febbeed96696 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 23 Feb 2021 11:27:44 -0800 Subject: [PATCH 0022/1976] Update README.md --- README.md | 25 +++++++++++-------------- 1 file changed, 11 insertions(+), 14 deletions(-) diff --git a/README.md b/README.md index 233fc17f1c35..b7129e80adfe 100755 --- a/README.md +++ b/README.md @@ -89,17 +89,15 @@ To run inference on example images in `data/images`: ```bash $ python detect.py --source data/images --weights yolov5s.pt --conf 0.25 -Namespace(agnostic_nms=False, augment=False, classes=None, conf_thres=0.25, device='', img_size=640, iou_thres=0.45, save_conf=False, save_dir='runs/detect', save_txt=False, source='data/images/', update=False, view_img=False, weights=['yolov5s.pt']) -Using torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16130MB) - -Downloading https://github.com/ultralytics/yolov5/releases/download/v3.1/yolov5s.pt to yolov5s.pt... 
100%|██████████████| 14.5M/14.5M [00:00<00:00, 21.3MB/s] +Namespace(agnostic_nms=False, augment=False, classes=None, conf_thres=0.25, device='', exist_ok=False, img_size=640, iou_thres=0.45, name='exp', project='runs/detect', save_conf=False, save_txt=False, source='data/images/', update=False, view_img=False, weights=['yolov5s.pt']) +YOLOv5 v4.0-96-g83dc1b4 torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB) Fusing layers... -Model Summary: 232 layers, 7459581 parameters, 0 gradients -image 1/2 data/images/bus.jpg: 640x480 4 persons, 1 buss, 1 skateboards, Done. (0.012s) -image 2/2 data/images/zidane.jpg: 384x640 2 persons, 2 ties, Done. (0.012s) -Results saved to runs/detect/exp -Done. (0.113s) +Model Summary: 224 layers, 7266973 parameters, 0 gradients, 17.0 GFLOPS +image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, Done. (0.010s) +image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 1 tie, Done. (0.011s) +Results saved to runs/detect/exp2 +Done. (0.103s) ``` @@ -108,18 +106,17 @@ Done. (0.113s) To run **batched inference** with YOLOv5 and [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36): ```python import torch -from PIL import Image # Model model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True) # Images -img1 = Image.open('zidane.jpg') -img2 = Image.open('bus.jpg') -imgs = [img1, img2] # batched list of images +dir = 'https://github.com/ultralytics/yolov5/raw/master/data/images/' +imgs = [dir + f for f in ('zidane.jpg', 'bus.jpg')] # batched list of images # Inference -result = model(imgs) +results = model(imgs) +results.print() # or .show(), .save() ``` From d5d275b6e97766835ebb04d02e5d1e3478d3eeee Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 23 Feb 2021 23:10:14 -0800 Subject: [PATCH 0023/1976] Amazon AWS EC2 startup and re-startup scripts (#2185) * Amazon AWS EC2 startup and re-startup scripts * Create resume.py * cleanup --- utils/aws/__init__.py | 0 utils/aws/mime.sh | 26 ++++++++++++++++++++++++++ utils/aws/resume.py | 34 ++++++++++++++++++++++++++++++++++ utils/aws/userdata.sh | 26 ++++++++++++++++++++++++++ 4 files changed, 86 insertions(+) create mode 100644 utils/aws/__init__.py create mode 100644 utils/aws/mime.sh create mode 100644 utils/aws/resume.py create mode 100644 utils/aws/userdata.sh diff --git a/utils/aws/__init__.py b/utils/aws/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/utils/aws/mime.sh b/utils/aws/mime.sh new file mode 100644 index 000000000000..c319a83cfbdf --- /dev/null +++ b/utils/aws/mime.sh @@ -0,0 +1,26 @@ +# AWS EC2 instance startup 'MIME' script https://aws.amazon.com/premiumsupport/knowledge-center/execute-user-data-ec2/ +# This script will run on every instance restart, not only on first start +# --- DO NOT COPY ABOVE COMMENTS WHEN PASTING INTO USERDATA --- + +Content-Type: multipart/mixed; boundary="//" +MIME-Version: 1.0 + +--// +Content-Type: text/cloud-config; charset="us-ascii" +MIME-Version: 1.0 +Content-Transfer-Encoding: 7bit +Content-Disposition: attachment; filename="cloud-config.txt" + +#cloud-config +cloud_final_modules: +- [scripts-user, always] + +--// +Content-Type: text/x-shellscript; charset="us-ascii" +MIME-Version: 1.0 +Content-Transfer-Encoding: 7bit +Content-Disposition: attachment; filename="userdata.txt" + +#!/bin/bash +# --- paste contents of userdata.sh here --- +--// diff --git a/utils/aws/resume.py b/utils/aws/resume.py new file mode 100644 index 000000000000..338c8b10127b --- /dev/null +++ 
b/utils/aws/resume.py
@@ -0,0 +1,34 @@
+# Resume all interrupted trainings in yolov5/ dir including DDP trainings
+# Usage: $ python utils/aws/resume.py
+
+import os
+from pathlib import Path
+
+import torch
+import yaml
+
+port = 0 # --master_port
+path = Path('').resolve()
+for last in path.rglob('*/**/last.pt'):
+ ckpt = torch.load(last)
+ if ckpt['optimizer'] is None:
+ continue
+
+ # Load opt.yaml
+ with open(last.parent.parent / 'opt.yaml') as f:
+ opt = yaml.load(f, Loader=yaml.SafeLoader)
+
+ # Get device count
+ d = opt['device'].split(',') # devices
+ nd = len(d) # number of devices
+ ddp = nd > 1 or (nd == 0 and torch.cuda.device_count() > 1) # distributed data parallel
+
+ if ddp: # multi-GPU
+ port += 1
+ cmd = f'python -m torch.distributed.launch --nproc_per_node {nd} --master_port {port} train.py --resume {last}'
+ else: # single-GPU
+ cmd = f'python train.py --resume {last}'
+
+ cmd += ' > /dev/null 2>&1 &' # redirect output to dev/null and run in daemon thread
+ print(cmd)
+ os.system(cmd)
diff --git a/utils/aws/userdata.sh b/utils/aws/userdata.sh
new file mode 100644
index 000000000000..36405d1a1565
--- /dev/null
+++ b/utils/aws/userdata.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+# AWS EC2 instance startup script https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html
+# This script will run only once on first instance start (for a re-start script see mime.sh)
+# /home/ubuntu (ubuntu) or /home/ec2-user (amazon-linux) is working dir
+# Use >300 GB SSD
+
+cd home/ubuntu
+if [ ! -d yolov5 ]; then
+ echo "Running first-time script." # install dependencies, download COCO, pull Docker
+ git clone https://github.com/ultralytics/yolov5 && sudo chmod -R 777 yolov5
+ cd yolov5
+ bash data/scripts/get_coco.sh && echo "Data done." &
+ sudo docker pull ultralytics/yolov5:latest && echo "Docker done." &
+ # python -m pip install --upgrade pip && pip install -r requirements.txt && python detect.py && echo "Requirements done." &
+else
+ echo "Running re-start script." # resume interrupted runs
+ i=0
+ list=$(docker ps -qa) # container list i.e.
$'one\ntwo\nthree\nfour' + while IFS= read -r id; do + ((i++)) + echo "restarting container $i: $id" + docker start $id + # docker exec -it $id python train.py --resume # single-GPU + docker exec -d $id python utils/aws/resume.py + done <<<"$list" +fi From 0070995bd58629d4628d11b1c8de9788aa55379b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 24 Feb 2021 01:43:59 -0800 Subject: [PATCH 0024/1976] Amazon AWS EC2 startup and re-startup scripts (#2282) --- utils/aws/resume.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/utils/aws/resume.py b/utils/aws/resume.py index 338c8b10127b..563f22be20dc 100644 --- a/utils/aws/resume.py +++ b/utils/aws/resume.py @@ -2,11 +2,14 @@ # Usage: $ python utils/aws/resume.py import os +import sys from pathlib import Path import torch import yaml +sys.path.append('./') # to run '$ python *.py' files in subdirectories + port = 0 # --master_port path = Path('').resolve() for last in path.rglob('*/**/last.pt'): From ca5b10b759d2e41221e7ffddcefe1f8087791dec Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 24 Feb 2021 13:31:20 -0800 Subject: [PATCH 0025/1976] Update train.py (#2290) * Update train.py * Update train.py * Update train.py * Update train.py * Create train.py --- train.py | 35 ++++++++++++++++------------------- 1 file changed, 16 insertions(+), 19 deletions(-) diff --git a/train.py b/train.py index e19cfa81d8da..8533667fe57f 100644 --- a/train.py +++ b/train.py @@ -146,8 +146,7 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): # Results if ckpt.get('training_results') is not None: - with open(results_file, 'w') as file: - file.write(ckpt['training_results']) # write results.txt + results_file.write_text(ckpt['training_results']) # write results.txt # Epochs start_epoch = ckpt['epoch'] + 1 @@ -354,7 +353,7 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): # Write with open(results_file, 'a') as f: - f.write(s + '%10.4g' * 7 % results + '\n') # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls) + f.write(s + '%10.4g' * 7 % results + '\n') # append metrics, val_loss if len(opt.name) and opt.bucket: os.system('gsutil cp %s gs://%s/results/results%s.txt' % (results_file, opt.bucket, opt.name)) @@ -375,15 +374,13 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): best_fitness = fi # Save model - save = (not opt.nosave) or (final_epoch and not opt.evolve) - if save: - with open(results_file, 'r') as f: # create checkpoint - ckpt = {'epoch': epoch, - 'best_fitness': best_fitness, - 'training_results': f.read(), - 'model': ema.ema, - 'optimizer': None if final_epoch else optimizer.state_dict(), - 'wandb_id': wandb_run.id if wandb else None} + if (not opt.nosave) or (final_epoch and not opt.evolve): # if save + ckpt = {'epoch': epoch, + 'best_fitness': best_fitness, + 'training_results': results_file.read_text(), + 'model': ema.ema, + 'optimizer': None if final_epoch else optimizer.state_dict(), + 'wandb_id': wandb_run.id if wandb else None} # Save last, best and delete torch.save(ckpt, last) @@ -396,9 +393,9 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): if rank in [-1, 0]: # Strip optimizers final = best if best.exists() else last # final model - for f in [last, best]: + for f in last, best: if f.exists(): - strip_optimizer(f) # strip optimizers + strip_optimizer(f) if opt.bucket: os.system(f'gsutil cp {final} gs://{opt.bucket}/weights') # upload @@ -415,17 +412,17 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): # Test best.pt logger.info('%g epochs completed in %.3f hours.\n' % (epoch - 
start_epoch + 1, (time.time() - t0) / 3600)) if opt.data.endswith('coco.yaml') and nc == 80: # if COCO - for conf, iou, save_json in ([0.25, 0.45, False], [0.001, 0.65, True]): # speed, mAP tests + for m in (last, best) if best.exists() else (last): # speed, mAP tests results, _, _ = test.test(opt.data, batch_size=batch_size * 2, imgsz=imgsz_test, - conf_thres=conf, - iou_thres=iou, - model=attempt_load(final, device).half(), + conf_thres=0.001, + iou_thres=0.7, + model=attempt_load(m, device).half(), single_cls=opt.single_cls, dataloader=testloader, save_dir=save_dir, - save_json=save_json, + save_json=True, plots=False) else: From ec1d8496baa6bff7cb3ea223fd23f2d0cf0804ec Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 24 Feb 2021 18:26:46 -0800 Subject: [PATCH 0026/1976] Improved model+EMA checkpointing (#2292) * Enhanced model+EMA checkpointing * update * bug fix * bug fix 2 * always save optimizer * ema half * remove model.float() * model half * carry ema/model in fp32 * rm model.float() * both to float always * cleanup * cleanup --- test.py | 1 - train.py | 25 ++++++++++++++++--------- utils/general.py | 4 ++-- 3 files changed, 18 insertions(+), 12 deletions(-) diff --git a/test.py b/test.py index ecd45f5f4943..9f484c809052 100644 --- a/test.py +++ b/test.py @@ -272,7 +272,6 @@ def test(data, if not training: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' print(f"Results saved to {save_dir}{s}") - model.float() # for training maps = np.zeros(nc) + map for i, c in enumerate(ap_class): maps[c] = ap[i] diff --git a/train.py b/train.py index 8533667fe57f..7aa57fa99e24 100644 --- a/train.py +++ b/train.py @@ -31,7 +31,7 @@ from utils.google_utils import attempt_download from utils.loss import ComputeLoss from utils.plots import plot_images, plot_labels, plot_results, plot_evolution -from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first +from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, is_parallel logger = logging.getLogger(__name__) @@ -136,6 +136,9 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): id=ckpt.get('wandb_id') if 'ckpt' in locals() else None) loggers = {'wandb': wandb} # loggers dict + # EMA + ema = ModelEMA(model) if rank in [-1, 0] else None + # Resume start_epoch, best_fitness = 0, 0.0 if pretrained: @@ -144,6 +147,11 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): optimizer.load_state_dict(ckpt['optimizer']) best_fitness = ckpt['best_fitness'] + # EMA + if ema and ckpt.get('ema'): + ema.ema.load_state_dict(ckpt['ema'][0].float().state_dict()) + ema.updates = ckpt['ema'][1] + # Results if ckpt.get('training_results') is not None: results_file.write_text(ckpt['training_results']) # write results.txt @@ -173,9 +181,6 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device) logger.info('Using SyncBatchNorm()') - # EMA - ema = ModelEMA(model) if rank in [-1, 0] else None - # DDP mode if cuda and rank != -1: model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank) @@ -191,7 +196,6 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): # Process 0 if rank in [-1, 0]: - ema.updates = start_epoch * nb // accumulate # set EMA updates testloader = create_dataloader(test_path, imgsz_test, batch_size * 2, gs, opt, # testloader hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True, rank=-1, 
world_size=opt.world_size, workers=opt.workers, @@ -335,8 +339,7 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): # DDP process 0 or single-GPU if rank in [-1, 0]: # mAP - if ema: - ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride', 'class_weights']) + ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride', 'class_weights']) final_epoch = epoch + 1 == epochs if not opt.notest or final_epoch: # Calculate mAP results, maps, times = test.test(opt.data, @@ -378,8 +381,9 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): ckpt = {'epoch': epoch, 'best_fitness': best_fitness, 'training_results': results_file.read_text(), - 'model': ema.ema, - 'optimizer': None if final_epoch else optimizer.state_dict(), + 'model': (model.module if is_parallel(model) else model).half(), + 'ema': (ema.ema.half(), ema.updates), + 'optimizer': optimizer.state_dict(), 'wandb_id': wandb_run.id if wandb else None} # Save last, best and delete @@ -387,6 +391,9 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): if best_fitness == fi: torch.save(ckpt, best) del ckpt + + model.float(), ema.ema.float() + # end epoch ---------------------------------------------------------------------------------------------------- # end training diff --git a/utils/general.py b/utils/general.py index 3b5f4629b00a..e5bbc50c6177 100755 --- a/utils/general.py +++ b/utils/general.py @@ -484,8 +484,8 @@ def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=Non def strip_optimizer(f='weights/best.pt', s=''): # from utils.general import *; strip_optimizer() # Strip optimizer from 'f' to finalize training, optionally save as 's' x = torch.load(f, map_location=torch.device('cpu')) - for key in 'optimizer', 'training_results', 'wandb_id': - x[key] = None + for k in 'optimizer', 'training_results', 'wandb_id', 'ema': # keys + x[k] = None x['epoch'] = -1 x['model'].half() # to FP16 for p in x['model'].parameters(): From 71dd2768f28ed24e83087203a2dea565c99a1120 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 24 Feb 2021 21:03:21 -0800 Subject: [PATCH 0027/1976] Improved model+EMA checkpointing 2 (#2295) --- test.py | 1 + train.py | 7 +++---- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/test.py b/test.py index 9f484c809052..91176eca01db 100644 --- a/test.py +++ b/test.py @@ -269,6 +269,7 @@ def test(data, print(f'pycocotools unable to run: {e}') # Return results + model.float() # for training if not training: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' print(f"Results saved to {save_dir}{s}") diff --git a/train.py b/train.py index 7aa57fa99e24..e37cf816bcb1 100644 --- a/train.py +++ b/train.py @@ -4,6 +4,7 @@ import os import random import time +from copy import deepcopy from pathlib import Path from threading import Thread @@ -381,8 +382,8 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): ckpt = {'epoch': epoch, 'best_fitness': best_fitness, 'training_results': results_file.read_text(), - 'model': (model.module if is_parallel(model) else model).half(), - 'ema': (ema.ema.half(), ema.updates), + 'model': deepcopy(model.module if is_parallel(model) else model).half(), + 'ema': (deepcopy(ema.ema).half(), ema.updates), 'optimizer': optimizer.state_dict(), 'wandb_id': wandb_run.id if wandb else None} @@ -392,8 +393,6 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): torch.save(ckpt, best) del ckpt - model.float(), ema.ema.float() - # end epoch 
---------------------------------------------------------------------------------------------------- # end training From a82dce7faa5d13d6f9c342f04aaaa3b5de80d749 Mon Sep 17 00:00:00 2001 From: Iden Craven Date: Thu, 25 Feb 2021 19:05:38 -0700 Subject: [PATCH 0028/1976] Fix labels being missed when image extension appears twice in filename (#2300) --- utils/datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/datasets.py b/utils/datasets.py index 4f2939d4bef2..d6ab16518034 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -335,7 +335,7 @@ def __len__(self): def img2label_paths(img_paths): # Define label paths as a function of image paths sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings - return [x.replace(sa, sb, 1).replace('.' + x.split('.')[-1], '.txt') for x in img_paths] + return ['txt'.join(x.replace(sa, sb, 1).rsplit(x.split('.')[-1], 1)) for x in img_paths] class LoadImagesAndLabels(Dataset): # for training/testing From efa4946d158f4042890b243cf9314aa62dac83e4 Mon Sep 17 00:00:00 2001 From: Jan Hajek Date: Fri, 26 Feb 2021 04:18:19 +0100 Subject: [PATCH 0029/1976] W&B entity support (#2298) * W&B entity support * shorten wandb_entity to entity Co-authored-by: Jan Hajek Co-authored-by: Glenn Jocher --- train.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/train.py b/train.py index e37cf816bcb1..bbf879f3af5f 100644 --- a/train.py +++ b/train.py @@ -134,6 +134,7 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): wandb_run = wandb.init(config=opt, resume="allow", project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem, name=save_dir.stem, + entity=opt.entity, id=ckpt.get('wandb_id') if 'ckpt' in locals() else None) loggers = {'wandb': wandb} # loggers dict @@ -467,6 +468,7 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): parser.add_argument('--log-artifacts', action='store_true', help='log artifacts, i.e. final trained model') parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers') parser.add_argument('--project', default='runs/train', help='save to project/name') + parser.add_argument('--entity', default=None, help='W&B entity') parser.add_argument('--name', default='exp', help='save to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') parser.add_argument('--quad', action='store_true', help='quad dataloader') From cbd55da5d24becbe3b94afaaa4cdd1187a512c3f Mon Sep 17 00:00:00 2001 From: oleg Date: Fri, 26 Feb 2021 15:07:40 -0800 Subject: [PATCH 0030/1976] Update yolo.py (#2120) * Avoid mutable state in Detect * LoadImages() pathlib update (#2140) * Unique *.cache filenames fix (#2134) * fix #2121 * Update test.py * Update train.py * Update autoanchor.py * Update datasets.py * Update log_dataset.py * Update datasets.py Co-authored-by: Glenn Jocher * Update train.py test batch_size (#2148) * Update train.py * Update loss.py * Update train.py (#2149) * Linear LR scheduler option (#2150) * Linear LR scheduler option * Update train.py * Update data-autodownload background tasks (#2154) * Update get_coco.sh * Update get_voc.sh * Update detect.py (#2167) Without this cv2.imshow opens a window but nothing is visible * Update requirements.txt (#2173) * Update utils/datasets.py to support .webp files (#2174) Simply added 'webp' as an image format to the img_formats array so that webp image files can be used as training data. 
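The img2label_paths() change in the filename fix above (#2300) is easy to misread at a glance; here is a minimal sketch of the old vs. new behavior on a filename whose extension substring appears twice (the path, separators and filename are illustrative only):

```python
x = 'data/images/dog.jpg.jpg'    # extension substring appears twice in the name
sa, sb = '/images/', '/labels/'  # image/label substrings, as in img2label_paths()

old = x.replace(sa, sb, 1).replace('.' + x.split('.')[-1], '.txt')  # replaces every '.jpg'
new = 'txt'.join(x.replace(sa, sb, 1).rsplit(x.split('.')[-1], 1))  # splits on the last 'jpg' only

print(old)  # data/labels/dog.txt.txt -> label file never found
print(new)  # data/labels/dog.jpg.txt -> matches the image filename
```

rsplit() with maxsplit=1 anchors the substitution to the last occurrence of the extension, so earlier occurrences in the stem are left untouched.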
* Changed socket port and added timeout (#2176) * PyTorch Hub results.save('path/to/dir') (#2179) * YOLOv5 Segmentation Dataloader Updates (#2188) * Created using Colaboratory * YOLOv5 PyTorch Hub results.save() method retains filenames (#2194) * save results with name * debug * save original imgs names * Update common.py Co-authored-by: Glenn Jocher * TTA augment boxes one pixel shifted in de-flip ud and lr (#2219) * TTA augment boxes one pixel shifted in de-flip ud and lr * PEP8 reformat Co-authored-by: Jaap van de Loosdrecht Co-authored-by: Glenn Jocher * LoadStreams() frame loss bug fix (#2222) * Update yolo.py channel array (#2223) * Add check_imshow() (#2231) * Add check_imshow() * Update general.py * Update general.py * Update CI badge (#2230) * Add isdocker() (#2232) * Add isdocker() * Update general.py * Update general.py * YOLOv5 Hub URL inference bug fix (#2250) * Update common.py * Update common.py * Update common.py * Improved hubconf.py CI tests (#2251) * Unified hub and detect.py box and labels plotting (#2243) * reset head * Update inference default to multi_label=False (#2252) * Update inference default to multi_label=False * bug fix * Update plots.py * Update plots.py * Robust objectness loss balancing (#2256) * Created using Colaboratory * Update minimum stride to 32 (#2266) * Dynamic ONNX engine generation (#2208) * add: dynamic onnx export * delete: test onnx inference * fix dynamic output axis * Code reduction * fix: dynamic output axes, dynamic input naming * Remove fixed axes Co-authored-by: Shivam Swanrkar Co-authored-by: Glenn Jocher * Update greetings.yml for auto-rebase on PR (#2272) * Update
Dockerfile with apt install zip (#2274) * FLOPS min stride 32 (#2276) Signed-off-by: xiaowo1996 <429740343@qq.com> * Update README.md * Amazon AWS EC2 startup and re-startup scripts (#2185) * Amazon AWS EC2 startup and re-startup scripts * Create resume.py * cleanup * Amazon AWS EC2 startup and re-startup scripts (#2282) * Update train.py (#2290) * Update train.py * Update train.py * Update train.py * Update train.py * Create train.py * Improved model+EMA checkpointing (#2292) * Enhanced model+EMA checkpointing * update * bug fix * bug fix 2 * always save optimizer * ema half * remove model.float() * model half * carry ema/model in fp32 * rm model.float() * both to float always * cleanup * cleanup * Improved model+EMA checkpointing 2 (#2295) * Fix labels being missed when image extension appears twice in filename (#2300) * W&B entity support (#2298) * W&B entity support * shorten wandb_entity to entity Co-authored-by: Jan Hajek Co-authored-by: Glenn Jocher * Avoid mutable state in Detect * Update yolo and remove .to(device) Co-authored-by: Oleg Boiko Co-authored-by: Glenn Jocher Co-authored-by: train255 Co-authored-by: ab-101 <56578530+ab-101@users.noreply.github.com> Co-authored-by: Transigent Co-authored-by: NanoCode012 Co-authored-by: Daniel Khromov Co-authored-by: VdLMV Co-authored-by: Jaap van de Loosdrecht Co-authored-by: Yann Defretin Co-authored-by: Aditya Lohia <64709773+aditya-dl@users.noreply.github.com> Co-authored-by: Shivam Swanrkar Co-authored-by: xiaowo1996 <429740343@qq.com> Co-authored-by: Iden Craven Co-authored-by: Jan Hajek Co-authored-by: Jan Hajek --- models/yolo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/yolo.py b/models/yolo.py index 41817098ccbc..85043f2b0205 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -49,7 +49,7 @@ def forward(self, x): self.grid[i] = self._make_grid(nx, ny).to(x[i].device) y = x[i].sigmoid() - y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i].to(x[i].device)) * self.stride[i] # xy + y[..., 0:2] = (y[..., 0:2] * 2. 
- 0.5 + self.grid[i]) * self.stride[i] # xy y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh z.append(y.view(bs, -1, self.no)) From dfeec198cbb0d19bf06a26e3712b7825f993fc47 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 27 Feb 2021 12:51:33 -0800 Subject: [PATCH 0031/1976] final_epoch EMA bug fix (#2317) --- train.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/train.py b/train.py index bbf879f3af5f..5c203f12651d 100644 --- a/train.py +++ b/train.py @@ -383,7 +383,8 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): ckpt = {'epoch': epoch, 'best_fitness': best_fitness, 'training_results': results_file.read_text(), - 'model': deepcopy(model.module if is_parallel(model) else model).half(), + 'model': ema.ema if final_epoch else deepcopy( + model.module if is_parallel(model) else model).half(), 'ema': (deepcopy(ema.ema).half(), ema.updates), 'optimizer': optimizer.state_dict(), 'wandb_id': wandb_run.id if wandb else None} From cd30d838eb098b1c96219a83521e71bdd9360f60 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 27 Feb 2021 15:28:23 -0800 Subject: [PATCH 0032/1976] Update test.py (#2319) --- test.py | 1 + 1 file changed, 1 insertion(+) diff --git a/test.py b/test.py index 91176eca01db..fd4d339ffea6 100644 --- a/test.py +++ b/test.py @@ -326,6 +326,7 @@ def test(data, test(opt.data, w, opt.batch_size, opt.img_size, 0.25, 0.45, save_json=False, plots=False) elif opt.task == 'study': # run over a range of settings and save/plot + # python test.py --task study --data coco.yaml --iou 0.7 --weights yolov5s.pt yolov5m.pt yolov5l.pt yolov5x.pt x = list(range(256, 1536 + 128, 128)) # x axis (image sizes) for w in opt.weights: f = f'study_{Path(opt.data).stem}_{Path(w).stem}.txt' # filename to save to From c2026a5f35fd632c71b10fdbaf9194e714906f02 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 27 Feb 2021 15:55:31 -0800 Subject: [PATCH 0033/1976] Update Dockerfile install htop (#2320) --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index fe64d6da29f9..a768774fa9c7 100644 --- a/Dockerfile +++ b/Dockerfile @@ -2,7 +2,7 @@ FROM nvcr.io/nvidia/pytorch:20.12-py3 # Install linux packages -RUN apt update && apt install -y zip screen libgl1-mesa-glx +RUN apt update && apt install -y zip htop screen libgl1-mesa-glx # Install python dependencies RUN python -m pip install --upgrade pip From fd96810518adcbb07ca0c5e1373c57e9025966c4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 28 Feb 2021 21:14:08 -0800 Subject: [PATCH 0034/1976] remove TTA 1 pixel offset (#2325) --- models/yolo.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/models/yolo.py b/models/yolo.py index 85043f2b0205..a9e1da43d913 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -109,9 +109,9 @@ def forward(self, x, augment=False, profile=False): # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1]) # save yi[..., :4] /= si # de-scale if fi == 2: - yi[..., 1] = img_size[0] - 1 - yi[..., 1] # de-flip ud + yi[..., 1] = img_size[0] - yi[..., 1] # de-flip ud elif fi == 3: - yi[..., 0] = img_size[1] - 1 - yi[..., 0] # de-flip lr + yi[..., 0] = img_size[1] - yi[..., 0] # de-flip lr y.append(yi) return torch.cat(y, 1), None # augmented inference, train else: From fab5085674f7748dc16d7ca25afb225fa441bc9d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 1 Mar 2021 17:13:34 -0800 Subject: [PATCH 0035/1976] EMA bug fix 2 (#2330) * EMA bug fix 2 * update --- 
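The 'ema' fallback introduced below touches hubconf.py, experimental.py, train.py and general.py so that checkpoints carry separate 'model' and 'ema' entries; a minimal sketch of the loading pattern they converge on, assuming a checkpoint dict saved by train.py ('last.pt' is an illustrative filename):

```python
import torch

ckpt = torch.load('last.pt', map_location='cpu')     # checkpoint dict saved by train.py
model = ckpt['ema' if ckpt.get('ema') else 'model']  # prefer the smoothed EMA weights when present
model = model.float().eval()                         # checkpoint stores FP16 modules; cast back for inference
```

strip_optimizer() applies the same preference at export time, overwriting 'model' with the EMA weights before zeroing the training-only keys.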
hubconf.py | 2 +- models/experimental.py | 3 ++- train.py | 10 +++++----- utils/general.py | 8 +++++--- 4 files changed, 13 insertions(+), 10 deletions(-) diff --git a/hubconf.py b/hubconf.py index 47eee4477725..a8eb51681794 100644 --- a/hubconf.py +++ b/hubconf.py @@ -120,7 +120,7 @@ def custom(path_or_model='path/to/model.pt', autoshape=True): """ model = torch.load(path_or_model) if isinstance(path_or_model, str) else path_or_model # load checkpoint if isinstance(model, dict): - model = model['model'] # load model + model = model['ema' if model.get('ema') else 'model'] # load model hub_model = Model(model.yaml).to(next(model.parameters()).device) # create hub_model.load_state_dict(model.float().state_dict()) # load state_dict diff --git a/models/experimental.py b/models/experimental.py index 5fe56858c54a..d79052314f9b 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -115,7 +115,8 @@ def attempt_load(weights, map_location=None): model = Ensemble() for w in weights if isinstance(weights, list) else [weights]: attempt_download(w) - model.append(torch.load(w, map_location=map_location)['model'].float().fuse().eval()) # load FP32 model + ckpt = torch.load(w, map_location=map_location) # load + model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval()) # FP32 model # Compatibility updates for m in model.modules(): diff --git a/train.py b/train.py index 5c203f12651d..e2c82339f7fe 100644 --- a/train.py +++ b/train.py @@ -151,8 +151,8 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): # EMA if ema and ckpt.get('ema'): - ema.ema.load_state_dict(ckpt['ema'][0].float().state_dict()) - ema.updates = ckpt['ema'][1] + ema.ema.load_state_dict(ckpt['ema'].float().state_dict()) + ema.updates = ckpt['updates'] # Results if ckpt.get('training_results') is not None: @@ -383,9 +383,9 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): ckpt = {'epoch': epoch, 'best_fitness': best_fitness, 'training_results': results_file.read_text(), - 'model': ema.ema if final_epoch else deepcopy( - model.module if is_parallel(model) else model).half(), - 'ema': (deepcopy(ema.ema).half(), ema.updates), + 'model': deepcopy(model.module if is_parallel(model) else model).half(), + 'ema': deepcopy(ema.ema).half(), + 'updates': ema.updates, 'optimizer': optimizer.state_dict(), 'wandb_id': wandb_run.id if wandb else None} diff --git a/utils/general.py b/utils/general.py index e5bbc50c6177..df8cf7bab60d 100755 --- a/utils/general.py +++ b/utils/general.py @@ -481,10 +481,12 @@ def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=Non return output -def strip_optimizer(f='weights/best.pt', s=''): # from utils.general import *; strip_optimizer() +def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer() # Strip optimizer from 'f' to finalize training, optionally save as 's' x = torch.load(f, map_location=torch.device('cpu')) - for k in 'optimizer', 'training_results', 'wandb_id', 'ema': # keys + if x.get('ema'): + x['model'] = x['ema'] # replace model with ema + for k in 'optimizer', 'training_results', 'wandb_id', 'ema', 'updates': # keys x[k] = None x['epoch'] = -1 x['model'].half() # to FP16 @@ -492,7 +494,7 @@ def strip_optimizer(f='weights/best.pt', s=''): # from utils.general import *; p.requires_grad = False torch.save(x, s or f) mb = os.path.getsize(s or f) / 1E6 # filesize - print('Optimizer stripped from %s,%s %.1fMB' % (f, (' saved as %s,' % s) if s else '', mb)) + print(f"Optimizer stripped from {f},{(' saved as 
%s,' % s) if s else ''} {mb:.1f}MB") def print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''): From ab86cec85443f979ee7f99bdb60223ad36b07198 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 2 Mar 2021 13:01:59 -0800 Subject: [PATCH 0036/1976] FROM nvcr.io/nvidia/pytorch:21.02-py3 (#2341) --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index a768774fa9c7..d42af2f78954 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # Start FROM Nvidia PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch -FROM nvcr.io/nvidia/pytorch:20.12-py3 +FROM nvcr.io/nvidia/pytorch:21.02-py3 # Install linux packages RUN apt update && apt install -y zip htop screen libgl1-mesa-glx From 2c56ad5436cf0b84612c0c83842067d34df5c94b Mon Sep 17 00:00:00 2001 From: Ryan Avery Date: Tue, 2 Mar 2021 16:09:52 -0800 Subject: [PATCH 0037/1976] Confusion matrix background axis swap (#2114) --- utils/metrics.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/utils/metrics.py b/utils/metrics.py index ba812ff13a58..666b8c7ec1c0 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -147,12 +147,12 @@ def process_batch(self, detections, labels): if n and sum(j) == 1: self.matrix[gc, detection_classes[m1[j]]] += 1 # correct else: - self.matrix[gc, self.nc] += 1 # background FP + self.matrix[self.nc, gc] += 1 # background FP if n: for i, dc in enumerate(detection_classes): if not any(m1 == i): - self.matrix[self.nc, dc] += 1 # background FN + self.matrix[dc, self.nc] += 1 # background FN def matrix(self): return self.matrix @@ -168,8 +168,8 @@ def plot(self, save_dir='', names=()): sn.set(font_scale=1.0 if self.nc < 50 else 0.8) # for label size labels = (0 < len(names) < 99) and len(names) == self.nc # apply names to ticklabels sn.heatmap(array, annot=self.nc < 30, annot_kws={"size": 8}, cmap='Blues', fmt='.2f', square=True, - xticklabels=names + ['background FN'] if labels else "auto", - yticklabels=names + ['background FP'] if labels else "auto").set_facecolor((1, 1, 1)) + xticklabels=names + ['background FP'] if labels else "auto", + yticklabels=names + ['background FN'] if labels else "auto").set_facecolor((1, 1, 1)) fig.axes[0].set_xlabel('True') fig.axes[0].set_ylabel('Predicted') fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250) From fe6ebb96bbe630cc45ed02ec0ea3fa0a3aa8c506 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 2 Mar 2021 19:20:51 -0800 Subject: [PATCH 0038/1976] Created using Colaboratory --- tutorial.ipynb | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 7fce40c3824e..f2b03dc57f0a 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -1169,11 +1169,9 @@ }, "source": [ "# Reproduce\n", - "%%shell\n", - "for x in yolov5s yolov5m yolov5l yolov5x; do\n", - " python test.py --weights $x.pt --data coco.yaml --img 640 --conf 0.25 --iou 0.45 # speed\n", - " python test.py --weights $x.pt --data coco.yaml --img 640 --conf 0.001 --iou 0.65 # mAP\n", - "done" + "for x in 'yolov5s', 'yolov5m', 'yolov5l', 'yolov5x':\n", + " !python test.py --weights {x}.pt --data coco.yaml --img 640 --conf 0.25 --iou 0.45 # speed\n", + " !python test.py --weights {x}.pt --data coco.yaml --img 640 --conf 0.001 --iou 0.65 # mAP" ], "execution_count": null, "outputs": [] From a3ecf0fd640465f9a7c009e81bcc5ecabf381004 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 2 Mar 2021 23:08:21 -0800 Subject: [PATCH 0039/1976] Anchor override 
(#2350) --- models/yolo.py | 7 +++++-- train.py | 2 +- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/models/yolo.py b/models/yolo.py index a9e1da43d913..a047fef397ee 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -62,7 +62,7 @@ def _make_grid(nx=20, ny=20): class Model(nn.Module): - def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None): # model, input channels, number of classes + def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, input channels, number of classes super(Model, self).__init__() if isinstance(cfg, dict): self.yaml = cfg # model dict @@ -75,8 +75,11 @@ def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None): # model, input channels, # Define model ch = self.yaml['ch'] = self.yaml.get('ch', ch) # input channels if nc and nc != self.yaml['nc']: - logger.info('Overriding model.yaml nc=%g with nc=%g' % (self.yaml['nc'], nc)) + logger.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}") self.yaml['nc'] = nc # override yaml value + if anchors: + logger.info(f'Overriding model.yaml anchors with anchors={anchors}') + self.yaml['anchors'] = round(anchors) # override yaml value self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist self.names = [str(i) for i in range(self.yaml['nc'])] # default names # print([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))]) diff --git a/train.py b/train.py index e2c82339f7fe..1b8b315ce927 100644 --- a/train.py +++ b/train.py @@ -84,7 +84,7 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): model.load_state_dict(state_dict, strict=False) # load logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights)) # report else: - model = Model(opt.cfg, ch=3, nc=nc).to(device) # create + model = Model(opt.cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create # Freeze freeze = [] # parameter names to freeze (full or partial) From e931b9da33f45551928059b8d61bddd50e401e48 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 3 Mar 2021 21:06:36 -0800 Subject: [PATCH 0040/1976] Resume with custom anchors fix (#2361) * Resume with custom anchors fix * Update train.py --- train.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/train.py b/train.py index 1b8b315ce927..ecac59857ccc 100644 --- a/train.py +++ b/train.py @@ -75,10 +75,8 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): with torch_distributed_zero_first(rank): attempt_download(weights) # download if not found locally ckpt = torch.load(weights, map_location=device) # load checkpoint - if hyp.get('anchors'): - ckpt['model'].yaml['anchors'] = round(hyp['anchors']) # force autoanchor - model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc).to(device) # create - exclude = ['anchor'] if opt.cfg or hyp.get('anchors') else [] # exclude keys + model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create + exclude = ['anchor'] if (opt.cfg or hyp.get('anchors')) and not opt.resume else [] # exclude keys state_dict = ckpt['model'].float().state_dict() # to FP32 state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude) # intersect model.load_state_dict(state_dict, strict=False) # load @@ -216,6 +214,7 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): # Anchors if not opt.noautoanchor: check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz) + model.half().float() # pre-reduce anchor precision # Model parameters hyp['box'] *= 3. 
/ nl # scale to layers

From 300d518f73796cebb26f0a3233e180ef1665d6ee Mon Sep 17 00:00:00 2001
From: Yonghye Kwon
Date: Sat, 6 Mar 2021 06:06:18 +0900
Subject: [PATCH 0041/1976] Faster random index generator for mosaic augmentation (#2345)

* faster random index generator for mosaic augmentation

We don't need to access the list to generate a random index; accessing it makes augmentation slower.

* Update datasets.py

Co-authored-by: Glenn Jocher

From 692e1f31dc1fecdd57bfada86380933953b6e899 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Fri, 5 Mar 2021 15:26:27 -0800
Subject: [PATCH 0042/1976] --no-cache notebook (#2381)

---
 Dockerfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Dockerfile b/Dockerfile
index d42af2f78954..1a8fe2e72885 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -7,7 +7,7 @@ RUN apt update && apt install -y zip htop screen libgl1-mesa-glx
 # Install python dependencies
 RUN python -m pip install --upgrade pip
 COPY requirements.txt .
-RUN pip install -r requirements.txt gsutil
+RUN pip install --no-cache -r requirements.txt gsutil notebook
 # Create working directory
 RUN mkdir -p /usr/src/app

From c64fe219b4333b98c88a2a706101597f4059bb71 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Fri, 5 Mar 2021 15:53:57 -0800
Subject: [PATCH 0043/1976] ENV HOME=/usr/src/app (#2382)

Set HOME environment variable per Binder requirements.
https://github.com/binder-examples/minimal-dockerfile
---
 Dockerfile | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/Dockerfile b/Dockerfile
index 1a8fe2e72885..e1b40c2d15c6 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -5,8 +5,8 @@ FROM nvcr.io/nvidia/pytorch:21.02-py3
 RUN apt update && apt install -y zip htop screen libgl1-mesa-glx
 # Install python dependencies
-RUN python -m pip install --upgrade pip
 COPY requirements.txt .
+RUN python -m pip install --upgrade pip
 RUN pip install --no-cache -r requirements.txt gsutil notebook
 # Create working directory
@@ -16,11 +16,8 @@ WORKDIR /usr/src/app

 # Copy contents
 COPY .
/usr/src/app -# Copy weights -#RUN python3 -c "from models import *; \ -#attempt_download('weights/yolov5s.pt'); \ -#attempt_download('weights/yolov5m.pt'); \ -#attempt_download('weights/yolov5l.pt')" +# Set environment variables +ENV HOME=/usr/src/app # --------------------------------------------------- Extras Below --------------------------------------------------- From cd8ed3521d98ea120d07f57ea5372c4b375241ca Mon Sep 17 00:00:00 2001 From: Yonghye Kwon Date: Sat, 6 Mar 2021 15:58:26 +0900 Subject: [PATCH 0044/1976] image weights compatible faster random index generator v2 for mosaic augmentation (#2383) image weights compatible faster random index generator v2 for mosaic augmentation --- utils/datasets.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index d6ab16518034..ed18f449ddd3 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -666,7 +666,7 @@ def load_mosaic(self, index): labels4, segments4 = [], [] s = self.img_size yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y - indices = [index] + [self.indices[random.randint(0, self.n - 1)] for _ in range(3)] # 3 additional image indices + indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices for i, index in enumerate(indices): # Load image img, _, (h, w) = load_image(self, index) @@ -721,7 +721,7 @@ def load_mosaic9(self, index): labels9, segments9 = [], [] s = self.img_size - indices = [index] + [self.indices[random.randint(0, self.n - 1)] for _ in range(8)] # 8 additional image indices + indices = [index] + random.choices(self.indices, k=8) # 8 additional image indices for i, index in enumerate(indices): # Load image img, _, (h, w) = load_image(self, index) From 7a0a81fd1d770bbfbf94ced5e38cc0f0573b765e Mon Sep 17 00:00:00 2001 From: Jan Hajek Date: Sat, 6 Mar 2021 21:02:10 +0100 Subject: [PATCH 0045/1976] GPU export options (#2297) * option for skip last layer and cuda export support * added parameter device * fix import * cleanup 1 * cleanup 2 * opt-in grid --grid will export with grid computation, default export will skip grid (same as current) * default --device cpu GPU export causes ONNX and CoreML errors. Co-authored-by: Jan Hajek Co-authored-by: Glenn Jocher --- models/export.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/models/export.py b/models/export.py index cc817871f218..11e60c7a583d 100644 --- a/models/export.py +++ b/models/export.py @@ -17,13 +17,16 @@ from models.experimental import attempt_load from utils.activations import Hardswish, SiLU from utils.general import set_logging, check_img_size +from utils.torch_utils import select_device if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--weights', type=str, default='./yolov5s.pt', help='weights path') # from yolov5/models/ parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image size') # height, width - parser.add_argument('--dynamic', action='store_true', help='dynamic ONNX axes') parser.add_argument('--batch-size', type=int, default=1, help='batch size') + parser.add_argument('--dynamic', action='store_true', help='dynamic ONNX axes') + parser.add_argument('--grid', action='store_true', help='export Detect() layer grid') + parser.add_argument('--device', default='cpu', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') opt = parser.parse_args() opt.img_size *= 2 if len(opt.img_size) == 1 else 1 # expand print(opt) @@ -31,7 +34,8 @@ t = time.time() # Load PyTorch model - model = attempt_load(opt.weights, map_location=torch.device('cpu')) # load FP32 model + device = select_device(opt.device) + model = attempt_load(opt.weights, map_location=device) # load FP32 model labels = model.names # Checks @@ -39,7 +43,7 @@ opt.img_size = [check_img_size(x, gs) for x in opt.img_size] # verify img_size are gs-multiples # Input - img = torch.zeros(opt.batch_size, 3, *opt.img_size) # image size(1,3,320,192) iDetection + img = torch.zeros(opt.batch_size, 3, *opt.img_size).to(device) # image size(1,3,320,192) iDetection # Update model for k, m in model.named_modules(): @@ -51,7 +55,7 @@ m.act = SiLU() # elif isinstance(m, models.yolo.Detect): # m.forward = m.forward_export # assign forward (optional) - model.model[-1].export = True # set Detect() layer export=True + model.model[-1].export = not opt.grid # set Detect() layer grid export y = model(img) # dry run # TorchScript export From ba18528b4737a4b08b55653c54f3d3e830f8e151 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 6 Mar 2021 13:07:34 -0800 Subject: [PATCH 0046/1976] bbox_iou() stability and speed improvements (#2385) --- utils/general.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/general.py b/utils/general.py index df8cf7bab60d..e1c14bdaa4b3 100755 --- a/utils/general.py +++ b/utils/general.py @@ -312,7 +312,7 @@ def clip_coords(boxes, img_shape): boxes[:, 3].clamp_(0, img_shape[0]) # y2 -def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-9): +def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7): # Returns the IoU of box1 to box2. box1 is 4, box2 is nx4 box2 = box2.T @@ -348,7 +348,7 @@ def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps= elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2) with torch.no_grad(): - alpha = v / ((1 + eps) - iou + v) + alpha = v / (v - iou + (1 + eps)) return iou - (rho2 / c2 + v * alpha) # CIoU else: # GIoU https://arxiv.org/pdf/1902.09630.pdf c_area = cw * ch + eps # convex area From 7c2c95732c3eaa10465080b693e14a9e12e08e8d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 7 Mar 2021 20:18:30 -0800 Subject: [PATCH 0047/1976] AWS wait && echo "All tasks done." (#2391) --- utils/aws/userdata.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/aws/userdata.sh b/utils/aws/userdata.sh index 36405d1a1565..a6d6e7976cf3 100644 --- a/utils/aws/userdata.sh +++ b/utils/aws/userdata.sh @@ -11,7 +11,8 @@ if [ ! -d yolov5 ]; then cd yolov5 bash data/scripts/get_coco.sh && echo "Data done." & sudo docker pull ultralytics/yolov5:latest && echo "Docker done." & - # python -m pip install --upgrade pip && pip install -r requirements.txt && python detect.py && echo "Requirements done." & + python -m pip install --upgrade pip && pip install -r requirements.txt && python detect.py && echo "Requirements done." & + wait && echo "All tasks done." # finish background tasks else echo "Running re-start script." 
# resume interrupted runs i=0 From e8a2b83268950e346899a84e8d29e84d178553b1 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 7 Mar 2021 20:21:49 -0800 Subject: [PATCH 0048/1976] GCP sudo docker userdata.sh (#2393) * GCP sudo docker * cleanup --- utils/aws/userdata.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/utils/aws/userdata.sh b/utils/aws/userdata.sh index a6d6e7976cf3..890606b76a06 100644 --- a/utils/aws/userdata.sh +++ b/utils/aws/userdata.sh @@ -16,12 +16,12 @@ if [ ! -d yolov5 ]; then else echo "Running re-start script." # resume interrupted runs i=0 - list=$(docker ps -qa) # container list i.e. $'one\ntwo\nthree\nfour' + list=$(sudo docker ps -qa) # container list i.e. $'one\ntwo\nthree\nfour' while IFS= read -r id; do ((i++)) echo "restarting container $i: $id" - docker start $id - # docker exec -it $id python train.py --resume # single-GPU - docker exec -d $id python utils/aws/resume.py + sudo docker start $id + # sudo docker exec -it $id python train.py --resume # single-GPU + sudo docker exec -d $id python utils/aws/resume.py # multi-scenario done <<<"$list" fi From c51dfec8ea554db6811579f6d618dac45766e647 Mon Sep 17 00:00:00 2001 From: Kartikeya Sharma Date: Tue, 9 Mar 2021 21:26:49 -0600 Subject: [PATCH 0049/1976] CVPR 2021 Argoverse-HD dataset autodownload support (#2400) * added argoverse-download ability * bugfix * add support for Argoverse dataset * Refactored code * renamed to argoverse-HD * unzip -q and YOLOv5 small cleanup items * add image counts Co-authored-by: Kartikeya Sharma Co-authored-by: Kartikeya Sharma Co-authored-by: Glenn Jocher --- data/argoverse_hd.yaml | 21 +++++++++++ data/scripts/get_argoverse_hd.sh | 65 ++++++++++++++++++++++++++++++++ 2 files changed, 86 insertions(+) create mode 100644 data/argoverse_hd.yaml create mode 100644 data/scripts/get_argoverse_hd.sh diff --git a/data/argoverse_hd.yaml b/data/argoverse_hd.yaml new file mode 100644 index 000000000000..df7a9361e769 --- /dev/null +++ b/data/argoverse_hd.yaml @@ -0,0 +1,21 @@ +# Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ +# Train command: python train.py --data argoverse_hd.yaml +# Default dataset location is next to /yolov5: +# /parent_folder +# /argoverse +# /yolov5 + + +# download command/URL (optional) +download: bash data/scripts/get_argoverse_hd.sh + +# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/] +train: ../argoverse/Argoverse-1.1/images/train/ # 39384 images +val: ../argoverse/Argoverse-1.1/images/val/ # 15062 images +test: ../argoverse/Argoverse-1.1/images/test/ # Submit to: https://eval.ai/web/challenges/challenge-page/800/overview + +# number of classes +nc: 8 + +# class names +names: [ 'person', 'bicycle', 'car', 'motorcycle', 'bus', 'truck', 'traffic_light', 'stop_sign' ] diff --git a/data/scripts/get_argoverse_hd.sh b/data/scripts/get_argoverse_hd.sh new file mode 100644 index 000000000000..884862db03f5 --- /dev/null +++ b/data/scripts/get_argoverse_hd.sh @@ -0,0 +1,65 @@ +#!/bin/bash +# Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ +# Download command: bash data/scripts/get_argoverse_hd.sh +# Train command: python train.py --data argoverse_hd.yaml +# Default dataset location is next to /yolov5: +# /parent_folder +# /argoverse +# /yolov5 + +# Download/unzip images +d='../argoverse/' # unzip directory +mkdir $d +url=https://argoverse-hd.s3.us-east-2.amazonaws.com/ 
+f=Argoverse-HD-Full.zip +wget $url$f -O $f && unzip -q $f -d $d && rm $f & # download, unzip, remove in background +wait # finish background tasks + +cd ../argoverse/Argoverse-1.1/ +ln -s tracking images + +cd ../Argoverse-HD/annotations/ + +python3 - "$@" < Date: Tue, 9 Mar 2021 21:07:27 -0800 Subject: [PATCH 0050/1976] CVPR 2021 Argoverse-HD autodownload fix (#2418) --- data/scripts/get_argoverse_hd.sh | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/data/scripts/get_argoverse_hd.sh b/data/scripts/get_argoverse_hd.sh index 884862db03f5..9e0db9fad91b 100644 --- a/data/scripts/get_argoverse_hd.sh +++ b/data/scripts/get_argoverse_hd.sh @@ -12,8 +12,8 @@ d='../argoverse/' # unzip directory mkdir $d url=https://argoverse-hd.s3.us-east-2.amazonaws.com/ f=Argoverse-HD-Full.zip -wget $url$f -O $f && unzip -q $f -d $d && rm $f & # download, unzip, remove in background -wait # finish background tasks +wget $url$f -O $f && unzip -q $f -d $d && rm $f &# download, unzip, remove in background +wait # finish background tasks cd ../argoverse/Argoverse-1.1/ ln -s tracking images @@ -23,6 +23,7 @@ cd ../Argoverse-HD/annotations/ python3 - "$@" < Date: Tue, 9 Mar 2021 23:43:46 -0800 Subject: [PATCH 0051/1976] DDP after autoanchor reorder (#2421) --- train.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/train.py b/train.py index ecac59857ccc..6bd65f063391 100644 --- a/train.py +++ b/train.py @@ -181,10 +181,6 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device) logger.info('Using SyncBatchNorm()') - # DDP mode - if cuda and rank != -1: - model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank) - # Trainloader dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt, hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=rank, @@ -214,7 +210,11 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): # Anchors if not opt.noautoanchor: check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz) - model.half().float() # pre-reduce anchor precision + model.half().float() # pre-reduce anchor precision + + # DDP mode + if cuda and rank != -1: + model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank) # Model parameters hyp['box'] *= 3. / nl # scale to layers From f01f3223d564e40e7dfa99997c3c520ab128c925 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 12 Mar 2021 13:35:44 -0800 Subject: [PATCH 0052/1976] Integer printout (#2450) * Integer printout * test.py 'Labels' * Update train.py --- test.py | 4 ++-- train.py | 2 +- utils/torch_utils.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/test.py b/test.py index fd4d339ffea6..46288019a8bd 100644 --- a/test.py +++ b/test.py @@ -93,7 +93,7 @@ def test(data, confusion_matrix = ConfusionMatrix(nc=nc) names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)} coco91class = coco80_to_coco91_class() - s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', 'mAP@.5', 'mAP@.5:.95') + s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95') p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0. 
loss = torch.zeros(3, device=device) jdict, stats, ap, ap_class, wandb_images = [], [], [], [], [] @@ -223,7 +223,7 @@ def test(data, nt = torch.zeros(1) # Print results - pf = '%20s' + '%12.3g' * 6 # print format + pf = '%20s' + '%12i' * 2 + '%12.3g' * 4 # print format print(pf % ('all', seen, nt.sum(), mp, mr, map50, map)) # Print results per class diff --git a/train.py b/train.py index 6bd65f063391..dcb89a3c199b 100644 --- a/train.py +++ b/train.py @@ -264,7 +264,7 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): if rank != -1: dataloader.sampler.set_epoch(epoch) pbar = enumerate(dataloader) - logger.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'targets', 'img_size')) + logger.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'labels', 'img_size')) if rank in [-1, 0]: pbar = tqdm(pbar, total=nb) # progress bar optimizer.zero_grad() diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 1b1cc2038c55..806d29470e55 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -120,7 +120,7 @@ def profile(x, ops, n=100, device=None): s_in = tuple(x.shape) if isinstance(x, torch.Tensor) else 'list' s_out = tuple(y.shape) if isinstance(y, torch.Tensor) else 'list' p = sum(list(x.numel() for x in m.parameters())) if isinstance(m, nn.Module) else 0 # parameters - print(f'{p:12.4g}{flops:12.4g}{dtf:16.4g}{dtb:16.4g}{str(s_in):>24s}{str(s_out):>24s}') + print(f'{p:12}{flops:12.4g}{dtf:16.4g}{dtb:16.4g}{str(s_in):>24s}{str(s_out):>24s}') def is_parallel(model): From f4197214aa3776ea2dfab0f5fdf1f36537b0b125 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 12 Mar 2021 22:08:42 -0800 Subject: [PATCH 0053/1976] Update test.py --task train val study (#2453) * Update test.py --task train val study * update argparser --task --- test.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/test.py b/test.py index 46288019a8bd..39e0992264ec 100644 --- a/test.py +++ b/test.py @@ -85,9 +85,9 @@ def test(data, if not training: if device.type != 'cpu': model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once - path = data['test'] if opt.task == 'test' else data['val'] # path to val/test images - dataloader = create_dataloader(path, imgsz, batch_size, gs, opt, pad=0.5, rect=True, - prefix=colorstr('test: ' if opt.task == 'test' else 'val: '))[0] + task = opt.task if opt.task in ('train', 'val', 'test') else 'val' # path to train/val/test images + dataloader = create_dataloader(data[task], imgsz, batch_size, gs, opt, pad=0.5, rect=True, + prefix=colorstr(f'{task}: '))[0] seen = 0 confusion_matrix = ConfusionMatrix(nc=nc) @@ -287,7 +287,7 @@ def test(data, parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)') parser.add_argument('--conf-thres', type=float, default=0.001, help='object confidence threshold') parser.add_argument('--iou-thres', type=float, default=0.6, help='IOU threshold for NMS') - parser.add_argument('--task', default='val', help="'val', 'test', 'study'") + parser.add_argument('--task', default='val', help='train, val, test, speed or study') parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset') parser.add_argument('--augment', action='store_true', help='augmented inference') @@ -305,7 +305,7 @@ def test(data, print(opt) check_requirements() - if opt.task in ['val', 'test']: # run normally + if opt.task in ('train', 'val', 'test'): # run normally test(opt.data, opt.weights, opt.batch_size, From 08d4918d7f49055158b1cceb27ea0d1990251afc Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 12 Mar 2021 22:15:41 -0800 Subject: [PATCH 0054/1976] labels.jpg class names (#2454) * labels.png class names * fontsize=10 --- train.py | 2 +- utils/plots.py | 9 +++++++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/train.py b/train.py index dcb89a3c199b..005fdf60c021 100644 --- a/train.py +++ b/train.py @@ -203,7 +203,7 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): # cf = torch.bincount(c.long(), minlength=nc) + 1. # frequency # model._initialize_biases(cf.to(device)) if plots: - plot_labels(labels, save_dir, loggers) + plot_labels(labels, names, save_dir, loggers) if tb_writer: tb_writer.add_histogram('classes', c, 0) diff --git a/utils/plots.py b/utils/plots.py index aa9a1cab81f0..47e7b7b74f1c 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -269,7 +269,7 @@ def plot_study_txt(path='', x=None): # from utils.plots import *; plot_study_tx plt.savefig(str(Path(path).name) + '.png', dpi=300) -def plot_labels(labels, save_dir=Path(''), loggers=None): +def plot_labels(labels, names=(), save_dir=Path(''), loggers=None): # plot dataset labels print('Plotting labels... ') c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes @@ -286,7 +286,12 @@ def plot_labels(labels, save_dir=Path(''), loggers=None): matplotlib.use('svg') # faster ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel() ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8) - ax[0].set_xlabel('classes') + ax[0].set_ylabel('instances') + if 0 < len(names) < 30: + ax[0].set_xticks(range(len(names))) + ax[0].set_xticklabels(names, rotation=90, fontsize=10) + else: + ax[0].set_xlabel('classes') sns.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9) sns.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9) From 747c2653eecfb870b1ed40b1e00e0ef209b036e9 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 12 Mar 2021 22:27:53 -0800 Subject: [PATCH 0055/1976] CVPR 2021 Argoverse-HD autodownload curl (#2455) curl preferred over wget for slightly better cross platform compatibility (i.e. out of the box macos compatible). 
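For illustration only, below is a rough stdlib-only Python sketch of the same download/unzip/remove pattern used by the data scripts. This helper is hypothetical and not part of the repository; it is included only to show the "use what ships out of the box" idea behind preferring curl:

import urllib.request
import zipfile
from pathlib import Path

def download_and_unzip(url, dest='../'):
    # Hypothetical helper, not in the YOLOv5 codebase: a portable equivalent of
    # the scripts' "curl -L $url$f -o $f && unzip -q $f -d $d && rm $f" one-liner.
    f = Path(url.split('/')[-1])  # archive name, e.g. Argoverse-HD-Full.zip
    urllib.request.urlretrieve(url, f)  # follows HTTP redirects, like curl -L
    with zipfile.ZipFile(f) as z:
        z.extractall(dest)  # unzip into the destination directory
    f.unlink()  # remove the downloaded archive
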
--- data/scripts/get_argoverse_hd.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/scripts/get_argoverse_hd.sh b/data/scripts/get_argoverse_hd.sh index 9e0db9fad91b..caec61efed78 100644 --- a/data/scripts/get_argoverse_hd.sh +++ b/data/scripts/get_argoverse_hd.sh @@ -12,7 +12,7 @@ d='../argoverse/' # unzip directory mkdir $d url=https://argoverse-hd.s3.us-east-2.amazonaws.com/ f=Argoverse-HD-Full.zip -wget $url$f -O $f && unzip -q $f -d $d && rm $f &# download, unzip, remove in background +curl -L $url$f -o $f && unzip -q $f -d $d && rm $f &# download, unzip, remove in background wait # finish background tasks cd ../argoverse/Argoverse-1.1/ ln -s tracking images From 569757ecc09d115e275a6ec3662514d72dfe18c2 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 13 Mar 2021 19:50:34 -0800 Subject: [PATCH 0056/1976] Add autoShape() speed profiling (#2459) * Add autoShape() speed profiling * Update common.py * Create README.md * Update hubconf.py * cleanup --- README.md | 4 ++-- hubconf.py | 8 ++++---- models/common.py | 14 +++++++++++--- 3 files changed, 17 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index b7129e80adfe..097b2750bf49 100755 --- a/README.md +++ b/README.md @@ -108,11 +108,11 @@ To run **batched inference** with YOLOv5 and [PyTorch Hub](https://github.com/ul import torch # Model -model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True) +model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # Images dir = 'https://github.com/ultralytics/yolov5/raw/master/data/images/' -imgs = [dir + f for f in ('zidane.jpg', 'bus.jpg')] # batched list of images +imgs = [dir + f for f in ('zidane.jpg', 'bus.jpg')] # batch of images # Inference results = model(imgs) diff --git a/hubconf.py b/hubconf.py index a8eb51681794..e51ac90da36c 100644 --- a/hubconf.py +++ b/hubconf.py @@ -51,7 +51,7 @@ def create(name, pretrained, channels, classes, autoshape): raise Exception(s) from e -def yolov5s(pretrained=False, channels=3, classes=80, autoshape=True): +def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True): """YOLOv5-small model from https://github.com/ultralytics/yolov5 Arguments: @@ -65,7 +65,7 @@ def yolov5s(pretrained=False, channels=3, classes=80, autoshape=True): return create('yolov5s', pretrained, channels, classes, autoshape) -def yolov5m(pretrained=False, channels=3, classes=80, autoshape=True): +def yolov5m(pretrained=True, channels=3, classes=80, autoshape=True): """YOLOv5-medium model from https://github.com/ultralytics/yolov5 Arguments: @@ -79,7 +79,7 @@ def yolov5m(pretrained=False, channels=3, classes=80, autoshape=True): return create('yolov5m', pretrained, channels, classes, autoshape) -def yolov5l(pretrained=False, channels=3, classes=80, autoshape=True): +def yolov5l(pretrained=True, channels=3, classes=80, autoshape=True): """YOLOv5-large model from https://github.com/ultralytics/yolov5 Arguments: @@ -93,7 +93,7 @@ def yolov5l(pretrained=False, channels=3, classes=80, autoshape=True): return create('yolov5l', pretrained, channels, classes, autoshape) -def yolov5x(pretrained=False, channels=3, classes=80, autoshape=True): +def yolov5x(pretrained=True, channels=3, classes=80, autoshape=True): """YOLOv5-xlarge model from https://github.com/ultralytics/yolov5 Arguments: diff --git a/models/common.py b/models/common.py index ad35f908d865..7ef5762efbf3 100644 --- a/models/common.py +++ b/models/common.py @@ -12,6 +12,7 @@ from utils.datasets import letterbox from utils.general import non_max_suppression, make_divisible, 
scale_coords, xyxy2xywh from utils.plots import color_list, plot_one_box +from utils.torch_utils import time_synchronized def autopad(k, p=None): # kernel, padding @@ -190,6 +191,7 @@ def forward(self, imgs, size=640, augment=False, profile=False): # torch: = torch.zeros(16,3,720,1280) # BCHW # multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] # list of images + t = [time_synchronized()] p = next(self.model.parameters()) # for device and type if isinstance(imgs, torch.Tensor): # torch return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference @@ -216,22 +218,25 @@ def forward(self, imgs, size=640, augment=False, profile=False): x = np.stack(x, 0) if n > 1 else x[0][None] # stack x = np.ascontiguousarray(x.transpose((0, 3, 1, 2))) # BHWC to BCHW x = torch.from_numpy(x).to(p.device).type_as(p) / 255. # uint8 to fp16/32 + t.append(time_synchronized()) # Inference with torch.no_grad(): y = self.model(x, augment, profile)[0] # forward - y = non_max_suppression(y, conf_thres=self.conf, iou_thres=self.iou, classes=self.classes) # NMS + t.append(time_synchronized()) # Post-process + y = non_max_suppression(y, conf_thres=self.conf, iou_thres=self.iou, classes=self.classes) # NMS for i in range(n): scale_coords(shape1, y[i][:, :4], shape0[i]) + t.append(time_synchronized()) - return Detections(imgs, y, files, self.names) + return Detections(imgs, y, files, t, self.names, x.shape) class Detections: # detections class for YOLOv5 inference results - def __init__(self, imgs, pred, files, names=None): + def __init__(self, imgs, pred, files, times, names=None, shape=None): super(Detections, self).__init__() d = pred[0].device # device gn = [torch.tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.], device=d) for im in imgs] # normalizations @@ -244,6 +249,8 @@ def __init__(self, imgs, pred, files, names=None): self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized self.n = len(self.pred) + self.t = ((times[i + 1] - times[i]) * 1000 / self.n for i in range(3)) # timestamps (ms) + self.s = shape # inference BCHW shape def display(self, pprint=False, show=False, save=False, render=False, save_dir=''): colors = color_list() @@ -271,6 +278,7 @@ def display(self, pprint=False, show=False, save=False, render=False, save_dir=' def print(self): self.display(pprint=True) # print results + print(f'Speed: %.1f/%.1f/%.1f ms pre-process/inference/NMS per image at shape {tuple(self.s)}' % tuple(self.t)) def show(self): self.display(show=True) # show results From f813f6dcc875901c6ba7a509c14227c2292efed4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 13 Mar 2021 20:00:03 -0800 Subject: [PATCH 0057/1976] autoShape() speed profiling update (#2460) --- models/common.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/models/common.py b/models/common.py index 7ef5762efbf3..464d639a1f0b 100644 --- a/models/common.py +++ b/models/common.py @@ -168,7 +168,6 @@ def forward(self, x): class autoShape(nn.Module): # input-robust model wrapper for passing cv2/np/PIL/torch inputs. 
Includes preprocessing, inference and NMS - img_size = 640 # inference size (pixels) conf = 0.25 # NMS confidence threshold iou = 0.45 # NMS IoU threshold classes = None # (optional list) filter by class @@ -278,7 +277,8 @@ def display(self, pprint=False, show=False, save=False, render=False, save_dir=' def print(self): self.display(pprint=True) # print results - print(f'Speed: %.1f/%.1f/%.1f ms pre-process/inference/NMS per image at shape {tuple(self.s)}' % tuple(self.t)) + print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' % + tuple(self.t)) def show(self): self.display(show=True) # show results From 20d879db36c4b5f72f4002127a9ebbdf30da11de Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 13 Mar 2021 20:05:21 -0800 Subject: [PATCH 0058/1976] Update tutorial.ipynb --- tutorial.ipynb | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index f2b03dc57f0a..5eeb78d12faa 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -605,14 +605,14 @@ "output_type": "stream", "text": [ "Namespace(agnostic_nms=False, augment=False, classes=None, conf_thres=0.25, device='', exist_ok=False, img_size=640, iou_thres=0.45, name='exp', project='runs/detect', save_conf=False, save_txt=False, source='data/images/', update=False, view_img=False, weights=['yolov5s.pt'])\n", - "YOLOv5 v4.0-21-gb26a2f6 torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16130.5MB)\n", + "YOLOv5 v4.0-132-gf813f6d torch 1.8.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", "\n", "Fusing layers... \n", "Model Summary: 224 layers, 7266973 parameters, 0 gradients, 17.0 GFLOPS\n", - "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 buss, 1 skateboards, Done. (0.011s)\n", - "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, Done. (0.011s)\n", + "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, Done. (0.008s)\n", + "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 1 tie, Done. (0.008s)\n", "Results saved to runs/detect/exp\n", - "Done. (0.110s)\n" + "Done. 
(0.087)\n" ], "name": "stdout" }, @@ -1247,4 +1247,4 @@ "outputs": [] } ] -} \ No newline at end of file +} From 6f718cee740e7cd423edd1136db78c5be49fa7c0 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 13 Mar 2021 20:20:09 -0800 Subject: [PATCH 0059/1976] Created using Colaboratory --- tutorial.ipynb | 185 +++++++++++++++++++++++++------------------------ 1 file changed, 93 insertions(+), 92 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 5eeb78d12faa..b678e4bec9c2 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -16,7 +16,7 @@ "accelerator": "GPU", "widgets": { "application/vnd.jupyter.widget-state+json": { - "1f8e9b8ebded4175b2eaa9f75c3ceb00": { + "b54ab52f1d4f4903897ab6cd49a3b9b2": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "state": { @@ -28,15 +28,15 @@ "_view_count": null, "_view_module_version": "1.5.0", "box_style": "", - "layout": "IPY_MODEL_0a1246a73077468ab80e979cc0576cd2", + "layout": "IPY_MODEL_1852f93fc2714d40adccb8aa161c42ff", "_model_module": "@jupyter-widgets/controls", "children": [ - "IPY_MODEL_d327cde5a85a4a51bb8b1b3e9cf06c97", - "IPY_MODEL_d5ef1cb2cbed4b87b3c5d292ff2b0da6" + "IPY_MODEL_3293cfe869bd4a1bbbe18b49b6815de1", + "IPY_MODEL_8d5ee8b8ab6d46b98818bd2c562ddd1c" ] } }, - "0a1246a73077468ab80e979cc0576cd2": { + "1852f93fc2714d40adccb8aa161c42ff": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -87,12 +87,12 @@ "left": null } }, - "d327cde5a85a4a51bb8b1b3e9cf06c97": { + "3293cfe869bd4a1bbbe18b49b6815de1": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "state": { "_view_name": "ProgressView", - "style": "IPY_MODEL_8d5dff8bca14435a88fa1814533acd85", + "style": "IPY_MODEL_49fcb2adb0354430b76f491af98abfe9", "_dom_classes": [], "description": "100%", "_model_name": "FloatProgressModel", @@ -107,30 +107,30 @@ "min": 0, "description_tooltip": null, "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_3d5136c19e7645ca9bc8f51ceffb2be1" + "layout": "IPY_MODEL_c7d76e0c53064363add56b8d05e561f5" } }, - "d5ef1cb2cbed4b87b3c5d292ff2b0da6": { + "8d5ee8b8ab6d46b98818bd2c562ddd1c": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "state": { "_view_name": "HTMLView", - "style": "IPY_MODEL_2919396dbd4b4c8e821d12bd28665d8a", + "style": "IPY_MODEL_48f321f789634aa584f8a29a3b925dd5", "_dom_classes": [], "description": "", "_model_name": "HTMLModel", "placeholder": "​", "_view_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", - "value": " 781M/781M [00:12<00:00, 65.5MB/s]", + "value": " 781M/781M [00:13<00:00, 62.6MB/s]", "_view_count": null, "_view_module_version": "1.5.0", "description_tooltip": null, "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_6feb16f2b2fa4021b1a271e1dd442d04" + "layout": "IPY_MODEL_6610d6275f3e49d9937d50ed0a105947" } }, - "8d5dff8bca14435a88fa1814533acd85": { + "49fcb2adb0354430b76f491af98abfe9": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "state": { @@ -145,7 +145,7 @@ "_model_module": "@jupyter-widgets/controls" } }, - "3d5136c19e7645ca9bc8f51ceffb2be1": { + "c7d76e0c53064363add56b8d05e561f5": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -196,7 +196,7 @@ "left": null } }, - "2919396dbd4b4c8e821d12bd28665d8a": { + "48f321f789634aa584f8a29a3b925dd5": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "state": { @@ -210,7 +210,7 @@ 
"_model_module": "@jupyter-widgets/controls" } }, - "6feb16f2b2fa4021b1a271e1dd442d04": { + "6610d6275f3e49d9937d50ed0a105947": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -261,7 +261,7 @@ "left": null } }, - "e6459e0bcee449b090fc9807672725bc": { + "0fffa335322b41658508e06aed0acbf0": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "state": { @@ -273,15 +273,15 @@ "_view_count": null, "_view_module_version": "1.5.0", "box_style": "", - "layout": "IPY_MODEL_c341e1d3bf3b40d1821ce392eb966c68", + "layout": "IPY_MODEL_a354c6f80ce347e5a3ef64af87c0eccb", "_model_module": "@jupyter-widgets/controls", "children": [ - "IPY_MODEL_660afee173694231a6dce3cd94df6cae", - "IPY_MODEL_261218485cef48df961519dde5edfcbe" + "IPY_MODEL_85823e71fea54c39bd11e2e972348836", + "IPY_MODEL_fb11acd663fa4e71b041d67310d045fd" ] } }, - "c341e1d3bf3b40d1821ce392eb966c68": { + "a354c6f80ce347e5a3ef64af87c0eccb": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -332,12 +332,12 @@ "left": null } }, - "660afee173694231a6dce3cd94df6cae": { + "85823e71fea54c39bd11e2e972348836": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "state": { "_view_name": "ProgressView", - "style": "IPY_MODEL_32736d503c06497abfae8c0421918255", + "style": "IPY_MODEL_8a919053b780449aae5523658ad611fa", "_dom_classes": [], "description": "100%", "_model_name": "FloatProgressModel", @@ -352,30 +352,30 @@ "min": 0, "description_tooltip": null, "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_e257738711f54d5280c8393d9d3dce1c" + "layout": "IPY_MODEL_5bae9393a58b44f7b69fb04816f94f6f" } }, - "261218485cef48df961519dde5edfcbe": { + "fb11acd663fa4e71b041d67310d045fd": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "state": { "_view_name": "HTMLView", - "style": "IPY_MODEL_beb7a6fe34b840899bb79c062681696f", + "style": "IPY_MODEL_d26c6d16c7f24030ab2da5285bf198ee", "_dom_classes": [], "description": "", "_model_name": "HTMLModel", "placeholder": "​", "_view_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", - "value": " 21.1M/21.1M [00:00<00:00, 33.5MB/s]", + "value": " 21.1M/21.1M [00:02<00:00, 9.36MB/s]", "_view_count": null, "_view_module_version": "1.5.0", "description_tooltip": null, "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_e639132395d64d70b99d8b72c32f8fbb" + "layout": "IPY_MODEL_f7767886b2364c8d9efdc79e175ad8eb" } }, - "32736d503c06497abfae8c0421918255": { + "8a919053b780449aae5523658ad611fa": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "state": { @@ -390,7 +390,7 @@ "_model_module": "@jupyter-widgets/controls" } }, - "e257738711f54d5280c8393d9d3dce1c": { + "5bae9393a58b44f7b69fb04816f94f6f": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -441,7 +441,7 @@ "left": null } }, - "beb7a6fe34b840899bb79c062681696f": { + "d26c6d16c7f24030ab2da5285bf198ee": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "state": { @@ -455,7 +455,7 @@ "_model_module": "@jupyter-widgets/controls" } }, - "e639132395d64d70b99d8b72c32f8fbb": { + "f7767886b2364c8d9efdc79e175ad8eb": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -550,7 +550,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "ae8805a9-ce15-4e1c-f6b4-baa1c1033f56" + "outputId": "20027455-bf84-41fd-c902-b7282d53c91d" }, 
"source": [ "!git clone https://github.com/ultralytics/yolov5 # clone repo\n", @@ -563,12 +563,12 @@ "clear_output()\n", "print('Setup complete. Using torch %s %s' % (torch.__version__, torch.cuda.get_device_properties(0) if torch.cuda.is_available() else 'CPU'))" ], - "execution_count": null, + "execution_count": 1, "outputs": [ { "output_type": "stream", "text": [ - "Setup complete. Using torch 1.7.0+cu101 _CudaDeviceProperties(name='Tesla V100-SXM2-16GB', major=7, minor=0, total_memory=16160MB, multi_processor_count=80)\n" + "Setup complete. Using torch 1.8.0+cu101 _CudaDeviceProperties(name='Tesla V100-SXM2-16GB', major=7, minor=0, total_memory=16160MB, multi_processor_count=80)\n" ], "name": "stdout" } @@ -672,30 +672,30 @@ "base_uri": "https://localhost:8080/", "height": 65, "referenced_widgets": [ - "1f8e9b8ebded4175b2eaa9f75c3ceb00", - "0a1246a73077468ab80e979cc0576cd2", - "d327cde5a85a4a51bb8b1b3e9cf06c97", - "d5ef1cb2cbed4b87b3c5d292ff2b0da6", - "8d5dff8bca14435a88fa1814533acd85", - "3d5136c19e7645ca9bc8f51ceffb2be1", - "2919396dbd4b4c8e821d12bd28665d8a", - "6feb16f2b2fa4021b1a271e1dd442d04" + "b54ab52f1d4f4903897ab6cd49a3b9b2", + "1852f93fc2714d40adccb8aa161c42ff", + "3293cfe869bd4a1bbbe18b49b6815de1", + "8d5ee8b8ab6d46b98818bd2c562ddd1c", + "49fcb2adb0354430b76f491af98abfe9", + "c7d76e0c53064363add56b8d05e561f5", + "48f321f789634aa584f8a29a3b925dd5", + "6610d6275f3e49d9937d50ed0a105947" ] }, - "outputId": "d6ace7c6-1be5-41ff-d607-1c716b88d298" + "outputId": "f0884441-78d9-443c-afa6-d00ec387908d" }, "source": [ "# Download COCO val2017\n", "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017val.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../ && rm tmp.zip" ], - "execution_count": null, + "execution_count": 2, "outputs": [ { "output_type": "display_data", "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "1f8e9b8ebded4175b2eaa9f75c3ceb00", + "model_id": "b54ab52f1d4f4903897ab6cd49a3b9b2", "version_minor": 0, "version_major": 2 }, @@ -723,45 +723,45 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "cc25f70c-0a11-44f6-cc44-e92c5083488c" + "outputId": "5b54c11e-9f4b-4d9a-8e6e-6a2a4f0cc60d" }, "source": [ "# Run YOLOv5x on COCO val2017\n", "!python test.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65" ], - "execution_count": null, + "execution_count": 3, "outputs": [ { "output_type": "stream", "text": [ "Namespace(augment=False, batch_size=32, conf_thres=0.001, data='./data/coco.yaml', device='', exist_ok=False, img_size=640, iou_thres=0.65, name='exp', project='runs/test', save_conf=False, save_hybrid=False, save_json=True, save_txt=False, single_cls=False, task='val', verbose=False, weights=['yolov5x.pt'])\n", - "YOLOv5 v4.0-75-gbdd88e1 torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", + "YOLOv5 v4.0-133-g20d879d torch 1.8.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", "\n", "Downloading https://github.com/ultralytics/yolov5/releases/download/v4.0/yolov5x.pt to yolov5x.pt...\n", - "100% 168M/168M [00:04<00:00, 39.7MB/s]\n", + "100% 168M/168M [00:02<00:00, 59.1MB/s]\n", "\n", "Fusing layers... \n", "Model Summary: 476 layers, 87730285 parameters, 0 gradients, 218.8 GFLOPS\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco/val2017' for images and labels... 4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:01<00:00, 2824.78it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco/val2017' for images and labels... 
4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:01<00:00, 3236.68it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: ../coco/val2017.cache\n", - " Class Images Targets P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:33<00:00, 1.68it/s]\n", - " all 5e+03 3.63e+04 0.749 0.619 0.68 0.486\n", - "Speed: 5.2/2.0/7.3 ms inference/NMS/total per 640x640 image at batch-size 32\n", + " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:20<00:00, 1.95it/s]\n", + " all 5000 36335 0.749 0.619 0.68 0.486\n", + "Speed: 5.3/1.7/6.9 ms inference/NMS/total per 640x640 image at batch-size 32\n", "\n", "Evaluating pycocotools mAP... saving runs/test/exp/yolov5x_predictions.json...\n", "loading annotations into memory...\n", - "Done (t=0.44s)\n", + "Done (t=0.43s)\n", "creating index...\n", "index created!\n", "Loading and preparing results...\n", - "DONE (t=4.47s)\n", + "DONE (t=5.10s)\n", "creating index...\n", "index created!\n", "Running per image evaluation...\n", "Evaluate annotation type *bbox*\n", - "DONE (t=94.87s).\n", + "DONE (t=88.52s).\n", "Accumulating evaluation results...\n", - "DONE (t=15.96s).\n", + "DONE (t=17.17s).\n", " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.501\n", " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.687\n", " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.544\n", @@ -836,30 +836,30 @@ "base_uri": "https://localhost:8080/", "height": 65, "referenced_widgets": [ - "e6459e0bcee449b090fc9807672725bc", - "c341e1d3bf3b40d1821ce392eb966c68", - "660afee173694231a6dce3cd94df6cae", - "261218485cef48df961519dde5edfcbe", - "32736d503c06497abfae8c0421918255", - "e257738711f54d5280c8393d9d3dce1c", - "beb7a6fe34b840899bb79c062681696f", - "e639132395d64d70b99d8b72c32f8fbb" + "0fffa335322b41658508e06aed0acbf0", + "a354c6f80ce347e5a3ef64af87c0eccb", + "85823e71fea54c39bd11e2e972348836", + "fb11acd663fa4e71b041d67310d045fd", + "8a919053b780449aae5523658ad611fa", + "5bae9393a58b44f7b69fb04816f94f6f", + "d26c6d16c7f24030ab2da5285bf198ee", + "f7767886b2364c8d9efdc79e175ad8eb" ] }, - "outputId": "e8b7d5b3-a71e-4446-eec2-ad13419cf700" + "outputId": "b41ac253-9e1b-4c26-d78b-700ea0154f43" }, "source": [ "# Download COCO128\n", "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../ && rm tmp.zip" ], - "execution_count": null, + "execution_count": 4, "outputs": [ { "output_type": "display_data", "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "e6459e0bcee449b090fc9807672725bc", + "model_id": "0fffa335322b41658508e06aed0acbf0", "version_minor": 0, "version_major": 2 }, @@ -924,27 +924,27 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "38e51b29-2df4-4f00-cde8-5f6e4a34da9e" + "outputId": "cf494627-09b9-4399-ff0c-fdb62b32340a" }, "source": [ "# Train YOLOv5s on COCO128 for 3 epochs\n", "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --nosave --cache" ], - "execution_count": null, + "execution_count": 5, "outputs": [ { "output_type": "stream", "text": [ "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 v4.0-75-gbdd88e1 torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", + "YOLOv5 v4.0-133-g20d879d torch 1.8.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", "\n", - "Namespace(adam=False, batch_size=16, bucket='', cache_images=True, cfg='', 
data='./data/coco128.yaml', device='', epochs=3, evolve=False, exist_ok=False, global_rank=-1, hyp='data/hyp.scratch.yaml', image_weights=False, img_size=[640, 640], linear_lr=False, local_rank=-1, log_artifacts=False, log_imgs=16, multi_scale=False, name='exp', noautoanchor=False, nosave=True, notest=False, project='runs/train', quad=False, rect=False, resume=False, save_dir='runs/train/exp', single_cls=False, sync_bn=False, total_batch_size=16, weights='yolov5s.pt', workers=8, world_size=1)\n", + "Namespace(adam=False, batch_size=16, bucket='', cache_images=True, cfg='', data='./data/coco128.yaml', device='', entity=None, epochs=3, evolve=False, exist_ok=False, global_rank=-1, hyp='data/hyp.scratch.yaml', image_weights=False, img_size=[640, 640], linear_lr=False, local_rank=-1, log_artifacts=False, log_imgs=16, multi_scale=False, name='exp', noautoanchor=False, nosave=True, notest=False, project='runs/train', quad=False, rect=False, resume=False, save_dir='runs/train/exp', single_cls=False, sync_bn=False, total_batch_size=16, weights='yolov5s.pt', workers=8, world_size=1)\n", "\u001b[34m\u001b[1mwandb: \u001b[0mInstall Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)\n", "Start Tensorboard with \"tensorboard --logdir runs/train\", view at http://localhost:6006/\n", - "2021-02-12 06:38:28.027271: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.10.1\n", + "2021-03-14 04:18:58.124672: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n", "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.2, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0\n", "Downloading https://github.com/ultralytics/yolov5/releases/download/v4.0/yolov5s.pt to yolov5s.pt...\n", - "100% 14.1M/14.1M [00:01<00:00, 13.2MB/s]\n", + "100% 14.1M/14.1M [00:00<00:00, 63.1MB/s]\n", "\n", "\n", " from n params module arguments \n", @@ -978,11 +978,11 @@ "Transferred 362/362 items from yolov5s.pt\n", "Scaled weight_decay = 0.0005\n", "Optimizer groups: 62 .bias, 62 conv.weight, 59 other\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '../coco128/labels/train2017' for images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 2566.00it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '../coco128/labels/train2017' for images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 2956.76it/s]\n", "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: ../coco128/labels/train2017.cache\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 175.07it/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco128/labels/train2017.cache' for images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 764773.38it/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 128.17it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 205.30it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco128/labels/train2017.cache' for images and labels... 
128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 604584.36it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 144.17it/s]\n", "Plotting labels... \n", "\n", "\u001b[34m\u001b[1mautoanchor: \u001b[0mAnalyzing anchors... anchors/target = 4.26, Best Possible Recall (BPR) = 0.9946\n", "Image sizes 640 train, 640 test\n", "Using 2 dataloader workers\n", "Logging results to runs/train/exp\n", "Starting training for 3 epochs...\n", "\n", " Epoch gpu_mem box obj cls total labels img_size\n", " 0/2 3.29G 0.04237 0.06417 0.02121 0.1277 183 640: 100% 8/8 [00:03<00:00, 2.41it/s]\n", " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:04<00:00, 1.04s/it]\n", " all 128 929 0.642 0.637 0.661 0.432\n", "\n", " Epoch gpu_mem box obj cls total labels img_size\n", " 1/2 6.65G 0.04431 0.06403 0.019 0.1273 166 640: 100% 8/8 [00:01<00:00, 5.73it/s]\n", " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:01<00:00, 3.21it/s]\n", " all 128 929 0.662 0.626 0.658 0.433\n", "\n", " Epoch gpu_mem box obj cls total labels img_size\n", " 2/2 6.65G 0.04506 0.06836 0.01913 0.1325 182 640: 100% 8/8 [00:01<00:00, 5.51it/s]\n", " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:02<00:00, 1.35it/s]\n", " all 128 929 0.658 0.625 0.661 0.433\n", "Optimizer stripped from runs/train/exp/weights/last.pt, 14.8MB\n", + "Optimizer stripped from runs/train/exp/weights/best.pt, 14.8MB\n", "3 epochs completed in 0.007 hours.\n", "\n" ], "name": "stdout" } ] }, @@ -1247,4 +1248,4 @@ "outputs": [] } ] -} +} \ No newline at end of file From 38ff499b26b9f8bf183cd1c08746dd33d000eb59 Mon Sep 17 00:00:00 2001 From: Yann Defretin Date: Mon, 15 Mar 2021 01:11:27 +0100 Subject: [PATCH 0060/1976] Update autosplit() with annotated_only option (#2466) * Be able to create dataset from annotated images only Add the ability to create a dataset/splits only from images that have an annotation file, i.e. a .txt file, associated with them. As we talked about, the absence of a txt file could mean one of two things: * either the image wasn't labelled yet by someone, * or there is no object to detect. While it's easy to create small datasets, when you have to create datasets with thousands of images (and more coming), it's hard to track where you're at, and you don't want to wait for all of them to be annotated before starting to train. That means some images would lack txt files and annotations, resulting in the label inconsistency described in #2313. By adding the annotated_only argument to the function, people can choose to create datasets/splits only from images that are known to be labelled. 
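A minimal usage sketch of the new option follows; the coco128 path and split weights are example values only, taken from the function defaults visible in the diff below:

from utils.datasets import autosplit

# keep only images that have a matching *.txt label file in the generated
# autosplit_train.txt / autosplit_val.txt / autosplit_test.txt split files
autosplit(path='../coco128', weights=(0.9, 0.1, 0.0), annotated_only=True)
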
* Cleanup and update print() Co-authored-by: Glenn Jocher --- utils/datasets.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index ed18f449ddd3..9a4b3f9fcc9f 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -1032,20 +1032,24 @@ def extract_boxes(path='../coco128/'): # from utils.datasets import *; extract_ b[[1, 3]] = np.clip(b[[1, 3]], 0, h) assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}' - -def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0)): # from utils.datasets import *; autosplit('../coco128') +def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0), annotated_only=False): """ Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files - # Arguments - path: Path to images directory - weights: Train, val, test weights (list) + Usage: from utils.datasets import *; autosplit('../coco128') + Arguments + path: Path to images directory + weights: Train, val, test weights (list) + annotated_only: Only use images with an annotated txt file """ path = Path(path) # images dir - files = list(path.rglob('*.*')) + files = sum([list(path.rglob(f"*.{img_ext}")) for img_ext in img_formats], []) # image files only n = len(files) # number of files indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split + txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files [(path / x).unlink() for x in txt if (path / x).exists()] # remove existing + + print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only) for i, img in tqdm(zip(indices, files), total=n): - if img.suffix[1:] in img_formats: + if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label with open(path / txt[i], 'a') as f: f.write(str(img) + '\n') # add image to txt file From 2d41e70e828c215a3c8486bb24ac2169084079f1 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 14 Mar 2021 21:58:12 -0700 Subject: [PATCH 0061/1976] Scipy kmeans-robust autoanchor update (#2470) Fix for https://github.com/ultralytics/yolov5/issues/2394 --- utils/autoanchor.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/utils/autoanchor.py b/utils/autoanchor.py index 5dba9f1ea22f..57777462e89f 100644 --- a/utils/autoanchor.py +++ b/utils/autoanchor.py @@ -37,17 +37,21 @@ def metric(k): # compute metric bpr = (best > 1. / thr).float().mean() # best possible recall return bpr, aat - bpr, aat = metric(m.anchor_grid.clone().cpu().view(-1, 2)) + anchors = m.anchor_grid.clone().cpu().view(-1, 2) # current anchors + bpr, aat = metric(anchors) print(f'anchors/target = {aat:.2f}, Best Possible Recall (BPR) = {bpr:.4f}', end='') if bpr < 0.98: # threshold to recompute print('. 
Attempting to improve anchors, please wait...') na = m.anchor_grid.numel() // 2 # number of anchors - new_anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False) - new_bpr = metric(new_anchors.reshape(-1, 2))[0] + try: + anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False) + except Exception as e: + print(f'{prefix}ERROR: {e}') + new_bpr = metric(anchors)[0] if new_bpr > bpr: # replace anchors - new_anchors = torch.tensor(new_anchors, device=m.anchors.device).type_as(m.anchors) - m.anchor_grid[:] = new_anchors.clone().view_as(m.anchor_grid) # for inference - m.anchors[:] = new_anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1) # loss + anchors = torch.tensor(anchors, device=m.anchors.device).type_as(m.anchors) + m.anchor_grid[:] = anchors.clone().view_as(m.anchor_grid) # for inference + m.anchors[:] = anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1) # loss check_anchor_order(m) print(f'{prefix}New anchors saved to model. Update model *.yaml to use these anchors in the future.') else: @@ -119,6 +123,7 @@ def print_results(k): print(f'{prefix}Running kmeans for {n} anchors on {len(wh)} points...') s = wh.std(0) # sigmas for whitening k, dist = kmeans(wh / s, n, iter=30) # points, mean distance + assert len(k) == n, print(f'{prefix}ERROR: scipy.cluster.vq.kmeans requested {n} points but returned only {len(k)}') k *= s wh = torch.tensor(wh, dtype=torch.float32) # filtered wh0 = torch.tensor(wh0, dtype=torch.float32) # unfiltered From 9b11f0c58b7c1f775ee32acb7dcc6a36407a779b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 14 Mar 2021 23:16:17 -0700 Subject: [PATCH 0062/1976] PyTorch Hub models default to CUDA:0 if available (#2472) * PyTorch Hub models default to CUDA:0 if available * device as string bug fix --- hubconf.py | 4 +++- utils/datasets.py | 4 ++-- utils/general.py | 2 +- utils/torch_utils.py | 6 +++--- 4 files changed, 9 insertions(+), 7 deletions(-) diff --git a/hubconf.py b/hubconf.py index e51ac90da36c..b7b740d39c06 100644 --- a/hubconf.py +++ b/hubconf.py @@ -12,6 +12,7 @@ from models.yolo import Model from utils.general import set_logging from utils.google_utils import attempt_download +from utils.torch_utils import select_device dependencies = ['torch', 'yaml'] set_logging() @@ -43,7 +44,8 @@ def create(name, pretrained, channels, classes, autoshape): model.names = ckpt['model'].names # set class names attribute if autoshape: model = model.autoshape() # for file/URI/PIL/cv2/np inputs and NMS - return model + device = select_device('0' if torch.cuda.is_available() else 'cpu') # default to GPU if available + return model.to(device) except Exception as e: help_url = 'https://github.com/ultralytics/yolov5/issues/36' diff --git a/utils/datasets.py b/utils/datasets.py index 9a4b3f9fcc9f..86d7be39bec0 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -385,7 +385,7 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r # Display cache nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupted, total if exists: - d = f"Scanning '{cache_path}' for images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted" + d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted" tqdm(None, desc=prefix + d, total=n, initial=n) # display cache results assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. 
See {help_url}' @@ -485,7 +485,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): nc += 1 print(f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}') - pbar.desc = f"{prefix}Scanning '{path.parent / path.stem}' for images and labels... " \ + pbar.desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels... " \ f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted" if nf == 0: diff --git a/utils/general.py b/utils/general.py index e1c14bdaa4b3..621df64c6cf1 100755 --- a/utils/general.py +++ b/utils/general.py @@ -79,7 +79,7 @@ def check_git_status(): f"Use 'git pull' to update or 'git clone {url}' to download latest." else: s = f'up to date with {url} ✅' - print(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) + print(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe except Exception as e: print(e) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 806d29470e55..8f3538ab152a 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -1,8 +1,8 @@ # PyTorch utils - import logging import math import os +import platform import subprocess import time from contextlib import contextmanager @@ -53,7 +53,7 @@ def git_describe(): def select_device(device='', batch_size=None): # device = 'cpu' or '0' or '0,1,2,3' - s = f'YOLOv5 {git_describe()} torch {torch.__version__} ' # string + s = f'YOLOv5 🚀 {git_describe()} torch {torch.__version__} ' # string cpu = device.lower() == 'cpu' if cpu: os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False @@ -73,7 +73,7 @@ def select_device(device='', batch_size=None): else: s += 'CPU\n' - logger.info(s) # skip a line + logger.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe return torch.device('cuda:0' if cuda else 'cpu') From ed2c74218d6d46605cc5fa68ce9bd6ece213abe4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 14 Mar 2021 23:32:39 -0700 Subject: [PATCH 0063/1976] Created using Colaboratory --- tutorial.ipynb | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index b678e4bec9c2..c710685b7e75 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -605,7 +605,7 @@ "output_type": "stream", "text": [ "Namespace(agnostic_nms=False, augment=False, classes=None, conf_thres=0.25, device='', exist_ok=False, img_size=640, iou_thres=0.45, name='exp', project='runs/detect', save_conf=False, save_txt=False, source='data/images/', update=False, view_img=False, weights=['yolov5s.pt'])\n", - "YOLOv5 v4.0-132-gf813f6d torch 1.8.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", + "YOLOv5 🚀 v4.0-137-g9b11f0c torch 1.8.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", "\n", "Fusing layers... 
\n", "Model Summary: 224 layers, 7266973 parameters, 0 gradients, 17.0 GFLOPS\n", @@ -735,7 +735,7 @@ "output_type": "stream", "text": [ "Namespace(augment=False, batch_size=32, conf_thres=0.001, data='./data/coco.yaml', device='', exist_ok=False, img_size=640, iou_thres=0.65, name='exp', project='runs/test', save_conf=False, save_hybrid=False, save_json=True, save_txt=False, single_cls=False, task='val', verbose=False, weights=['yolov5x.pt'])\n", - "YOLOv5 v4.0-133-g20d879d torch 1.8.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", + "YOLOv5 🚀 v4.0-137-g9b11f0c torch 1.8.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", "\n", "Downloading https://github.com/ultralytics/yolov5/releases/download/v4.0/yolov5x.pt to yolov5x.pt...\n", "100% 168M/168M [00:02<00:00, 59.1MB/s]\n", @@ -936,7 +936,7 @@ "output_type": "stream", "text": [ "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 v4.0-133-g20d879d torch 1.8.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", + "YOLOv5 🚀 v4.0-137-g9b11f0c torch 1.8.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", "\n", "Namespace(adam=False, batch_size=16, bucket='', cache_images=True, cfg='', data='./data/coco128.yaml', device='', entity=None, epochs=3, evolve=False, exist_ok=False, global_rank=-1, hyp='data/hyp.scratch.yaml', image_weights=False, img_size=[640, 640], linear_lr=False, local_rank=-1, log_artifacts=False, log_imgs=16, multi_scale=False, name='exp', noautoanchor=False, nosave=True, notest=False, project='runs/train', quad=False, rect=False, resume=False, save_dir='runs/train/exp', single_cls=False, sync_bn=False, total_batch_size=16, weights='yolov5s.pt', workers=8, world_size=1)\n", "\u001b[34m\u001b[1mwandb: \u001b[0mInstall Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)\n", From e8fc97aa3891f05812d7dfff90ca66d3481bda2c Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Tue, 23 Mar 2021 05:14:50 +0530 Subject: [PATCH 0064/1976] Improved W&B integration (#2125) * Init Commit * new wandb integration * Update * Use data_dict in test * Updates * Update: scope of log_img * Update: scope of log_img * Update * Update: Fix logging conditions * Add tqdm bar, support for .txt dataset format * Improve Result table Logger * Init Commit * new wandb integration * Update * Use data_dict in test * Updates * Update: scope of log_img * Update: scope of log_img * Update * Update: Fix logging conditions * Add tqdm bar, support for .txt dataset format * Improve Result table Logger * Add dataset creation in training script * Change scope: self.wandb_run * Add wandb-artifact:// natively you can now use --resume with wandb run links * Add suuport for logging dataset while training * Cleanup * Fix: Merge conflict * Fix: CI tests * Automatically use wandb config * Fix: Resume * Fix: CI * Enhance: Using val_table * More resume enhancement * FIX : CI * Add alias * Get useful opt config data * train.py cleanup * Cleanup train.py * more cleanup * Cleanup| CI fix * Reformat using PEP8 * FIX:CI * rebase * remove uneccesary changes * remove uneccesary changes * remove uneccesary changes * remove unecessary chage from test.py * FIX: resume from local checkpoint * FIX:resume * FIX:resume * Reformat * Performance improvement * Fix local resume * Fix local resume * FIX:CI * Fix: CI * Imporve image logging * (:(:Redo CI tests:):) * Remember epochs when resuming * Remember epochs when resuming * Update DDP location Potential fix for #2405 * PEP8 reformat * 0.25 confidence threshold * 
reset train.py plots syntax to previous * reset epochs completed syntax to previous * reset space to previous * remove brackets * reset comment to previous * Update: is_coco check, remove unused code * Remove redundant print statement * Remove wandb imports * remove dsviz logger from test.py * Remove redundant change from test.py * remove redundant changes from train.py * reformat and improvements * Fix typo * Add tqdm progress when scanning files, naming improvements
Co-authored-by: Glenn Jocher --- models/common.py | 2 +- test.py | 49 +++--- train.py | 116 +++++++------ utils/wandb_logging/log_dataset.py | 16 +- utils/wandb_logging/wandb_utils.py | 267 +++++++++++++++++++++-------- 5 files changed, 282 insertions(+), 168 deletions(-)
diff --git a/models/common.py b/models/common.py index 464d639a1f0b..83cc8b5ce27b 100644 --- a/models/common.py +++ b/models/common.py @@ -278,7 +278,7 @@ def display(self, pprint=False, show=False, save=False, render=False, save_dir=' def print(self): self.display(pprint=True) # print results print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' % - tuple(self.t)) + tuple(self.t)) def show(self): self.display(show=True) # show results
diff --git a/test.py b/test.py index 39e0992264ec..61d6965f7414 100644 --- a/test.py +++ b/test.py @@ -35,8 +35,9 @@ def test(data, save_hybrid=False, # for hybrid auto-labelling save_conf=False, # save auto-label confidences plots=True, - log_imgs=0, # number of logged images - compute_loss=None): + wandb_logger=None, + compute_loss=None, + is_coco=False): # Initialize/load model and set device training = model is not None if training: # called by train.py @@ -66,21 +67,19 @@ def test(data, # Configure model.eval() - is_coco = data.endswith('coco.yaml') # is COCO dataset - with open(data) as f: - data = yaml.load(f, Loader=yaml.SafeLoader) # model dict + if isinstance(data, str): + is_coco = data.endswith('coco.yaml') + with open(data) as f: + data = yaml.load(f, Loader=yaml.SafeLoader) check_dataset(data) # check nc = 1 if single_cls else int(data['nc']) # number of classes iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95 niou = iouv.numel() # Logging - log_imgs, wandb = min(log_imgs, 100), None # ceil - try: - import wandb # Weights & Biases - except ImportError: - log_imgs = 0 - + log_imgs = 0 + if wandb_logger and wandb_logger.wandb: + log_imgs = min(wandb_logger.log_imgs, 100) # Dataloader if not training: if device.type != 'cpu': @@ -147,15 +146,17 @@ def test(data, with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') - # W&B logging - if plots and len(wandb_images) < log_imgs: - box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, - "class_id": int(cls), - "box_caption": "%s %.3f" % (names[cls], conf), - "scores": {"class_score": conf}, - "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()] - boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space - wandb_images.append(wandb.Image(img[si], boxes=boxes, caption=path.name)) + # W&B logging - Media Panel Plots + if len(wandb_images) < log_imgs and wandb_logger.current_epoch > 0: # Check for test operation + if wandb_logger.current_epoch % wandb_logger.bbox_interval == 0: + box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, + "class_id": int(cls), + "box_caption": "%s %.3f" % (names[cls], conf), + "scores":
{"class_score": conf}, + "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()] + boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space + wandb_images.append(wandb_logger.wandb.Image(img[si], boxes=boxes, caption=path.name)) + wandb_logger.log_training_progress(predn, path, names) # logs dsviz tables # Append to pycocotools JSON dictionary if save_json: @@ -239,9 +240,11 @@ def test(data, # Plots if plots: confusion_matrix.plot(save_dir=save_dir, names=list(names.values())) - if wandb and wandb.run: - val_batches = [wandb.Image(str(f), caption=f.name) for f in sorted(save_dir.glob('test*.jpg'))] - wandb.log({"Images": wandb_images, "Validation": val_batches}, commit=False) + if wandb_logger and wandb_logger.wandb: + val_batches = [wandb_logger.wandb.Image(str(f), caption=f.name) for f in sorted(save_dir.glob('test*.jpg'))] + wandb_logger.log({"Validation": val_batches}) + if wandb_images: + wandb_logger.log({"Bounding Box Debugger/Images": wandb_images}) # Save JSON if save_json and len(jdict): diff --git a/train.py b/train.py index 005fdf60c021..62a72375c7a3 100644 --- a/train.py +++ b/train.py @@ -1,3 +1,4 @@ + import argparse import logging import math @@ -33,11 +34,12 @@ from utils.loss import ComputeLoss from utils.plots import plot_images, plot_labels, plot_results, plot_evolution from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, is_parallel +from utils.wandb_logging.wandb_utils import WandbLogger, resume_and_get_id, check_wandb_config_file logger = logging.getLogger(__name__) -def train(hyp, opt, device, tb_writer=None, wandb=None): +def train(hyp, opt, device, tb_writer=None): logger.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items())) save_dir, epochs, batch_size, total_batch_size, weights, rank = \ Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank @@ -61,10 +63,17 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): init_seeds(2 + rank) with open(opt.data) as f: data_dict = yaml.load(f, Loader=yaml.SafeLoader) # data dict - with torch_distributed_zero_first(rank): - check_dataset(data_dict) # check - train_path = data_dict['train'] - test_path = data_dict['val'] + is_coco = opt.data.endswith('coco.yaml') + + # Logging- Doing this before checking the dataset. 
Might update data_dict + if rank in [-1, 0]: + opt.hyp = hyp # add hyperparameters + run_id = torch.load(weights).get('wandb_id') if weights.endswith('.pt') and os.path.isfile(weights) else None + wandb_logger = WandbLogger(opt, Path(opt.save_dir).stem, run_id, data_dict) + data_dict = wandb_logger.data_dict + if wandb_logger.wandb: + weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp # WandbLogger might update weights, epochs if resuming + loggers = {'wandb': wandb_logger.wandb} # loggers dict nc = 1 if opt.single_cls else int(data_dict['nc']) # number of classes names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data) # check @@ -83,6 +92,10 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights)) # report else: model = Model(opt.cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create + with torch_distributed_zero_first(rank): + check_dataset(data_dict) # check + train_path = data_dict['train'] + test_path = data_dict['val'] # Freeze freeze = [] # parameter names to freeze (full or partial) @@ -126,16 +139,6 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) # plot_lr_scheduler(optimizer, scheduler, epochs) - # Logging - if rank in [-1, 0] and wandb and wandb.run is None: - opt.hyp = hyp # add hyperparameters - wandb_run = wandb.init(config=opt, resume="allow", - project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem, - name=save_dir.stem, - entity=opt.entity, - id=ckpt.get('wandb_id') if 'ckpt' in locals() else None) - loggers = {'wandb': wandb} # loggers dict - # EMA ema = ModelEMA(model) if rank in [-1, 0] else None @@ -326,9 +329,9 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): # if tb_writer: # tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch) # tb_writer.add_graph(model, imgs) # add model to tensorboard - elif plots and ni == 10 and wandb: - wandb.log({"Mosaics": [wandb.Image(str(x), caption=x.name) for x in save_dir.glob('train*.jpg') - if x.exists()]}, commit=False) + elif plots and ni == 10 and wandb_logger.wandb: + wandb_logger.log({"Mosaics": [wandb_logger.wandb.Image(str(x), caption=x.name) for x in + save_dir.glob('train*.jpg') if x.exists()]}) # end batch ------------------------------------------------------------------------------------------------ # end epoch ---------------------------------------------------------------------------------------------------- @@ -343,8 +346,9 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride', 'class_weights']) final_epoch = epoch + 1 == epochs if not opt.notest or final_epoch: # Calculate mAP - results, maps, times = test.test(opt.data, - batch_size=batch_size * 2, + wandb_logger.current_epoch = epoch + 1 + results, maps, times = test.test(data_dict, + batch_size=total_batch_size, imgsz=imgsz_test, model=ema.ema, single_cls=opt.single_cls, @@ -352,8 +356,9 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): save_dir=save_dir, verbose=nc < 50 and final_epoch, plots=plots and final_epoch, - log_imgs=opt.log_imgs if wandb else 0, - compute_loss=compute_loss) + wandb_logger=wandb_logger, + compute_loss=compute_loss, + is_coco=is_coco) # Write with open(results_file, 'a') 
as f: @@ -369,8 +374,8 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags): if tb_writer: tb_writer.add_scalar(tag, x, epoch) # tensorboard - if wandb: - wandb.log({tag: x}, step=epoch, commit=tag == tags[-1]) # W&B + if wandb_logger.wandb: + wandb_logger.log({tag: x}) # W&B # Update best mAP fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95] @@ -386,36 +391,29 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): 'ema': deepcopy(ema.ema).half(), 'updates': ema.updates, 'optimizer': optimizer.state_dict(), - 'wandb_id': wandb_run.id if wandb else None} + 'wandb_id': wandb_logger.wandb_run.id if wandb_logger.wandb else None} # Save last, best and delete torch.save(ckpt, last) if best_fitness == fi: torch.save(ckpt, best) + if wandb_logger.wandb: + if ((epoch + 1) % opt.save_period == 0 and not final_epoch) and opt.save_period != -1: + wandb_logger.log_model( + last.parent, opt, epoch, fi, best_model=best_fitness == fi) del ckpt - + wandb_logger.end_epoch(best_result=best_fitness == fi) + # end epoch ---------------------------------------------------------------------------------------------------- # end training - if rank in [-1, 0]: - # Strip optimizers - final = best if best.exists() else last # final model - for f in last, best: - if f.exists(): - strip_optimizer(f) - if opt.bucket: - os.system(f'gsutil cp {final} gs://{opt.bucket}/weights') # upload - # Plots if plots: plot_results(save_dir=save_dir) # save as results.png - if wandb: + if wandb_logger.wandb: files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]] - wandb.log({"Results": [wandb.Image(str(save_dir / f), caption=f) for f in files - if (save_dir / f).exists()]}) - if opt.log_artifacts: - wandb.log_artifact(artifact_or_path=str(final), type='model', name=save_dir.stem) - + wandb_logger.log({"Results": [wandb_logger.wandb.Image(str(save_dir / f), caption=f) for f in files + if (save_dir / f).exists()]}) # Test best.pt logger.info('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600)) if opt.data.endswith('coco.yaml') and nc == 80: # if COCO @@ -430,13 +428,24 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): dataloader=testloader, save_dir=save_dir, save_json=True, - plots=False) + plots=False, + is_coco=is_coco) + # Strip optimizers + final = best if best.exists() else last # final model + for f in last, best: + if f.exists(): + strip_optimizer(f) # strip optimizers + if opt.bucket: + os.system(f'gsutil cp {final} gs://{opt.bucket}/weights') # upload + if wandb_logger.wandb: # Log the stripped model + wandb_logger.wandb.log_artifact(str(final), type='model', + name='run_' + wandb_logger.wandb_run.id + '_model', + aliases=['last', 'best', 'stripped']) else: dist.destroy_process_group() - - wandb.run.finish() if wandb and wandb.run else None torch.cuda.empty_cache() + wandb_logger.finish_run() return results @@ -464,8 +473,6 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer') parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode') parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify') - parser.add_argument('--log-imgs', type=int, default=16, help='number of images for W&B logging, max 100') - 
parser.add_argument('--log-artifacts', action='store_true', help='log artifacts, i.e. final trained model') parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers') parser.add_argument('--project', default='runs/train', help='save to project/name') parser.add_argument('--entity', default=None, help='W&B entity') @@ -473,6 +480,10 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') parser.add_argument('--quad', action='store_true', help='quad dataloader') parser.add_argument('--linear-lr', action='store_true', help='linear LR') + parser.add_argument('--upload_dataset', action='store_true', help='Upload dataset as W&B artifact table') + parser.add_argument('--bbox_interval', type=int, default=-1, help='Set bounding-box image logging interval for W&B') + parser.add_argument('--save_period', type=int, default=-1, help='Log model after every "save_period" epoch') + parser.add_argument('--artifact_alias', type=str, default="latest", help='version of dataset artifact to be used') opt = parser.parse_args() # Set DDP variables @@ -484,7 +495,8 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): check_requirements() # Resume - if opt.resume: # resume an interrupted run + wandb_run = resume_and_get_id(opt) + if opt.resume and not wandb_run: # resume an interrupted run ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run() # specified or most recent path assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist' apriori = opt.global_rank, opt.local_rank @@ -517,18 +529,12 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): # Train logger.info(opt) - try: - import wandb - except ImportError: - wandb = None - prefix = colorstr('wandb: ') - logger.info(f"{prefix}Install Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)") if not opt.evolve: tb_writer = None # init loggers if opt.global_rank in [-1, 0]: logger.info(f'Start Tensorboard with "tensorboard --logdir {opt.project}", view at http://localhost:6006/') tb_writer = SummaryWriter(opt.save_dir) # Tensorboard - train(hyp, opt, device, tb_writer, wandb) + train(hyp, opt, device, tb_writer) # Evolve hyperparameters (optional) else: @@ -602,7 +608,7 @@ def train(hyp, opt, device, tb_writer=None, wandb=None): hyp[k] = round(hyp[k], 5) # significant digits # Train mutation - results = train(hyp.copy(), opt, device, wandb=wandb) + results = train(hyp.copy(), opt, device) # Write mutation results print_mutation(hyp.copy(), results, yaml_file, opt.bucket) diff --git a/utils/wandb_logging/log_dataset.py b/utils/wandb_logging/log_dataset.py index d790a9ce721e..97e68425cddd 100644 --- a/utils/wandb_logging/log_dataset.py +++ b/utils/wandb_logging/log_dataset.py @@ -12,20 +12,7 @@ def create_dataset_artifact(opt): with open(opt.data) as f: data = yaml.load(f, Loader=yaml.SafeLoader) # data dict - logger = WandbLogger(opt, '', None, data, job_type='create_dataset') - nc, names = (1, ['item']) if opt.single_cls else (int(data['nc']), data['names']) - names = {k: v for k, v in enumerate(names)} # to index dictionary - logger.log_dataset_artifact(LoadImagesAndLabels(data['train']), names, name='train') # trainset - logger.log_dataset_artifact(LoadImagesAndLabels(data['val']), names, name='val') # valset - - # Update data.yaml with artifact links - data['train'] = WANDB_ARTIFACT_PREFIX + str(Path(opt.project) / 'train') - data['val'] = 
WANDB_ARTIFACT_PREFIX + str(Path(opt.project) / 'val') - path = opt.data if opt.overwrite_config else opt.data.replace('.', '_wandb.') # updated data.yaml path - data.pop('download', None) # download via artifact instead of predefined field 'download:' - with open(path, 'w') as f: - yaml.dump(data, f) - print("New Config file => ", path) + logger = WandbLogger(opt, '', None, data, job_type='Dataset Creation') if __name__ == '__main__': @@ -33,7 +20,6 @@ def create_dataset_artifact(opt): parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path') parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset') parser.add_argument('--project', type=str, default='YOLOv5', help='name of W&B Project') - parser.add_argument('--overwrite_config', action='store_true', help='overwrite data.yaml') opt = parser.parse_args() create_dataset_artifact(opt)
diff --git a/utils/wandb_logging/wandb_utils.py b/utils/wandb_logging/wandb_utils.py index 264cd4840e3c..c9a32f5b6026 100644 --- a/utils/wandb_logging/wandb_utils.py +++ b/utils/wandb_logging/wandb_utils.py @@ -1,13 +1,18 @@ +import argparse import json +import os import shutil import sys +import torch +import yaml from datetime import datetime from pathlib import Path - -import torch +from tqdm import tqdm sys.path.append(str(Path(__file__).parent.parent.parent)) # add utils/ to path -from utils.general import colorstr, xywh2xyxy +from utils.datasets import LoadImagesAndLabels +from utils.datasets import img2label_paths +from utils.general import colorstr, xywh2xyxy, check_dataset try: import wandb @@ -22,87 +27,183 @@ def remove_prefix(from_string, prefix): return from_string[len(prefix):] +def check_wandb_config_file(data_config_file): + wandb_config = '_wandb.'.join(data_config_file.rsplit('.', 1)) # updated data.yaml path + if Path(wandb_config).is_file(): + return wandb_config + return data_config_file + + +def resume_and_get_id(opt): + # It's more elegant to stick to 1 wandb.init call, but useful config data would be overwritten in the WandbLogger's wandb.init call, so resume the run here first as a workaround + if isinstance(opt.resume, str): + if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): + run_path = Path(remove_prefix(opt.resume, WANDB_ARTIFACT_PREFIX)) + run_id = run_path.stem + project = run_path.parent.stem + model_artifact_name = WANDB_ARTIFACT_PREFIX + 'run_' + run_id + '_model' + assert wandb, 'install wandb to resume wandb runs' + # Resume wandb-artifact:// runs here| workaround for not overwriting wandb.config + run = wandb.init(id=run_id, project=project, resume='allow') + opt.resume = model_artifact_name + return run + return None + + class WandbLogger(): def __init__(self, opt, name, run_id, data_dict, job_type='Training'): - self.wandb = wandb - self.wandb_run = wandb.init(config=opt, resume="allow", - project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem, - name=name, - job_type=job_type, - id=run_id) if self.wandb else None - - if job_type == 'Training': - self.setup_training(opt, data_dict) - if opt.bbox_interval == -1: - opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else opt.epochs - if opt.save_period == -1: - opt.save_period = (opt.epochs // 10) if opt.epochs > 10 else opt.epochs + # Pre-training routine -- + self.job_type = job_type + self.wandb, self.wandb_run, self.data_dict = wandb, None if not wandb else wandb.run, data_dict + if self.wandb: + self.wandb_run = wandb.init(config=opt, + resume="allow", + project='YOLOv5' if opt.project == 'runs/train' else
Path(opt.project).stem, + name=name, + job_type=job_type, + id=run_id) if not wandb.run else wandb.run + if self.job_type == 'Training': + if not opt.resume: + wandb_data_dict = self.check_and_upload_dataset(opt) if opt.upload_dataset else data_dict + # Info useful for resuming from artifacts + self.wandb_run.config.opt = vars(opt) + self.wandb_run.config.data_dict = wandb_data_dict + self.data_dict = self.setup_training(opt, data_dict) + if self.job_type == 'Dataset Creation': + self.data_dict = self.check_and_upload_dataset(opt) + + def check_and_upload_dataset(self, opt): + assert wandb, 'Install wandb to upload dataset' + check_dataset(self.data_dict) + config_path = self.log_dataset_artifact(opt.data, + opt.single_cls, + 'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem) + print("Created dataset config file ", config_path) + with open(config_path) as f: + wandb_data_dict = yaml.load(f, Loader=yaml.SafeLoader) + return wandb_data_dict def setup_training(self, opt, data_dict): - self.log_dict = {} - self.train_artifact_path, self.trainset_artifact = \ - self.download_dataset_artifact(data_dict['train'], opt.artifact_alias) - self.test_artifact_path, self.testset_artifact = \ - self.download_dataset_artifact(data_dict['val'], opt.artifact_alias) - self.result_artifact, self.result_table, self.weights = None, None, None - if self.train_artifact_path is not None: - train_path = Path(self.train_artifact_path) / 'data/images/' - data_dict['train'] = str(train_path) - if self.test_artifact_path is not None: - test_path = Path(self.test_artifact_path) / 'data/images/' - data_dict['val'] = str(test_path) + self.log_dict, self.current_epoch, self.log_imgs = {}, 0, 16 # Logging Constants + self.bbox_interval = opt.bbox_interval + if isinstance(opt.resume, str): + modeldir, _ = self.download_model_artifact(opt) + if modeldir: + self.weights = Path(modeldir) / "last.pt" + config = self.wandb_run.config + opt.weights, opt.save_period, opt.batch_size, opt.bbox_interval, opt.epochs, opt.hyp = str( + self.weights), config.save_period, config.total_batch_size, config.bbox_interval, config.epochs, \ + config.opt['hyp'] + data_dict = dict(self.wandb_run.config.data_dict) # eliminates the need for config file to resume + if 'val_artifact' not in self.__dict__: # If --upload_dataset is set, use the existing artifact, don't download + self.train_artifact_path, self.train_artifact = self.download_dataset_artifact(data_dict.get('train'), + opt.artifact_alias) + self.val_artifact_path, self.val_artifact = self.download_dataset_artifact(data_dict.get('val'), + opt.artifact_alias) + self.result_artifact, self.result_table, self.val_table, self.weights = None, None, None, None + if self.train_artifact_path is not None: + train_path = Path(self.train_artifact_path) / 'data/images/' + data_dict['train'] = str(train_path) + if self.val_artifact_path is not None: + val_path = Path(self.val_artifact_path) / 'data/images/' + data_dict['val'] = str(val_path) + self.val_table = self.val_artifact.get("val") + self.map_val_table_path() + if self.val_artifact is not None: self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") self.result_table = wandb.Table(["epoch", "id", "prediction", "avg_confidence"]) - if opt.resume_from_artifact: - modeldir, _ = self.download_model_artifact(opt.resume_from_artifact) - if modeldir: - self.weights = Path(modeldir) / "best.pt" - opt.weights = self.weights + if opt.bbox_interval == -1: + self.bbox_interval = opt.bbox_interval = 
(opt.epochs // 10) if opt.epochs > 10 else 1 + return data_dict def download_dataset_artifact(self, path, alias): if path.startswith(WANDB_ARTIFACT_PREFIX): dataset_artifact = wandb.use_artifact(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ":" + alias) assert dataset_artifact is not None, "'Error: W&B dataset artifact doesn\'t exist'" datadir = dataset_artifact.download() - labels_zip = Path(datadir) / "data/labels.zip" - shutil.unpack_archive(labels_zip, Path(datadir) / 'data/labels', 'zip') - print("Downloaded dataset to : ", datadir) return datadir, dataset_artifact return None, None - def download_model_artifact(self, name): - model_artifact = wandb.use_artifact(name + ":latest") - assert model_artifact is not None, 'Error: W&B model artifact doesn\'t exist' - modeldir = model_artifact.download() - print("Downloaded model to : ", modeldir) - return modeldir, model_artifact + def download_model_artifact(self, opt): + if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): + model_artifact = wandb.use_artifact(remove_prefix(opt.resume, WANDB_ARTIFACT_PREFIX) + ":latest") + assert model_artifact is not None, 'Error: W&B model artifact doesn\'t exist' + modeldir = model_artifact.download() + epochs_trained = model_artifact.metadata.get('epochs_trained') + total_epochs = model_artifact.metadata.get('total_epochs') + assert epochs_trained < total_epochs, 'training to %g epochs is finished, nothing to resume.' % ( + total_epochs) + return modeldir, model_artifact + return None, None - def log_model(self, path, opt, epoch): - datetime_suffix = datetime.today().strftime('%Y-%m-%d-%H-%M-%S') + def log_model(self, path, opt, epoch, fitness_score, best_model=False): model_artifact = wandb.Artifact('run_' + wandb.run.id + '_model', type='model', metadata={ 'original_url': str(path), - 'epoch': epoch + 1, + 'epochs_trained': epoch + 1, 'save period': opt.save_period, 'project': opt.project, - 'datetime': datetime_suffix + 'total_epochs': opt.epochs, + 'fitness_score': fitness_score }) model_artifact.add_file(str(path / 'last.pt'), name='last.pt') - model_artifact.add_file(str(path / 'best.pt'), name='best.pt') - wandb.log_artifact(model_artifact) + wandb.log_artifact(model_artifact, + aliases=['latest', 'epoch ' + str(self.current_epoch), 'best' if best_model else '']) print("Saving model artifact on epoch ", epoch + 1) - def log_dataset_artifact(self, dataset, class_to_id, name='dataset'): + def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False): + with open(data_file) as f: + data = yaml.load(f, Loader=yaml.SafeLoader) # data dict + nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names']) + names = {k: v for k, v in enumerate(names)} # to index dictionary + self.train_artifact = self.create_dataset_table(LoadImagesAndLabels( + data['train']), names, name='train') if data.get('train') else None + self.val_artifact = self.create_dataset_table(LoadImagesAndLabels( + data['val']), names, name='val') if data.get('val') else None + if data.get('train'): + data['train'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'train') + if data.get('val'): + data['val'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'val') + path = data_file if overwrite_config else '_wandb.'.join(data_file.rsplit('.', 1)) # updated data.yaml path + data.pop('download', None) + with open(path, 'w') as f: + yaml.dump(data, f) + + if self.job_type == 'Training': # builds correct artifact pipeline graph + self.wandb_run.use_artifact(self.val_artifact) + 
self.wandb_run.use_artifact(self.train_artifact) + self.val_artifact.wait() + self.val_table = self.val_artifact.get('val') + self.map_val_table_path() + else: + self.wandb_run.log_artifact(self.train_artifact) + self.wandb_run.log_artifact(self.val_artifact) + return path + + def map_val_table_path(self): + self.val_table_map = {} + print("Mapping dataset") + for i, data in enumerate(tqdm(self.val_table.data)): + self.val_table_map[data[3]] = data[0] + + def create_dataset_table(self, dataset, class_to_id, name='dataset'): + # TODO: Explore multiprocessing to split this loop and run it in parallel| this is essential for speeding up the logging artifact = wandb.Artifact(name=name, type="dataset") - image_path = dataset.path - artifact.add_dir(image_path, name='data/images') - table = wandb.Table(columns=["id", "train_image", "Classes"]) + for img_file in tqdm([dataset.path]) if Path(dataset.path).is_dir() else tqdm(dataset.img_files): + if Path(img_file).is_dir(): + artifact.add_dir(img_file, name='data/images') + labels_path = 'labels'.join(dataset.path.rsplit('images', 1)) + artifact.add_dir(labels_path, name='data/labels') + else: + artifact.add_file(img_file, name='data/images/' + Path(img_file).name) + label_file = Path(img2label_paths([img_file])[0]) + artifact.add_file(str(label_file), + name='data/labels/' + label_file.name) if label_file.exists() else None + table = wandb.Table(columns=["id", "train_image", "Classes", "name"]) class_set = wandb.Classes([{'id': id, 'name': name} for id, name in class_to_id.items()]) - for si, (img, labels, paths, shapes) in enumerate(dataset): + for si, (img, labels, paths, shapes) in enumerate(tqdm(dataset)): height, width = shapes[0] - labels[:, 2:] = (xywh2xyxy(labels[:, 2:].view(-1, 4))) - labels[:, 2:] *= torch.Tensor([width, height, width, height]) - box_data = [] - img_classes = {} + labels[:, 2:] = (xywh2xyxy(labels[:, 2:].view(-1, 4))) * torch.Tensor([width, height, width, height]) + box_data, img_classes = [], {} for cls, *xyxy in labels[:, 1:].tolist(): cls = int(cls) box_data.append({"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, @@ -112,34 +213,52 @@ def log_dataset_artifact(self, dataset, class_to_id, name='dataset'): "domain": "pixel"}) img_classes[cls] = class_to_id[cls] boxes = {"ground_truth": {"box_data": box_data, "class_labels": class_to_id}} # inference-space - table.add_data(si, wandb.Image(paths, classes=class_set, boxes=boxes), json.dumps(img_classes)) + table.add_data(si, wandb.Image(paths, classes=class_set, boxes=boxes), json.dumps(img_classes), + Path(paths).name) artifact.add(table, name) - labels_path = 'labels'.join(image_path.rsplit('images', 1)) - zip_path = Path(labels_path).parent / (name + '_labels.zip') - if not zip_path.is_file(): # make_archive won't check if file exists - shutil.make_archive(zip_path.with_suffix(''), 'zip', labels_path) - artifact.add_file(str(zip_path), name='data/labels.zip') - wandb.log_artifact(artifact) - print("Saving data to W&B...") + return artifact + + def log_training_progress(self, predn, path, names): + if self.val_table and self.result_table: + class_set = wandb.Classes([{'id': id, 'name': name} for id, name in names.items()]) + box_data = [] + total_conf = 0 + for *xyxy, conf, cls in predn.tolist(): + if conf >= 0.25: + box_data.append( + {"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, + "class_id": int(cls), + "box_caption": "%s %.3f" % (names[cls], conf), + "scores": {"class_score": conf}, + "domain": "pixel"}) +
total_conf = total_conf + conf + boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space + id = self.val_table_map[Path(path).name] + self.result_table.add_data(self.current_epoch, + id, + wandb.Image(self.val_table.data[id][1], boxes=boxes, classes=class_set), + total_conf / max(1, len(box_data)) + ) def log(self, log_dict): if self.wandb_run: for key, value in log_dict.items(): self.log_dict[key] = value - def end_epoch(self): - if self.wandb_run and self.log_dict: + def end_epoch(self, best_result=False): + if self.wandb_run: wandb.log(self.log_dict) - self.log_dict = {} + self.log_dict = {} + if self.result_artifact: + train_results = wandb.JoinedTable(self.val_table, self.result_table, "id") + self.result_artifact.add(train_results, 'result') + wandb.log_artifact(self.result_artifact, aliases=['latest', 'epoch ' + str(self.current_epoch), + ('best' if best_result else '')]) + self.result_table = wandb.Table(["epoch", "id", "prediction", "avg_confidence"]) + self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") def finish_run(self): if self.wandb_run: - if self.result_artifact: - print("Add Training Progress Artifact") - self.result_artifact.add(self.result_table, 'result') - train_results = wandb.JoinedTable(self.testset_artifact.get("val"), self.result_table, "id") - self.result_artifact.add(train_results, 'joined_result') - wandb.log_artifact(self.result_artifact) if self.log_dict: wandb.log(self.log_dict) wandb.run.finish()
From 1c132a1f9426d91c18ec7eff6ab95a727344c690 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 23 Mar 2021 14:10:47 +0100 Subject: [PATCH 0065/1976] Update Detections() times=None (#2570)
Fix for results.tolist() method breaking after YOLOv5 Hub profiling PRs https://github.com/ultralytics/yolov5/pull/2460 and https://github.com/ultralytics/yolov5/pull/2459. --- models/common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/models/common.py b/models/common.py index 83cc8b5ce27b..721171393e04 100644 --- a/models/common.py +++ b/models/common.py @@ -235,7 +235,7 @@ def forward(self, imgs, size=640, augment=False, profile=False): class Detections: # detections class for YOLOv5 inference results - def __init__(self, imgs, pred, files, times, names=None, shape=None): + def __init__(self, imgs, pred, files, times=None, names=None, shape=None): super(Detections, self).__init__() d = pred[0].device # device gn = [torch.tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.], device=d) for im in imgs] # normalizations
From 0d891c601e8178e4b9665da46d630456668b1996 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 23 Mar 2021 14:25:55 +0100 Subject: [PATCH 0066/1976] check_requirements() exclude pycocotools, thop (#2571)
Exclude non-critical packages from dependency checks in detect.py. pycocotools and thop in particular are not required for inference.
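For reference, the exclude filter at the heart of this change can be sketched as below (a minimal illustration assuming the pkg_resources-based check_requirements() that appears later in this series; the helper name filter_requirements is hypothetical):

```python
import pkg_resources as pkg
from pathlib import Path

def filter_requirements(file='requirements.txt', exclude=('pycocotools', 'thop')):
    # Parse requirements.txt into 'name+specifier' strings (e.g. 'numpy>=1.18.5'),
    # dropping any package named in `exclude` so it is neither checked nor installed
    return [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(Path(file).open())
            if x.name not in exclude]
```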
Issue first raised in https://github.com/ultralytics/yolov5/issues/1944 and also raised in https://github.com/ultralytics/yolov5/discussions/2556 --- detect.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/detect.py b/detect.py index 22bf21b4c825..c843447260ba 100644 --- a/detect.py +++ b/detect.py @@ -164,7 +164,7 @@ def detect(save_img=False): parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') opt = parser.parse_args() print(opt) - check_requirements() + check_requirements(exclude=('pycocotools', 'thop')) with torch.no_grad(): if opt.update: # update all models (to fix SourceChangeWarning) From 1bf936528018c36bcbd22e9b9f76a8c61e97d2a6 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Tue, 23 Mar 2021 21:24:34 +0530 Subject: [PATCH 0067/1976] W&B DDP fix (#2574) --- train.py | 8 +++++--- utils/wandb_logging/wandb_utils.py | 5 ++++- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/train.py b/train.py index 62a72375c7a3..fd2d6745ab46 100644 --- a/train.py +++ b/train.py @@ -66,14 +66,16 @@ def train(hyp, opt, device, tb_writer=None): is_coco = opt.data.endswith('coco.yaml') # Logging- Doing this before checking the dataset. Might update data_dict + loggers = {'wandb': None} # loggers dict if rank in [-1, 0]: opt.hyp = hyp # add hyperparameters run_id = torch.load(weights).get('wandb_id') if weights.endswith('.pt') and os.path.isfile(weights) else None wandb_logger = WandbLogger(opt, Path(opt.save_dir).stem, run_id, data_dict) + loggers['wandb'] = wandb_logger.wandb data_dict = wandb_logger.data_dict if wandb_logger.wandb: weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp # WandbLogger might update weights, epochs if resuming - loggers = {'wandb': wandb_logger.wandb} # loggers dict + nc = 1 if opt.single_cls else int(data_dict['nc']) # number of classes names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data) # check @@ -381,6 +383,7 @@ def train(hyp, opt, device, tb_writer=None): fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95] if fi > best_fitness: best_fitness = fi + wandb_logger.end_epoch(best_result=best_fitness == fi) # Save model if (not opt.nosave) or (final_epoch and not opt.evolve): # if save @@ -402,7 +405,6 @@ def train(hyp, opt, device, tb_writer=None): wandb_logger.log_model( last.parent, opt, epoch, fi, best_model=best_fitness == fi) del ckpt - wandb_logger.end_epoch(best_result=best_fitness == fi) # end epoch ---------------------------------------------------------------------------------------------------- # end training @@ -442,10 +444,10 @@ def train(hyp, opt, device, tb_writer=None): wandb_logger.wandb.log_artifact(str(final), type='model', name='run_' + wandb_logger.wandb_run.id + '_model', aliases=['last', 'best', 'stripped']) + wandb_logger.finish_run() else: dist.destroy_process_group() torch.cuda.empty_cache() - wandb_logger.finish_run() return results diff --git a/utils/wandb_logging/wandb_utils.py b/utils/wandb_logging/wandb_utils.py index c9a32f5b6026..d6dd256366e0 100644 --- a/utils/wandb_logging/wandb_utils.py +++ b/utils/wandb_logging/wandb_utils.py @@ -16,9 +16,9 @@ try: import wandb + from wandb import init, finish except ImportError: wandb = None - print(f"{colorstr('wandb: ')}Install Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)") 
WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' @@ -71,6 +71,9 @@ def __init__(self, opt, name, run_id, data_dict, job_type='Training'): self.data_dict = self.setup_training(opt, data_dict) if self.job_type == 'Dataset Creation': self.data_dict = self.check_and_upload_dataset(opt) + else: + print(f"{colorstr('wandb: ')}Install Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)") + def check_and_upload_dataset(self, opt): assert wandb, 'Install wandb to upload dataset' From 2b329b0945a69431fd8bf36668307069e6e999b1 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 24 Mar 2021 01:05:59 +0100 Subject: [PATCH 0068/1976] Enhanced check_requirements() with auto-install (#2575) * Update check_requirements() with auto-install This PR builds on an idea I had to automatically install missing dependencies rather than simply report an error message. YOLOv5 should now 1) display all dependency issues and not simply display the first missing dependency, and 2) attempt to install/update each missing/VersionConflict package. * cleanup * cleanup 2 * Check requirements.txt file exists * cleanup 3 --- utils/general.py | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/utils/general.py b/utils/general.py index 621df64c6cf1..ef89ea3a0f03 100755 --- a/utils/general.py +++ b/utils/general.py @@ -1,4 +1,4 @@ -# General utils +# YOLOv5 general utils import glob import logging @@ -86,10 +86,20 @@ def check_git_status(): def check_requirements(file='requirements.txt', exclude=()): # Check installed dependencies meet requirements - import pkg_resources - requirements = [f'{x.name}{x.specifier}' for x in pkg_resources.parse_requirements(Path(file).open()) - if x.name not in exclude] - pkg_resources.require(requirements) # DistributionNotFound or VersionConflict exception if requirements not met + import pkg_resources as pkg + prefix = colorstr('red', 'bold', 'requirements:') + file = Path(file) + if not file.exists(): + print(f"{prefix} {file.resolve()} not found, check failed.") + return + + requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(file.open()) if x.name not in exclude] + for r in requirements: + try: + pkg.require(r) + except Exception as e: # DistributionNotFound or VersionConflict if requirements not met + print(f"{prefix} {e.req} not found and is required by YOLOv5, attempting auto-install...") + print(subprocess.check_output(f"pip install '{e.req}'", shell=True).decode()) def check_img_size(img_size, s=32): From e5b0200cd250759c782207160761ca9756300065 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 24 Mar 2021 01:29:00 +0100 Subject: [PATCH 0069/1976] Update tensorboard>=2.4.1 (#2576) * Update tensorboard>=2.4.1 Update tensorboard version to attempt to address https://github.com/ultralytics/yolov5/issues/2573 (tensorboard logging fail in Docker image). 
* cleanup --- requirements.txt | 2 +- train.py | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/requirements.txt b/requirements.txt index cb50cf8f32e1..51de7735d301 100755 --- a/requirements.txt +++ b/requirements.txt @@ -8,12 +8,12 @@ opencv-python>=4.1.2 Pillow PyYAML>=5.3.1 scipy>=1.4.1 -tensorboard>=2.2 torch>=1.7.0 torchvision>=0.8.1 tqdm>=4.41.0 # logging ------------------------------------- +tensorboard>=2.4.1 # wandb # plotting ------------------------------------ diff --git a/train.py b/train.py index fd2d6745ab46..b9e4eea613dc 100644 --- a/train.py +++ b/train.py @@ -1,4 +1,3 @@ - import argparse import logging import math @@ -34,7 +33,7 @@ from utils.loss import ComputeLoss from utils.plots import plot_images, plot_labels, plot_results, plot_evolution from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, is_parallel -from utils.wandb_logging.wandb_utils import WandbLogger, resume_and_get_id, check_wandb_config_file +from utils.wandb_logging.wandb_utils import WandbLogger, resume_and_get_id logger = logging.getLogger(__name__) @@ -75,7 +74,7 @@ def train(hyp, opt, device, tb_writer=None): data_dict = wandb_logger.data_dict if wandb_logger.wandb: weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp # WandbLogger might update weights, epochs if resuming - + nc = 1 if opt.single_cls else int(data_dict['nc']) # number of classes names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data) # check @@ -405,7 +404,7 @@ def train(hyp, opt, device, tb_writer=None): wandb_logger.log_model( last.parent, opt, epoch, fi, best_model=best_fitness == fi) del ckpt - + # end epoch ---------------------------------------------------------------------------------------------------- # end training if rank in [-1, 0]: @@ -534,7 +533,8 @@ def train(hyp, opt, device, tb_writer=None): if not opt.evolve: tb_writer = None # init loggers if opt.global_rank in [-1, 0]: - logger.info(f'Start Tensorboard with "tensorboard --logdir {opt.project}", view at http://localhost:6006/') + prefix = colorstr('tensorboard: ') + logger.info(f"{prefix}Start with 'tensorboard --logdir {opt.project}', view at http://localhost:6006/") tb_writer = SummaryWriter(opt.save_dir) # Tensorboard train(hyp, opt, device, tb_writer) From 2bcc89d76225a704bb9a21c926bd28ef7847d81d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 24 Mar 2021 15:42:00 +0100 Subject: [PATCH 0070/1976] YOLOv5 PyTorch Hub models >> check_requirements() (#2577) * Update hubconf.py with check_requirements() Dependency checks have been missing from YOLOv5 PyTorch Hub model loading, causing errors in some cases when users are attempting to import hub models in unsupported environments. This should examine the YOLOv5 requirements.txt file and pip install any missing or version-conflict packages encountered. This is highly experimental (!), please let us know if this creates problems in your custom workflows. 
* Update hubconf.py --- hubconf.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/hubconf.py b/hubconf.py index b7b740d39c06..4b4ae04cf332 100644 --- a/hubconf.py +++ b/hubconf.py @@ -1,8 +1,8 @@ -"""File for accessing YOLOv5 via PyTorch Hub https://pytorch.org/hub/ +"""File for accessing YOLOv5 models via PyTorch Hub https://pytorch.org/hub/ultralytics_yolov5/ Usage: import torch - model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True, channels=3, classes=80) + model = torch.hub.load('ultralytics/yolov5', 'yolov5s') """ from pathlib import Path @@ -10,11 +10,12 @@ import torch from models.yolo import Model -from utils.general import set_logging +from utils.general import check_requirements, set_logging from utils.google_utils import attempt_download from utils.torch_utils import select_device dependencies = ['torch', 'yaml'] +check_requirements(exclude=('pycocotools', 'thop')) set_logging() From 9f98201dd98651a768acefdd87856c86a031ff89 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 24 Mar 2021 15:43:32 +0100 Subject: [PATCH 0071/1976] W&B DDP fix 2 (#2587) Revert unintentional change to test batch sizes caused by PR https://github.com/ultralytics/yolov5/pull/2125 --- train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/train.py b/train.py index b9e4eea613dc..25a9accd3be0 100644 --- a/train.py +++ b/train.py @@ -349,7 +349,7 @@ def train(hyp, opt, device, tb_writer=None): if not opt.notest or final_epoch: # Calculate mAP wandb_logger.current_epoch = epoch + 1 results, maps, times = test.test(data_dict, - batch_size=total_batch_size, + batch_size=batch_size * 2, imgsz=imgsz_test, model=ema.ema, single_cls=opt.single_cls, From 8ace1b1b992433f31721c0553287cd664f2efe6b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 24 Mar 2021 16:23:54 +0100 Subject: [PATCH 0072/1976] YOLOv5 PyTorch Hub models >> check_requirements() (#2588) * YOLOv5 PyTorch Hub models >> check_requirements() Update YOLOv5 PyTorch Hub requirements.txt path to cache path. * Update hubconf.py --- hubconf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hubconf.py b/hubconf.py index 4b4ae04cf332..710882cf158f 100644 --- a/hubconf.py +++ b/hubconf.py @@ -15,7 +15,7 @@ from utils.torch_utils import select_device dependencies = ['torch', 'yaml'] -check_requirements(exclude=('pycocotools', 'thop')) +check_requirements(Path(__file__).parent / 'requirements.txt', exclude=('pycocotools', 'thop')) set_logging() From 75feeb797c4a9553f4274860c6c278f1fc628f60 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 24 Mar 2021 16:42:54 +0100 Subject: [PATCH 0073/1976] YOLOv5 PyTorch Hub models >> check_requirements() (#2591) Prints 'Please restart runtime or rerun command for update to take effect.' following package auto-install to inform users to restart/rerun. 
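Taken together with the auto-install logic added earlier in this series, the behavior can be sketched roughly as follows (a simplified illustration, not the exact utils/general.py code; the ensure_requirements name and default requirement list are hypothetical, and the real change follows in the diff below):

```python
import subprocess
import pkg_resources as pkg

def ensure_requirements(requirements=('numpy>=1.18.5', 'tqdm>=4.41.0')):
    # Verify each requirement; pip-install anything missing or version-conflicted,
    # then remind the user to restart so the freshly installed packages are imported
    updated = False
    for r in requirements:
        try:
            pkg.require(r)  # raises DistributionNotFound/VersionConflict if unmet
        except Exception as e:
            print(f'{r} not found and is required by YOLOv5, attempting auto-install...')
            subprocess.check_call(['pip', 'install', str(getattr(e, 'req', r))])
            updated = True
    if updated:
        print('Please restart runtime or rerun command for update to take effect.')
```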
--- utils/general.py | 1 + 1 file changed, 1 insertion(+)
diff --git a/utils/general.py b/utils/general.py index ef89ea3a0f03..50d60c519b04 100755 --- a/utils/general.py +++ b/utils/general.py @@ -100,6 +100,7 @@ def check_requirements(file='requirements.txt', exclude=()): except Exception as e: # DistributionNotFound or VersionConflict if requirements not met print(f"{prefix} {e.req} not found and is required by YOLOv5, attempting auto-install...") print(subprocess.check_output(f"pip install '{e.req}'", shell=True).decode()) + print(f'Please restart runtime or rerun command for update to take effect.') def check_img_size(img_size, s=32):
From 333ccc5b0f66c7d0aba096e3e2d9d1912db1e610 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 24 Mar 2021 17:51:39 +0100 Subject: [PATCH 0074/1976] YOLOv5 PyTorch Hub models >> check_requirements() (#2592)
Improved user-feedback following requirements auto-update. --- utils/general.py | 17 ++++++++++++---- 1 file changed, 14 insertions(+), 3 deletions(-)
diff --git a/utils/general.py b/utils/general.py index 50d60c519b04..284146c87e10 100755 --- a/utils/general.py +++ b/utils/general.py @@ -52,6 +52,11 @@ def isdocker(): return Path('/workspace').exists() # or Path('/.dockerenv').exists() +def emojis(str=''): + # Return platform-dependent emoji-safe version of string + return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str + + def check_online(): # Check internet connectivity import socket @@ -79,7 +84,7 @@ def check_git_status(): f"Use 'git pull' to update or 'git clone {url}' to download latest." else: s = f'up to date with {url} ✅' - print(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe + print(emojis(s)) # emoji-safe except Exception as e: print(e) @@ -93,14 +98,20 @@ def check_requirements(file='requirements.txt', exclude=()): print(f"{prefix} {file.resolve()} not found, check failed.") return + n = 0 # number of package updates requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(file.open()) if x.name not in exclude] for r in requirements: try: pkg.require(r) except Exception as e: # DistributionNotFound or VersionConflict if requirements not met - print(f"{prefix} {e.req} not found and is required by YOLOv5, attempting auto-install...") + n += 1 + print(f"{prefix} {e.req} not found and is required by YOLOv5, attempting auto-update...") print(subprocess.check_output(f"pip install '{e.req}'", shell=True).decode()) + + if n: # if packages updated + s = f"{prefix} {n} package{'s' * (n > 1)} updated per {file.resolve()}\n" \ + f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n" + print(emojis(s)) # emoji-safe + def check_img_size(img_size, s=32):
From 16206692f245e713a5beb380de6dc4bed944986c Mon Sep 17 00:00:00 2001 From: Max Kolomeychenko Date: Thu, 25 Mar 2021 02:57:34 +0300 Subject: [PATCH 0075/1976] Supervisely Ecosystem (#2519)
Guide describing the YOLOv5 apps collection in the Supervisely Ecosystem. --- README.md | 1 + 1 file changed, 1 insertion(+)
diff --git a/README.md b/README.md index 097b2750bf49..1240f83be2a5 100755 --- a/README.md +++ b/README.md @@ -50,6 +50,7 @@ $ pip install -r requirements.txt * [Train Custom Data](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data)  🚀 RECOMMENDED * [Weights & Biases Logging](https://github.com/ultralytics/yolov5/issues/1289)  🌟 NEW +* [Supervisely
Ecosystem](https://github.com/ultralytics/yolov5/issues/2518)  🌟 NEW * [Multi-GPU Training](https://github.com/ultralytics/yolov5/issues/475) * [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36)  ⭐ NEW * [ONNX and TorchScript Export](https://github.com/ultralytics/yolov5/issues/251) From ad05e37d99bd1b86f7223540ad93381b8269d75c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 25 Mar 2021 14:09:49 +0100 Subject: [PATCH 0076/1976] Save webcam results, add --nosave option (#2598) This updates the default detect.py behavior to automatically save all inference images/videos/webcams unless the new argument --nosave is used (python detect.py --nosave) or unless a list of streaming sources is passed (python detect.py --source streams.txt) --- detect.py | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/detect.py b/detect.py index c843447260ba..2a4d6f4550c8 100644 --- a/detect.py +++ b/detect.py @@ -17,6 +17,7 @@ def detect(save_img=False): source, weights, view_img, save_txt, imgsz = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size + save_img = not opt.nosave and not source.endswith('.txt') # save inference images webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith( ('rtsp://', 'rtmp://', 'http://')) @@ -49,7 +50,6 @@ def detect(save_img=False): cudnn.benchmark = True # set True to speed up constant image size inference dataset = LoadStreams(source, img_size=imgsz, stride=stride) else: - save_img = True dataset = LoadImages(source, img_size=imgsz, stride=stride) # Get names and colors @@ -124,17 +124,19 @@ def detect(save_img=False): if save_img: if dataset.mode == 'image': cv2.imwrite(save_path, im0) - else: # 'video' + else: # 'video' or 'stream' if vid_path != save_path: # new video vid_path = save_path if isinstance(vid_writer, cv2.VideoWriter): vid_writer.release() # release previous video writer - - fourcc = 'mp4v' # output video codec - fps = vid_cap.get(cv2.CAP_PROP_FPS) - w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) - h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) - vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*fourcc), fps, (w, h)) + if vid_cap: # video + fps = vid_cap.get(cv2.CAP_PROP_FPS) + w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + else: # stream + fps, w, h = 30, im0.shape[1], im0.shape[0] + save_path += '.mp4' + vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) vid_writer.write(im0) if save_txt or save_img: @@ -155,6 +157,7 @@ def detect(save_img=False): parser.add_argument('--view-img', action='store_true', help='display results') parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') + parser.add_argument('--nosave', action='store_true', help='do not save images/videos') parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3') parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') parser.add_argument('--augment', action='store_true', help='augmented inference') From d4456e43b23be03dfd5098d2a1992cd338581801 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 25 Mar 2021 15:12:49 +0100 Subject: [PATCH 0077/1976] Update segment2box() comment (#2600) --- utils/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/general.py 
b/utils/general.py index 284146c87e10..9822582cdb86 100755 --- a/utils/general.py +++ b/utils/general.py @@ -289,7 +289,7 @@ def segment2box(segment, width=640, height=640): x, y = segment.T # segment xy inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height) x, y, = x[inside], y[inside] - return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4)) # cls, xyxy + return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4)) # xyxy def segments2boxes(segments): From 3bb414890a253bb1a269fb81cc275d11c8fffa72 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 25 Mar 2021 20:55:20 +0100 Subject: [PATCH 0078/1976] resume.py typo (#2603) --- utils/aws/resume.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/aws/resume.py b/utils/aws/resume.py index 563f22be20dc..faad8d247411 100644 --- a/utils/aws/resume.py +++ b/utils/aws/resume.py @@ -1,4 +1,4 @@ -# Resume all interrupted trainings in yolov5/ dir including DPP trainings +# Resume all interrupted trainings in yolov5/ dir including DDP trainings # Usage: $ python utils/aws/resume.py import os From fca16dc4b3b877391a9e2710b52ab78b3ee59130 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 25 Mar 2021 21:48:25 +0100 Subject: [PATCH 0079/1976] Remove Cython from requirements.txt (#2604) Cython should be a dependency of the remaining packages in requirements.txt, so should be installed anyway even if not a direct requirement. --- requirements.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 51de7735d301..fd187eb56cfe 100755 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,6 @@ # pip install -r requirements.txt # base ---------------------------------------- -Cython matplotlib>=3.2.2 numpy>=1.18.5 opencv-python>=4.1.2 From 77415a42e5975ea356393c9f1d5cff0ae8acae2c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 26 Mar 2021 01:44:00 +0100 Subject: [PATCH 0080/1976] Update git_describe() for remote dir usage (#2606) --- utils/torch_utils.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 8f3538ab152a..78c42b6d0c05 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -45,9 +45,10 @@ def init_torch_seeds(seed=0): def git_describe(): # return human-readable git description, i.e. 
v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe - if Path('.git').exists(): - return subprocess.check_output('git describe --tags --long --always', shell=True).decode('utf-8')[:-1] - else: + s = f'git -C {Path(__file__).resolve().parent} describe --tags --long --always' + try: + return subprocess.check_output(s, shell=True).decode()[:-1] + except subprocess.CalledProcessError as e: return '' From 196bf10603a1c5257852106e8f6b44011ad0256b Mon Sep 17 00:00:00 2001 From: maxupp Date: Fri, 26 Mar 2021 12:45:22 +0100 Subject: [PATCH 0081/1976] Add '*.mpo' to supported image formats (#2615) Co-authored-by: Max Uppenkamp --- utils/datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/datasets.py b/utils/datasets.py index 86d7be39bec0..dfe1dcc52971 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -26,7 +26,7 @@ # Parameters help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' -img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp'] # acceptable image suffixes +img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo'] # acceptable image suffixes vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes logger = logging.getLogger(__name__) From 0ff5aeca6152f25b7239ff3ca72b50a56a86390b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 26 Mar 2021 12:52:47 +0100 Subject: [PATCH 0082/1976] Create date_modified() (#2616) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Updated device selection string with fallback for non-git directories. ```python def select_device(device='', batch_size=None): # device = 'cpu' or '0' or '0,1,2,3' s = f'YOLOv5 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string ... ``` --- utils/torch_utils.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 78c42b6d0c05..0499da49782e 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -1,4 +1,6 @@ -# PyTorch utils +# YOLOv5 PyTorch utils + +import datetime import logging import math import os @@ -43,9 +45,15 @@ def init_torch_seeds(seed=0): cudnn.benchmark, cudnn.deterministic = True, False -def git_describe(): +def date_modified(path=__file__): + # return human-readable file modification date, i.e. '2021-3-26' + t = datetime.datetime.fromtimestamp(Path(path).stat().st_mtime) + return f'{t.year}-{t.month}-{t.day}' + + +def git_describe(path=Path(__file__).parent): # path must be a directory # return human-readable git description, i.e. 
v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe - s = f'git -C {Path(__file__).resolve().parent} describe --tags --long --always' + s = f'git -C {path} describe --tags --long --always' try: return subprocess.check_output(s, shell=True).decode()[:-1] except subprocess.CalledProcessError as e: @@ -54,7 +62,7 @@ def git_describe(): def select_device(device='', batch_size=None): # device = 'cpu' or '0' or '0,1,2,3' - s = f'YOLOv5 🚀 {git_describe()} torch {torch.__version__} ' # string + s = f'YOLOv5 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string cpu = device.lower() == 'cpu' if cpu: os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False From a57f23d18b8e76658dd4d4f1445ac4c05a52fae7 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 26 Mar 2021 13:23:11 +0100 Subject: [PATCH 0083/1976] Update detections() self.t = tuple() (#2617) * Update detections() self.t = tuple() Fix multiple results.print() bug. * Update experimental.py * Update yolo.py --- models/common.py | 7 +++---- models/experimental.py | 2 +- models/yolo.py | 2 ++ 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/models/common.py b/models/common.py index 721171393e04..c6b9cda32e29 100644 --- a/models/common.py +++ b/models/common.py @@ -1,4 +1,4 @@ -# This file contains modules common to various models +# YOLOv5 common modules import math from pathlib import Path @@ -248,7 +248,7 @@ def __init__(self, imgs, pred, files, times=None, names=None, shape=None): self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized self.n = len(self.pred) - self.t = ((times[i + 1] - times[i]) * 1000 / self.n for i in range(3)) # timestamps (ms) + self.t = tuple((times[i + 1] - times[i]) * 1000 / self.n for i in range(3)) # timestamps (ms) self.s = shape # inference BCHW shape def display(self, pprint=False, show=False, save=False, render=False, save_dir=''): @@ -277,8 +277,7 @@ def display(self, pprint=False, show=False, save=False, render=False, save_dir=' def print(self): self.display(pprint=True) # print results - print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' % - tuple(self.t)) + print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' % self.t) def show(self): self.display(show=True) # show results diff --git a/models/experimental.py b/models/experimental.py index d79052314f9b..548353c93be0 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -1,4 +1,4 @@ -# This file contains experimental modules +# YOLOv5 experimental modules import numpy as np import torch diff --git a/models/yolo.py b/models/yolo.py index a047fef397ee..e5c676dae558 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -1,3 +1,5 @@ +# YOLOv5 YOLO-specific modules + import argparse import logging import sys From 8f6e447729e34a46fdbe9552fcfac705b82deac5 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Sat, 27 Mar 2021 01:17:11 +0530 Subject: [PATCH 0084/1976] Fix Indentation in test.py (#2614) * Fix Indentation in test.py * CI fix * Comply with PEP8: 80 characters per line --- test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test.py b/test.py index 61d6965f7414..c0af91120e60 100644 --- a/test.py +++ b/test.py @@ -156,7 +156,7 @@ def test(data, "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()] boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # 
inference-space wandb_images.append(wandb_logger.wandb.Image(img[si], boxes=boxes, caption=path.name)) - wandb_logger.log_training_progress(predn, path, names) # logs dsviz tables + wandb_logger.log_training_progress(predn, path, names) if wandb_logger and wandb_logger.wandb_run else None # Append to pycocotools JSON dictionary if save_json: From 005d7a8c54a39d89bf2b9dc03fba82a489cd0628 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 26 Mar 2021 21:19:15 +0100 Subject: [PATCH 0085/1976] Update Detections() self.n comment (#2620) ```python self.n = len(self.pred) # number of images (batch size) ``` --- models/common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/common.py b/models/common.py index c6b9cda32e29..21a2ed5a2ca7 100644 --- a/models/common.py +++ b/models/common.py @@ -247,7 +247,7 @@ def __init__(self, imgs, pred, files, times=None, names=None, shape=None): self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized - self.n = len(self.pred) + self.n = len(self.pred) # number of images (batch size) self.t = tuple((times[i + 1] - times[i]) * 1000 / self.n for i in range(3)) # timestamps (ms) self.s = shape # inference BCHW shape From 2dfe32030ad5f73d08275b93c0baa089bd513cf3 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 27 Mar 2021 18:31:53 +0100 Subject: [PATCH 0086/1976] Remove conflicting nvidia-tensorboard package (#2622) Attempt to resolve tensorboard Docker error in https://github.com/ultralytics/yolov5/issues/2573 --- Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/Dockerfile b/Dockerfile index e1b40c2d15c6..a3d870cafba3 100644 --- a/Dockerfile +++ b/Dockerfile @@ -7,6 +7,7 @@ RUN apt update && apt install -y zip htop screen libgl1-mesa-glx # Install python dependencies COPY requirements.txt . RUN python -m pip install --upgrade pip +RUN pip uninstall -y nvidia-tensorboard nvidia-tensorboard-plugin-dlprof RUN pip install --no-cache -r requirements.txt gsutil notebook # Create working directory From 9b92d3ee769e8f26f2d535879dc69708998c47a3 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 27 Mar 2021 18:35:53 +0100 Subject: [PATCH 0087/1976] FROM nvcr.io/nvidia/pytorch:21.03-py3 (#2623) Update Docker FROM nvcr.io/nvidia/pytorch:21.03-py3 --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index a3d870cafba3..c0484e5b9c1c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # Start FROM Nvidia PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch -FROM nvcr.io/nvidia/pytorch:21.02-py3 +FROM nvcr.io/nvidia/pytorch:21.03-py3 # Install linux packages RUN apt update && apt install -y zip htop screen libgl1-mesa-glx From 6e8c5b767866ecebe08dc1b673537348394680f3 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 28 Mar 2021 15:39:31 +0200 Subject: [PATCH 0088/1976] Improve git_describe() (#2633) Catch 'fatal: not a git repository' returns and return '' instead (observed in GCP Hub checks). --- utils/torch_utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 0499da49782e..dfab83d5374a 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -55,7 +55,8 @@ def git_describe(path=Path(__file__).parent): # path must be a directory # return human-readable git description, i.e. 
v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe s = f'git -C {path} describe --tags --long --always' try: - return subprocess.check_output(s, shell=True).decode()[:-1] + r = subprocess.check_output(s, shell=True).decode()[:-1] + return '' if r.startswith('fatal: not a git repository') else r except subprocess.CalledProcessError as e: return '' From dc51e80b005c0e63c794ae20c712e5db7eb0ba90 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Sun, 28 Mar 2021 19:39:35 +0530 Subject: [PATCH 0089/1976] Fix: evolve with wandb (#2634) --- train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/train.py b/train.py index 25a9accd3be0..211cc04fb63b 100644 --- a/train.py +++ b/train.py @@ -439,7 +439,7 @@ def train(hyp, opt, device, tb_writer=None): strip_optimizer(f) # strip optimizers if opt.bucket: os.system(f'gsutil cp {final} gs://{opt.bucket}/weights') # upload - if wandb_logger.wandb: # Log the stripped model + if wandb_logger.wandb and not opt.evolve: # Log the stripped model wandb_logger.wandb.log_artifact(str(final), type='model', name='run_' + wandb_logger.wandb_run.id + '_model', aliases=['last', 'best', 'stripped']) From 518c09578e90d71c798cd1f0bb3274959376539c Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Sun, 28 Mar 2021 19:41:36 +0530 Subject: [PATCH 0090/1976] W&B resume ddp from run link fix (#2579) * W&B resume ddp from run link fix * Native DDP W&B support for training, resuming --- train.py | 4 +- utils/wandb_logging/wandb_utils.py | 66 +++++++++++++++++++++++------- 2 files changed, 54 insertions(+), 16 deletions(-) diff --git a/train.py b/train.py index 211cc04fb63b..d5b2d1b75c52 100644 --- a/train.py +++ b/train.py @@ -33,7 +33,7 @@ from utils.loss import ComputeLoss from utils.plots import plot_images, plot_labels, plot_results, plot_evolution from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, is_parallel -from utils.wandb_logging.wandb_utils import WandbLogger, resume_and_get_id +from utils.wandb_logging.wandb_utils import WandbLogger, check_wandb_resume logger = logging.getLogger(__name__) @@ -496,7 +496,7 @@ def train(hyp, opt, device, tb_writer=None): check_requirements() # Resume - wandb_run = resume_and_get_id(opt) + wandb_run = check_wandb_resume(opt) if opt.resume and not wandb_run: # resume an interrupted run ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run() # specified or most recent path assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist' diff --git a/utils/wandb_logging/wandb_utils.py b/utils/wandb_logging/wandb_utils.py index d6dd256366e0..17132874e0d0 100644 --- a/utils/wandb_logging/wandb_utils.py +++ b/utils/wandb_logging/wandb_utils.py @@ -23,7 +23,7 @@ WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' -def remove_prefix(from_string, prefix): +def remove_prefix(from_string, prefix=WANDB_ARTIFACT_PREFIX): return from_string[len(prefix):] @@ -33,35 +33,73 @@ def check_wandb_config_file(data_config_file): return wandb_config return data_config_file +def get_run_info(run_path): + run_path = Path(remove_prefix(run_path, WANDB_ARTIFACT_PREFIX)) + run_id = run_path.stem + project = run_path.parent.stem + model_artifact_name = 'run_' + run_id + '_model' + return run_id, project, model_artifact_name -def resume_and_get_id(opt): - # It's more elegant to stick to 1 wandb.init call, but as useful config data is overwritten in the WandbLogger's wandb.init call +def check_wandb_resume(opt): + process_wandb_config_ddp_mode(opt) if opt.global_rank not in 
[-1, 0] else None if isinstance(opt.resume, str): if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): - run_path = Path(remove_prefix(opt.resume, WANDB_ARTIFACT_PREFIX)) - run_id = run_path.stem - project = run_path.parent.stem - model_artifact_name = WANDB_ARTIFACT_PREFIX + 'run_' + run_id + '_model' - assert wandb, 'install wandb to resume wandb runs' - # Resume wandb-artifact:// runs here| workaround for not overwriting wandb.config - run = wandb.init(id=run_id, project=project, resume='allow') - opt.resume = model_artifact_name - return run + if opt.global_rank not in [-1, 0]: # For resuming DDP runs + run_id, project, model_artifact_name = get_run_info(opt.resume) + api = wandb.Api() + artifact = api.artifact(project + '/' + model_artifact_name + ':latest') + modeldir = artifact.download() + opt.weights = str(Path(modeldir) / "last.pt") + return True return None +def process_wandb_config_ddp_mode(opt): + with open(opt.data) as f: + data_dict = yaml.load(f, Loader=yaml.SafeLoader) # data dict + train_dir, val_dir = None, None + if data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX): + api = wandb.Api() + train_artifact = api.artifact(remove_prefix(data_dict['train']) + ':' + opt.artifact_alias) + train_dir = train_artifact.download() + train_path = Path(train_dir) / 'data/images/' + data_dict['train'] = str(train_path) + + if data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX): + api = wandb.Api() + val_artifact = api.artifact(remove_prefix(data_dict['val']) + ':' + opt.artifact_alias) + val_dir = val_artifact.download() + val_path = Path(val_dir) / 'data/images/' + data_dict['val'] = str(val_path) + if train_dir or val_dir: + ddp_data_path = str(Path(val_dir) / 'wandb_local_data.yaml') + with open(ddp_data_path, 'w') as f: + yaml.dump(data_dict, f) + opt.data = ddp_data_path + + class WandbLogger(): def __init__(self, opt, name, run_id, data_dict, job_type='Training'): # Pre-training routine -- self.job_type = job_type self.wandb, self.wandb_run, self.data_dict = wandb, None if not wandb else wandb.run, data_dict - if self.wandb: + # It's more elegant to stick to 1 wandb.init call, but useful config data is overwritten in the WandbLogger's wandb.init call + if isinstance(opt.resume, str): # checks resume from artifact + if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): + run_id, project, model_artifact_name = get_run_info(opt.resume) + model_artifact_name = WANDB_ARTIFACT_PREFIX + model_artifact_name + assert wandb, 'install wandb to resume wandb runs' + # Resume wandb-artifact:// runs here| workaround for not overwriting wandb.config + self.wandb_run = wandb.init(id=run_id, project=project, resume='allow') + opt.resume = model_artifact_name + elif self.wandb: self.wandb_run = wandb.init(config=opt, resume="allow", project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem, name=name, job_type=job_type, - id=run_id) if not wandb.run else wandb.run + id=run_id) if not wandb.run else wandb.run + if self.wandb_run: if self.job_type == 'Training': if not opt.resume: wandb_data_dict = self.check_and_upload_dataset(opt) if opt.upload_dataset else data_dict From 2e95cf3d794fe8b04dadea63d8cab523b959d853 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 28 Mar 2021 17:09:06 +0200 Subject: [PATCH 0091/1976] Improve git_describe() fix 1 (#2635) Add stderr=subprocess.STDOUT to catch error messages. 
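For reference, a minimal sketch of the pattern (function name and test path are illustrative, not from the repo): merging stderr into the captured output keeps git's 'fatal: ...' text off the console, and the non-zero exit still raises CalledProcessError, which is turned into an empty string.

```python
import subprocess

def git_describe_safe(path='.'):
    # stderr=subprocess.STDOUT folds git error messages into the captured output
    cmd = f'git -C {path} describe --tags --long --always'
    try:
        return subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT).decode()[:-1]
    except subprocess.CalledProcessError as e:
        return ''  # e.output carries the error text, e.g. b'fatal: not a git repository ...'

print(git_describe_safe('/tmp'))  # '' outside a git repository
```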
--- utils/torch_utils.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index dfab83d5374a..d6da0cae8945 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -55,10 +55,9 @@ def git_describe(path=Path(__file__).parent): # path must be a directory # return human-readable git description, i.e. v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe s = f'git -C {path} describe --tags --long --always' try: - r = subprocess.check_output(s, shell=True).decode()[:-1] - return '' if r.startswith('fatal: not a git repository') else r + return subprocess.check_output(s, shell=True, stderr=subprocess.STDOUT).decode()[:-1] except subprocess.CalledProcessError as e: - return '' + return '' # not a git repository def select_device(device='', batch_size=None): From ee169834bd0edf4e03b555688053da7bdd05a71e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 28 Mar 2021 17:22:00 +0200 Subject: [PATCH 0092/1976] PyTorch Hub custom model to CUDA device fix (#2636) Fix for #2630 raised by @Pro100rus32 --- hubconf.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/hubconf.py b/hubconf.py index 710882cf158f..0eaf70787e64 100644 --- a/hubconf.py +++ b/hubconf.py @@ -128,7 +128,10 @@ def custom(path_or_model='path/to/model.pt', autoshape=True): hub_model = Model(model.yaml).to(next(model.parameters()).device) # create hub_model.load_state_dict(model.float().state_dict()) # load state_dict hub_model.names = model.names # class names - return hub_model.autoshape() if autoshape else hub_model + if autoshape: + hub_model = hub_model.autoshape() # for file/URI/PIL/cv2/np inputs and NMS + device = select_device('0' if torch.cuda.is_available() else 'cpu') # default to GPU if available + return hub_model.to(device) if __name__ == '__main__': From 2bf34f50fda2d5997f301364f9a0b196fa57117b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 28 Mar 2021 20:23:40 +0200 Subject: [PATCH 0093/1976] PyTorch Hub amp.autocast() inference (#2641) I think this should help speed up CUDA inference, as currently models may be running in FP32 inference mode on CUDA devices unnecesarily. --- models/common.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/models/common.py b/models/common.py index 21a2ed5a2ca7..5c0e571b752f 100644 --- a/models/common.py +++ b/models/common.py @@ -8,6 +8,7 @@ import torch import torch.nn as nn from PIL import Image +from torch.cuda import amp from utils.datasets import letterbox from utils.general import non_max_suppression, make_divisible, scale_coords, xyxy2xywh @@ -219,17 +220,17 @@ def forward(self, imgs, size=640, augment=False, profile=False): x = torch.from_numpy(x).to(p.device).type_as(p) / 255. 
# uint8 to fp16/32 t.append(time_synchronized()) - # Inference - with torch.no_grad(): + with torch.no_grad(), amp.autocast(enabled=p.device.type != 'cpu'): + # Inference y = self.model(x, augment, profile)[0] # forward - t.append(time_synchronized()) + t.append(time_synchronized()) - # Post-process - y = non_max_suppression(y, conf_thres=self.conf, iou_thres=self.iou, classes=self.classes) # NMS - for i in range(n): - scale_coords(shape1, y[i][:, :4], shape0[i]) - t.append(time_synchronized()) + # Post-process + y = non_max_suppression(y, conf_thres=self.conf, iou_thres=self.iou, classes=self.classes) # NMS + for i in range(n): + scale_coords(shape1, y[i][:, :4], shape0[i]) + t.append(time_synchronized()) return Detections(imgs, y, files, t, self.names, x.shape) From 1e8ab3f5f2048b91f8f5e8ec0b15fe855853eebc Mon Sep 17 00:00:00 2001 From: zzttqu <80448114+zzttqu@users.noreply.github.com> Date: Mon, 29 Mar 2021 05:21:25 -0500 Subject: [PATCH 0094/1976] Add tqdm pbar.close() (#2644) When using tqdm, sometimes it can't print in one line and roll to next line. --- utils/datasets.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index dfe1dcc52971..5ef89ab6ea83 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -443,7 +443,8 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # img, hw_original, hw_resized = load_image(self, i) gb += self.imgs[i].nbytes pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB)' - + pbar.close() + def cache_labels(self, path=Path('./labels.cache'), prefix=''): # Cache dataset labels, check images and read shapes x = {} # dict @@ -487,7 +488,8 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): pbar.desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels... " \ f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted" - + pbar.close() + if nf == 0: print(f'{prefix}WARNING: No labels found in {path}. See {help_url}') From 866bc7d640b04913943820c45636e7c2da6d8245 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 29 Mar 2021 15:19:07 +0200 Subject: [PATCH 0095/1976] Speed profiling improvements (#2648) * Speed profiling improvements * Update torch_utils.py deepcopy() required to avoid adding elements to model. 
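One concrete case is FLOPs estimation with the `thop` profiler (which `torch_utils.model_info` tries to import): profiling registers `total_ops`/`total_params` buffers on every module it visits, so measuring a deepcopy leaves the original model untouched. A minimal sketch, assuming `thop` is installed:

```python
from copy import deepcopy

import torch
import torch.nn as nn
from thop import profile  # pip install thop

model = nn.Conv2d(3, 16, 3)  # stand-in module
img = torch.zeros(1, 3, 64, 64)
flops, params = profile(deepcopy(model), inputs=(img,), verbose=False)  # measure the copy
assert not hasattr(model, 'total_ops')  # the original gained no profiling buffers
```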
* Update torch_utils.py --- hubconf.py | 7 ++++--- utils/torch_utils.py | 2 +- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/hubconf.py b/hubconf.py index 0eaf70787e64..1e6b9c78ac6a 100644 --- a/hubconf.py +++ b/hubconf.py @@ -38,9 +38,10 @@ def create(name, pretrained, channels, classes, autoshape): fname = f'{name}.pt' # checkpoint filename attempt_download(fname) # download if not found locally ckpt = torch.load(fname, map_location=torch.device('cpu')) # load - state_dict = ckpt['model'].float().state_dict() # to FP32 - state_dict = {k: v for k, v in state_dict.items() if model.state_dict()[k].shape == v.shape} # filter - model.load_state_dict(state_dict, strict=False) # load + msd = model.state_dict() # model state_dict + csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 + csd = {k: v for k, v in csd.items() if msd[k].shape == v.shape} # filter + model.load_state_dict(csd, strict=False) # load if len(ckpt['model'].names) == classes: model.names = ckpt['model'].names # set class names attribute if autoshape: diff --git a/utils/torch_utils.py b/utils/torch_utils.py index d6da0cae8945..9991e5ec87d8 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -191,7 +191,7 @@ def fuse_conv_and_bn(conv, bn): # prepare filters w_conv = conv.weight.clone().view(conv.out_channels, -1) w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var))) - fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.size())) + fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape)) # prepare spatial bias b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias From 1b100cd53e3344cf9d95d29e3de1e5a6a9c0f1a3 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 29 Mar 2021 16:43:37 +0200 Subject: [PATCH 0096/1976] Created using Colaboratory (#2649) --- tutorial.ipynb | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index c710685b7e75..9d8f08d5fcf5 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -563,7 +563,7 @@ "clear_output()\n", "print('Setup complete. 
Using torch %s %s' % (torch.__version__, torch.cuda.get_device_properties(0) if torch.cuda.is_available() else 'CPU'))" ], - "execution_count": 1, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -689,7 +689,7 @@ "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017val.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../ && rm tmp.zip" ], - "execution_count": 2, + "execution_count": null, "outputs": [ { "output_type": "display_data", @@ -729,7 +729,7 @@ "# Run YOLOv5x on COCO val2017\n", "!python test.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65" ], - "execution_count": 3, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -798,9 +798,9 @@ "source": [ "# Download COCO test-dev2017\n", "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017labels.zip', 'tmp.zip')\n", - "!unzip -q tmp.zip -d ../ && rm tmp.zip # unzip labels\n", + "!unzip -q tmp.zip -d ../ && rm tmp.zip # unzip labels\n", "!f=\"test2017.zip\" && curl http://images.cocodataset.org/zips/$f -o $f && unzip -q $f && rm $f # 7GB, 41k images\n", - "%mv ./test2017 ./coco/images && mv ./coco ../ # move images to /coco and move /coco next to /yolov5" + "%mv ./test2017 ../coco/images # move to /coco" ], "execution_count": null, "outputs": [] @@ -853,7 +853,7 @@ "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../ && rm tmp.zip" ], - "execution_count": 4, + "execution_count": null, "outputs": [ { "output_type": "display_data", @@ -930,7 +930,7 @@ "# Train YOLOv5s on COCO128 for 3 epochs\n", "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --nosave --cache" ], - "execution_count": 5, + "execution_count": null, "outputs": [ { "output_type": "stream", From 7cdc5165a1f8f67d46458c0229ef42379140d7fe Mon Sep 17 00:00:00 2001 From: Youngjin Shin Date: Tue, 30 Mar 2021 00:05:52 +0900 Subject: [PATCH 0097/1976] Update requirements.txt (#2564) * Add opencv-contrib-python to requirements.txt * Update requirements.txt Co-authored-by: Glenn Jocher From fd1679975bf55325f606631b28d5d3feb47fbda5 Mon Sep 17 00:00:00 2001 From: Benjamin Fineran Date: Mon, 29 Mar 2021 11:15:26 -0400 Subject: [PATCH 0098/1976] add option to disable half precision in test.py (#2507) Co-authored-by: Glenn Jocher --- test.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test.py b/test.py index c0af91120e60..d099699bcad8 100644 --- a/test.py +++ b/test.py @@ -37,6 +37,7 @@ def test(data, plots=True, wandb_logger=None, compute_loss=None, + half_precision=True, is_coco=False): # Initialize/load model and set device training = model is not None @@ -61,7 +62,7 @@ def test(data, # model = nn.DataParallel(model) # Half - half = device.type != 'cpu' # half precision only supported on CUDA + half = device.type != 'cpu' and half_precision # half precision only supported on CUDA if half: model.half() From 9c803f2f7e4f3759e8121c9c02fc0d0b4a7b04b1 Mon Sep 17 00:00:00 2001 From: Phat Tran <36766404+ptran1203@users.noreply.github.com> Date: Mon, 29 Mar 2021 23:45:46 +0700 Subject: [PATCH 0099/1976] Add --label-smoothing eps argument to train.py (default 0.0) (#2344) * Add label smoothing option * Correct data type * add_log * Remove log * Add log * Update loss.py remove comment (too versbose) Co-authored-by: phattran Co-authored-by: Glenn Jocher --- train.py | 2 ++ utils/loss.py | 2 +- 2 
files changed, 3 insertions(+), 1 deletion(-) diff --git a/train.py b/train.py index d5b2d1b75c52..d55833bf45a3 100644 --- a/train.py +++ b/train.py @@ -224,6 +224,7 @@ def train(hyp, opt, device, tb_writer=None): hyp['box'] *= 3. / nl # scale to layers hyp['cls'] *= nc / 80. * 3. / nl # scale to classes and layers hyp['obj'] *= (imgsz / 640) ** 2 * 3. / nl # scale to image size and layers + hyp['label_smoothing'] = opt.label_smoothing model.nc = nc # attach number of classes to model model.hyp = hyp # attach hyperparameters to model model.gr = 1.0 # iou loss ratio (obj_loss = 1.0 or iou) @@ -481,6 +482,7 @@ def train(hyp, opt, device, tb_writer=None): parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') parser.add_argument('--quad', action='store_true', help='quad dataloader') parser.add_argument('--linear-lr', action='store_true', help='linear LR') + parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon') parser.add_argument('--upload_dataset', action='store_true', help='Upload dataset as W&B artifact table') parser.add_argument('--bbox_interval', type=int, default=-1, help='Set bounding-box image logging interval for W&B') parser.add_argument('--save_period', type=int, default=-1, help='Log model after every "save_period" epoch') diff --git a/utils/loss.py b/utils/loss.py index 2302d18de87d..9e78df17fdf3 100644 --- a/utils/loss.py +++ b/utils/loss.py @@ -97,7 +97,7 @@ def __init__(self, model, autobalance=False): BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device)) # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 - self.cp, self.cn = smooth_BCE(eps=0.0) + self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets # Focal loss g = h['fl_gamma'] # focal loss gamma From 1b475c1797fdf116e363bc54593a8f1289aeae22 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 30 Mar 2021 20:07:18 +0200 Subject: [PATCH 0100/1976] Created using Colaboratory --- tutorial.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 9d8f08d5fcf5..d11f6822d94c 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -787,7 +787,7 @@ }, "source": [ "## COCO test-dev2017\n", - "Download [COCO test2017](https://github.com/ultralytics/yolov5/blob/74b34872fdf41941cddcf243951cdb090fbac17b/data/coco.yaml#L15) dataset (7GB - 40,000 images), to test model accuracy on test-dev set (20,000 images). Results are saved to a `*.json` file which can be submitted to the evaluation server at https://competitions.codalab.org/competitions/20794." + "Download [COCO test2017](https://github.com/ultralytics/yolov5/blob/74b34872fdf41941cddcf243951cdb090fbac17b/data/coco.yaml#L15) dataset (7GB - 40,000 images), to test model accuracy on test-dev set (**20,000 images, no labels**). Results are saved to a `*.json` file which should be **zipped** and submitted to the evaluation server at https://competitions.codalab.org/competitions/20794." 
] }, { From 2a28ef374be61653ce3f68fd414efe03292356d7 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Wed, 31 Mar 2021 17:17:54 +0530 Subject: [PATCH 0101/1976] Set resume flag to false (#2657) --- utils/wandb_logging/log_dataset.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/wandb_logging/log_dataset.py b/utils/wandb_logging/log_dataset.py index 97e68425cddd..0ccb8735bd42 100644 --- a/utils/wandb_logging/log_dataset.py +++ b/utils/wandb_logging/log_dataset.py @@ -21,5 +21,6 @@ def create_dataset_artifact(opt): parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset') parser.add_argument('--project', type=str, default='YOLOv5', help='name of W&B Project') opt = parser.parse_args() - + opt.resume = False # Explicitly disallow resume check for dataset upload Job + create_dataset_artifact(opt) From 51cc0962b5688e0769592f2ba646d35eda957da8 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 1 Apr 2021 11:16:56 +0200 Subject: [PATCH 0102/1976] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 1240f83be2a5..c708f058fc93 100755 --- a/README.md +++ b/README.md @@ -8,7 +8,7 @@ This repository represents Ultralytics open-source research into future object d ** GPU Speed measures end-to-end time per image averaged over 5000 COCO val2017 images using a V100 GPU with batch size 32, and includes image preprocessing, PyTorch FP16 inference, postprocessing and NMS. EfficientDet data from [google/automl](https://github.com/google/automl) at batch size 8. -- **January 5, 2021**: [v4.0 release](https://github.com/ultralytics/yolov5/releases/tag/v4.0): nn.SiLU() activations, [Weights & Biases](https://wandb.ai/) logging, [PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5/) integration. +- **January 5, 2021**: [v4.0 release](https://github.com/ultralytics/yolov5/releases/tag/v4.0): nn.SiLU() activations, [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme) logging, [PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5/) integration. - **August 13, 2020**: [v3.0 release](https://github.com/ultralytics/yolov5/releases/tag/v3.0): nn.Hardswish() activations, data autodownload, native AMP. - **July 23, 2020**: [v2.0 release](https://github.com/ultralytics/yolov5/releases/tag/v2.0): improved model definition, training and mAP. - **June 22, 2020**: [PANet](https://arxiv.org/abs/1803.01534) updates: new heads, reduced parameters, improved speed and mAP [364fcfd](https://github.com/ultralytics/yolov5/commit/364fcfd7dba53f46edd4f04c037a039c0a287972). From 877b826e3af3a0c7fc9da49cff47d57b5993064d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 1 Apr 2021 11:31:27 +0200 Subject: [PATCH 0103/1976] Created using Colaboratory --- tutorial.ipynb | 27 +++++++++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index d11f6822d94c..8a191609b24d 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -1031,9 +1031,9 @@ "source": [ "## Weights & Biases Logging 🌟 NEW\n", "\n", - "[Weights & Biases](https://www.wandb.com/) (W&B) is now integrated with YOLOv5 for real-time visualization and cloud logging of training runs. This allows for better run comparison and introspection, as well improved visibility and collaboration for teams. To enable W&B `pip install wandb`, and then train normally (you will be guided through setup on first use). 
\n", + "[Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_notebook) (W&B) is now integrated with YOLOv5 for real-time visualization and cloud logging of training runs. This allows for better run comparison and introspection, as well improved visibility and collaboration for teams. To enable W&B `pip install wandb`, and then train normally (you will be guided through setup on first use). \n", "\n", - "During training you will see live updates at [https://wandb.ai/home](https://wandb.ai/home), and you can create and share detailed [Reports](https://wandb.ai/glenn-jocher/yolov5_tutorial/reports/YOLOv5-COCO128-Tutorial-Results--VmlldzozMDI5OTY) of your results. For more information see the [YOLOv5 Weights & Biases Tutorial](https://github.com/ultralytics/yolov5/issues/1289). \n", + "During training you will see live updates at [https://wandb.ai/home](https://wandb.ai/home?utm_campaign=repo_yolo_notebook), and you can create and share detailed [Reports](https://wandb.ai/glenn-jocher/yolov5_tutorial/reports/YOLOv5-COCO128-Tutorial-Results--VmlldzozMDI5OTY) of your results. For more information see the [YOLOv5 Weights & Biases Tutorial](https://github.com/ultralytics/yolov5/issues/1289). \n", "\n", "" ] @@ -1177,6 +1177,29 @@ "execution_count": null, "outputs": [] }, + { + "cell_type": "code", + "metadata": { + "id": "GMusP4OAxFu6" + }, + "source": [ + "# PyTorch Hub\n", + "import torch\n", + "\n", + "# Model\n", + "model = torch.hub.load('ultralytics/yolov5', 'yolov5s')\n", + "\n", + "# Images\n", + "dir = 'https://github.com/ultralytics/yolov5/raw/master/data/images/'\n", + "imgs = [dir + f for f in ('zidane.jpg', 'bus.jpg')] # batch of images\n", + "\n", + "# Inference\n", + "results = model(imgs)\n", + "results.print() # or .show(), .save()" + ], + "execution_count": null, + "outputs": [] + }, { "cell_type": "code", "metadata": { From b8b862993d8e0a267f7d96eb94307f3f0f7dce51 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 1 Apr 2021 15:01:00 +0200 Subject: [PATCH 0104/1976] Update README with Tips for Best Results tutorial (#2682) * Update README with Tips for Best Results tutorial * Update README.md --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index c708f058fc93..6e3f38761543 100755 --- a/README.md +++ b/README.md @@ -49,6 +49,7 @@ $ pip install -r requirements.txt ## Tutorials * [Train Custom Data](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data)  🚀 RECOMMENDED +* [Tips for Best Training Results](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)  ☘️ RECOMMENDED * [Weights & Biases Logging](https://github.com/ultralytics/yolov5/issues/1289)  🌟 NEW * [Supervisely Ecosystem](https://github.com/ultralytics/yolov5/issues/2518)  🌟 NEW * [Multi-GPU Training](https://github.com/ultralytics/yolov5/issues/475) From 1148e2ea63f498645df5c742fb8078ef6317b46f Mon Sep 17 00:00:00 2001 From: Ding Yiwei <16083536+dingyiwei@users.noreply.github.com> Date: Thu, 1 Apr 2021 23:26:53 +0800 Subject: [PATCH 0105/1976] Add TransformerLayer, TransformerBlock, C3TR modules (#2333) * yolotr * transformer block * Remove bias in Transformer * Remove C3T * Remove a deprecated class * put the 2nd LayerNorm into the 2nd residual block * move example model to models/hub, rename to -transformer * Add module comments and TODOs * Remove LN in Transformer * Add comments for Transformer * Solve the problem of MA with DDP * cleanup * cleanup find_unused_parameters * PEP8 reformat Co-authored-by: DingYiwei <846414640@qq.com> 
Co-authored-by: Glenn Jocher --- models/common.py | 54 +++++++++++++++++++++++++++++ models/hub/yolov5s-transformer.yaml | 48 +++++++++++++++++++++++++ models/yolo.py | 4 +-- train.py | 4 ++- 4 files changed, 107 insertions(+), 3 deletions(-) create mode 100644 models/hub/yolov5s-transformer.yaml diff --git a/models/common.py b/models/common.py index 5c0e571b752f..a25172dcfcac 100644 --- a/models/common.py +++ b/models/common.py @@ -43,6 +43,52 @@ def fuseforward(self, x): return self.act(self.conv(x)) +class TransformerLayer(nn.Module): + # Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance) + def __init__(self, c, num_heads): + super().__init__() + self.q = nn.Linear(c, c, bias=False) + self.k = nn.Linear(c, c, bias=False) + self.v = nn.Linear(c, c, bias=False) + self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads) + self.fc1 = nn.Linear(c, c, bias=False) + self.fc2 = nn.Linear(c, c, bias=False) + + def forward(self, x): + x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x + x = self.fc2(self.fc1(x)) + x + return x + + +class TransformerBlock(nn.Module): + # Vision Transformer https://arxiv.org/abs/2010.11929 + def __init__(self, c1, c2, num_heads, num_layers): + super().__init__() + self.conv = None + if c1 != c2: + self.conv = Conv(c1, c2) + self.linear = nn.Linear(c2, c2) # learnable position embedding + self.tr = nn.Sequential(*[TransformerLayer(c2, num_heads) for _ in range(num_layers)]) + self.c2 = c2 + + def forward(self, x): + if self.conv is not None: + x = self.conv(x) + b, _, w, h = x.shape + p = x.flatten(2) + p = p.unsqueeze(0) + p = p.transpose(0, 3) + p = p.squeeze(3) + e = self.linear(p) + x = p + e + + x = self.tr(x) + x = x.unsqueeze(3) + x = x.transpose(0, 3) + x = x.reshape(b, self.c2, w, h) + return x + + class Bottleneck(nn.Module): # Standard bottleneck def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion @@ -90,6 +136,14 @@ def forward(self, x): return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), dim=1)) +class C3TR(C3): + # C3 module with TransformerBlock() + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2 * e) + self.m = TransformerBlock(c_, c_, 4, n) + + class SPP(nn.Module): # Spatial pyramid pooling layer used in YOLOv3-SPP def __init__(self, c1, c2, k=(5, 9, 13)): diff --git a/models/hub/yolov5s-transformer.yaml b/models/hub/yolov5s-transformer.yaml new file mode 100644 index 000000000000..f2d666722b30 --- /dev/null +++ b/models/hub/yolov5s-transformer.yaml @@ -0,0 +1,48 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 0.33 # model depth multiple +width_multiple: 0.50 # layer channel multiple + +# anchors +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 1, SPP, [1024, [5, 9, 13]]], + [-1, 3, C3TR, [1024, False]], # 9 <-------- C3TR() Transformer module + ] + +# YOLOv5 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 
1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/models/yolo.py b/models/yolo.py index e5c676dae558..f730a1efa3b3 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -215,13 +215,13 @@ def parse_model(d, ch): # model_dict, input_channels(3) n = max(round(n * gd), 1) if n > 1 else n # depth gain if m in [Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP, - C3]: + C3, C3TR]: c1, c2 = ch[f], args[0] if c2 != no: # if not output c2 = make_divisible(c2 * gw, 8) args = [c1, c2, *args[1:]] - if m in [BottleneckCSP, C3]: + if m in [BottleneckCSP, C3, C3TR]: args.insert(2, n) # number of repeats n = 1 elif m is nn.BatchNorm2d: diff --git a/train.py b/train.py index d55833bf45a3..1f2b467e732b 100644 --- a/train.py +++ b/train.py @@ -218,7 +218,9 @@ def train(hyp, opt, device, tb_writer=None): # DDP mode if cuda and rank != -1: - model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank) + model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank, + # nn.MultiheadAttention incompatibility with DDP https://github.com/pytorch/pytorch/issues/26698 + find_unused_parameters=any(isinstance(layer, nn.MultiheadAttention) for layer in model.modules())) # Model parameters hyp['box'] *= 3. / nl # scale to layers From 514ebcdf3395b1977f2663f206d6d3c93afac235 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Fri, 2 Apr 2021 15:24:50 +0530 Subject: [PATCH 0106/1976] Fix: #2674 (#2683) * Set resume flag to false * Check existance of val dataset --- utils/wandb_logging/wandb_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/wandb_logging/wandb_utils.py b/utils/wandb_logging/wandb_utils.py index 17132874e0d0..86038e199dc8 100644 --- a/utils/wandb_logging/wandb_utils.py +++ b/utils/wandb_logging/wandb_utils.py @@ -158,7 +158,7 @@ def setup_training(self, opt, data_dict): return data_dict def download_dataset_artifact(self, path, alias): - if path.startswith(WANDB_ARTIFACT_PREFIX): + if path and path.startswith(WANDB_ARTIFACT_PREFIX): dataset_artifact = wandb.use_artifact(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ":" + alias) assert dataset_artifact is not None, "'Error: W&B dataset artifact doesn\'t exist'" datadir = dataset_artifact.download() From 2af059c0d85f89813254a644443fb074033e3629 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 2 Apr 2021 11:55:10 +0200 Subject: [PATCH 0107/1976] PyTorch Hub model.save() increment as runs/hub/exp (#2684) * PyTorch Hub model.save() increment as runs/hub/exp This chane will align PyTorch Hub results saving with the existing unified results saving directory structure of runs/ /train /detect /test /hub /exp /exp2 ... 
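The incrementing itself comes from `increment_path()` in utils/general.py; a simplified sketch of its behavior (the real helper takes a couple more options):

```python
import re
from glob import glob
from pathlib import Path

def increment_path(path, exist_ok=False):
    # 'runs/hub/exp' -> 'runs/hub/exp' if unused, else 'runs/hub/exp2', 'runs/hub/exp3', ...
    path = Path(path)
    if exist_ok or not path.exists():
        return str(path)
    matches = (re.search(rf'{path.stem}(\d+)$', p) for p in glob(f'{path}*'))
    nums = [int(m.group(1)) for m in matches if m]
    return f'{path}{max(nums) + 1 if nums else 2}'
```

Passing `exist_ok=save_dir != 'runs/hub/exp'` means only the default directory auto-increments; a user-supplied save_dir is reused in place.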
* cleanup --- models/common.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/models/common.py b/models/common.py index a25172dcfcac..9970fbc8e2d9 100644 --- a/models/common.py +++ b/models/common.py @@ -11,7 +11,7 @@ from torch.cuda import amp from utils.datasets import letterbox -from utils.general import non_max_suppression, make_divisible, scale_coords, xyxy2xywh +from utils.general import non_max_suppression, make_divisible, scale_coords, increment_path, xyxy2xywh from utils.plots import color_list, plot_one_box from utils.torch_utils import time_synchronized @@ -324,9 +324,9 @@ def display(self, pprint=False, show=False, save=False, render=False, save_dir=' if show: img.show(self.files[i]) # show if save: - f = Path(save_dir) / self.files[i] - img.save(f) # save - print(f"{'Saving' * (i == 0)} {f},", end='' if i < self.n - 1 else ' done.\n') + f = self.files[i] + img.save(Path(save_dir) / f) # save + print(f"{'Saved' * (i == 0)} {f}", end=',' if i < self.n - 1 else f' to {save_dir}\n') if render: self.imgs[i] = np.asarray(img) @@ -337,8 +337,9 @@ def print(self): def show(self): self.display(show=True) # show results - def save(self, save_dir='results/'): - Path(save_dir).mkdir(exist_ok=True) + def save(self, save_dir='runs/hub/exp'): + save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/hub/exp') # increment save_dir + Path(save_dir).mkdir(parents=True, exist_ok=True) self.display(save=True, save_dir=save_dir) # save results def render(self): From 17300a4c7b6f2dfe3e30eb9a4feb0bd21f697856 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 2 Apr 2021 12:36:38 +0200 Subject: [PATCH 0108/1976] autoShape forward im = np.asarray(im) # to numpy (#2689) Slight speedup. --- models/common.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/models/common.py b/models/common.py index 9970fbc8e2d9..713297c14433 100644 --- a/models/common.py +++ b/models/common.py @@ -258,7 +258,8 @@ def forward(self, imgs, size=640, augment=False, profile=False): im, f = Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im), im # open im.filename = f # for uri files.append(Path(im.filename).with_suffix('.jpg').name if isinstance(im, Image.Image) else f'image{i}.jpg') - im = np.array(im) # to numpy + if not isinstance(im, np.ndarray): + im = np.asarray(im) # to numpy if im.shape[0] < 5: # image in CHW im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1) im = im[:, :, :3] if im.ndim == 3 else np.tile(im[:, :, None], 3) # enforce 3ch input From 9ccfa85249a2409d311bdf2e817f99377e135091 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 2 Apr 2021 13:00:46 +0200 Subject: [PATCH 0109/1976] pip install coremltools onnx (#2690) Requested in https://github.com/ultralytics/yolov5/issues/2686 --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index c0484e5b9c1c..b47e5bbff194 100644 --- a/Dockerfile +++ b/Dockerfile @@ -8,7 +8,7 @@ RUN apt update && apt install -y zip htop screen libgl1-mesa-glx COPY requirements.txt . 
RUN python -m pip install --upgrade pip RUN pip uninstall -y nvidia-tensorboard nvidia-tensorboard-plugin-dlprof -RUN pip install --no-cache -r requirements.txt gsutil notebook +RUN pip install --no-cache -r requirements.txt coremltools onnx gsutil notebook # Create working directory RUN mkdir -p /usr/src/app From 74276d51894497ea6193fd4e09435453ed2df6ca Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 5 Apr 2021 22:20:09 +0200 Subject: [PATCH 0110/1976] Updated filename attributes for YOLOv5 Hub results (#2708) Proposed fix for 'Model predict with forward will fail if PIL image does not have filename attribute' #2702 --- models/common.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/models/common.py b/models/common.py index 713297c14433..f6da7ad3113b 100644 --- a/models/common.py +++ b/models/common.py @@ -254,12 +254,12 @@ def forward(self, imgs, size=640, augment=False, profile=False): n, imgs = (len(imgs), imgs) if isinstance(imgs, list) else (1, [imgs]) # number of images, list of images shape0, shape1, files = [], [], [] # image and inference shapes, filenames for i, im in enumerate(imgs): + f = f'image{i}' # filename if isinstance(im, str): # filename or uri - im, f = Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im), im # open - im.filename = f # for uri - files.append(Path(im.filename).with_suffix('.jpg').name if isinstance(im, Image.Image) else f'image{i}.jpg') - if not isinstance(im, np.ndarray): - im = np.asarray(im) # to numpy + im, f = np.asarray(Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im)), im + elif isinstance(im, Image.Image): # PIL Image + im, f = np.asarray(im), getattr(im, 'filename', f) + files.append(Path(f).with_suffix('.jpg').name) if im.shape[0] < 5: # image in CHW im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1) im = im[:, :, :3] if im.ndim == 3 else np.tile(im[:, :, None], 3) # enforce 3ch input From ec8979f1d2f99b6873c2eafe05ec5bc2febad468 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 6 Apr 2021 13:18:56 +0200 Subject: [PATCH 0111/1976] Updated filename attributes for YOLOv5 Hub BytesIO (#2718) Fix 2 for 'Model predict with forward will fail if PIL image does not have filename attribute' #2702 --- models/common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/common.py b/models/common.py index f6da7ad3113b..4fd1a8159c64 100644 --- a/models/common.py +++ b/models/common.py @@ -258,7 +258,7 @@ def forward(self, imgs, size=640, augment=False, profile=False): if isinstance(im, str): # filename or uri im, f = np.asarray(Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im)), im elif isinstance(im, Image.Image): # PIL Image - im, f = np.asarray(im), getattr(im, 'filename', f) + im, f = np.asarray(im), getattr(im, 'filename', f) or f files.append(Path(f).with_suffix('.jpg').name) if im.shape[0] < 5: # image in CHW im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1) From 3067429307873bb85361076a810a8eb1b9405fda Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Tue, 6 Apr 2021 20:27:13 +0530 Subject: [PATCH 0112/1976] Add support for list-of-directory data format for wandb (#2719) --- utils/wandb_logging/wandb_utils.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/utils/wandb_logging/wandb_utils.py b/utils/wandb_logging/wandb_utils.py index 86038e199dc8..d407e6cd54fb 100644 --- a/utils/wandb_logging/wandb_utils.py +++ 
b/utils/wandb_logging/wandb_utils.py @@ -57,14 +57,14 @@ def process_wandb_config_ddp_mode(opt): with open(opt.data) as f: data_dict = yaml.load(f, Loader=yaml.SafeLoader) # data dict train_dir, val_dir = None, None - if data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX): + if isinstance(data_dict['train'], str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX): api = wandb.Api() train_artifact = api.artifact(remove_prefix(data_dict['train']) + ':' + opt.artifact_alias) train_dir = train_artifact.download() train_path = Path(train_dir) / 'data/images/' data_dict['train'] = str(train_path) - if data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX): + if isinstance(data_dict['val'], str) and data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX): api = wandb.Api() val_artifact = api.artifact(remove_prefix(data_dict['val']) + ':' + opt.artifact_alias) val_dir = val_artifact.download() @@ -158,7 +158,7 @@ def setup_training(self, opt, data_dict): return data_dict def download_dataset_artifact(self, path, alias): - if path and path.startswith(WANDB_ARTIFACT_PREFIX): + if isinstance(path, str) and path.startswith(WANDB_ARTIFACT_PREFIX): dataset_artifact = wandb.use_artifact(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ":" + alias) assert dataset_artifact is not None, "'Error: W&B dataset artifact doesn\'t exist'" datadir = dataset_artifact.download() @@ -229,7 +229,9 @@ def map_val_table_path(self): def create_dataset_table(self, dataset, class_to_id, name='dataset'): # TODO: Explore multiprocessing to slpit this loop parallely| This is essential for speeding up the the logging artifact = wandb.Artifact(name=name, type="dataset") - for img_file in tqdm([dataset.path]) if Path(dataset.path).is_dir() else tqdm(dataset.img_files): + img_files = tqdm([dataset.path]) if isinstance(dataset.path, str) and Path(dataset.path).is_dir() else None + img_files = tqdm(dataset.img_files) if not img_files else img_files + for img_file in img_files: if Path(img_file).is_dir(): artifact.add_dir(img_file, name='data/images') labels_path = 'labels'.join(dataset.path.rsplit('images', 1)) From c8c8da60792e37e2941fc27ee4d0594fcdcee34a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 6 Apr 2021 17:54:47 +0200 Subject: [PATCH 0113/1976] Update README with collapsable notes (#2721) * Update README with collapsable notes. * cleanup * center table --- README.md | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 6e3f38761543..f51ccd97712f 100755 --- a/README.md +++ b/README.md @@ -6,7 +6,13 @@ This repository represents Ultralytics open-source research into future object detection methods, and incorporates lessons learned and best practices evolved over thousands of hours of training and evolution on anonymized client datasets. **All code and models are under active development, and are subject to modification or deletion without notice.** Use at your own risk. -** GPU Speed measures end-to-end time per image averaged over 5000 COCO val2017 images using a V100 GPU with batch size 32, and includes image preprocessing, PyTorch FP16 inference, postprocessing and NMS. EfficientDet data from [google/automl](https://github.com/google/automl) at batch size 8. +
+<details>
+  <summary>Figure Notes (click to expand)</summary>
+
+  * GPU Speed measures end-to-end time per image averaged over 5000 COCO val2017 images using a V100 GPU with batch size 32, and includes image preprocessing, PyTorch FP16 inference, postprocessing and NMS.
+  * EfficientDet data from [google/automl](https://github.com/google/automl) at batch size 8.
+</details>
- **January 5, 2021**: [v4.0 release](https://github.com/ultralytics/yolov5/releases/tag/v4.0): nn.SiLU() activations, [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme) logging, [PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5/) integration. - **August 13, 2020**: [v3.0 release](https://github.com/ultralytics/yolov5/releases/tag/v3.0): nn.Hardswish() activations, data autodownload, native AMP. @@ -31,11 +37,15 @@ This repository represents Ultralytics open-source research into future object d | [YOLOv5l6](https://github.com/ultralytics/yolov5/releases) |1280 |53.0 |53.0 |70.8 |12.3ms |81 ||77.2M |117.7 ---> -** APtest denotes COCO [test-dev2017](http://cocodataset.org/#upload) server results, all other AP results denote val2017 accuracy. -** All AP numbers are for single-model single-scale without ensemble or TTA. **Reproduce mAP** by `python test.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` -** SpeedGPU averaged over 5000 COCO val2017 images using a GCP [n1-standard-16](https://cloud.google.com/compute/docs/machine-types#n1_standard_machine_types) V100 instance, and includes image preprocessing, FP16 inference, postprocessing and NMS. NMS is 1-2ms/img. **Reproduce speed** by `python test.py --data coco.yaml --img 640 --conf 0.25 --iou 0.45` -** All checkpoints are trained to 300 epochs with default settings and hyperparameters (no autoaugmentation). -** Test Time Augmentation ([TTA](https://github.com/ultralytics/yolov5/issues/303)) runs at 3 image sizes. **Reproduce TTA** by `python test.py --data coco.yaml --img 832 --iou 0.65 --augment` +
+<details>
+  <summary>Table Notes (click to expand)</summary>
+
+  * APtest denotes COCO [test-dev2017](http://cocodataset.org/#upload) server results, all other AP results denote val2017 accuracy.
+  * AP values are for single-model single-scale unless otherwise noted. **Reproduce mAP** by `python test.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65`
+  * SpeedGPU averaged over 5000 COCO val2017 images using a GCP [n1-standard-16](https://cloud.google.com/compute/docs/machine-types#n1_standard_machine_types) V100 instance, and includes FP16 inference, postprocessing and NMS. **Reproduce speed** by `python test.py --data coco.yaml --img 640 --conf 0.25 --iou 0.45`
+  * All checkpoints are trained to 300 epochs with default settings and hyperparameters (no autoaugmentation).
+  * Test Time Augmentation ([TTA](https://github.com/ultralytics/yolov5/issues/303)) includes reflection and scale augmentation. **Reproduce TTA** by `python test.py --data coco.yaml --img 832 --iou 0.65 --augment`
+</details>
## Requirements From c03d590320ea875a9ce5288c077a9ce5c7a1c160 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 7 Apr 2021 16:28:07 +0200 Subject: [PATCH 0114/1976] Add Hub results.pandas() method (#2725) * Add Hub results.pandas() method New method converts results from torch tensors to pandas DataFrames with column names. This PR may partially resolve issue https://github.com/ultralytics/yolov5/issues/2703 ```python results = model(imgs) print(results.pandas().xyxy[0]) xmin ymin xmax ymax confidence class name 0 57.068970 391.770599 241.383545 905.797852 0.868964 0 person 1 667.661255 399.303589 810.000000 881.396667 0.851888 0 person 2 222.878387 414.774231 343.804474 857.825073 0.838376 0 person 3 4.205386 234.447678 803.739136 750.023376 0.658006 5 bus 4 0.000000 550.596008 76.681190 878.669922 0.450596 0 person ``` * Update comments torch example input now shown resized to size=640 and also now a multiple of P6 stride 64 (see https://github.com/ultralytics/yolov5/issues/2722#issuecomment-814785930) * apply decorators * PEP8 * Update common.py * pd.options.display.max_columns = 10 * Update common.py --- hubconf.py | 2 +- models/common.py | 46 +++++++++++++++++++++++++++++----------------- utils/general.py | 2 ++ 3 files changed, 32 insertions(+), 18 deletions(-) diff --git a/hubconf.py b/hubconf.py index 1e6b9c78ac6a..0f9aa150a34e 100644 --- a/hubconf.py +++ b/hubconf.py @@ -38,7 +38,7 @@ def create(name, pretrained, channels, classes, autoshape): fname = f'{name}.pt' # checkpoint filename attempt_download(fname) # download if not found locally ckpt = torch.load(fname, map_location=torch.device('cpu')) # load - msd = model.state_dict() # model state_dict + msd = model.state_dict() # model state_dict csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 csd = {k: v for k, v in csd.items() if msd[k].shape == v.shape} # filter model.load_state_dict(csd, strict=False) # load diff --git a/models/common.py b/models/common.py index 4fd1a8159c64..412e9bf1e411 100644 --- a/models/common.py +++ b/models/common.py @@ -1,14 +1,15 @@ # YOLOv5 common modules import math +from copy import copy from pathlib import Path import numpy as np +import pandas as pd import requests import torch import torch.nn as nn from PIL import Image -from torch.cuda import amp from utils.datasets import letterbox from utils.general import non_max_suppression, make_divisible, scale_coords, increment_path, xyxy2xywh @@ -235,14 +236,16 @@ def autoshape(self): print('autoShape already enabled, skipping... ') # model already converted to model.autoshape() return self + @torch.no_grad() + @torch.cuda.amp.autocast() def forward(self, imgs, size=640, augment=False, profile=False): - # Inference from various sources. For height=720, width=1280, RGB images example inputs are: + # Inference from various sources. 
For height=640, width=1280, RGB images example inputs are: # filename: imgs = 'data/samples/zidane.jpg' # URI: = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/zidane.jpg' - # OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(720,1280,3) - # PIL: = Image.open('image.jpg') # HWC x(720,1280,3) - # numpy: = np.zeros((720,1280,3)) # HWC - # torch: = torch.zeros(16,3,720,1280) # BCHW + # OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(640,1280,3) + # PIL: = Image.open('image.jpg') # HWC x(640,1280,3) + # numpy: = np.zeros((640,1280,3)) # HWC + # torch: = torch.zeros(16,3,320,640) # BCHW (scaled to size=640, 0-1 values) # multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] # list of images t = [time_synchronized()] @@ -275,15 +278,14 @@ def forward(self, imgs, size=640, augment=False, profile=False): x = torch.from_numpy(x).to(p.device).type_as(p) / 255. # uint8 to fp16/32 t.append(time_synchronized()) - with torch.no_grad(), amp.autocast(enabled=p.device.type != 'cpu'): - # Inference - y = self.model(x, augment, profile)[0] # forward - t.append(time_synchronized()) + # Inference + y = self.model(x, augment, profile)[0] # forward + t.append(time_synchronized()) - # Post-process - y = non_max_suppression(y, conf_thres=self.conf, iou_thres=self.iou, classes=self.classes) # NMS - for i in range(n): - scale_coords(shape1, y[i][:, :4], shape0[i]) + # Post-process + y = non_max_suppression(y, conf_thres=self.conf, iou_thres=self.iou, classes=self.classes) # NMS + for i in range(n): + scale_coords(shape1, y[i][:, :4], shape0[i]) t.append(time_synchronized()) return Detections(imgs, y, files, t, self.names, x.shape) @@ -347,17 +349,27 @@ def render(self): self.display(render=True) # render results return self.imgs - def __len__(self): - return self.n + def pandas(self): + # return detections as pandas DataFrames, i.e. print(results.pandas().xyxy[0]) + new = copy(self) # return copy + ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name' # xyxy columns + cb = 'xcenter', 'ycenter', 'width', 'height', 'confidence', 'class', 'name' # xywh columns + for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]): + a = [[x[:5] + [int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)] # update + setattr(new, k, [pd.DataFrame(x, columns=c) for x in a]) + return new def tolist(self): # return a list of Detections objects, i.e. 'for result in results.tolist():' - x = [Detections([self.imgs[i]], [self.pred[i]], self.names) for i in range(self.n)] + x = [Detections([self.imgs[i]], [self.pred[i]], self.names, self.s) for i in range(self.n)] for d in x: for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']: setattr(d, k, getattr(d, k)[0]) # pop out of list return x + def __len__(self): + return self.n + class Classify(nn.Module): # Classification head, i.e. 
x(b,c1,20,20) to x(b,c2) diff --git a/utils/general.py b/utils/general.py index 9822582cdb86..a8aad16a8ab9 100755 --- a/utils/general.py +++ b/utils/general.py @@ -13,6 +13,7 @@ import cv2 import numpy as np +import pandas as pd import torch import torchvision import yaml @@ -24,6 +25,7 @@ # Settings torch.set_printoptions(linewidth=320, precision=5, profile='long') np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5 +pd.options.display.max_columns = 10 cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader) os.environ['NUMEXPR_MAX_THREADS'] = str(min(os.cpu_count(), 8)) # NumExpr max threads From fca5e2a48fb526b57bda0c66be6b7ac1aaa8d83d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 9 Apr 2021 13:34:49 +0200 Subject: [PATCH 0115/1976] autocast enable=torch.cuda.is_available() (#2748) --- models/common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/common.py b/models/common.py index 412e9bf1e411..c77ecbeceace 100644 --- a/models/common.py +++ b/models/common.py @@ -237,7 +237,7 @@ def autoshape(self): return self @torch.no_grad() - @torch.cuda.amp.autocast() + @torch.cuda.amp.autocast(torch.cuda.is_available()) def forward(self, imgs, size=640, augment=False, profile=False): # Inference from various sources. For height=640, width=1280, RGB images example inputs are: # filename: imgs = 'data/samples/zidane.jpg' From b5de52c4cdfefb3c7acfbff7d7f450a46b4aaada Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 9 Apr 2021 18:19:49 +0200 Subject: [PATCH 0116/1976] torch.cuda.amp bug fix (#2750) PR https://github.com/ultralytics/yolov5/pull/2725 introduced a very specific bug that only affects multi-GPU trainings. Apparently the cause was using the torch.cuda.amp decorator in the autoShape forward method. I've implemented amp more traditionally in this PR, and the bug is resolved. --- models/common.py | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/models/common.py b/models/common.py index c77ecbeceace..1130471e904b 100644 --- a/models/common.py +++ b/models/common.py @@ -10,6 +10,7 @@ import torch import torch.nn as nn from PIL import Image +from torch.cuda import amp from utils.datasets import letterbox from utils.general import non_max_suppression, make_divisible, scale_coords, increment_path, xyxy2xywh @@ -237,7 +238,6 @@ def autoshape(self): return self @torch.no_grad() - @torch.cuda.amp.autocast(torch.cuda.is_available()) def forward(self, imgs, size=640, augment=False, profile=False): # Inference from various sources. For height=640, width=1280, RGB images example inputs are: # filename: imgs = 'data/samples/zidane.jpg' @@ -251,7 +251,8 @@ def forward(self, imgs, size=640, augment=False, profile=False): t = [time_synchronized()] p = next(self.model.parameters()) # for device and type if isinstance(imgs, torch.Tensor): # torch - return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference + with amp.autocast(enabled=p.device.type != 'cpu'): + return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference # Pre-process n, imgs = (len(imgs), imgs) if isinstance(imgs, list) else (1, [imgs]) # number of images, list of images @@ -278,17 +279,18 @@ def forward(self, imgs, size=640, augment=False, profile=False): x = torch.from_numpy(x).to(p.device).type_as(p) / 255. 
# uint8 to fp16/32 t.append(time_synchronized()) - # Inference - y = self.model(x, augment, profile)[0] # forward - t.append(time_synchronized()) + with amp.autocast(enabled=p.device.type != 'cpu'): + # Inference + y = self.model(x, augment, profile)[0] # forward + t.append(time_synchronized()) - # Post-process - y = non_max_suppression(y, conf_thres=self.conf, iou_thres=self.iou, classes=self.classes) # NMS - for i in range(n): - scale_coords(shape1, y[i][:, :4], shape0[i]) + # Post-process + y = non_max_suppression(y, conf_thres=self.conf, iou_thres=self.iou, classes=self.classes) # NMS + for i in range(n): + scale_coords(shape1, y[i][:, :4], shape0[i]) - t.append(time_synchronized()) - return Detections(imgs, y, files, t, self.names, x.shape) + t.append(time_synchronized()) + return Detections(imgs, y, files, t, self.names, x.shape) class Detections: From 0cae7576a9241110157cd154fc2237e703c2719e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 10 Apr 2021 21:09:23 +0200 Subject: [PATCH 0117/1976] utils/wandb_logging PEP8 reformat (#2755) * wandb_logging PEP8 reformat * Update wandb_utils.py --- utils/wandb_logging/log_dataset.py | 6 ++---- utils/wandb_logging/wandb_utils.py | 31 +++++++++++++++--------------- 2 files changed, 17 insertions(+), 20 deletions(-) diff --git a/utils/wandb_logging/log_dataset.py b/utils/wandb_logging/log_dataset.py index 0ccb8735bd42..d7a521f1414b 100644 --- a/utils/wandb_logging/log_dataset.py +++ b/utils/wandb_logging/log_dataset.py @@ -1,10 +1,8 @@ import argparse -from pathlib import Path import yaml from wandb_utils import WandbLogger -from utils.datasets import LoadImagesAndLabels WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' @@ -21,6 +19,6 @@ def create_dataset_artifact(opt): parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset') parser.add_argument('--project', type=str, default='YOLOv5', help='name of W&B Project') opt = parser.parse_args() - opt.resume = False # Explicitly disallow resume check for dataset upload Job - + opt.resume = False # Explicitly disallow resume check for dataset upload job + create_dataset_artifact(opt) diff --git a/utils/wandb_logging/wandb_utils.py b/utils/wandb_logging/wandb_utils.py index d407e6cd54fb..d8f50ae8a80e 100644 --- a/utils/wandb_logging/wandb_utils.py +++ b/utils/wandb_logging/wandb_utils.py @@ -1,12 +1,9 @@ -import argparse import json -import os -import shutil import sys +from pathlib import Path + import torch import yaml -from datetime import datetime -from pathlib import Path from tqdm import tqdm sys.path.append(str(Path(__file__).parent.parent.parent)) # add utils/ to path @@ -33,6 +30,7 @@ def check_wandb_config_file(data_config_file): return wandb_config return data_config_file + def get_run_info(run_path): run_path = Path(remove_prefix(run_path, WANDB_ARTIFACT_PREFIX)) run_id = run_path.stem @@ -40,11 +38,12 @@ def get_run_info(run_path): model_artifact_name = 'run_' + run_id + '_model' return run_id, project, model_artifact_name + def check_wandb_resume(opt): process_wandb_config_ddp_mode(opt) if opt.global_rank not in [-1, 0] else None if isinstance(opt.resume, str): if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): - if opt.global_rank not in [-1, 0]: # For resuming DDP runs + if opt.global_rank not in [-1, 0]: # For resuming DDP runs run_id, project, model_artifact_name = get_run_info(opt.resume) api = wandb.Api() artifact = api.artifact(project + '/' + model_artifact_name + ':latest') @@ -53,6 +52,7 @@ def check_wandb_resume(opt): return True return None 
+ def process_wandb_config_ddp_mode(opt): with open(opt.data) as f: data_dict = yaml.load(f, Loader=yaml.SafeLoader) # data dict @@ -63,7 +63,7 @@ def process_wandb_config_ddp_mode(opt): train_dir = train_artifact.download() train_path = Path(train_dir) / 'data/images/' data_dict['train'] = str(train_path) - + if isinstance(data_dict['val'], str) and data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX): api = wandb.Api() val_artifact = api.artifact(remove_prefix(data_dict['val']) + ':' + opt.artifact_alias) @@ -71,12 +71,11 @@ def process_wandb_config_ddp_mode(opt): val_path = Path(val_dir) / 'data/images/' data_dict['val'] = str(val_path) if train_dir or val_dir: - ddp_data_path = str(Path(val_dir) / 'wandb_local_data.yaml') + ddp_data_path = str(Path(val_dir) / 'wandb_local_data.yaml') with open(ddp_data_path, 'w') as f: yaml.dump(data_dict, f) opt.data = ddp_data_path - - + class WandbLogger(): def __init__(self, opt, name, run_id, data_dict, job_type='Training'): @@ -84,7 +83,7 @@ def __init__(self, opt, name, run_id, data_dict, job_type='Training'): self.job_type = job_type self.wandb, self.wandb_run, self.data_dict = wandb, None if not wandb else wandb.run, data_dict # It's more elegant to stick to 1 wandb.init call, but useful config data is overwritten in the WandbLogger's wandb.init call - if isinstance(opt.resume, str): # checks resume from artifact + if isinstance(opt.resume, str): # checks resume from artifact if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): run_id, project, model_artifact_name = get_run_info(opt.resume) model_artifact_name = WANDB_ARTIFACT_PREFIX + model_artifact_name @@ -98,7 +97,7 @@ def __init__(self, opt, name, run_id, data_dict, job_type='Training'): project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem, name=name, job_type=job_type, - id=run_id) if not wandb.run else wandb.run + id=run_id) if not wandb.run else wandb.run if self.wandb_run: if self.job_type == 'Training': if not opt.resume: @@ -110,15 +109,15 @@ def __init__(self, opt, name, run_id, data_dict, job_type='Training'): if self.job_type == 'Dataset Creation': self.data_dict = self.check_and_upload_dataset(opt) else: - print(f"{colorstr('wandb: ')}Install Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)") - + prefix = colorstr('wandb: ') + print(f"{prefix}Install Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)") def check_and_upload_dataset(self, opt): assert wandb, 'Install wandb to upload dataset' check_dataset(self.data_dict) config_path = self.log_dataset_artifact(opt.data, - opt.single_cls, - 'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem) + opt.single_cls, + 'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem) print("Created dataset config file ", config_path) with open(config_path) as f: wandb_data_dict = yaml.load(f, Loader=yaml.SafeLoader) From 6dd1083bbbc5d29643aafef3373853f03a317a92 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 11 Apr 2021 01:33:55 +0200 Subject: [PATCH 0118/1976] Tensorboard model visualization bug fix (#2758) This fix should allow for visualizing YOLOv5 model graphs correctly in Tensorboard by uncommenting line 335 in train.py: ```python if tb_writer: tb_writer.add_graph(torch.jit.trace(model, imgs, strict=False), []) # add model graph ``` The problem was that the detect() layer checks the input size to adapt the grid if required, and tracing does not seem to like this shape check (even if the shape is fine and no grid recomputation is required). 
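To make the failure mode concrete, here is a minimal standalone sketch (a hypothetical toy module for illustration only, not the actual Detect() layer):

```python
# Toy example: torch.jit.trace records a single execution path, so a
# data-dependent branch like this is frozen into the trace (and tracing
# may emit a TracerWarning about it), even when the shapes happen to match.
import torch
import torch.nn as nn

class GridCheck(nn.Module):
    def forward(self, x):
        if x.shape[2:4] != (20, 20):  # shape check, similar in spirit to Detect()
            x = x * 2.0  # branch outcome is baked into the trace
        return x

traced = torch.jit.trace(GridCheck(), torch.zeros(1, 3, 20, 20))
```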
The following will warn: https://github.com/ultralytics/yolov5/blob/0cae7576a9241110157cd154fc2237e703c2719e/train.py#L335

Solution is below. This is a YOLOv5s model displayed in TensorBoard. You can see the Detect() layer merging the 3 layers into a single output for example, and everything appears to work and visualize correctly.

```python
tb_writer.add_graph(torch.jit.trace(model, imgs, strict=False), [])
```

---
 train.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/train.py b/train.py
index 1f2b467e732b..82043b7fff34 100644
--- a/train.py
+++ b/train.py
@@ -332,7 +332,7 @@ def train(hyp, opt, device, tb_writer=None):
                     Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start()
                     # if tb_writer:
                     #     tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch)
-                    #     tb_writer.add_graph(model, imgs)  # add model to tensorboard
+                    #     tb_writer.add_graph(torch.jit.trace(model, imgs, strict=False), [])  # add model graph
                 elif plots and ni == 10 and wandb_logger.wandb:
                     wandb_logger.log({"Mosaics": [wandb_logger.wandb.Image(str(x), caption=x.name) for x in save_dir.glob('train*.jpg')
                                                   if x.exists()]})

From 9029759cb3b39de724f148f0c9eee8c70e0ffdc4 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Sun, 11 Apr 2021 16:28:32 +0200
Subject: [PATCH 0119/1976] Created using Colaboratory

---
 tutorial.ipynb | 14 +++-----------
 1 file changed, 3 insertions(+), 11 deletions(-)

diff --git a/tutorial.ipynb b/tutorial.ipynb
index 8a191609b24d..f334f5a15ef0 100644
--- a/tutorial.ipynb
+++ b/tutorial.ipynb
@@ -582,7 +582,9 @@
 "source": [
 "# 1. Inference\n",
 "\n",
- "`detect.py` runs inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)."
+ "`detect.py` runs YOLOv5 inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/detect`. Example inference sources are:\n",
+ "\n",
+ " "
 ]
 },
 {
@@ -634,16 +636,6 @@
 }
 ]
 },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "4qbaa3iEcrcE"
- },
- "source": [
- "Results are saved to `runs/detect`.
A full list of available inference sources:\n",
- " "
- ]
- },
 {
 "cell_type": "markdown",
 "metadata": {

From e2b7bc0b32ecf306fc179bb87bad82216a470b37 Mon Sep 17 00:00:00 2001
From: Ben Milanko
Date: Mon, 12 Apr 2021 02:53:40 +1000
Subject: [PATCH 0120/1976] YouTube Livestream Detection (#2752)

* YouTube livestream detection
* dependency update to auto install pafy
* Remove print
* include youtube_dl in deps
* PEP8 reformat
* YouTube URL check fix
* reduce lines
* add comment
* update check_requirements
* stream framerate fix
* Update README.md
* cleanup
* PEP8
* remove cap.retrieve() failure code

Co-authored-by: Glenn Jocher
---
 README.md         |  5 ++---
 detect.py         |  2 +-
 utils/datasets.py | 23 +++++++++++++++--------
 utils/general.py  | 20 ++++++++++++--------
 4 files changed, 30 insertions(+), 20 deletions(-)

diff --git a/README.md b/README.md
index f51ccd97712f..d409b3fdeadf 100755
--- a/README.md
+++ b/README.md
@@ -92,9 +92,8 @@ $ python detect.py --source 0  # webcam
                             file.mp4  # video
                             path/  # directory
                             path/*.jpg  # glob
-                            rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa  # rtsp stream
-                            rtmp://192.168.1.105/live/test  # rtmp stream
-                            http://112.50.243.8/PLTV/88888888/224/3221225900/1.m3u8  # http stream
+                            'https://youtu.be/NUsoVlDFqZg'  # YouTube video
+                            'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream
 ```

 To run inference on example images in `data/images`:

diff --git a/detect.py b/detect.py
index 2a4d6f4550c8..c0707da69e6a 100644
--- a/detect.py
+++ b/detect.py
@@ -19,7 +19,7 @@ def detect(save_img=False):
     source, weights, view_img, save_txt, imgsz = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size
     save_img = not opt.nosave and not source.endswith('.txt')  # save inference images
     webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith(
-        ('rtsp://', 'rtmp://', 'http://'))
+        ('rtsp://', 'rtmp://', 'http://', 'https://'))

     # Directories
     save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok))  # increment run

diff --git a/utils/datasets.py b/utils/datasets.py
index 5ef89ab6ea83..ec597b628106 100755
--- a/utils/datasets.py
+++ b/utils/datasets.py
@@ -20,8 +20,8 @@ from torch.utils.data import Dataset
 from tqdm import tqdm

-from utils.general import xyxy2xywh, xywh2xyxy, xywhn2xyxy, xyn2xy, segment2box, segments2boxes, resample_segments, \
-    clean_str
+from utils.general import check_requirements, xyxy2xywh, xywh2xyxy, xywhn2xyxy, xyn2xy, segment2box, segments2boxes, \
+    resample_segments, clean_str
 from utils.torch_utils import torch_distributed_zero_first

 # Parameters
@@ -275,14 +275,20 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32):
         for i, s in enumerate(sources):
             # Start the thread to read frames from the video stream
             print(f'{i + 1}/{n}: {s}... 
', end='') - cap = cv2.VideoCapture(eval(s) if s.isnumeric() else s) + url = eval(s) if s.isnumeric() else s + if 'youtube.com/' in url or 'youtu.be/' in url: # if source is YouTube video + check_requirements(('pafy', 'youtube_dl')) + import pafy + url = pafy.new(url).getbest(preftype="mp4").url + cap = cv2.VideoCapture(url) assert cap.isOpened(), f'Failed to open {s}' w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) - fps = cap.get(cv2.CAP_PROP_FPS) % 100 + self.fps = cap.get(cv2.CAP_PROP_FPS) % 100 + _, self.imgs[i] = cap.read() # guarantee first frame thread = Thread(target=self.update, args=([i, cap]), daemon=True) - print(f' success ({w}x{h} at {fps:.2f} FPS).') + print(f' success ({w}x{h} at {self.fps:.2f} FPS).') thread.start() print('') # newline @@ -303,7 +309,7 @@ def update(self, index, cap): success, im = cap.retrieve() self.imgs[index] = im if success else self.imgs[index] * 0 n = 0 - time.sleep(0.01) # wait time + time.sleep(1 / self.fps) # wait time def __iter__(self): self.count = -1 @@ -444,7 +450,7 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r gb += self.imgs[i].nbytes pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB)' pbar.close() - + def cache_labels(self, path=Path('./labels.cache'), prefix=''): # Cache dataset labels, check images and read shapes x = {} # dict @@ -489,7 +495,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): pbar.desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels... " \ f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted" pbar.close() - + if nf == 0: print(f'{prefix}WARNING: No labels found in {path}. See {help_url}') @@ -1034,6 +1040,7 @@ def extract_boxes(path='../coco128/'): # from utils.datasets import *; extract_ b[[1, 3]] = np.clip(b[[1, 3]], 0, h) assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}' + def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0), annotated_only=False): """ Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files Usage: from utils.datasets import *; autosplit('../coco128') diff --git a/utils/general.py b/utils/general.py index a8aad16a8ab9..5482629ac8c0 100755 --- a/utils/general.py +++ b/utils/general.py @@ -91,17 +91,20 @@ def check_git_status(): print(e) -def check_requirements(file='requirements.txt', exclude=()): - # Check installed dependencies meet requirements +def check_requirements(requirements='requirements.txt', exclude=()): + # Check installed dependencies meet requirements (pass *.txt file or list of packages) import pkg_resources as pkg prefix = colorstr('red', 'bold', 'requirements:') - file = Path(file) - if not file.exists(): - print(f"{prefix} {file.resolve()} not found, check failed.") - return + if isinstance(requirements, (str, Path)): # requirements.txt file + file = Path(requirements) + if not file.exists(): + print(f"{prefix} {file.resolve()} not found, check failed.") + return + requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(file.open()) if x.name not in exclude] + else: # list or tuple of packages + requirements = [x for x in requirements if x not in exclude] n = 0 # number of packages updates - requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(file.open()) if x.name not in exclude] for r in requirements: try: pkg.require(r) @@ -111,7 +114,8 @@ def check_requirements(file='requirements.txt', exclude=()): print(subprocess.check_output(f"pip install '{e.req}'", 
shell=True).decode()) if n: # if packages updated - s = f"{prefix} {n} package{'s' * (n > 1)} updated per {file.resolve()}\n" \ + source = file.resolve() if 'file' in locals() else requirements + s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \ f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n" print(emojis(s)) # emoji-safe From f5b8f7d54c9fa69210da0177fec7ac2d9e4a627c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 11 Apr 2021 19:23:47 +0200 Subject: [PATCH 0121/1976] YOLOv5 v5.0 Release (#2762) --- README.md | 43 +++++++++++++---------- hubconf.py | 92 +++++++++++++++++++++----------------------------- utils/plots.py | 6 ++-- 3 files changed, 66 insertions(+), 75 deletions(-) diff --git a/README.md b/README.md index d409b3fdeadf..02908db0fd18 100755 --- a/README.md +++ b/README.md @@ -6,36 +6,43 @@ This repository represents Ultralytics open-source research into future object detection methods, and incorporates lessons learned and best practices evolved over thousands of hours of training and evolution on anonymized client datasets. **All code and models are under active development, and are subject to modification or deletion without notice.** Use at your own risk. -
 [previous performance figure]
+ [updated YOLOv5 performance figure]
+
+ YOLOv5-P5 640 Figure (click to expand)
+ [YOLOv5-P5 640 performance figure]
+
+ Figure Notes (click to expand)

 * GPU Speed measures end-to-end time per image averaged over 5000 COCO val2017 images using a V100 GPU with batch size 32, and includes image preprocessing, PyTorch FP16 inference, postprocessing and NMS.
 * EfficientDet data from [google/automl](https://github.com/google/automl) at batch size 8.
+ * **Reproduce** by `python test.py --task study --data coco.yaml --iou 0.7 --weights yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt`
+- **April 11, 2021**: [v5.0 release](https://github.com/ultralytics/yolov5/releases/tag/v5.0): YOLOv5-P6 1280 models, [AWS](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart), [Supervise.ly](https://github.com/ultralytics/yolov5/issues/2518) and [YouTube](https://github.com/ultralytics/yolov5/pull/2752) integrations.
 - **January 5, 2021**: [v4.0 release](https://github.com/ultralytics/yolov5/releases/tag/v4.0): nn.SiLU() activations, [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme) logging, [PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5/) integration.
 - **August 13, 2020**: [v3.0 release](https://github.com/ultralytics/yolov5/releases/tag/v3.0): nn.Hardswish() activations, data autodownload, native AMP.
 - **July 23, 2020**: [v2.0 release](https://github.com/ultralytics/yolov5/releases/tag/v2.0): improved model definition, training and mAP.
-- **June 22, 2020**: [PANet](https://arxiv.org/abs/1803.01534) updates: new heads, reduced parameters, improved speed and mAP [364fcfd](https://github.com/ultralytics/yolov5/commit/364fcfd7dba53f46edd4f04c037a039c0a287972).
-- **June 19, 2020**: [FP16](https://pytorch.org/docs/stable/nn.html#torch.nn.Module.half) as new default for smaller checkpoints and faster inference [d4c6674](https://github.com/ultralytics/yolov5/commit/d4c6674c98e19df4c40e33a777610a18d1961145).


 ## Pretrained Checkpoints

-| Model | size | APval | APtest | AP50 | SpeedV100 | FPSV100 || params | GFLOPS |
-|---------- |------ |------ |------ |------ | -------- | ------| ------ |------ | :------: |
-| [YOLOv5s](https://github.com/ultralytics/yolov5/releases) |640 |36.8 |36.8 |55.6 |**2.2ms** |**455** ||7.3M |17.0
-| [YOLOv5m](https://github.com/ultralytics/yolov5/releases) |640 |44.5 |44.5 |63.1 |2.9ms |345 ||21.4M |51.3
-| [YOLOv5l](https://github.com/ultralytics/yolov5/releases) |640 |48.1 |48.1 |66.4 |3.8ms |264 ||47.0M |115.4
-| [YOLOv5x](https://github.com/ultralytics/yolov5/releases) |640 |**50.1** |**50.1** |**68.7** |6.0ms |167 ||87.7M |218.8
-| | | | | | | || |
-| [YOLOv5x](https://github.com/ultralytics/yolov5/releases) + TTA |832 |**51.9** |**51.9** |**69.6** |24.9ms |40 ||87.7M |1005.3
-
-
+[assets]: https://github.com/ultralytics/yolov5/releases
+
+Model |size (pixels) |mAPval 0.5:0.95 |mAPtest 0.5:0.95 |mAPval 0.5 |Speed V100 (ms) | |params (M) |FLOPS 640 (B)
+--- |--- |--- |--- |--- |--- |---|--- |---
+[YOLOv5s][assets] |640 |36.7 |36.7 |55.4 |**2.0** | |7.3 |17.0
+[YOLOv5m][assets] |640 |44.5 |44.5 |63.3 |2.7 | |21.4 |51.3
+[YOLOv5l][assets] |640 |48.2 |48.2 |66.9 |3.8 | |47.0 |115.4
+[YOLOv5x][assets] |640 |**50.4** |**50.4** |**68.8** |6.1 | |87.7 |218.8
+| | | | | | || |
+[YOLOv5s6][assets] |1280 |43.3 |43.3 |61.9 |**4.3** | |12.7 |17.4
+[YOLOv5m6][assets] |1280 |50.5 |50.5 |68.7 |8.4 | |35.9 |52.4
+[YOLOv5l6][assets] |1280 |53.4 |53.4 |71.1 |12.3 | |77.2 |117.7
+[YOLOv5x6][assets] |1280 |**54.4** |**54.4** |**72.0** |22.4 | |141.8 |222.9
+| | | | | | || |
+[YOLOv5x6][assets] TTA |1280 |**55.0** |**55.0** |**72.0** |70.8 | |- |-
Table Notes (click to expand) @@ -44,7 +51,7 @@ This repository represents Ultralytics open-source research into future object d * AP values are for single-model single-scale unless otherwise noted. **Reproduce mAP** by `python test.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` * SpeedGPU averaged over 5000 COCO val2017 images using a GCP [n1-standard-16](https://cloud.google.com/compute/docs/machine-types#n1_standard_machine_types) V100 instance, and includes FP16 inference, postprocessing and NMS. **Reproduce speed** by `python test.py --data coco.yaml --img 640 --conf 0.25 --iou 0.45` * All checkpoints are trained to 300 epochs with default settings and hyperparameters (no autoaugmentation). - * Test Time Augmentation ([TTA](https://github.com/ultralytics/yolov5/issues/303)) includes reflection and scale augmentation. **Reproduce TTA** by `python test.py --data coco.yaml --img 832 --iou 0.65 --augment` + * Test Time Augmentation ([TTA](https://github.com/ultralytics/yolov5/issues/303)) includes reflection and scale augmentation. **Reproduce TTA** by `python test.py --data coco.yaml --img 1536 --iou 0.7 --augment`
@@ -85,7 +92,7 @@ YOLOv5 may be run in any of the following up-to-date verified environments (with ## Inference -detect.py runs inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases) and saving results to `runs/detect`. +`detect.py` runs inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases) and saving results to `runs/detect`. ```bash $ python detect.py --source 0 # webcam file.jpg # image diff --git a/hubconf.py b/hubconf.py index 0f9aa150a34e..d26db45695de 100644 --- a/hubconf.py +++ b/hubconf.py @@ -55,84 +55,68 @@ def create(name, pretrained, channels, classes, autoshape): raise Exception(s) from e -def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True): - """YOLOv5-small model from https://github.com/ultralytics/yolov5 +def custom(path_or_model='path/to/model.pt', autoshape=True): + """YOLOv5-custom model https://github.com/ultralytics/yolov5 - Arguments: - pretrained (bool): load pretrained weights into the model, default=False - channels (int): number of input channels, default=3 - classes (int): number of model classes, default=80 + Arguments (3 options): + path_or_model (str): 'path/to/model.pt' + path_or_model (dict): torch.load('path/to/model.pt') + path_or_model (nn.Module): torch.load('path/to/model.pt')['model'] Returns: pytorch model """ - return create('yolov5s', pretrained, channels, classes, autoshape) + model = torch.load(path_or_model) if isinstance(path_or_model, str) else path_or_model # load checkpoint + if isinstance(model, dict): + model = model['ema' if model.get('ema') else 'model'] # load model + hub_model = Model(model.yaml).to(next(model.parameters()).device) # create + hub_model.load_state_dict(model.float().state_dict()) # load state_dict + hub_model.names = model.names # class names + if autoshape: + hub_model = hub_model.autoshape() # for file/URI/PIL/cv2/np inputs and NMS + device = select_device('0' if torch.cuda.is_available() else 'cpu') # default to GPU if available + return hub_model.to(device) -def yolov5m(pretrained=True, channels=3, classes=80, autoshape=True): - """YOLOv5-medium model from https://github.com/ultralytics/yolov5 - Arguments: - pretrained (bool): load pretrained weights into the model, default=False - channels (int): number of input channels, default=3 - classes (int): number of model classes, default=80 +def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True): + # YOLOv5-small model https://github.com/ultralytics/yolov5 + return create('yolov5s', pretrained, channels, classes, autoshape) - Returns: - pytorch model - """ + +def yolov5m(pretrained=True, channels=3, classes=80, autoshape=True): + # YOLOv5-medium model https://github.com/ultralytics/yolov5 return create('yolov5m', pretrained, channels, classes, autoshape) def yolov5l(pretrained=True, channels=3, classes=80, autoshape=True): - """YOLOv5-large model from https://github.com/ultralytics/yolov5 - - Arguments: - pretrained (bool): load pretrained weights into the model, default=False - channels (int): number of input channels, default=3 - classes (int): number of model classes, default=80 - - Returns: - pytorch model - """ + # YOLOv5-large model https://github.com/ultralytics/yolov5 return create('yolov5l', pretrained, channels, classes, autoshape) def yolov5x(pretrained=True, channels=3, classes=80, autoshape=True): - """YOLOv5-xlarge model from 
https://github.com/ultralytics/yolov5 + # YOLOv5-xlarge model https://github.com/ultralytics/yolov5 + return create('yolov5x', pretrained, channels, classes, autoshape) - Arguments: - pretrained (bool): load pretrained weights into the model, default=False - channels (int): number of input channels, default=3 - classes (int): number of model classes, default=80 - Returns: - pytorch model - """ - return create('yolov5x', pretrained, channels, classes, autoshape) +def yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True): + # YOLOv5-small model https://github.com/ultralytics/yolov5 + return create('yolov5s6', pretrained, channels, classes, autoshape) -def custom(path_or_model='path/to/model.pt', autoshape=True): - """YOLOv5-custom model from https://github.com/ultralytics/yolov5 +def yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True): + # YOLOv5-medium model https://github.com/ultralytics/yolov5 + return create('yolov5m6', pretrained, channels, classes, autoshape) - Arguments (3 options): - path_or_model (str): 'path/to/model.pt' - path_or_model (dict): torch.load('path/to/model.pt') - path_or_model (nn.Module): torch.load('path/to/model.pt')['model'] - Returns: - pytorch model - """ - model = torch.load(path_or_model) if isinstance(path_or_model, str) else path_or_model # load checkpoint - if isinstance(model, dict): - model = model['ema' if model.get('ema') else 'model'] # load model +def yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True): + # YOLOv5-large model https://github.com/ultralytics/yolov5 + return create('yolov5l6', pretrained, channels, classes, autoshape) - hub_model = Model(model.yaml).to(next(model.parameters()).device) # create - hub_model.load_state_dict(model.float().state_dict()) # load state_dict - hub_model.names = model.names # class names - if autoshape: - hub_model = hub_model.autoshape() # for file/URI/PIL/cv2/np inputs and NMS - device = select_device('0' if torch.cuda.is_available() else 'cpu') # default to GPU if available - return hub_model.to(device) + +def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True): + # YOLOv5-xlarge model https://github.com/ultralytics/yolov5 + return create('yolov5x6', pretrained, channels, classes, autoshape) if __name__ == '__main__': diff --git a/utils/plots.py b/utils/plots.py index 47e7b7b74f1c..5b23a34f5141 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -243,7 +243,7 @@ def plot_study_txt(path='', x=None): # from utils.plots import *; plot_study_tx # ax = ax.ravel() fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True) - # for f in [Path(path) / f'study_coco_{x}.txt' for x in ['yolov5s', 'yolov5m', 'yolov5l', 'yolov5x']]: + # for f in [Path(path) / f'study_coco_{x}.txt' for x in ['yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]: for f in sorted(Path(path).glob('study*.txt')): y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T x = np.arange(y.shape[1]) if x is None else np.array(x) @@ -253,7 +253,7 @@ def plot_study_txt(path='', x=None): # from utils.plots import *; plot_study_tx # ax[i].set_title(s[i]) j = y[3].argmax() + 1 - ax2.plot(y[6, :j], y[3, :j] * 1E2, '.-', linewidth=2, markersize=8, + ax2.plot(y[6, 1:j], y[3, 1:j] * 1E2, '.-', linewidth=2, markersize=8, label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO')) ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5], @@ -261,7 +261,7 @@ def plot_study_txt(path='', x=None): # from utils.plots import *; plot_study_tx 
     ax2.grid(alpha=0.2)
     ax2.set_yticks(np.arange(20, 60, 5))
-    ax2.set_xlim(0, 30)
+    ax2.set_xlim(0, 57)
     ax2.set_ylim(30, 55)
     ax2.set_xlabel('GPU Speed (ms/img)')
     ax2.set_ylabel('COCO AP val')

From 0f395b3e3bccbc019ab3d1cbd41303a5b50dc0f0 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Sun, 11 Apr 2021 23:11:43 +0200
Subject: [PATCH 0122/1976] YOLOv5 v5.0 Release patch 1 (#2764)

* torch.jit.trace(model, img, strict=False)
* Update check_file()
* Update hubconf.py
* Update README.md
---
 README.md        | 2 +-
 hubconf.py       | 6 +++---
 models/export.py | 2 +-
 utils/general.py | 6 +++---
 4 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/README.md b/README.md
index 02908db0fd18..577c908de304 100755
--- a/README.md
+++ b/README.md
@@ -33,7 +33,7 @@ This repository represents Ultralytics open-source research into future object d
 Model |size (pixels) |mAPval 0.5:0.95 |mAPtest 0.5:0.95 |mAPval 0.5 |Speed V100 (ms) | |params (M) |FLOPS 640 (B)
 --- |--- |--- |--- |--- |--- |---|--- |---
 [YOLOv5s][assets] |640 |36.7 |36.7 |55.4 |**2.0** | |7.3 |17.0
-[YOLOv5m][assets] |640 |44.5 |44.5 |63.3 |2.7 | |21.4 |51.3
+[YOLOv5m][assets] |640 |44.5 |44.5 |63.1 |2.7 | |21.4 |51.3
 [YOLOv5l][assets] |640 |48.2 |48.2 |66.9 |3.8 | |47.0 |115.4
 [YOLOv5x][assets] |640 |**50.4** |**50.4** |**68.8** |6.1 | |87.7 |218.8
 | | | | | | || |

diff --git a/hubconf.py b/hubconf.py
index d26db45695de..a2a43a7a10cb 100644
--- a/hubconf.py
+++ b/hubconf.py
@@ -1,4 +1,4 @@
-"""File for accessing YOLOv5 models via PyTorch Hub https://pytorch.org/hub/ultralytics_yolov5/
+"""YOLOv5 PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5/

 Usage:
     import torch
@@ -31,9 +31,9 @@ def create(name, pretrained, channels, classes, autoshape):
     Returns:
         pytorch model
     """
-    config = Path(__file__).parent / 'models' / f'{name}.yaml'  # model.yaml path
     try:
-        model = Model(config, channels, classes)
+        cfg = list((Path(__file__).parent / 'models').rglob(f'{name}.yaml'))[0]  # model.yaml path
+        model = Model(cfg, channels, classes)
         if pretrained:
             fname = f'{name}.pt'  # checkpoint filename
             attempt_download(fname)  # download if not found locally

diff --git a/models/export.py b/models/export.py
index 11e60c7a583d..0bb5398e4841 100644
--- a/models/export.py
+++ b/models/export.py
@@ -62,7 +62,7 @@
     try:
         print('\nStarting TorchScript export with torch %s...' % torch.__version__)
         f = opt.weights.replace('.pt', '.torchscript.pt')  # filename
-        ts = torch.jit.trace(model, img)
+        ts = torch.jit.trace(model, img, strict=False)
         ts.save(f)
         print('TorchScript export success, saved as %s' % f)
     except Exception as e:

diff --git a/utils/general.py b/utils/general.py
index 5482629ac8c0..413eb5b8fa97 100755
--- a/utils/general.py
+++ b/utils/general.py
@@ -144,12 +144,12 @@ def check_imshow():

 def check_file(file):
     # Search for file if not found
-    if os.path.isfile(file) or file == '':
+    if Path(file).is_file() or file == '':
         return file
     else:
         files = glob.glob('./**/' + file, recursive=True)  # find file
-        assert len(files), 'File Not Found: %s' % file  # assert file was found
-        assert len(files) == 1, "Multiple files match '%s', specify exact path: %s" % (file, files)  # assert unique
+        assert len(files), f'File Not Found: {file}'  # assert file was found
+        assert len(files) == 1, f"Multiple files match '{file}', specify exact path: {files}"  # assert unique
         return files[0]  # return file

From 54d65160b799ec75c2a8c01de6cb069bf417eabe Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Mon, 12 Apr 2021 12:26:28 +0200
Subject: [PATCH 0123/1976] Update tutorial.ipynb

---
 tutorial.ipynb | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tutorial.ipynb b/tutorial.ipynb
index f334f5a15ef0..881632daa375 100644
--- a/tutorial.ipynb
+++ b/tutorial.ipynb
@@ -607,7 +607,7 @@
 "output_type": "stream",
 "text": [
 "Namespace(agnostic_nms=False, augment=False, classes=None, conf_thres=0.25, device='', exist_ok=False, img_size=640, iou_thres=0.45, name='exp', project='runs/detect', save_conf=False, save_txt=False, source='data/images/', update=False, view_img=False, weights=['yolov5s.pt'])\n",
- "YOLOv5 🚀 v4.0-137-g9b11f0c torch 1.8.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n",
+ "YOLOv5 🚀 v5.0-1-g0f395b3 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n",
 "\n",
 "Fusing layers... 
\n", "Model Summary: 224 layers, 7266973 parameters, 0 gradients, 17.0 GFLOPS\n", @@ -1263,4 +1263,4 @@ "outputs": [] } ] -} \ No newline at end of file +} From 6b718e91275fde8367ec8d3fc4cda5d7ba6a5ca0 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 12 Apr 2021 12:31:28 +0200 Subject: [PATCH 0124/1976] Created using Colaboratory --- tutorial.ipynb | 164 ++++++++++++++++++++++++------------------------- 1 file changed, 80 insertions(+), 84 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 881632daa375..e4344d3ddcec 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -16,7 +16,7 @@ "accelerator": "GPU", "widgets": { "application/vnd.jupyter.widget-state+json": { - "b54ab52f1d4f4903897ab6cd49a3b9b2": { + "8815626359d84416a2f44a95500580a4": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "state": { @@ -28,15 +28,15 @@ "_view_count": null, "_view_module_version": "1.5.0", "box_style": "", - "layout": "IPY_MODEL_1852f93fc2714d40adccb8aa161c42ff", + "layout": "IPY_MODEL_3b85609c4ce94a74823f2cfe141ce68e", "_model_module": "@jupyter-widgets/controls", "children": [ - "IPY_MODEL_3293cfe869bd4a1bbbe18b49b6815de1", - "IPY_MODEL_8d5ee8b8ab6d46b98818bd2c562ddd1c" + "IPY_MODEL_876609753c2946248890344722963d44", + "IPY_MODEL_8abfdd8778e44b7ca0d29881cb1ada05" ] } }, - "1852f93fc2714d40adccb8aa161c42ff": { + "3b85609c4ce94a74823f2cfe141ce68e": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -87,12 +87,12 @@ "left": null } }, - "3293cfe869bd4a1bbbe18b49b6815de1": { + "876609753c2946248890344722963d44": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "state": { "_view_name": "ProgressView", - "style": "IPY_MODEL_49fcb2adb0354430b76f491af98abfe9", + "style": "IPY_MODEL_78c6c3d97c484916b8ee167c63556800", "_dom_classes": [], "description": "100%", "_model_name": "FloatProgressModel", @@ -107,30 +107,30 @@ "min": 0, "description_tooltip": null, "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_c7d76e0c53064363add56b8d05e561f5" + "layout": "IPY_MODEL_9dd0f182db5d45378ceafb855e486eb8" } }, - "8d5ee8b8ab6d46b98818bd2c562ddd1c": { + "8abfdd8778e44b7ca0d29881cb1ada05": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "state": { "_view_name": "HTMLView", - "style": "IPY_MODEL_48f321f789634aa584f8a29a3b925dd5", + "style": "IPY_MODEL_a3dab28b45c247089a3d1b8b09f327de", "_dom_classes": [], "description": "", "_model_name": "HTMLModel", "placeholder": "​", "_view_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", - "value": " 781M/781M [00:13<00:00, 62.6MB/s]", + "value": " 781M/781M [08:43<00:00, 1.56MB/s]", "_view_count": null, "_view_module_version": "1.5.0", "description_tooltip": null, "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_6610d6275f3e49d9937d50ed0a105947" + "layout": "IPY_MODEL_32451332b7a94ba9aacddeaa6ac94d50" } }, - "49fcb2adb0354430b76f491af98abfe9": { + "78c6c3d97c484916b8ee167c63556800": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "state": { @@ -145,7 +145,7 @@ "_model_module": "@jupyter-widgets/controls" } }, - "c7d76e0c53064363add56b8d05e561f5": { + "9dd0f182db5d45378ceafb855e486eb8": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -196,7 +196,7 @@ "left": null } }, - "48f321f789634aa584f8a29a3b925dd5": { + "a3dab28b45c247089a3d1b8b09f327de": { "model_module": "@jupyter-widgets/controls", "model_name": 
"DescriptionStyleModel", "state": { @@ -210,7 +210,7 @@ "_model_module": "@jupyter-widgets/controls" } }, - "6610d6275f3e49d9937d50ed0a105947": { + "32451332b7a94ba9aacddeaa6ac94d50": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -550,7 +550,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "20027455-bf84-41fd-c902-b7282d53c91d" + "outputId": "4576b05f-d6d1-404a-fc99-5663c71e3dc4" }, "source": [ "!git clone https://github.com/ultralytics/yolov5 # clone repo\n", @@ -563,12 +563,12 @@ "clear_output()\n", "print('Setup complete. Using torch %s %s' % (torch.__version__, torch.cuda.get_device_properties(0) if torch.cuda.is_available() else 'CPU'))" ], - "execution_count": null, + "execution_count": 1, "outputs": [ { "output_type": "stream", "text": [ - "Setup complete. Using torch 1.8.0+cu101 _CudaDeviceProperties(name='Tesla V100-SXM2-16GB', major=7, minor=0, total_memory=16160MB, multi_processor_count=80)\n" + "Setup complete. Using torch 1.8.1+cu101 _CudaDeviceProperties(name='Tesla V100-SXM2-16GB', major=7, minor=0, total_memory=16160MB, multi_processor_count=80)\n" ], "name": "stdout" } @@ -607,7 +607,7 @@ "output_type": "stream", "text": [ "Namespace(agnostic_nms=False, augment=False, classes=None, conf_thres=0.25, device='', exist_ok=False, img_size=640, iou_thres=0.45, name='exp', project='runs/detect', save_conf=False, save_txt=False, source='data/images/', update=False, view_img=False, weights=['yolov5s.pt'])\n", - "YOLOv5 🚀 v5.0-1-g0f395b3 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", + "YOLOv5 🚀 v4.0-137-g9b11f0c torch 1.8.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", "\n", "Fusing layers... \n", "Model Summary: 224 layers, 7266973 parameters, 0 gradients, 17.0 GFLOPS\n", @@ -664,30 +664,30 @@ "base_uri": "https://localhost:8080/", "height": 65, "referenced_widgets": [ - "b54ab52f1d4f4903897ab6cd49a3b9b2", - "1852f93fc2714d40adccb8aa161c42ff", - "3293cfe869bd4a1bbbe18b49b6815de1", - "8d5ee8b8ab6d46b98818bd2c562ddd1c", - "49fcb2adb0354430b76f491af98abfe9", - "c7d76e0c53064363add56b8d05e561f5", - "48f321f789634aa584f8a29a3b925dd5", - "6610d6275f3e49d9937d50ed0a105947" + "8815626359d84416a2f44a95500580a4", + "3b85609c4ce94a74823f2cfe141ce68e", + "876609753c2946248890344722963d44", + "8abfdd8778e44b7ca0d29881cb1ada05", + "78c6c3d97c484916b8ee167c63556800", + "9dd0f182db5d45378ceafb855e486eb8", + "a3dab28b45c247089a3d1b8b09f327de", + "32451332b7a94ba9aacddeaa6ac94d50" ] }, - "outputId": "f0884441-78d9-443c-afa6-d00ec387908d" + "outputId": "81521192-cf67-4a47-a4cc-434cb0ebc363" }, "source": [ "# Download COCO val2017\n", "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017val.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../ && rm tmp.zip" ], - "execution_count": null, + "execution_count": 2, "outputs": [ { "output_type": "display_data", "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "b54ab52f1d4f4903897ab6cd49a3b9b2", + "model_id": "8815626359d84416a2f44a95500580a4", "version_minor": 0, "version_major": 2 }, @@ -715,57 +715,57 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "5b54c11e-9f4b-4d9a-8e6e-6a2a4f0cc60d" + "outputId": "2340b131-9943-4cd6-fd3a-8272aeb0774f" }, "source": [ "# Run YOLOv5x on COCO val2017\n", "!python test.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65" ], - "execution_count": null, + "execution_count": 6, "outputs": [ { "output_type": "stream", "text": [ 
"Namespace(augment=False, batch_size=32, conf_thres=0.001, data='./data/coco.yaml', device='', exist_ok=False, img_size=640, iou_thres=0.65, name='exp', project='runs/test', save_conf=False, save_hybrid=False, save_json=True, save_txt=False, single_cls=False, task='val', verbose=False, weights=['yolov5x.pt'])\n", - "YOLOv5 🚀 v4.0-137-g9b11f0c torch 1.8.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", + "YOLOv5 🚀 v5.0-1-g0f395b3 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", "\n", - "Downloading https://github.com/ultralytics/yolov5/releases/download/v4.0/yolov5x.pt to yolov5x.pt...\n", - "100% 168M/168M [00:02<00:00, 59.1MB/s]\n", + "Downloading https://github.com/ultralytics/yolov5/releases/download/v5.0/yolov5x.pt to yolov5x.pt...\n", + "100% 168M/168M [00:05<00:00, 32.3MB/s]\n", "\n", "Fusing layers... \n", "Model Summary: 476 layers, 87730285 parameters, 0 gradients, 218.8 GFLOPS\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco/val2017' for images and labels... 4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:01<00:00, 3236.68it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco/val2017' images and labels... 4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:01<00:00, 3102.29it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: ../coco/val2017.cache\n", - " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:20<00:00, 1.95it/s]\n", - " all 5000 36335 0.749 0.619 0.68 0.486\n", - "Speed: 5.3/1.7/6.9 ms inference/NMS/total per 640x640 image at batch-size 32\n", + " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:23<00:00, 1.87it/s]\n", + " all 5000 36335 0.745 0.627 0.68 0.49\n", + "Speed: 5.3/1.6/6.9 ms inference/NMS/total per 640x640 image at batch-size 32\n", "\n", "Evaluating pycocotools mAP... 
saving runs/test/exp/yolov5x_predictions.json...\n", "loading annotations into memory...\n", - "Done (t=0.43s)\n", + "Done (t=0.48s)\n", "creating index...\n", "index created!\n", "Loading and preparing results...\n", - "DONE (t=5.10s)\n", + "DONE (t=5.08s)\n", "creating index...\n", "index created!\n", "Running per image evaluation...\n", "Evaluate annotation type *bbox*\n", - "DONE (t=88.52s).\n", + "DONE (t=90.51s).\n", "Accumulating evaluation results...\n", - "DONE (t=17.17s).\n", - " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.501\n", - " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.687\n", - " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.544\n", - " Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.338\n", - " Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.548\n", - " Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.637\n", - " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.378\n", - " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.628\n", - " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.680\n", - " Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.520\n", - " Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.729\n", - " Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.826\n", + "DONE (t=15.16s).\n", + " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.504\n", + " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.688\n", + " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.546\n", + " Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.351\n", + " Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.551\n", + " Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.644\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.382\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.629\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.681\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.524\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.735\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.827\n", "Results saved to runs/test/exp\n" ], "name": "stdout" @@ -916,28 +916,25 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "cf494627-09b9-4399-ff0c-fdb62b32340a" + "outputId": "e715d09c-5d93-4912-a0df-9da0893f2014" }, "source": [ "# Train YOLOv5s on COCO128 for 3 epochs\n", "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --nosave --cache" ], - "execution_count": null, + "execution_count": 12, "outputs": [ { "output_type": "stream", "text": [ "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 🚀 v4.0-137-g9b11f0c torch 1.8.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", + "YOLOv5 🚀 v5.0-2-g54d6516 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", "\n", - "Namespace(adam=False, batch_size=16, bucket='', cache_images=True, cfg='', data='./data/coco128.yaml', device='', entity=None, epochs=3, evolve=False, exist_ok=False, global_rank=-1, hyp='data/hyp.scratch.yaml', image_weights=False, img_size=[640, 640], 
linear_lr=False, local_rank=-1, log_artifacts=False, log_imgs=16, multi_scale=False, name='exp', noautoanchor=False, nosave=True, notest=False, project='runs/train', quad=False, rect=False, resume=False, save_dir='runs/train/exp', single_cls=False, sync_bn=False, total_batch_size=16, weights='yolov5s.pt', workers=8, world_size=1)\n", - "\u001b[34m\u001b[1mwandb: \u001b[0mInstall Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)\n", - "Start Tensorboard with \"tensorboard --logdir runs/train\", view at http://localhost:6006/\n", - "2021-03-14 04:18:58.124672: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n", + "Namespace(adam=False, artifact_alias='latest', batch_size=16, bbox_interval=-1, bucket='', cache_images=True, cfg='', data='./data/coco128.yaml', device='', entity=None, epochs=3, evolve=False, exist_ok=False, global_rank=-1, hyp='data/hyp.scratch.yaml', image_weights=False, img_size=[640, 640], label_smoothing=0.0, linear_lr=False, local_rank=-1, multi_scale=False, name='exp', noautoanchor=False, nosave=True, notest=False, project='runs/train', quad=False, rect=False, resume=False, save_dir='runs/train/exp', save_period=-1, single_cls=False, sync_bn=False, total_batch_size=16, upload_dataset=False, weights='yolov5s.pt', workers=8, world_size=1)\n", + "\u001b[34m\u001b[1mtensorboard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n", + "2021-04-12 10:29:58.539457: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n", "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.2, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0\n", - "Downloading https://github.com/ultralytics/yolov5/releases/download/v4.0/yolov5s.pt to yolov5s.pt...\n", - "100% 14.1M/14.1M [00:00<00:00, 63.1MB/s]\n", - "\n", + "\u001b[34m\u001b[1mwandb: \u001b[0mInstall Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)\n", "\n", " from n params module arguments \n", " 0 -1 1 3520 models.common.Focus [3, 32, 3] \n", @@ -970,11 +967,10 @@ "Transferred 362/362 items from yolov5s.pt\n", "Scaled weight_decay = 0.0005\n", "Optimizer groups: 62 .bias, 62 conv.weight, 59 other\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '../coco128/labels/train2017' for images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 2956.76it/s]\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: ../coco128/labels/train2017.cache\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 205.30it/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco128/labels/train2017.cache' for images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 604584.36it/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 144.17it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '../coco128/labels/train2017.cache' images and labels... 
128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 796544.38it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 176.73it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 500812.42it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 134.10it/s]\n", "Plotting labels... \n", "\n", "\u001b[34m\u001b[1mautoanchor: \u001b[0mAnalyzing anchors... anchors/target = 4.26, Best Possible Recall (BPR) = 0.9946\n", @@ -984,23 +980,23 @@ "Starting training for 3 epochs...\n", "\n", " Epoch gpu_mem box obj cls total labels img_size\n", - " 0/2 3.29G 0.04237 0.06417 0.02121 0.1277 183 640: 100% 8/8 [00:03<00:00, 2.41it/s]\n", - " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:04<00:00, 1.04s/it]\n", - " all 128 929 0.642 0.637 0.661 0.432\n", + " 0/2 3.29G 0.04368 0.065 0.02127 0.1299 183 640: 100% 8/8 [00:03<00:00, 2.21it/s]\n", + " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:04<00:00, 1.09s/it]\n", + " all 128 929 0.605 0.657 0.666 0.434\n", "\n", " Epoch gpu_mem box obj cls total labels img_size\n", - " 1/2 6.65G 0.04431 0.06403 0.019 0.1273 166 640: 100% 8/8 [00:01<00:00, 5.73it/s]\n", - " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:01<00:00, 3.21it/s]\n", - " all 128 929 0.662 0.626 0.658 0.433\n", + " 1/2 6.65G 0.04556 0.0651 0.01987 0.1305 166 640: 100% 8/8 [00:01<00:00, 5.18it/s]\n", + " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:01<00:00, 2.72it/s]\n", + " all 128 929 0.61 0.66 0.669 0.438\n", "\n", " Epoch gpu_mem box obj cls total labels img_size\n", - " 2/2 6.65G 0.04506 0.06836 0.01913 0.1325 182 640: 100% 8/8 [00:01<00:00, 5.51it/s]\n", - " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:02<00:00, 1.35it/s]\n", - " all 128 929 0.658 0.625 0.661 0.433\n", - "Optimizer stripped from runs/train/exp/weights/last.pt, 14.8MB\n", - "Optimizer stripped from runs/train/exp/weights/best.pt, 14.8MB\n", + " 2/2 6.65G 0.04624 0.06923 0.0196 0.1351 182 640: 100% 8/8 [00:01<00:00, 5.19it/s]\n", + " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:03<00:00, 1.27it/s]\n", + " all 128 929 0.618 0.659 0.671 0.438\n", "3 epochs completed in 0.007 hours.\n", - "\n" + "\n", + "Optimizer stripped from runs/train/exp/weights/last.pt, 14.8MB\n", + "Optimizer stripped from runs/train/exp/weights/best.pt, 14.8MB\n" ], "name": "stdout" } @@ -1263,4 +1259,4 @@ "outputs": [] } ] -} +} \ No newline at end of file From 2eab46e2cfbd4e99b0f5d3d17a5f8c2acfb3285b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 12 Apr 2021 12:33:04 +0200 Subject: [PATCH 0125/1976] Update tutorial.ipynb --- tutorial.ipynb | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index e4344d3ddcec..a8d41d3e1be9 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -607,12 +607,12 @@ "output_type": "stream", "text": [ "Namespace(agnostic_nms=False, augment=False, classes=None, conf_thres=0.25, device='', exist_ok=False, img_size=640, iou_thres=0.45, name='exp', project='runs/detect', save_conf=False, save_txt=False, source='data/images/', update=False, view_img=False, weights=['yolov5s.pt'])\n", - "YOLOv5 🚀 v4.0-137-g9b11f0c torch 1.8.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", + "YOLOv5 🚀 v5.0-1-g0f395b3 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", "\n", "Fusing layers... 
\n", "Model Summary: 224 layers, 7266973 parameters, 0 gradients, 17.0 GFLOPS\n", "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, Done. (0.008s)\n", - "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 1 tie, Done. (0.008s)\n", + "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, Done. (0.008s)\n", "Results saved to runs/detect/exp\n", "Done. (0.087)\n" ], @@ -1259,4 +1259,4 @@ "outputs": [] } ] -} \ No newline at end of file +} From cac8a765c896bdd5a7912b51a476da5abf974a1a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 12 Apr 2021 13:02:40 +0200 Subject: [PATCH 0126/1976] Created using Colaboratory --- tutorial.ipynb | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index a8d41d3e1be9..e36046731afd 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -550,7 +550,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "4576b05f-d6d1-404a-fc99-5663c71e3dc4" + "outputId": "9b022435-4197-41fc-abea-81f86ce857d0" }, "source": [ "!git clone https://github.com/ultralytics/yolov5 # clone repo\n", @@ -561,14 +561,14 @@ "from IPython.display import Image, clear_output # to display images\n", "\n", "clear_output()\n", - "print('Setup complete. Using torch %s %s' % (torch.__version__, torch.cuda.get_device_properties(0) if torch.cuda.is_available() else 'CPU'))" + "print(f\"Setup complete. Using torch {torch.__version__} ({torch.cuda.get_device_properties(0).name if torch.cuda.is_available() else 'CPU'})\")" ], - "execution_count": 1, + "execution_count": 31, "outputs": [ { "output_type": "stream", "text": [ - "Setup complete. Using torch 1.8.1+cu101 _CudaDeviceProperties(name='Tesla V100-SXM2-16GB', major=7, minor=0, total_memory=16160MB, multi_processor_count=80)\n" + "Setup complete. Using torch 1.8.1+cu101 (Tesla V100-SXM2-16GB)\n" ], "name": "stdout" } @@ -681,7 +681,7 @@ "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017val.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../ && rm tmp.zip" ], - "execution_count": 2, + "execution_count": null, "outputs": [ { "output_type": "display_data", @@ -721,7 +721,7 @@ "# Run YOLOv5x on COCO val2017\n", "!python test.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65" ], - "execution_count": 6, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -922,7 +922,7 @@ "# Train YOLOv5s on COCO128 for 3 epochs\n", "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --nosave --cache" ], - "execution_count": 12, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -1259,4 +1259,4 @@ "outputs": [] } ] -} +} \ No newline at end of file From 860ca98832fd59e8495915af829b7caa5e7ec3d3 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 12 Apr 2021 13:10:08 +0200 Subject: [PATCH 0127/1976] Created using Colaboratory --- tutorial.ipynb | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index e36046731afd..245b46aa7d9f 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -528,8 +528,8 @@ "source": [ "\n", "\n", - "This notebook was written by Ultralytics LLC, and is freely available for redistribution under the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/). \n", - "For more information please visit https://github.com/ultralytics/yolov5 and https://www.ultralytics.com." 
+ "This is the **official YOLOv5 🚀 notebook** authored by **Ultralytics**, and is freely available for redistribution under the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/). \n", + "For more information please visit https://github.com/ultralytics/yolov5 and https://www.ultralytics.com. Thank you!" ] }, { @@ -643,7 +643,7 @@ }, "source": [ "# 2. Test\n", - "Test a model on [COCO](https://cocodataset.org/#home) val or test-dev dataset to evaluate trained accuracy. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag. Note that `pycocotools` metrics may be 1-2% better than the equivalent repo metrics, as is visible below, due to slight differences in mAP computation." + "Test a model's accuracy on [COCO](https://cocodataset.org/#home) val or test-dev datasets. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag. Note that `pycocotools` metrics may be ~1% better than the equivalent repo metrics, as is visible below, due to slight differences in mAP computation." ] }, { From 1487bc84ff3babfb502dffb5ffbdc7e02fcb1879 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 12 Apr 2021 13:27:40 +0200 Subject: [PATCH 0128/1976] Update README.md --- README.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 577c908de304..3de9271232c1 100755 --- a/README.md +++ b/README.md @@ -1,16 +1,16 @@ - - + +   CI CPU testing This repository represents Ultralytics open-source research into future object detection methods, and incorporates lessons learned and best practices evolved over thousands of hours of training and evolution on anonymized client datasets. **All code and models are under active development, and are subject to modification or deletion without notice.** Use at your own risk. -

YOLOv5-P5 640 Figure (click to expand)

Figure Notes (click to expand)

@@ -117,7 +117,7 @@ image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 1 tie, Done
 Results saved to runs/detect/exp2
 Done. (0.103s)
 ```
 
 ### PyTorch Hub
 
@@ -147,7 +147,7 @@ $ python train.py --data coco.yaml --cfg yolov5s.yaml --weights '' --batch-size
   yolov5l                                24
   yolov5x                                16
 ```
 
 ## Citation
 

From 14797370646d25e226f0093a5982d5cd54ba729a Mon Sep 17 00:00:00 2001
From: Robin
Date: Thu, 15 Apr 2021 12:26:08 +0100
Subject: [PATCH 0129/1976] Flask REST API Example (#2732)

* add files

* Update README.md

* Update README.md

* Update restapi.py

pretrained=True and model.eval() are used by default when loading a model now, so no need to call them manually.

* PEP8 reformat

* PEP8 reformat

Co-authored-by: Glenn Jocher
---
 utils/flask_rest_api/README.md          | 51 +++++++++++++++++++++++++
 utils/flask_rest_api/example_request.py | 13 +++++++
 utils/flask_rest_api/restapi.py         | 38 ++++++++++++++++++
 3 files changed, 102 insertions(+)
 create mode 100644 utils/flask_rest_api/README.md
 create mode 100644 utils/flask_rest_api/example_request.py
 create mode 100644 utils/flask_rest_api/restapi.py

diff --git a/utils/flask_rest_api/README.md b/utils/flask_rest_api/README.md
new file mode 100644
index 000000000000..0cdc51be692d
--- /dev/null
+++ b/utils/flask_rest_api/README.md
@@ -0,0 +1,51 @@
+# Flask REST API
+[REST](https://en.wikipedia.org/wiki/Representational_state_transfer) [API](https://en.wikipedia.org/wiki/API)s are commonly used to expose Machine Learning (ML) models to other services. This folder contains an example REST API created using Flask to expose the `yolov5s` model from [PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5/).
+
+## Requirements
+
+[Flask](https://palletsprojects.com/p/flask/) is required. Install with:
+```shell
+$ pip install Flask
+```
+
+## Run
+
+After Flask installation run:
+
+```shell
+$ python3 restapi.py --port 5000
+```
+
+Then use [curl](https://curl.se/) to perform a request:
+
+```shell
+$ curl -X POST -F image=@zidane.jpg 'http://localhost:5000/v1/object-detection/yolov5s'
+```
+
+The model inference results are returned:
+
+```shell
+[{'class': 0,
+  'confidence': 0.8197850585,
+  'name': 'person',
+  'xmax': 1159.1403808594,
+  'xmin': 750.912902832,
+  'ymax': 711.2583007812,
+  'ymin': 44.0350036621},
+ {'class': 0,
+  'confidence': 0.5667674541,
+  'name': 'person',
+  'xmax': 1065.5523681641,
+  'xmin': 116.0448303223,
+  'ymax': 713.8904418945,
+  'ymin': 198.4603881836},
+ {'class': 27,
+  'confidence': 0.5661227107,
+  'name': 'tie',
+  'xmax': 516.7975463867,
+  'xmin': 416.6880187988,
+  'ymax': 717.0524902344,
+  'ymin': 429.2020568848}]
+```
+
+An example Python script to perform inference using [requests](https://docs.python-requests.org/en/master/) is given in `example_request.py`.

diff --git a/utils/flask_rest_api/example_request.py b/utils/flask_rest_api/example_request.py
new file mode 100644
index 000000000000..ff21f30f93ca
--- /dev/null
+++ b/utils/flask_rest_api/example_request.py
@@ -0,0 +1,13 @@
+"""Perform test request"""
+import pprint
+
+import requests
+
+DETECTION_URL = "http://localhost:5000/v1/object-detection/yolov5s"
+TEST_IMAGE = "zidane.jpg"
+
+image_data = open(TEST_IMAGE, "rb").read()
+
+response = requests.post(DETECTION_URL, files={"image": image_data}).json()
+
+pprint.pprint(response)

diff --git a/utils/flask_rest_api/restapi.py b/utils/flask_rest_api/restapi.py
new file mode 100644
index 000000000000..9d88f618905d
--- /dev/null
+++ b/utils/flask_rest_api/restapi.py
@@ -0,0 +1,38 @@
+"""
+Run a REST API exposing the yolov5s object detection model
+"""
+import argparse
+import io
+
+import torch
+from PIL import Image
+from flask import Flask, request
+
+app = Flask(__name__)
+
+DETECTION_URL = "/v1/object-detection/yolov5s"
+
+
+@app.route(DETECTION_URL, methods=["POST"])
+def predict():
+    if not request.method == "POST":
+        return
+
+    if request.files.get("image"):
+        image_file = request.files["image"]
+        image_bytes = image_file.read()
+
+        img = Image.open(io.BytesIO(image_bytes))
+
+        results = model(img, size=640)
+        data = results.pandas().xyxy[0].to_json(orient="records")
+        return data
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description="Flask API exposing YOLOv5 model")
+    parser.add_argument("--port", default=5000, type=int, help="port number")
+    args = parser.parse_args()
+
+    model = torch.hub.load("ultralytics/yolov5", "yolov5s", force_reload=True).autoshape()  # force_reload to recache
+    app.run(host="0.0.0.0", port=args.port)  # debug=True causes Restarting with stat
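A client-side sketch to accompany the API above: parse the returned JSON and keep only confident detections. The endpoint and field names follow the README above, and the 0.5 confidence cutoff is purely illustrative.

```python
# Minimal sketch, assuming the restapi.py server above is running on localhost:5000
import requests

DETECTION_URL = "http://localhost:5000/v1/object-detection/yolov5s"

with open("zidane.jpg", "rb") as f:
    detections = requests.post(DETECTION_URL, files={"image": f}).json()

confident = [d for d in detections if d["confidence"] > 0.5]  # illustrative threshold
for d in confident:
    print(f"{d['name']}: {d['confidence']:.2f} at "
          f"({d['xmin']:.0f}, {d['ymin']:.0f}, {d['xmax']:.0f}, {d['ymax']:.0f})")
```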
From e5d71223b83b9de2911a3d53685de6a20a2dc0f1 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Thu, 15 Apr 2021 16:45:50 +0200
Subject: [PATCH 0130/1976] Update README.md

---
 README.md | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/README.md b/README.md
index 3de9271232c1..27ea18da1932 100755
--- a/README.md
+++ b/README.md
@@ -121,19 +121,18 @@ Done. (0.103s)
 ```
 
 ### PyTorch Hub
 
-To run **batched inference** with YOLOv5 and [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36):
+Inference with YOLOv5 and [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36):
 ```python
 import torch
 
 # Model
 model = torch.hub.load('ultralytics/yolov5', 'yolov5s')
 
-# Images
-dir = 'https://github.com/ultralytics/yolov5/raw/master/data/images/'
-imgs = [dir + f for f in ('zidane.jpg', 'bus.jpg')]  # batch of images
+# Image
+img = 'https://github.com/ultralytics/yolov5/raw/master/data/images/zidane.jpg'
 
 # Inference
-results = model(imgs)
+results = model(img)
 results.print()  # or .show(), .save()
 ```
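Note that the simplified README example above does not remove batched inference; passing a list still runs all images in one batch. A short sketch under the same PyTorch Hub assumptions, using image URLs from the repository:

```python
import torch

model = torch.hub.load('ultralytics/yolov5', 'yolov5s')

# A list input runs batched inference; elements may be paths, URLs, PIL or numpy images
imgs = ['https://github.com/ultralytics/yolov5/raw/master/data/images/zidane.jpg',
        'https://github.com/ultralytics/yolov5/raw/master/data/images/bus.jpg']
results = model(imgs)
results.print()  # per-image summaries; results.xyxy is a list of detection tensors
```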
From 1f3e482bce89a348bcdace91dfc89c5e47862066 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Fri, 16 Apr 2021 14:03:27 +0200
Subject: [PATCH 0131/1976] ONNX Simplifier (#2815)

* ONNX Simplifier

Add ONNX Simplifier to ONNX export pipeline in export.py. Will auto-install onnx-simplifier if onnx is installed but onnx-simplifier is not.

* Update general.py
---
 models/export.py | 45 ++++++++++++++++++++++++++++++---------------
 utils/general.py |  2 +-
 2 files changed, 31 insertions(+), 16 deletions(-)

diff --git a/models/export.py b/models/export.py
index 0bb5398e4841..bec9194319c1 100644
--- a/models/export.py
+++ b/models/export.py
@@ -1,7 +1,7 @@
 """Exports a YOLOv5 *.pt model to ONNX and TorchScript formats
 
 Usage:
-    $ export PYTHONPATH="$PWD" && python models/export.py --weights ./weights/yolov5s.pt --img 640 --batch 1
+    $ export PYTHONPATH="$PWD" && python models/export.py --weights yolov5s.pt --img 640 --batch 1
 """
 
 import argparse
@@ -16,7 +16,7 @@
 import models
 from models.experimental import attempt_load
 from utils.activations import Hardswish, SiLU
-from utils.general import set_logging, check_img_size
+from utils.general import colorstr, check_img_size, check_requirements, set_logging
 from utils.torch_utils import select_device
 
 if __name__ == '__main__':
@@ -59,20 +59,22 @@
     y = model(img)  # dry run
 
     # TorchScript export
+    prefix = colorstr('TorchScript:')
     try:
-        print('\nStarting TorchScript export with torch %s...' % torch.__version__)
+        print(f'\n{prefix} starting export with torch {torch.__version__}...')
         f = opt.weights.replace('.pt', '.torchscript.pt')  # filename
         ts = torch.jit.trace(model, img, strict=False)
         ts.save(f)
-        print('TorchScript export success, saved as %s' % f)
+        print(f'{prefix} export success, saved as {f}')
     except Exception as e:
-        print('TorchScript export failure: %s' % e)
+        print(f'{prefix} export failure: {e}')
 
     # ONNX export
+    prefix = colorstr('ONNX:')
     try:
         import onnx
 
-        print('\nStarting ONNX export with onnx %s...' % onnx.__version__)
+        print(f'{prefix} starting export with onnx {onnx.__version__}...')
         f = opt.weights.replace('.pt', '.onnx')  # filename
         torch.onnx.export(model, img, f, verbose=False, opset_version=12, input_names=['images'],
                           output_names=['classes', 'boxes'] if y is None else ['output'],
@@ -80,25 +82,38 @@
                                         'output': {0: 'batch', 2: 'y', 3: 'x'}} if opt.dynamic else None)
 
         # Checks
-        onnx_model = onnx.load(f)  # load onnx model
-        onnx.checker.check_model(onnx_model)  # check onnx model
-        # print(onnx.helper.printable_graph(onnx_model.graph))  # print a human readable model
-        print('ONNX export success, saved as %s' % f)
+        model_onnx = onnx.load(f)  # load onnx model
+        onnx.checker.check_model(model_onnx)  # check onnx model
+        # print(onnx.helper.printable_graph(model_onnx.graph))  # print
+
+        # Simplify
+        try:
+            check_requirements(['onnx-simplifier'])
+            import onnxsim
+
+            print(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...')
+            model_onnx, check = onnxsim.simplify(model_onnx)
+            assert check, 'simplified ONNX model could not be validated'
+            onnx.save(model_onnx, f)
+        except Exception as e:
+            print(f'{prefix} simplifier failure: {e}')
+        print(f'{prefix} export success, saved as {f}')
     except Exception as e:
-        print('ONNX export failure: %s' % e)
+        print(f'{prefix} export failure: {e}')
 
     # CoreML export
+    prefix = colorstr('CoreML:')
     try:
        import coremltools as ct
 
-        print('\nStarting CoreML export with coremltools %s...' % ct.__version__)
+        print(f'{prefix} starting export with coremltools {ct.__version__}...')
         # convert model from torchscript and apply pixel scaling as per detect.py
         model = ct.convert(ts, inputs=[ct.ImageType(name='image', shape=img.shape, scale=1 / 255.0, bias=[0, 0, 0])])
         f = opt.weights.replace('.pt', '.mlmodel')  # filename
         model.save(f)
-        print('CoreML export success, saved as %s' % f)
+        print(f'{prefix} export success, saved as {f}')
     except Exception as e:
-        print('CoreML export failure: %s' % e)
+        print(f'{prefix} export failure: {e}')
 
     # Finish
-    print('\nExport complete (%.2fs). Visualize with https://github.com/lutzroeder/netron.' % (time.time() - t))
+    print(f'\nExport complete ({time.time() - t:.2f}s). Visualize with https://github.com/lutzroeder/netron.')
diff --git a/utils/general.py b/utils/general.py
index 413eb5b8fa97..ac3a6981b3d4 100755
--- a/utils/general.py
+++ b/utils/general.py
@@ -111,7 +111,7 @@ def check_requirements(requirements='requirements.txt', exclude=()):
         except Exception as e:  # DistributionNotFound or VersionConflict if requirements not met
             n += 1
             print(f"{prefix} {e.req} not found and is required by YOLOv5, attempting auto-update...")
-            print(subprocess.check_output(f"pip install '{e.req}'", shell=True).decode())
+            print(subprocess.check_output(f"pip install {e.req}", shell=True).decode())
 
         if n:  # if packages updated
             source = file.resolve() if 'file' in locals() else requirements
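The simplification step added above can also be run standalone on an already exported model. A minimal sketch, assuming `onnx` and `onnx-simplifier` are installed and a `yolov5s.onnx` file exists in the working directory; the `-sim` output filename is an arbitrary choice:

```python
import onnx
import onnxsim

model_onnx = onnx.load('yolov5s.onnx')            # load the exported model
onnx.checker.check_model(model_onnx)              # sanity-check the graph
model_onnx, check = onnxsim.simplify(model_onnx)  # fold constants, simplify ops
assert check, 'simplified ONNX model could not be validated'
onnx.save(model_onnx, 'yolov5s-sim.onnx')         # save alongside the original
```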
From aff03be35a8f5c7fb7da8bfd2f26a93cde416fbc Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Fri, 16 Apr 2021 17:58:28 +0200
Subject: [PATCH 0132/1976] YouTube Bug Fix (#2818)

Fix for #2810

```shell
python detect.py --source 0
```

introduced by YouTube Livestream Detection PR #2752
---
 utils/datasets.py | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/utils/datasets.py b/utils/datasets.py
index ec597b628106..b81c634dcb7a 100755
--- a/utils/datasets.py
+++ b/utils/datasets.py
@@ -272,15 +272,15 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32):
         n = len(sources)
         self.imgs = [None] * n
         self.sources = [clean_str(x) for x in sources]  # clean source names for later
-        for i, s in enumerate(sources):
-            # Start the thread to read frames from the video stream
+        for i, s in enumerate(sources):  # index, source
+            # Start thread to read frames from video stream
             print(f'{i + 1}/{n}: {s}... ', end='')
-            url = eval(s) if s.isnumeric() else s
-            if 'youtube.com/' in url or 'youtu.be/' in url:  # if source is YouTube video
+            if 'youtube.com/' in s or 'youtu.be/' in s:  # if source is YouTube video
                 check_requirements(('pafy', 'youtube_dl'))
                 import pafy
-                url = pafy.new(url).getbest(preftype="mp4").url
-            cap = cv2.VideoCapture(url)
+                s = pafy.new(s).getbest(preftype="mp4").url  # YouTube URL
+            s = eval(s) if s.isnumeric() else s  # i.e. s = '0' local webcam
+            cap = cv2.VideoCapture(s)
             assert cap.isOpened(), f'Failed to open {s}'
             w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
             h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

From c15e25c40fa03e91a10708f9af27e23184d8faa2 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Sun, 18 Apr 2021 13:47:40 +0200
Subject: [PATCH 0133/1976] PyTorch Hub cv2 .save() .show() bug fix (#2831)

* PyTorch Hub cv2 .save() .show() bug fix

cv2.rectangle() was failing on non-contiguous np array inputs. This checks for contiguous arrays and applies np.ascontiguousarray() as necessary:

```python
imgs[i] = im if im.data.contiguous else np.ascontiguousarray(im)  # update
```

* Update plots.py

```python
assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to plot_one_box() input image.'
```

* Update hubconf.py

Expand CI tests to OpenCV image.
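A minimal sketch of the failure mode this commit guards against, assuming only numpy and opencv-python are installed; the channel-reversal slice mirrors the BGR-to-RGB OpenCV input used in the hubconf.py test below:

```python
import cv2
import numpy as np

im = np.zeros((320, 640, 3), dtype=np.uint8)
rgb = im[:, :, ::-1]                 # reversed-channel view is non-contiguous
print(rgb.data.contiguous)           # False
rgb = np.ascontiguousarray(rgb)      # copy into contiguous memory
print(rgb.data.contiguous)           # True
cv2.rectangle(rgb, (10, 10), (100, 100), (255, 0, 0), 2)  # now safe to draw on
```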
---
 hubconf.py       | 10 ++++++----
 models/common.py |  4 ++--
 utils/plots.py   | 26 ++++++++++++++------------
 3 files changed, 22 insertions(+), 18 deletions(-)

diff --git a/hubconf.py b/hubconf.py
index a2a43a7a10cb..d89502f4ee76 100644
--- a/hubconf.py
+++ b/hubconf.py
@@ -124,13 +124,15 @@ def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True):
     # model = custom(path_or_model='path/to/model.pt')  # custom example
 
     # Verify inference
+    import cv2
     import numpy as np
     from PIL import Image
 
-    imgs = [Image.open('data/images/bus.jpg'),  # PIL
-            'data/images/zidane.jpg',  # filename
-            'https://github.com/ultralytics/yolov5/raw/master/data/images/bus.jpg',  # URI
-            np.zeros((640, 480, 3))]  # numpy
+    imgs = ['data/images/zidane.jpg',  # filename
+            'https://github.com/ultralytics/yolov5/releases/download/v1.0/zidane.jpg',  # URI
+            cv2.imread('data/images/bus.jpg')[:, :, ::-1],  # OpenCV
+            Image.open('data/images/bus.jpg'),  # PIL
+            np.zeros((320, 640, 3))]  # numpy
 
     results = model(imgs)  # batched inference
     results.print()

diff --git a/models/common.py b/models/common.py
index 1130471e904b..2fdc0e0b70ca 100644
--- a/models/common.py
+++ b/models/common.py
@@ -240,7 +240,7 @@ def autoshape(self):
     @torch.no_grad()
     def forward(self, imgs, size=640, augment=False, profile=False):
         # Inference from various sources. For height=640, width=1280, RGB images example inputs are:
-        #   filename:   imgs = 'data/samples/zidane.jpg'
+        #   filename:   imgs = 'data/images/zidane.jpg'
         #   URI:             = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/zidane.jpg'
         #   OpenCV:          = cv2.imread('image.jpg')[:,:,::-1]  # HWC BGR to RGB x(640,1280,3)
         #   PIL:             = Image.open('image.jpg')  # HWC x(640,1280,3)
@@ -271,7 +271,7 @@ def forward(self, imgs, size=640, augment=False, profile=False):
                 shape0.append(s)  # image shape
                 g = (size / max(s))  # gain
                 shape1.append([y * g for y in s])
-                imgs[i] = im  # update
+                imgs[i] = im if im.data.contiguous else np.ascontiguousarray(im)  # update
             shape1 = [make_divisible(x, int(self.stride.max())) for x in np.stack(shape1, 0).max(0)]  # inference shape
             x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs]  # pad
             x = np.stack(x, 0) if n > 1 else x[0][None]  # stack

diff --git a/utils/plots.py b/utils/plots.py
index 5b23a34f5141..09b6bcd15a9f 100644
--- a/utils/plots.py
+++ b/utils/plots.py
@@ -54,32 +54,34 @@ def butter_lowpass(cutoff, fs, order):
     return filtfilt(b, a, data)  # forward-backward filter
 
 
-def plot_one_box(x, img, color=None, label=None, line_thickness=3):
-    # Plots one bounding box on image img
-    tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1  # line/font thickness
+def plot_one_box(x, im, color=None, label=None, line_thickness=3):
+    # Plots one bounding box on image 'im' using OpenCV
+    assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to plot_one_box() input image.'
+    tl = line_thickness or round(0.002 * (im.shape[0] + im.shape[1]) / 2) + 1  # line/font thickness
     color = color or [random.randint(0, 255) for _ in range(3)]
     c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
-    cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
+    cv2.rectangle(im, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
     if label:
         tf = max(tl - 1, 1)  # font thickness
         t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
         c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
-        cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA)  # filled
-        cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
+        cv2.rectangle(im, c1, c2, color, -1, cv2.LINE_AA)  # filled
+        cv2.putText(im, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
 
 
-def plot_one_box_PIL(box, img, color=None, label=None, line_thickness=None):
-    img = Image.fromarray(img)
-    draw = ImageDraw.Draw(img)
-    line_thickness = line_thickness or max(int(min(img.size) / 200), 2)
+def plot_one_box_PIL(box, im, color=None, label=None, line_thickness=None):
+    # Plots one bounding box on image 'im' using PIL
+    im = Image.fromarray(im)
+    draw = ImageDraw.Draw(im)
+    line_thickness = line_thickness or max(int(min(im.size) / 200), 2)
     draw.rectangle(box, width=line_thickness, outline=tuple(color))  # plot
     if label:
-        fontsize = max(round(max(img.size) / 40), 12)
+        fontsize = max(round(max(im.size) / 40), 12)
         font = ImageFont.truetype("Arial.ttf", fontsize)
         txt_width, txt_height = font.getsize(label)
         draw.rectangle([box[0], box[1] - txt_height + 4, box[0] + txt_width, box[1]], fill=tuple(color))
         draw.text((box[0], box[1] - txt_height + 1), label, fill=(255, 255, 255), font=font)
-    return np.asarray(img)
+    return np.asarray(im)
 
 
 def plot_wh_methods():  # from utils.plots import *; plot_wh_methods()
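A short usage sketch for the updated `plot_one_box()`, intended to be run from the repository root; the box coordinates and label here are hypothetical:

```python
import cv2
from utils.plots import plot_one_box

im = cv2.imread('data/images/zidane.jpg')  # BGR uint8, contiguous as loaded
box = [100, 100, 300, 300]                 # illustrative xyxy pixel coordinates
plot_one_box(box, im, color=(0, 255, 0), label='person 0.85', line_thickness=2)
cv2.imwrite('zidane_boxed.jpg', im)        # box and label are drawn in place
```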
From 803f51bceedb502e8f112b05911b805bf9ddac6b Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Sun, 18 Apr 2021 14:28:27 +0200
Subject: [PATCH 0134/1976] Create FUNDING.yml (#2832)

---
 FUNDING.yml | 5 +++++
 1 file changed, 5 insertions(+)
 create mode 100644 FUNDING.yml

diff --git a/FUNDING.yml b/FUNDING.yml
new file mode 100644
index 000000000000..56798bae1769
--- /dev/null
+++ b/FUNDING.yml
@@ -0,0 +1,5 @@
+# These are supported funding model platforms
+
+github: [glenn-jocher]
+patreon: ultralytics
+open_collective: ultralytics

From 238583b7d5c19029920d56c417c406c829569c75 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Sun, 18 Apr 2021 14:40:58 +0200
Subject: [PATCH 0135/1976] Update FUNDING.yml (#2833)

* Update FUNDING.yml

* move FUNDING.yml to ./github
---
 FUNDING.yml => .github/FUNDING.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
 rename FUNDING.yml => .github/FUNDING.yml (80%)

diff --git a/FUNDING.yml b/.github/FUNDING.yml
similarity index 80%
rename from FUNDING.yml
rename to .github/FUNDING.yml
index 56798bae1769..3da386f7e724 100644
--- a/FUNDING.yml
+++ b/.github/FUNDING.yml
@@ -1,5 +1,5 @@
 # These are supported funding model platforms
 
-github: [glenn-jocher]
+github: glenn-jocher
 patreon: ultralytics
 open_collective: ultralytics

From 1df8c6c963d31ce84895101a70e45e0afdcb0bc2 Mon Sep 17 00:00:00 2001
From: Tim Stokman <41363+timstokman@users.noreply.github.com>
Date: Tue, 20 Apr 2021 13:54:03 +0200
Subject: [PATCH 0136/1976] Fix ONNX dynamic axes export support with onnx simplifier, make onnx simplifier optional (#2856)

* Ensure dynamic export works successfully, onnx simplifier optional

* Update export.py

* add
dashes Co-authored-by: Tim Co-authored-by: Glenn Jocher --- models/export.py | 34 +++++++++++++++++++--------------- 1 file changed, 19 insertions(+), 15 deletions(-) diff --git a/models/export.py b/models/export.py index bec9194319c1..c527a47951cb 100644 --- a/models/export.py +++ b/models/export.py @@ -21,12 +21,13 @@ if __name__ == '__main__': parser = argparse.ArgumentParser() - parser.add_argument('--weights', type=str, default='./yolov5s.pt', help='weights path') # from yolov5/models/ + parser.add_argument('--weights', type=str, default='./yolov5s.pt', help='weights path') parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image size') # height, width parser.add_argument('--batch-size', type=int, default=1, help='batch size') - parser.add_argument('--dynamic', action='store_true', help='dynamic ONNX axes') parser.add_argument('--grid', action='store_true', help='export Detect() layer grid') parser.add_argument('--device', default='cpu', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + parser.add_argument('--dynamic', action='store_true', help='dynamic ONNX axes') # ONNX-only + parser.add_argument('--simplify', action='store_true', help='simplify ONNX model') # ONNX-only opt = parser.parse_args() opt.img_size *= 2 if len(opt.img_size) == 1 else 1 # expand print(opt) @@ -58,7 +59,7 @@ model.model[-1].export = not opt.grid # set Detect() layer grid export y = model(img) # dry run - # TorchScript export + # TorchScript export ----------------------------------------------------------------------------------------------- prefix = colorstr('TorchScript:') try: print(f'\n{prefix} starting export with torch {torch.__version__}...') @@ -69,7 +70,7 @@ except Exception as e: print(f'{prefix} export failure: {e}') - # ONNX export + # ONNX export ------------------------------------------------------------------------------------------------------ prefix = colorstr('ONNX:') try: import onnx @@ -87,21 +88,24 @@ # print(onnx.helper.printable_graph(model_onnx.graph)) # print # Simplify - try: - check_requirements(['onnx-simplifier']) - import onnxsim - - print(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...') - model_onnx, check = onnxsim.simplify(model_onnx) - assert check, 'assert check failed' - onnx.save(model_onnx, f) - except Exception as e: - print(f'{prefix} simplifier failure: {e}') + if opt.simplify: + try: + check_requirements(['onnx-simplifier']) + import onnxsim + + print(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...') + model_onnx, check = onnxsim.simplify(model_onnx, + dynamic_input_shape=opt.dynamic, + input_shapes={'images': list(img.shape)} if opt.dynamic else None) + assert check, 'assert check failed' + onnx.save(model_onnx, f) + except Exception as e: + print(f'{prefix} simplifier failure: {e}') print(f'{prefix} export success, saved as {f}') except Exception as e: print(f'{prefix} export failure: {e}') - # CoreML export + # CoreML export ---------------------------------------------------------------------------------------------------- prefix = colorstr('CoreML:') try: import coremltools as ct From c5c647e2816f70f17843755ecfc913a11e1d6492 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 20 Apr 2021 19:47:07 +0200 Subject: [PATCH 0137/1976] Update increment_path() to handle file paths (#2867) --- utils/general.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/utils/general.py b/utils/general.py index ac3a6981b3d4..c7d084e09326 100755 --- a/utils/general.py +++ 
b/utils/general.py @@ -591,14 +591,16 @@ def apply_classifier(x, model, img, im0): return x -def increment_path(path, exist_ok=True, sep=''): - # Increment path, i.e. runs/exp --> runs/exp{sep}0, runs/exp{sep}1 etc. +def increment_path(path, exist_ok=False, sep=''): + # Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc. path = Path(path) # os-agnostic - if (path.exists() and exist_ok) or (not path.exists()): + if not path.exists() or exist_ok: return str(path) else: + suffix = path.suffix + path = path.with_suffix('') dirs = glob.glob(f"{path}{sep}*") # similar paths matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs] i = [int(m.groups()[0]) for m in matches if m] # indices n = max(i) + 1 if i else 2 # increment number - return f"{path}{sep}{n}" # update path + return f"{path}{sep}{n}{suffix}" # update path From c949fc86d1914bdbf0a61d193855c1b4e1536da5 Mon Sep 17 00:00:00 2001 From: Burhan Date: Wed, 21 Apr 2021 05:51:08 +0800 Subject: [PATCH 0138/1976] Detection cropping+saving feature addition for detect.py and PyTorch Hub (#2827) * Update detect.py * Update detect.py * Update greetings.yml * Update cropping * cleanup * Update increment_path() * Update common.py * Update detect.py * Update detect.py * Update detect.py * Update common.py * cleanup * Update detect.py Co-authored-by: Glenn Jocher --- detect.py | 18 +++++++++++------- models/common.py | 32 ++++++++++++++++++++------------ test.py | 2 +- train.py | 6 +++--- utils/general.py | 27 +++++++++++++++++++++------ 5 files changed, 56 insertions(+), 29 deletions(-) diff --git a/detect.py b/detect.py index c0707da69e6a..081ae3d89e2e 100644 --- a/detect.py +++ b/detect.py @@ -10,19 +10,19 @@ from models.experimental import attempt_load from utils.datasets import LoadStreams, LoadImages from utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier, \ - scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path + scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path, save_one_box from utils.plots import plot_one_box from utils.torch_utils import select_device, load_classifier, time_synchronized -def detect(save_img=False): +def detect(): source, weights, view_img, save_txt, imgsz = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size save_img = not opt.nosave and not source.endswith('.txt') # save inference images webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith( ('rtsp://', 'rtmp://', 'http://', 'https://')) # Directories - save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run + save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Initialize @@ -84,7 +84,7 @@ def detect(save_img=False): if webcam: # batch_size >= 1 p, s, im0, frame = path[i], '%g: ' % i, im0s[i].copy(), dataset.count else: - p, s, im0, frame = path, '', im0s, getattr(dataset, 'frame', 0) + p, s, im0, frame = path, '', im0s.copy(), getattr(dataset, 'frame', 0) p = Path(p) # to Path save_path = str(save_dir / p.name) # img.jpg @@ -108,9 +108,12 @@ def detect(save_img=False): with open(txt_path + '.txt', 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') - if save_img or view_img: # Add bbox to image - label = f'{names[int(cls)]} {conf:.2f}' - plot_one_box(xyxy, im0, label=label, 
color=colors[int(cls)], line_thickness=3) + if save_img or opt.save_crop or view_img: # Add bbox to image + c = int(cls) # integer class + label = f'{names[c]} {conf:.2f}' + plot_one_box(xyxy, im0, label=label, color=colors[c], line_thickness=3) + if opt.save_crop: + save_one_box(xyxy, im0s, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) # Print time (inference + NMS) print(f'{s}Done. ({t2 - t1:.3f}s)') @@ -157,6 +160,7 @@ def detect(save_img=False): parser.add_argument('--view-img', action='store_true', help='display results') parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') + parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes') parser.add_argument('--nosave', action='store_true', help='do not save images/videos') parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3') parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') diff --git a/models/common.py b/models/common.py index 2fdc0e0b70ca..a28621904b0e 100644 --- a/models/common.py +++ b/models/common.py @@ -13,7 +13,7 @@ from torch.cuda import amp from utils.datasets import letterbox -from utils.general import non_max_suppression, make_divisible, scale_coords, increment_path, xyxy2xywh +from utils.general import non_max_suppression, make_divisible, scale_coords, increment_path, xyxy2xywh, save_one_box from utils.plots import color_list, plot_one_box from utils.torch_utils import time_synchronized @@ -311,29 +311,33 @@ def __init__(self, imgs, pred, files, times=None, names=None, shape=None): self.t = tuple((times[i + 1] - times[i]) * 1000 / self.n for i in range(3)) # timestamps (ms) self.s = shape # inference BCHW shape - def display(self, pprint=False, show=False, save=False, render=False, save_dir=''): + def display(self, pprint=False, show=False, save=False, crop=False, render=False, save_dir=Path('')): colors = color_list() - for i, (img, pred) in enumerate(zip(self.imgs, self.pred)): - str = f'image {i + 1}/{len(self.pred)}: {img.shape[0]}x{img.shape[1]} ' + for i, (im, pred) in enumerate(zip(self.imgs, self.pred)): + str = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} ' if pred is not None: for c in pred[:, -1].unique(): n = (pred[:, -1] == c).sum() # detections per class str += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string - if show or save or render: + if show or save or render or crop: for *box, conf, cls in pred: # xyxy, confidence, class label = f'{self.names[int(cls)]} {conf:.2f}' - plot_one_box(box, img, label=label, color=colors[int(cls) % 10]) - img = Image.fromarray(img.astype(np.uint8)) if isinstance(img, np.ndarray) else img # from np + if crop: + save_one_box(box, im, file=save_dir / 'crops' / self.names[int(cls)] / self.files[i]) + else: # all others + plot_one_box(box, im, label=label, color=colors[int(cls) % 10]) + + im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np if pprint: print(str.rstrip(', ')) if show: - img.show(self.files[i]) # show + im.show(self.files[i]) # show if save: f = self.files[i] - img.save(Path(save_dir) / f) # save + im.save(save_dir / f) # save print(f"{'Saved' * (i == 0)} {f}", end=',' if i < self.n - 1 else f' to {save_dir}\n') if render: - self.imgs[i] = np.asarray(img) + self.imgs[i] = np.asarray(im) def print(self): self.display(pprint=True) # 
print results @@ -343,10 +347,14 @@ def show(self): self.display(show=True) # show results def save(self, save_dir='runs/hub/exp'): - save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/hub/exp') # increment save_dir - Path(save_dir).mkdir(parents=True, exist_ok=True) + save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/hub/exp', mkdir=True) # increment save_dir self.display(save=True, save_dir=save_dir) # save results + def crop(self, save_dir='runs/hub/exp'): + save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/hub/exp', mkdir=True) # increment save_dir + self.display(crop=True, save_dir=save_dir) # crop results + print(f'Saved results to {save_dir}\n') + def render(self): self.display(render=True) # render results return self.imgs diff --git a/test.py b/test.py index d099699bcad8..db1651d07f65 100644 --- a/test.py +++ b/test.py @@ -49,7 +49,7 @@ def test(data, device = select_device(opt.device, batch_size=batch_size) # Directories - save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run + save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model diff --git a/train.py b/train.py index 82043b7fff34..17b5ac5dda50 100644 --- a/train.py +++ b/train.py @@ -41,7 +41,7 @@ def train(hyp, opt, device, tb_writer=None): logger.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items())) save_dir, epochs, batch_size, total_batch_size, weights, rank = \ - Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank + opt.save_dir, opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank # Directories wdir = save_dir / 'weights' @@ -69,7 +69,7 @@ def train(hyp, opt, device, tb_writer=None): if rank in [-1, 0]: opt.hyp = hyp # add hyperparameters run_id = torch.load(weights).get('wandb_id') if weights.endswith('.pt') and os.path.isfile(weights) else None - wandb_logger = WandbLogger(opt, Path(opt.save_dir).stem, run_id, data_dict) + wandb_logger = WandbLogger(opt, save_dir.stem, run_id, data_dict) loggers['wandb'] = wandb_logger.wandb data_dict = wandb_logger.data_dict if wandb_logger.wandb: @@ -577,7 +577,7 @@ def train(hyp, opt, device, tb_writer=None): assert opt.local_rank == -1, 'DDP mode not implemented for --evolve' opt.notest, opt.nosave = True, True # only test/save final epoch # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices - yaml_file = Path(opt.save_dir) / 'hyp_evolved.yaml' # save best result here + yaml_file = opt.save_dir / 'hyp_evolved.yaml' # save best result here if opt.bucket: os.system('gsutil cp gs://%s/evolve.txt .' 
% opt.bucket) # download evolve.txt if exists diff --git a/utils/general.py b/utils/general.py index c7d084e09326..817023f33dd3 100755 --- a/utils/general.py +++ b/utils/general.py @@ -557,7 +557,7 @@ def print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''): def apply_classifier(x, model, img, im0): - # applies a second stage classifier to yolo outputs + # Apply a second stage classifier to yolo outputs im0 = [im0] if isinstance(im0, np.ndarray) else im0 for i, d in enumerate(x): # per image if d is not None and len(d): @@ -591,16 +591,31 @@ def apply_classifier(x, model, img, im0): return x -def increment_path(path, exist_ok=False, sep=''): +def save_one_box(xyxy, im, file='image.jpg', gain=1.02, pad=10, square=False, BGR=False): + # Save an image crop as {file} with crop size multiplied by {gain} and padded by {pad} pixels + xyxy = torch.tensor(xyxy).view(-1, 4) + b = xyxy2xywh(xyxy) # boxes + if square: + b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # attempt rectangle to square + b[:, 2:] = b[:, 2:] * gain + pad # box wh * gain + pad + xyxy = xywh2xyxy(b).long() + clip_coords(xyxy, im.shape) + crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2])] + cv2.imwrite(str(increment_path(file, mkdir=True).with_suffix('.jpg')), crop if BGR else crop[..., ::-1]) + + +def increment_path(path, exist_ok=False, sep='', mkdir=False): # Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc. path = Path(path) # os-agnostic - if not path.exists() or exist_ok: - return str(path) - else: + if path.exists() and not exist_ok: suffix = path.suffix path = path.with_suffix('') dirs = glob.glob(f"{path}{sep}*") # similar paths matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs] i = [int(m.groups()[0]) for m in matches if m] # indices n = max(i) + 1 if i else 2 # increment number - return f"{path}{sep}{n}{suffix}" # update path + path = Path(f"{path}{sep}{n}{suffix}") # update path + dir = path if path.suffix == '' else path.parent # directory + if not dir.exists() and mkdir: + dir.mkdir(parents=True, exist_ok=True) # make directory + return path From f7bc685c2c0f57005b83355715cb7282e61416eb Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 21 Apr 2021 14:34:45 +0200 Subject: [PATCH 0139/1976] Implement yaml.safe_load() (#2876) * Implement yaml.safe_load() * yaml.safe_dump() --- data/coco.yaml | 2 +- models/yolo.py | 2 +- test.py | 2 +- train.py | 19 ++++++++++--------- utils/autoanchor.py | 2 +- utils/aws/resume.py | 2 +- utils/general.py | 2 +- utils/plots.py | 2 +- utils/wandb_logging/log_dataset.py | 2 +- utils/wandb_logging/wandb_utils.py | 10 +++++----- 10 files changed, 23 insertions(+), 22 deletions(-) diff --git a/data/coco.yaml b/data/coco.yaml index b9da2bf5919b..fa33a1210004 100644 --- a/data/coco.yaml +++ b/data/coco.yaml @@ -30,6 +30,6 @@ names: [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', ' # Print classes # with open('data/coco.yaml') as f: -# d = yaml.load(f, Loader=yaml.FullLoader) # dict +# d = yaml.safe_load(f) # dict # for i, x in enumerate(d['names']): # print(i, x) diff --git a/models/yolo.py b/models/yolo.py index f730a1efa3b3..7db0e7da2629 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -72,7 +72,7 @@ def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, i import yaml # for torch hub self.yaml_file = Path(cfg).name with open(cfg) as f: - self.yaml = yaml.load(f, Loader=yaml.SafeLoader) # model dict + self.yaml = yaml.safe_load(f) # model 
dict # Define model ch = self.yaml['ch'] = self.yaml.get('ch', ch) # input channels diff --git a/test.py b/test.py index db1651d07f65..43c03cf0e094 100644 --- a/test.py +++ b/test.py @@ -71,7 +71,7 @@ def test(data, if isinstance(data, str): is_coco = data.endswith('coco.yaml') with open(data) as f: - data = yaml.load(f, Loader=yaml.SafeLoader) + data = yaml.safe_load(f) check_dataset(data) # check nc = 1 if single_cls else int(data['nc']) # number of classes iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95 diff --git a/train.py b/train.py index 17b5ac5dda50..acfc9ef5527b 100644 --- a/train.py +++ b/train.py @@ -41,7 +41,7 @@ def train(hyp, opt, device, tb_writer=None): logger.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items())) save_dir, epochs, batch_size, total_batch_size, weights, rank = \ - opt.save_dir, opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank + Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank # Directories wdir = save_dir / 'weights' @@ -52,16 +52,16 @@ def train(hyp, opt, device, tb_writer=None): # Save run settings with open(save_dir / 'hyp.yaml', 'w') as f: - yaml.dump(hyp, f, sort_keys=False) + yaml.safe_dump(hyp, f, sort_keys=False) with open(save_dir / 'opt.yaml', 'w') as f: - yaml.dump(vars(opt), f, sort_keys=False) + yaml.safe_dump(vars(opt), f, sort_keys=False) # Configure plots = not opt.evolve # create plots cuda = device.type != 'cpu' init_seeds(2 + rank) with open(opt.data) as f: - data_dict = yaml.load(f, Loader=yaml.SafeLoader) # data dict + data_dict = yaml.safe_load(f) # data dict is_coco = opt.data.endswith('coco.yaml') # Logging- Doing this before checking the dataset. Might update data_dict @@ -506,8 +506,9 @@ def train(hyp, opt, device, tb_writer=None): assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist' apriori = opt.global_rank, opt.local_rank with open(Path(ckpt).parent.parent / 'opt.yaml') as f: - opt = argparse.Namespace(**yaml.load(f, Loader=yaml.SafeLoader)) # replace - opt.cfg, opt.weights, opt.resume, opt.batch_size, opt.global_rank, opt.local_rank = '', ckpt, True, opt.total_batch_size, *apriori # reinstate + opt = argparse.Namespace(**yaml.safe_load(f)) # replace + opt.cfg, opt.weights, opt.resume, opt.batch_size, opt.global_rank, opt.local_rank = \ + '', ckpt, True, opt.total_batch_size, *apriori # reinstate logger.info('Resuming training from %s' % ckpt) else: # opt.hyp = opt.hyp or ('hyp.finetune.yaml' if opt.weights else 'hyp.scratch.yaml') @@ -515,7 +516,7 @@ def train(hyp, opt, device, tb_writer=None): assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified' opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size))) # extend to 2 sizes (train, test) opt.name = 'evolve' if opt.evolve else opt.name - opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok | opt.evolve) # increment run + opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok | opt.evolve)) # DDP mode opt.total_batch_size = opt.batch_size @@ -530,7 +531,7 @@ def train(hyp, opt, device, tb_writer=None): # Hyperparameters with open(opt.hyp) as f: - hyp = yaml.load(f, Loader=yaml.SafeLoader) # load hyps + hyp = yaml.safe_load(f) # load hyps # Train logger.info(opt) @@ -577,7 +578,7 @@ def train(hyp, opt, device, tb_writer=None): assert opt.local_rank == -1, 'DDP mode not implemented for --evolve' opt.notest, opt.nosave = True, 
True # only test/save final epoch # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices - yaml_file = opt.save_dir / 'hyp_evolved.yaml' # save best result here + yaml_file = Path(opt.save_dir) / 'hyp_evolved.yaml' # save best result here if opt.bucket: os.system('gsutil cp gs://%s/evolve.txt .' % opt.bucket) # download evolve.txt if exists diff --git a/utils/autoanchor.py b/utils/autoanchor.py index 57777462e89f..75b350da729c 100644 --- a/utils/autoanchor.py +++ b/utils/autoanchor.py @@ -102,7 +102,7 @@ def print_results(k): if isinstance(path, str): # *.yaml file with open(path) as f: - data_dict = yaml.load(f, Loader=yaml.SafeLoader) # model dict + data_dict = yaml.safe_load(f) # model dict from utils.datasets import LoadImagesAndLabels dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True) else: diff --git a/utils/aws/resume.py b/utils/aws/resume.py index faad8d247411..4b0d4246b594 100644 --- a/utils/aws/resume.py +++ b/utils/aws/resume.py @@ -19,7 +19,7 @@ # Load opt.yaml with open(last.parent.parent / 'opt.yaml') as f: - opt = yaml.load(f, Loader=yaml.SafeLoader) + opt = yaml.safe_load(f) # Get device count d = opt['device'].split(',') # devices diff --git a/utils/general.py b/utils/general.py index 817023f33dd3..9898549d3eaf 100755 --- a/utils/general.py +++ b/utils/general.py @@ -550,7 +550,7 @@ def print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''): results = tuple(x[0, :7]) c = '%10.4g' * len(results) % results # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3) f.write('# Hyperparameter Evolution Results\n# Generations: %g\n# Metrics: ' % len(x) + c + '\n\n') - yaml.dump(hyp, f, sort_keys=False) + yaml.safe_dump(hyp, f, sort_keys=False) if bucket: os.system('gsutil cp evolve.txt %s gs://%s' % (yaml_file, bucket)) # upload diff --git a/utils/plots.py b/utils/plots.py index 09b6bcd15a9f..f24513c6998d 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -323,7 +323,7 @@ def plot_labels(labels, names=(), save_dir=Path(''), loggers=None): def plot_evolution(yaml_file='data/hyp.finetune.yaml'): # from utils.plots import *; plot_evolution() # Plot hyperparameter evolution results in evolve.txt with open(yaml_file) as f: - hyp = yaml.load(f, Loader=yaml.SafeLoader) + hyp = yaml.safe_load(f) x = np.loadtxt('evolve.txt', ndmin=2) f = fitness(x) # weights = (f - f.min()) ** 2 # for weighted results diff --git a/utils/wandb_logging/log_dataset.py b/utils/wandb_logging/log_dataset.py index d7a521f1414b..f45a23011f15 100644 --- a/utils/wandb_logging/log_dataset.py +++ b/utils/wandb_logging/log_dataset.py @@ -9,7 +9,7 @@ def create_dataset_artifact(opt): with open(opt.data) as f: - data = yaml.load(f, Loader=yaml.SafeLoader) # data dict + data = yaml.safe_load(f) # data dict logger = WandbLogger(opt, '', None, data, job_type='Dataset Creation') diff --git a/utils/wandb_logging/wandb_utils.py b/utils/wandb_logging/wandb_utils.py index d8f50ae8a80e..d8fbd1ef42aa 100644 --- a/utils/wandb_logging/wandb_utils.py +++ b/utils/wandb_logging/wandb_utils.py @@ -55,7 +55,7 @@ def check_wandb_resume(opt): def process_wandb_config_ddp_mode(opt): with open(opt.data) as f: - data_dict = yaml.load(f, Loader=yaml.SafeLoader) # data dict + data_dict = yaml.safe_load(f) # data dict train_dir, val_dir = None, None if isinstance(data_dict['train'], str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX): api = wandb.Api() @@ -73,7 +73,7 @@ def process_wandb_config_ddp_mode(opt): if train_dir or val_dir: ddp_data_path = str(Path(val_dir) / 
'wandb_local_data.yaml') with open(ddp_data_path, 'w') as f: - yaml.dump(data_dict, f) + yaml.safe_dump(data_dict, f) opt.data = ddp_data_path @@ -120,7 +120,7 @@ def check_and_upload_dataset(self, opt): 'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem) print("Created dataset config file ", config_path) with open(config_path) as f: - wandb_data_dict = yaml.load(f, Loader=yaml.SafeLoader) + wandb_data_dict = yaml.safe_load(f) return wandb_data_dict def setup_training(self, opt, data_dict): @@ -192,7 +192,7 @@ def log_model(self, path, opt, epoch, fitness_score, best_model=False): def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False): with open(data_file) as f: - data = yaml.load(f, Loader=yaml.SafeLoader) # data dict + data = yaml.safe_load(f) # data dict nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names']) names = {k: v for k, v in enumerate(names)} # to index dictionary self.train_artifact = self.create_dataset_table(LoadImagesAndLabels( @@ -206,7 +206,7 @@ def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config= path = data_file if overwrite_config else '_wandb.'.join(data_file.rsplit('.', 1)) # updated data.yaml path data.pop('download', None) with open(path, 'w') as f: - yaml.dump(data, f) + yaml.safe_dump(data, f) if self.job_type == 'Training': # builds correct artifact pipeline graph self.wandb_run.use_artifact(self.val_artifact) From 5f7d39fede4de8af98472bd009c63c3a86568e2d Mon Sep 17 00:00:00 2001 From: JoshSong Date: Wed, 21 Apr 2021 23:50:28 +1000 Subject: [PATCH 0140/1976] Cleanup load_image() (#2871) * don't resize up in load_image if augmenting * cleanup Co-authored-by: Glenn Jocher --- utils/datasets.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index b81c634dcb7a..3fcdddd7c013 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -634,10 +634,10 @@ def load_image(self, index): img = cv2.imread(path) # BGR assert img is not None, 'Image Not Found ' + path h0, w0 = img.shape[:2] # orig hw - r = self.img_size / max(h0, w0) # resize image to img_size - if r != 1: # always resize down, only resize up if training with augmentation - interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR - img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp) + r = self.img_size / max(h0, w0) # ratio + if r != 1: # if sizes are not equal + img = cv2.resize(img, (int(w0 * r), int(h0 * r)), + interpolation=cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR) return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized else: return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized From d48a34dca72c7df1f684a1ff33c18beebc0f0ed9 Mon Sep 17 00:00:00 2001 From: Michael Heilig <75843816+MichHeilig@users.noreply.github.com> Date: Thu, 22 Apr 2021 00:49:55 +0200 Subject: [PATCH 0141/1976] bug fix: switched rows and cols for correct detections in confusion matrix (#2883) --- utils/metrics.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/metrics.py b/utils/metrics.py index 666b8c7ec1c0..323c84b6c873 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -145,7 +145,7 @@ def process_batch(self, detections, labels): for i, gc in enumerate(gt_classes): j = m0 == i if n and sum(j) == 1: - self.matrix[gc, detection_classes[m1[j]]] += 1 # correct + self.matrix[detection_classes[m1[j]], gc] += 1 # correct else: 
self.matrix[self.nc, gc] += 1 # background FP From 78fd0776571589a2a85b9245b15798497ef104d3 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 22 Apr 2021 12:10:26 +0200 Subject: [PATCH 0142/1976] VisDrone2019-DET Dataset Auto-Download (#2882) * VisDrone Dataset Auto-Download * add visdrone.yaml * cleanup * add VisDrone2019-DET-test-dev * cleanup VOC --- data/argoverse_hd.yaml | 2 +- data/coco.yaml | 2 +- data/coco128.yaml | 2 +- data/scripts/get_argoverse_hd.sh | 2 +- data/scripts/get_coco.sh | 2 +- data/scripts/get_voc.sh | 113 ++++++++++++------------------- data/visdrone.yaml | 65 ++++++++++++++++++ data/voc.yaml | 2 +- utils/general.py | 32 +++++++-- 9 files changed, 144 insertions(+), 78 deletions(-) create mode 100644 data/visdrone.yaml diff --git a/data/argoverse_hd.yaml b/data/argoverse_hd.yaml index df7a9361e769..0ba314d82ce1 100644 --- a/data/argoverse_hd.yaml +++ b/data/argoverse_hd.yaml @@ -1,6 +1,6 @@ # Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ # Train command: python train.py --data argoverse_hd.yaml -# Default dataset location is next to /yolov5: +# Default dataset location is next to YOLOv5: # /parent_folder # /argoverse # /yolov5 diff --git a/data/coco.yaml b/data/coco.yaml index fa33a1210004..f818a49ff0fa 100644 --- a/data/coco.yaml +++ b/data/coco.yaml @@ -1,6 +1,6 @@ # COCO 2017 dataset http://cocodataset.org # Train command: python train.py --data coco.yaml -# Default dataset location is next to /yolov5: +# Default dataset location is next to YOLOv5: # /parent_folder # /coco # /yolov5 diff --git a/data/coco128.yaml b/data/coco128.yaml index c41bccf2b8d5..83fbc29d3404 100644 --- a/data/coco128.yaml +++ b/data/coco128.yaml @@ -1,6 +1,6 @@ # COCO 2017 dataset http://cocodataset.org - first 128 training images # Train command: python train.py --data coco128.yaml -# Default dataset location is next to /yolov5: +# Default dataset location is next to YOLOv5: # /parent_folder # /coco128 # /yolov5 diff --git a/data/scripts/get_argoverse_hd.sh b/data/scripts/get_argoverse_hd.sh index caec61efed78..18131a6764d6 100644 --- a/data/scripts/get_argoverse_hd.sh +++ b/data/scripts/get_argoverse_hd.sh @@ -2,7 +2,7 @@ # Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ # Download command: bash data/scripts/get_argoverse_hd.sh # Train command: python train.py --data argoverse_hd.yaml -# Default dataset location is next to /yolov5: +# Default dataset location is next to YOLOv5: # /parent_folder # /argoverse # /yolov5 diff --git a/data/scripts/get_coco.sh b/data/scripts/get_coco.sh index bbb1e9291d5b..caae37504780 100755 --- a/data/scripts/get_coco.sh +++ b/data/scripts/get_coco.sh @@ -2,7 +2,7 @@ # COCO 2017 dataset http://cocodataset.org # Download command: bash data/scripts/get_coco.sh # Train command: python train.py --data coco.yaml -# Default dataset location is next to /yolov5: +# Default dataset location is next to YOLOv5: # /parent_folder # /coco # /yolov5 diff --git a/data/scripts/get_voc.sh b/data/scripts/get_voc.sh index 13b83c28d706..4c04aaa95a29 100644 --- a/data/scripts/get_voc.sh +++ b/data/scripts/get_voc.sh @@ -2,7 +2,7 @@ # PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC/ # Download command: bash data/scripts/get_voc.sh # Train command: python train.py --data voc.yaml -# Default dataset location is next to /yolov5: +# Default dataset location is next to YOLOv5: # /parent_folder # /VOC # /yolov5 @@ -29,34 +29,27 @@ echo "Completed in" $runtime "seconds" echo 
"Splitting dataset..." python3 - "$@" <train.txt cat 2007_train.txt 2007_val.txt 2007_test.txt 2012_train.txt 2012_val.txt >train.all.txt -python3 - "$@" < Date: Thu, 22 Apr 2021 16:51:21 +0200 Subject: [PATCH 0143/1976] Uppercase model filenames enabled (#2890) --- utils/google_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/google_utils.py b/utils/google_utils.py index 0a7ca3b896d6..db36fa9d6822 100644 --- a/utils/google_utils.py +++ b/utils/google_utils.py @@ -18,7 +18,7 @@ def gsutil_getsize(url=''): def attempt_download(file, repo='ultralytics/yolov5'): # Attempt file download if does not exist - file = Path(str(file).strip().replace("'", '').lower()) + file = Path(str(file).strip().replace("'", '')) if not file.exists(): try: From 264d860f8dc36e7d9125d6fc347a02d34a7d5e37 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 22 Apr 2021 17:26:05 +0200 Subject: [PATCH 0144/1976] ACON activation function (#2893) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * ACON Activation Function ## 🚀 Feature There is a new activation function [ACON (CVPR 2021)](https://arxiv.org/pdf/2009.04759.pdf) that unifies ReLU and Swish. ACON is simple but very effective, code is here: https://github.com/nmaac/acon/blob/main/acon.py#L19 ![image](https://user-images.githubusercontent.com/5032208/115676962-a38dfe80-a382-11eb-9883-61fa3216e3e6.png) The improvements are very significant: ![image](https://user-images.githubusercontent.com/5032208/115680180-eac9be80-a385-11eb-9c7a-8643db552c69.png) ## Alternatives It also has an enhanced version meta-ACON that uses a small network to learn beta explicitly, which may influence the speed a bit. ## Additional context [Code](https://github.com/nmaac/acon) and [paper](https://arxiv.org/pdf/2009.04759.pdf). * Update activations.py --- utils/activations.py | 58 +++++++++++++++++++++++++++++++------------- 1 file changed, 41 insertions(+), 17 deletions(-) diff --git a/utils/activations.py b/utils/activations.py index aa3ddf071d28..1d095c1cf0f1 100644 --- a/utils/activations.py +++ b/utils/activations.py @@ -19,23 +19,6 @@ def forward(x): return x * F.hardtanh(x + 3, 0., 6.) / 6. # for torchscript, CoreML and ONNX -class MemoryEfficientSwish(nn.Module): - class F(torch.autograd.Function): - @staticmethod - def forward(ctx, x): - ctx.save_for_backward(x) - return x * torch.sigmoid(x) - - @staticmethod - def backward(ctx, grad_output): - x = ctx.saved_tensors[0] - sx = torch.sigmoid(x) - return grad_output * (sx * (1 + x * (1 - sx))) - - def forward(self, x): - return self.F.apply(x) - - # Mish https://github.com/digantamisra98/Mish -------------------------------------------------------------------------- class Mish(nn.Module): @staticmethod @@ -70,3 +53,44 @@ def __init__(self, c1, k=3): # ch_in, kernel def forward(self, x): return torch.max(x, self.bn(self.conv(x))) + + +# ACON https://arxiv.org/pdf/2009.04759.pdf ---------------------------------------------------------------------------- +class AconC(nn.Module): + r""" ACON activation (activate or not). + AconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is a learnable parameter + according to "Activate or Not: Learning Customized Activation" . 
+ """ + + def __init__(self, c1): + super().__init__() + self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1)) + self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1)) + self.beta = nn.Parameter(torch.ones(1, c1, 1, 1)) + + def forward(self, x): + dpx = (self.p1 - self.p2) * x + return dpx * torch.sigmoid(self.beta * dpx) + self.p2 * x + + +class MetaAconC(nn.Module): + r""" ACON activation (activate or not). + MetaAconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is generated by a small network + according to "Activate or Not: Learning Customized Activation" . + """ + + def __init__(self, c1, k=1, s=1, r=16): # ch_in, kernel, stride, r + super().__init__() + c2 = max(r, c1 // r) + self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1)) + self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1)) + self.fc1 = nn.Conv2d(c1, c2, k, s, bias=False) + self.bn1 = nn.BatchNorm2d(c2) + self.fc2 = nn.Conv2d(c2, c1, k, s, bias=False) + self.bn2 = nn.BatchNorm2d(c1) + + def forward(self, x): + y = x.mean(dim=2, keepdims=True).mean(dim=3, keepdims=True) + beta = torch.sigmoid(self.bn2(self.fc2(self.bn1(self.fc1(y))))) + dpx = (self.p1 - self.p2) * x + return dpx * torch.sigmoid(beta * dpx) + self.p2 * x From b40dd991674e78dc73e67d2f7d415c65073592fc Mon Sep 17 00:00:00 2001 From: fcakyon <34196005+fcakyon@users.noreply.github.com> Date: Thu, 22 Apr 2021 19:17:30 +0300 Subject: [PATCH 0145/1976] Explicit opt function arguments (#2817) * more explicit function arguments * fix typo in detect.py * revert import order * revert import order * remove default value --- detect.py | 6 +++--- test.py | 8 +++++--- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/detect.py b/detect.py index 081ae3d89e2e..d90d2aa8c4f5 100644 --- a/detect.py +++ b/detect.py @@ -15,7 +15,7 @@ from utils.torch_utils import select_device, load_classifier, time_synchronized -def detect(): +def detect(opt): source, weights, view_img, save_txt, imgsz = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size save_img = not opt.nosave and not source.endswith('.txt') # save inference images webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith( @@ -176,7 +176,7 @@ def detect(): with torch.no_grad(): if opt.update: # update all models (to fix SourceChangeWarning) for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']: - detect() + detect(opt=opt) strip_optimizer(opt.weights) else: - detect() + detect(opt=opt) diff --git a/test.py b/test.py index 43c03cf0e094..91b2b981c45b 100644 --- a/test.py +++ b/test.py @@ -38,7 +38,8 @@ def test(data, wandb_logger=None, compute_loss=None, half_precision=True, - is_coco=False): + is_coco=False, + opt=None): # Initialize/load model and set device training = model is not None if training: # called by train.py @@ -323,11 +324,12 @@ def test(data, save_txt=opt.save_txt | opt.save_hybrid, save_hybrid=opt.save_hybrid, save_conf=opt.save_conf, + opt=opt ) elif opt.task == 'speed': # speed benchmarks for w in opt.weights: - test(opt.data, w, opt.batch_size, opt.img_size, 0.25, 0.45, save_json=False, plots=False) + test(opt.data, w, opt.batch_size, opt.img_size, 0.25, 0.45, save_json=False, plots=False, opt=opt) elif opt.task == 'study': # run over a range of settings and save/plot # python test.py --task study --data coco.yaml --iou 0.7 --weights yolov5s.pt yolov5m.pt yolov5l.pt yolov5x.pt @@ -338,7 +340,7 @@ def test(data, for i in x: # img-size print(f'\nRunning {f} point {i}...') r, _, t = test(opt.data, w, opt.batch_size, i, opt.conf_thres, opt.iou_thres, 
opt.save_json, - plots=False) + plots=False, opt=opt) y.append(r + t) # results and times np.savetxt(f, y, fmt='%10.4g') # save os.system('zip -r study.zip study_*.txt') From cdb678f4181bdbad01a6c88e2840871e4058b7cb Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 22 Apr 2021 19:27:21 +0200 Subject: [PATCH 0146/1976] Update yolo.py (#2899) --- models/yolo.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/models/yolo.py b/models/yolo.py index 7db0e7da2629..36fa27e89134 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -264,14 +264,14 @@ def parse_model(d, ch): # model_dict, input_channels(3) # Create model model = Model(opt.cfg).to(device) model.train() - + # Profile - # img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 640, 640).to(device) + # img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 320, 320).to(device) # y = model(img, profile=True) - # Tensorboard + # Tensorboard (not working https://github.com/ultralytics/yolov5/issues/2898) # from torch.utils.tensorboard import SummaryWriter - # tb_writer = SummaryWriter() - # print("Run 'tensorboard --logdir=models/runs' to view tensorboard at http://localhost:6006/") - # tb_writer.add_graph(model.model, img) # add model to tensorboard + # tb_writer = SummaryWriter('.') + # print("Run 'tensorboard --logdir=models' to view tensorboard at http://localhost:6006/") + # tb_writer.add_graph(torch.jit.trace(model, img, strict=False), []) # add model graph # tb_writer.add_image('test', img[0], dataformats='CWH') # add model to tensorboard From 14d2d2d75fff27a9deb183c9cb76f107f43ca3ad Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 22 Apr 2021 20:27:32 +0200 Subject: [PATCH 0147/1976] Update google_utils.py (#2900) --- utils/google_utils.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/utils/google_utils.py b/utils/google_utils.py index db36fa9d6822..6a4660bad509 100644 --- a/utils/google_utils.py +++ b/utils/google_utils.py @@ -26,8 +26,12 @@ def attempt_download(file, repo='ultralytics/yolov5'): assets = [x['name'] for x in response['assets']] # release assets, i.e. ['yolov5s.pt', 'yolov5m.pt', ...] tag = response['tag_name'] # i.e. 
'v1.0' except: # fallback plan - assets = ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt'] - tag = subprocess.check_output('git tag', shell=True).decode().split()[-1] + assets = ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt', + 'yolov5s6.pt', 'yolov5m6.pt', 'yolov5l6.pt', 'yolov5x6.pt'] + try: + tag = subprocess.check_output('git tag', shell=True, stderr=subprocess.STDOUT).decode().split()[-1] + except: + tag = 'v5.0' # current release name = file.name if name in assets: From f662c1850739fafaf7e76b1157e9c936032e4cc4 Mon Sep 17 00:00:00 2001 From: Maximilian Peters Date: Fri, 23 Apr 2021 21:07:48 +0200 Subject: [PATCH 0148/1976] Add detect.py --hide-conf --hide-labels --line-thickness options (#2658) * command line option for line thickness and hiding labels * command line option for line thickness and hiding labels * command line option for line thickness and hiding labels * command line option for line thickness and hiding labels * command line option for line thickness and hiding labels * command line option for hiding confidence values * Update detect.py Co-authored-by: Glenn Jocher --- detect.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/detect.py b/detect.py index d90d2aa8c4f5..358ef9e3eb1c 100644 --- a/detect.py +++ b/detect.py @@ -110,8 +110,9 @@ def detect(opt): if save_img or opt.save_crop or view_img: # Add bbox to image c = int(cls) # integer class - label = f'{names[c]} {conf:.2f}' - plot_one_box(xyxy, im0, label=label, color=colors[c], line_thickness=3) + label = None if opt.hide_labels else (names[c] if opt.hide_conf else f'{names[c]} {conf:.2f}') + + plot_one_box(xyxy, im0, label=label, color=colors[c], line_thickness=opt.line_thickness) if opt.save_crop: save_one_box(xyxy, im0s, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) @@ -169,6 +170,9 @@ def detect(opt): parser.add_argument('--project', default='runs/detect', help='save results to project/name') parser.add_argument('--name', default='exp', help='save results to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)') + parser.add_argument('--hide-labels', default=True, action='store_true', help='hide labels') + parser.add_argument('--hide-conf', default=True, action='store_true', help='hide confidences') opt = parser.parse_args() print(opt) check_requirements(exclude=('pycocotools', 'thop')) From 28db23763904bf0800fe9647fc7e25b10f4f8e3c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 23 Apr 2021 21:21:58 +0200 Subject: [PATCH 0149/1976] Default optimize_for_mobile() on TorchScript models (#2908) Per https://pytorch.org/tutorials/recipes/script_optimized.html this should improve performance on torchscript models (and maybe coreml models also since coremltools operates on a torchscript model input, though this still requires testing). 
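As a quick illustration, here is a minimal sketch of the trace-optimize-save flow this patch adds to models/export.py (the stand-in `nn.Conv2d` module, dummy input shape and output filename are illustrative placeholders, not taken from the patch):

```python
import torch
import torch.nn as nn
from torch.utils.mobile_optimizer import optimize_for_mobile

model = nn.Conv2d(3, 16, 3)  # stand-in module; export.py uses the loaded YOLOv5 model
img = torch.zeros(1, 3, 640, 640)  # dummy input (batch, channels, height, width)

ts = torch.jit.trace(model, img, strict=False)  # trace to a TorchScript module
ts = optimize_for_mobile(ts)  # apply mobile optimizations per the recipe linked above
ts.save('model.torchscript.pt')  # save the optimized module
```

Note that optimize_for_mobile() expects a ScriptModule, so it is applied to the traced model rather than to the eager one.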
--- models/export.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/models/export.py b/models/export.py index c527a47951cb..312e949f56ac 100644 --- a/models/export.py +++ b/models/export.py @@ -12,6 +12,7 @@ import torch import torch.nn as nn +from torch.utils.mobile_optimizer import optimize_for_mobile import models from models.experimental import attempt_load @@ -65,6 +66,7 @@ print(f'\n{prefix} starting export with torch {torch.__version__}...') f = opt.weights.replace('.pt', '.torchscript.pt') # filename ts = torch.jit.trace(model, img, strict=False) + ts = optimize_for_mobile(ts) # https://pytorch.org/tutorials/recipes/script_optimized.html ts.save(f) print(f'{prefix} export success, saved as {f}') except Exception as e: From a2a514dec8a7a96c4442f50885a46abdb4b7fba1 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 23 Apr 2021 23:50:02 +0200 Subject: [PATCH 0150/1976] Update export.py (#2909) --- models/export.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/export.py b/models/export.py index 312e949f56ac..5b7b6bda08ae 100644 --- a/models/export.py +++ b/models/export.py @@ -112,7 +112,7 @@ try: import coremltools as ct - print(f'{prefix} starting export with coremltools {onnx.__version__}...') + print(f'{prefix} starting export with coremltools {ct.__version__}...') # convert model from torchscript and apply pixel scaling as per detect.py model = ct.convert(ts, inputs=[ct.ImageType(name='image', shape=img.shape, scale=1 / 255.0, bias=[0, 0, 0])]) f = opt.weights.replace('.pt', '.mlmodel') # filename From 646386ff09f8fce34cb8665a99dfd523f2dc138c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 24 Apr 2021 00:10:38 +0200 Subject: [PATCH 0151/1976] Update export.py for 2 dry runs (#2910) * Update export.py for 2 dry runs * Update export.py --- models/export.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/models/export.py b/models/export.py index 5b7b6bda08ae..a820ff94ac17 100644 --- a/models/export.py +++ b/models/export.py @@ -58,7 +58,8 @@ # elif isinstance(m, models.yolo.Detect): # m.forward = m.forward_export # assign forward (optional) model.model[-1].export = not opt.grid # set Detect() layer grid export - y = model(img) # dry run + for _ in range(2): + y = model(img) # dry runs # TorchScript export ----------------------------------------------------------------------------------------------- prefix = colorstr('TorchScript:') @@ -80,7 +81,6 @@ print(f'{prefix} starting export with onnx {onnx.__version__}...') f = opt.weights.replace('.pt', '.onnx') # filename torch.onnx.export(model, img, f, verbose=False, opset_version=12, input_names=['images'], - output_names=['classes', 'boxes'] if y is None else ['output'], dynamic_axes={'images': {0: 'batch', 2: 'height', 3: 'width'}, # size(1,3,640,640) 'output': {0: 'batch', 2: 'y', 3: 'x'}} if opt.dynamic else None) From 1b1ab4cca20aff2b88fedefc01c0482fcdc1a475 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 24 Apr 2021 01:31:11 +0200 Subject: [PATCH 0152/1976] Add file_size() function (#2911) * Add file_size() function * Update export.py --- models/export.py | 9 +++++---- utils/general.py | 5 +++++ 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/models/export.py b/models/export.py index a820ff94ac17..38fefa7e891c 100644 --- a/models/export.py +++ b/models/export.py @@ -17,7 +17,7 @@ import models from models.experimental import attempt_load from utils.activations import Hardswish, SiLU -from utils.general import colorstr, check_img_size, 
check_requirements, set_logging +from utils.general import colorstr, check_img_size, check_requirements, file_size, set_logging from utils.torch_utils import select_device if __name__ == '__main__': @@ -60,6 +60,7 @@ model.model[-1].export = not opt.grid # set Detect() layer grid export for _ in range(2): y = model(img) # dry runs + print(f"\n{colorstr('PyTorch:')} starting from {opt.weights} ({file_size(opt.weights):.1f} MB)") # TorchScript export ----------------------------------------------------------------------------------------------- prefix = colorstr('TorchScript:') @@ -69,7 +70,7 @@ ts = torch.jit.trace(model, img, strict=False) ts = optimize_for_mobile(ts) # https://pytorch.org/tutorials/recipes/script_optimized.html ts.save(f) - print(f'{prefix} export success, saved as {f}') + print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') except Exception as e: print(f'{prefix} export failure: {e}') @@ -103,7 +104,7 @@ onnx.save(model_onnx, f) except Exception as e: print(f'{prefix} simplifier failure: {e}') - print(f'{prefix} export success, saved as {f}') + print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') except Exception as e: print(f'{prefix} export failure: {e}') @@ -117,7 +118,7 @@ model = ct.convert(ts, inputs=[ct.ImageType(name='image', shape=img.shape, scale=1 / 255.0, bias=[0, 0, 0])]) f = opt.weights.replace('.pt', '.mlmodel') # filename model.save(f) - print(f'{prefix} export success, saved as {f}') + print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') except Exception as e: print(f'{prefix} export failure: {e}') diff --git a/utils/general.py b/utils/general.py index 92c6ca5df208..ba88759c2983 100755 --- a/utils/general.py +++ b/utils/general.py @@ -61,6 +61,11 @@ def emojis(str=''): return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str +def file_size(file): + # Return file size in MB + return Path(file).stat().st_size / 1e6 + + def check_online(): # Check internet connectivity import socket From 45632b27049734e5c73289b10d90a5dc7c2dd6f3 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 24 Apr 2021 15:53:15 +0200 Subject: [PATCH 0153/1976] Update download() for tar.gz files (#2919) * Update download() for tar.gz files * Update general.py --- utils/general.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/utils/general.py b/utils/general.py index ba88759c2983..8efeb5ea59cc 100755 --- a/utils/general.py +++ b/utils/general.py @@ -184,14 +184,19 @@ def check_dataset(dict): def download(url, dir='.', multi_thread=False): - # Multi-threaded file download function + # Multi-threaded file download and unzip function def download_one(url, dir): # Download 1 file f = dir / Path(url).name # filename - print(f'Downloading {url} to {f}...') - torch.hub.download_url_to_file(url, f, progress=True) # download - if f.suffix == '.zip': - os.system(f'unzip -qo {f} -d {dir} && rm {f}') # unzip -quiet -overwrite + if not f.exists(): + print(f'Downloading {url} to {f}...') + torch.hub.download_url_to_file(url, f, progress=True) # download + if f.suffix in ('.zip', '.gz'): + print(f'Unzipping {f}...') + if f.suffix == '.zip': + os.system(f'unzip -qo {f} -d {dir} && rm {f}') # unzip -quiet -overwrite + elif f.suffix == '.gz': + os.system(f'tar xfz {f} --directory {f.parent} && rm {f}') # unzip dir = Path(dir) dir.mkdir(parents=True, exist_ok=True) # make directory From de19d396e713b8517e555f12d05d906e9d6891b3 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: 
Sat, 24 Apr 2021 16:28:27 +0200 Subject: [PATCH 0154/1976] Update visdrone.yaml (#2921) --- data/visdrone.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/visdrone.yaml b/data/visdrone.yaml index 59f597a9c6f1..c23e6bc286f8 100644 --- a/data/visdrone.yaml +++ b/data/visdrone.yaml @@ -56,7 +56,7 @@ download: | dir = Path('../VisDrone') # dataset directory urls = ['https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-train.zip', 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-val.zip', - 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-dev.zip' + 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-dev.zip', 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-challenge.zip'] download(urls, dir=dir) From eae28a93b026de2ea2d9b9f535b0c9fb747b19f6 Mon Sep 17 00:00:00 2001 From: albinxavi <62288451+albinxavi@users.noreply.github.com> Date: Sat, 24 Apr 2021 23:28:02 +0530 Subject: [PATCH 0155/1976] Change default value of hide label argument to False (#2923) --- detect.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/detect.py b/detect.py index 358ef9e3eb1c..a2b3045bc500 100644 --- a/detect.py +++ b/detect.py @@ -171,7 +171,7 @@ def detect(opt): parser.add_argument('--name', default='exp', help='save results to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)') - parser.add_argument('--hide-labels', default=True, action='store_true', help='hide labels') + parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels') parser.add_argument('--hide-conf', default=True, action='store_true', help='hide confidences') opt = parser.parse_args() print(opt) From aa78069c585115e29ba5759a2d856be0222bc12c Mon Sep 17 00:00:00 2001 From: albinxavi <62288451+albinxavi@users.noreply.github.com> Date: Sun, 25 Apr 2021 17:48:14 +0530 Subject: [PATCH 0156/1976] Change default value of hide-conf argument to false (#2925) --- detect.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/detect.py b/detect.py index a2b3045bc500..f5e53d991504 100644 --- a/detect.py +++ b/detect.py @@ -172,7 +172,7 @@ def detect(opt): parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)') parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels') - parser.add_argument('--hide-conf', default=True, action='store_true', help='hide confidences') + parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences') opt = parser.parse_args() print(opt) check_requirements(exclude=('pycocotools', 'thop')) From 3665c0f59bf00fc8cda90323cf189364f9a28974 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 25 Apr 2021 20:05:16 +0200 Subject: [PATCH 0157/1976] test.py native --single-cls (#2928) --- test.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/test.py b/test.py index 91b2b981c45b..2b9e90c05367 100644 --- a/test.py +++ b/test.py @@ -119,7 +119,7 @@ def test(data, targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels lb = [targets[targets[:, 0] == i, 1:] for i in 
range(nb)] if save_hybrid else [] # for autolabelling t = time_synchronized() - out = non_max_suppression(out, conf_thres=conf_thres, iou_thres=iou_thres, labels=lb, multi_label=True) + out = non_max_suppression(out, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls) t1 += time_synchronized() - t # Statistics per image @@ -136,6 +136,8 @@ def test(data, continue # Predictions + if single_cls: + pred[:, 5] = 0 predn = pred.clone() scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1]) # native-space pred From c0d3f80544ab7a99556eb40c021f817caada9c31 Mon Sep 17 00:00:00 2001 From: NanoCode012 Date: Mon, 26 Apr 2021 03:01:05 +0700 Subject: [PATCH 0158/1976] Add verbose option to pytorch hub models (#2926) * Add verbose and update print to logging * Fix positonal param * Revert auto formatting changes * Update hubconf.py Co-authored-by: Glenn Jocher --- hubconf.py | 53 +++++++++++++++++++++++++----------------------- models/yolo.py | 22 ++++++++++---------- utils/general.py | 4 ++-- 3 files changed, 41 insertions(+), 38 deletions(-) diff --git a/hubconf.py b/hubconf.py index d89502f4ee76..e42d0b59bd2a 100644 --- a/hubconf.py +++ b/hubconf.py @@ -16,10 +16,9 @@ dependencies = ['torch', 'yaml'] check_requirements(Path(__file__).parent / 'requirements.txt', exclude=('pycocotools', 'thop')) -set_logging() -def create(name, pretrained, channels, classes, autoshape): +def create(name, pretrained, channels, classes, autoshape, verbose): """Creates a specified YOLOv5 model Arguments: @@ -32,6 +31,8 @@ def create(name, pretrained, channels, classes, autoshape): pytorch model """ try: + set_logging(verbose=verbose) + cfg = list((Path(__file__).parent / 'models').rglob(f'{name}.yaml'))[0] # model.yaml path model = Model(cfg, channels, classes) if pretrained: @@ -55,7 +56,7 @@ def create(name, pretrained, channels, classes, autoshape): raise Exception(s) from e -def custom(path_or_model='path/to/model.pt', autoshape=True): +def custom(path_or_model='path/to/model.pt', autoshape=True, verbose=True): """YOLOv5-custom model https://github.com/ultralytics/yolov5 Arguments (3 options): @@ -66,6 +67,8 @@ def custom(path_or_model='path/to/model.pt', autoshape=True): Returns: pytorch model """ + set_logging(verbose=verbose) + model = torch.load(path_or_model) if isinstance(path_or_model, str) else path_or_model # load checkpoint if isinstance(model, dict): model = model['ema' if model.get('ema') else 'model'] # load model @@ -79,49 +82,49 @@ def custom(path_or_model='path/to/model.pt', autoshape=True): return hub_model.to(device) -def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True): +def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True): # YOLOv5-small model https://github.com/ultralytics/yolov5 - return create('yolov5s', pretrained, channels, classes, autoshape) + return create('yolov5s', pretrained, channels, classes, autoshape, verbose) -def yolov5m(pretrained=True, channels=3, classes=80, autoshape=True): +def yolov5m(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True): # YOLOv5-medium model https://github.com/ultralytics/yolov5 - return create('yolov5m', pretrained, channels, classes, autoshape) + return create('yolov5m', pretrained, channels, classes, autoshape, verbose) -def yolov5l(pretrained=True, channels=3, classes=80, autoshape=True): +def yolov5l(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True): # YOLOv5-large model https://github.com/ultralytics/yolov5 - return 
create('yolov5l', pretrained, channels, classes, autoshape) + return create('yolov5l', pretrained, channels, classes, autoshape, verbose) -def yolov5x(pretrained=True, channels=3, classes=80, autoshape=True): +def yolov5x(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True): # YOLOv5-xlarge model https://github.com/ultralytics/yolov5 - return create('yolov5x', pretrained, channels, classes, autoshape) + return create('yolov5x', pretrained, channels, classes, autoshape, verbose) -def yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True): - # YOLOv5-small model https://github.com/ultralytics/yolov5 - return create('yolov5s6', pretrained, channels, classes, autoshape) +def yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True): + # YOLOv5-small-P6 model https://github.com/ultralytics/yolov5 + return create('yolov5s6', pretrained, channels, classes, autoshape, verbose) -def yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True): - # YOLOv5-medium model https://github.com/ultralytics/yolov5 - return create('yolov5m6', pretrained, channels, classes, autoshape) +def yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True): + # YOLOv5-medium-P6 model https://github.com/ultralytics/yolov5 + return create('yolov5m6', pretrained, channels, classes, autoshape, verbose) -def yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True): - # YOLOv5-large model https://github.com/ultralytics/yolov5 - return create('yolov5l6', pretrained, channels, classes, autoshape) +def yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True): + # YOLOv5-large-P6 model https://github.com/ultralytics/yolov5 + return create('yolov5l6', pretrained, channels, classes, autoshape, verbose) -def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True): - # YOLOv5-xlarge model https://github.com/ultralytics/yolov5 - return create('yolov5x6', pretrained, channels, classes, autoshape) +def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True): + # YOLOv5-xlarge-P6 model https://github.com/ultralytics/yolov5 + return create('yolov5x6', pretrained, channels, classes, autoshape, verbose) if __name__ == '__main__': - model = create(name='yolov5s', pretrained=True, channels=3, classes=80, autoshape=True) # pretrained example - # model = custom(path_or_model='path/to/model.pt') # custom example + model = create(name='yolov5s', pretrained=True, channels=3, classes=80, autoshape=True, verbose=True) # pretrained + # model = custom(path_or_model='path/to/model.pt') # custom # Verify inference import cv2 diff --git a/models/yolo.py b/models/yolo.py index 36fa27e89134..dd505e22a68d 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -84,7 +84,7 @@ def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, i self.yaml['anchors'] = round(anchors) # override yaml value self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist self.names = [str(i) for i in range(self.yaml['nc'])] # default names - # print([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))]) + # logger.info([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))]) # Build strides, anchors m = self.model[-1] # Detect() @@ -95,7 +95,7 @@ def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, i check_anchor_order(m) self.stride = m.stride self._initialize_biases() # only run once - # print('Strides: %s' % m.stride.tolist()) + # logger.info('Strides: %s' % 
m.stride.tolist()) # Init weights, biases initialize_weights(self) @@ -134,13 +134,13 @@ def forward_once(self, x, profile=False): for _ in range(10): _ = m(x) dt.append((time_synchronized() - t) * 100) - print('%10.1f%10.0f%10.1fms %-40s' % (o, m.np, dt[-1], m.type)) + logger.info('%10.1f%10.0f%10.1fms %-40s' % (o, m.np, dt[-1], m.type)) x = m(x) # run y.append(x if m.i in self.save else None) # save output if profile: - print('%.1fms total' % sum(dt)) + logger.info('%.1fms total' % sum(dt)) return x def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency @@ -157,15 +157,15 @@ def _print_biases(self): m = self.model[-1] # Detect() module for mi in m.m: # from b = mi.bias.detach().view(m.na, -1).T # conv.bias(255) to (3,85) - print(('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean())) + logger.info(('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean())) # def _print_weights(self): # for m in self.model.modules(): # if type(m) is Bottleneck: - # print('%10.3g' % (m.w.detach().sigmoid() * 2)) # shortcut weights + # logger.info('%10.3g' % (m.w.detach().sigmoid() * 2)) # shortcut weights def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers - print('Fusing layers... ') + logger.info('Fusing layers... ') for m in self.model.modules(): if type(m) is Conv and hasattr(m, 'bn'): m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv @@ -177,19 +177,19 @@ def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers def nms(self, mode=True): # add or remove NMS module present = type(self.model[-1]) is NMS # last layer is NMS if mode and not present: - print('Adding NMS... ') + logger.info('Adding NMS... ') m = NMS() # module m.f = -1 # from m.i = self.model[-1].i + 1 # index self.model.add_module(name='%s' % m.i, module=m) # add self.eval() elif not mode and present: - print('Removing NMS... ') + logger.info('Removing NMS... ') self.model = self.model[:-1] # remove return self def autoshape(self): # add autoShape module - print('Adding autoShape... ') + logger.info('Adding autoShape... 
') m = autoShape(self) # wrap model copy_attr(m, self, include=('yaml', 'nc', 'hyp', 'names', 'stride'), exclude=()) # copy attributes return m @@ -272,6 +272,6 @@ def parse_model(d, ch): # model_dict, input_channels(3) # Tensorboard (not working https://github.com/ultralytics/yolov5/issues/2898) # from torch.utils.tensorboard import SummaryWriter # tb_writer = SummaryWriter('.') - # print("Run 'tensorboard --logdir=models' to view tensorboard at http://localhost:6006/") + # logger.info("Run 'tensorboard --logdir=models' to view tensorboard at http://localhost:6006/") # tb_writer.add_graph(torch.jit.trace(model, img, strict=False), []) # add model graph # tb_writer.add_image('test', img[0], dataformats='CWH') # add model to tensorboard diff --git a/utils/general.py b/utils/general.py index 8efeb5ea59cc..f77ae3331538 100755 --- a/utils/general.py +++ b/utils/general.py @@ -32,10 +32,10 @@ os.environ['NUMEXPR_MAX_THREADS'] = str(min(os.cpu_count(), 8)) # NumExpr max threads -def set_logging(rank=-1): +def set_logging(rank=-1, verbose=True): logging.basicConfig( format="%(message)s", - level=logging.INFO if rank in [-1, 0] else logging.WARN) + level=logging.INFO if (verbose and rank in [-1, 0]) else logging.WARN) def init_seeds(seed=0): From 9c7bb5a52cc716166c2145ce1a878a0ad2cf93be Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 25 Apr 2021 22:54:07 +0200 Subject: [PATCH 0159/1976] ACON Activation batch-size 1 bug patch (#2901) * ACON Activation batch-size 1 bug path This is not a great solution to https://github.com/nmaac/acon/issues/4 but it's all I could think of at the moment. WARNING: YOLOv5 models with MetaAconC() activations are incapable of running inference at batch-size 1 properly due to a known bug in https://github.com/nmaac/acon/issues/4 with no known solution. * Update activations.py * Update activations.py * Update activations.py * Update activations.py --- utils/activations.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/utils/activations.py b/utils/activations.py index 1d095c1cf0f1..92a3b5eaa54b 100644 --- a/utils/activations.py +++ b/utils/activations.py @@ -84,13 +84,15 @@ def __init__(self, c1, k=1, s=1, r=16): # ch_in, kernel, stride, r c2 = max(r, c1 // r) self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1)) self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1)) - self.fc1 = nn.Conv2d(c1, c2, k, s, bias=False) - self.bn1 = nn.BatchNorm2d(c2) - self.fc2 = nn.Conv2d(c2, c1, k, s, bias=False) - self.bn2 = nn.BatchNorm2d(c1) + self.fc1 = nn.Conv2d(c1, c2, k, s, bias=True) + self.fc2 = nn.Conv2d(c2, c1, k, s, bias=True) + # self.bn1 = nn.BatchNorm2d(c2) + # self.bn2 = nn.BatchNorm2d(c1) def forward(self, x): y = x.mean(dim=2, keepdims=True).mean(dim=3, keepdims=True) - beta = torch.sigmoid(self.bn2(self.fc2(self.bn1(self.fc1(y))))) + # batch-size 1 bug/instabilities https://github.com/ultralytics/yolov5/issues/2891 + # beta = torch.sigmoid(self.bn2(self.fc2(self.bn1(self.fc1(y))))) # bug/unstable + beta = torch.sigmoid(self.fc2(self.fc1(y))) # bug patch BN layers removed dpx = (self.p1 - self.p2) * x return dpx * torch.sigmoid(beta * dpx) + self.p2 * x From 184991672636838453e796f72268833dff788d07 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 25 Apr 2021 23:33:25 +0200 Subject: [PATCH 0160/1976] Check_requirements() enclosing apostrophe bug fix (#2929) This fixes a bug where the '>' symbol in python package requirements was not running correctly with subprocess.check_output() commands. 
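A short sketch of the failure mode and the fix, using a hypothetical requirement string (without the enclosing apostrophes, the shell parses `>` as output redirection, so the version specifier is silently dropped):

```python
import subprocess

r = 'coremltools>=4.1'  # hypothetical requirement string with a '>' comparator

# Broken: the shell splits the token at '>', so this effectively runs
# `pip install coremltools` and redirects pip's stdout to a file named '=4.1'.
# subprocess.check_output(f"pip install {r}", shell=True)

# Fixed: the enclosing apostrophes pass the full specifier to pip as one argument.
print(subprocess.check_output(f"pip install '{r}'", shell=True).decode())
```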
--- utils/general.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/general.py b/utils/general.py index f77ae3331538..fbb99b9e7f99 100755 --- a/utils/general.py +++ b/utils/general.py @@ -117,8 +117,8 @@ def check_requirements(requirements='requirements.txt', exclude=()): pkg.require(r) except Exception as e: # DistributionNotFound or VersionConflict if requirements not met n += 1 - print(f"{prefix} {e.req} not found and is required by YOLOv5, attempting auto-update...") - print(subprocess.check_output(f"pip install {e.req}", shell=True).decode()) + print(f"{prefix} {r} not found and is required by YOLOv5, attempting auto-update...") + print(subprocess.check_output(f"pip install '{r}'", shell=True).decode()) if n: # if packages updated source = file.resolve() if 'file' in locals() else requirements From 6c1290fe034e08cb60790d641507d75dbe3e2d61 Mon Sep 17 00:00:00 2001 From: BZFYS Date: Tue, 27 Apr 2021 05:56:25 +0800 Subject: [PATCH 0161/1976] Update README.md (#2934) * Update README.md dependencies: ImportError: libGL.so.1: cannot open shared object file: No such file or directory ImportError: libgthread-2.0.so.0: cannot open shared object file: No such file or directory ImportError: libSM.so.6: cannot open shared object file: No such file or directory ImportError: libXrender.so.1: cannot open shared object file: No such file or directory * replace older apt-get with apt Code commented for now until a better understanding of the issue, and also code is not cross-platform compatible. Co-authored-by: Glenn Jocher --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 27ea18da1932..d98bd7bfa7da 100755 --- a/README.md +++ b/README.md @@ -58,6 +58,7 @@ Model |size
(pixels) |mAPval 0.5:0.95 |mAPtest
0.5:0.95 ## Requirements Python 3.8 or later with all [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) dependencies installed, including `torch>=1.7`. To install run: + ```bash $ pip install -r requirements.txt ``` From 4890499344e21950d985e1a77e84a0a4161d1db0 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 26 Apr 2021 23:58:21 +0200 Subject: [PATCH 0162/1976] Improved yolo.py profiling (#2940) * Improved yolo.py profiling Improved column order and labelling. * Update yolo.py --- models/yolo.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/models/yolo.py b/models/yolo.py index dd505e22a68d..d573c5a290e2 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -134,7 +134,9 @@ def forward_once(self, x, profile=False): for _ in range(10): _ = m(x) dt.append((time_synchronized() - t) * 100) - logger.info('%10.1f%10.0f%10.1fms %-40s' % (o, m.np, dt[-1], m.type)) + if m == self.model[0]: + logger.info(f"{'time (ms)':>10s} {'GFLOPS':>10s} {'params':>10s} {'module'}") + logger.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f} {m.type}') x = m(x) # run y.append(x if m.i in self.save else None) # save output @@ -157,7 +159,8 @@ def _print_biases(self): m = self.model[-1] # Detect() module for mi in m.m: # from b = mi.bias.detach().view(m.na, -1).T # conv.bias(255) to (3,85) - logger.info(('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean())) + logger.info( + ('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean())) # def _print_weights(self): # for m in self.model.modules(): From 4200674a136a5589972f352790f76d3f37e98dd6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 27 Apr 2021 17:02:07 +0200 Subject: [PATCH 0163/1976] Add yolov5/ to sys.path() for *.py subdir exec (#2949) * Add yolov5/ to sys.path() for *.py subdir exec * Update export.py --- models/export.py | 3 ++- models/yolo.py | 5 +++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/models/export.py b/models/export.py index 38fefa7e891c..da15079149a1 100644 --- a/models/export.py +++ b/models/export.py @@ -7,8 +7,9 @@ import argparse import sys import time +from pathlib import Path -sys.path.append('./') # to run '$ python *.py' files in subdirectories +sys.path.append(Path(__file__).parent.parent.absolute().__str__()) # to run '$ python *.py' files in subdirectories import torch import torch.nn as nn diff --git a/models/yolo.py b/models/yolo.py index d573c5a290e2..cbff70fc83d4 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -4,8 +4,9 @@ import logging import sys from copy import deepcopy +from pathlib import Path -sys.path.append('./') # to run '$ python *.py' files in subdirectories +sys.path.append(Path(__file__).parent.parent.absolute().__str__()) # to run '$ python *.py' files in subdirectories logger = logging.getLogger(__name__) from models.common import * @@ -267,7 +268,7 @@ def parse_model(d, ch): # model_dict, input_channels(3) # Create model model = Model(opt.cfg).to(device) model.train() - + # Profile # img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 320, 320).to(device) # y = model(img, profile=True) From 57812df68c6040b82265882d5f67eb9e3abde9ac Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 28 Apr 2021 16:05:14 +0200 Subject: [PATCH 0164/1976] New Colors() class (#2963) --- detect.py | 9 +++------ models/common.py | 5 ++--- utils/plots.py | 24 ++++++++++++++++-------- 3 files changed, 21 insertions(+), 17 deletions(-) diff --git 
a/detect.py b/detect.py index f5e53d991504..ba42f349dbaf 100644 --- a/detect.py +++ b/detect.py @@ -11,7 +11,7 @@ from utils.datasets import LoadStreams, LoadImages from utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier, \ scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path, save_one_box -from utils.plots import plot_one_box +from utils.plots import colors, plot_one_box from utils.torch_utils import select_device, load_classifier, time_synchronized @@ -34,6 +34,7 @@ def detect(opt): model = attempt_load(weights, map_location=device) # load FP32 model stride = int(model.stride.max()) # model stride imgsz = check_img_size(imgsz, s=stride) # check img_size + names = model.module.names if hasattr(model, 'module') else model.names # get class names if half: model.half() # to FP16 @@ -52,10 +53,6 @@ def detect(opt): else: dataset = LoadImages(source, img_size=imgsz, stride=stride) - # Get names and colors - names = model.module.names if hasattr(model, 'module') else model.names - colors = [[random.randint(0, 255) for _ in range(3)] for _ in names] - # Run inference if device.type != 'cpu': model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once @@ -112,7 +109,7 @@ def detect(opt): c = int(cls) # integer class label = None if opt.hide_labels else (names[c] if opt.hide_conf else f'{names[c]} {conf:.2f}') - plot_one_box(xyxy, im0, label=label, color=colors[c], line_thickness=opt.line_thickness) + plot_one_box(xyxy, im0, label=label, color=colors(c, True), line_thickness=opt.line_thickness) if opt.save_crop: save_one_box(xyxy, im0s, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) diff --git a/models/common.py b/models/common.py index a28621904b0e..9764d4c3a6c0 100644 --- a/models/common.py +++ b/models/common.py @@ -14,7 +14,7 @@ from utils.datasets import letterbox from utils.general import non_max_suppression, make_divisible, scale_coords, increment_path, xyxy2xywh, save_one_box -from utils.plots import color_list, plot_one_box +from utils.plots import colors, plot_one_box from utils.torch_utils import time_synchronized @@ -312,7 +312,6 @@ def __init__(self, imgs, pred, files, times=None, names=None, shape=None): self.s = shape # inference BCHW shape def display(self, pprint=False, show=False, save=False, crop=False, render=False, save_dir=Path('')): - colors = color_list() for i, (im, pred) in enumerate(zip(self.imgs, self.pred)): str = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} ' if pred is not None: @@ -325,7 +324,7 @@ def display(self, pprint=False, show=False, save=False, crop=False, render=False if crop: save_one_box(box, im, file=save_dir / 'crops' / self.names[int(cls)] / self.files[i]) else: # all others - plot_one_box(box, im, label=label, color=colors[int(cls) % 10]) + plot_one_box(box, im, label=label, color=colors(cls)) im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np if pprint: diff --git a/utils/plots.py b/utils/plots.py index f24513c6998d..ab6448aa96eb 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -26,12 +26,22 @@ matplotlib.use('Agg') # for writing to files only -def color_list(): - # Return first 10 plt colors as (r,g,b) https://stackoverflow.com/questions/51350872/python-from-color-name-to-rgb - def hex2rgb(h): +class Colors: + # Ultralytics color palette https://ultralytics.com/ + def __init__(self): + self.palette = [self.hex2rgb(c) for c in 
matplotlib.colors.TABLEAU_COLORS.values()] + self.n = len(self.palette) + + def __call__(self, i, bgr=False): + c = self.palette[int(i) % self.n] + return (c[2], c[1], c[0]) if bgr else c + + @staticmethod + def hex2rgb(h): # rgb order (PIL) return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4)) - return [hex2rgb(h) for h in matplotlib.colors.TABLEAU_COLORS.values()] # or BASE_ (8), CSS4_ (148), XKCD_ (949) + +colors = Colors() # create instance for 'from utils.plots import colors' def hist2d(x, y, n=100): @@ -137,7 +147,6 @@ def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max h = math.ceil(scale_factor * h) w = math.ceil(scale_factor * w) - colors = color_list() # list of colors mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init for i, img in enumerate(images): if i == max_subplots: # if last batch has fewer images than we expect @@ -168,7 +177,7 @@ def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max boxes[[1, 3]] += block_y for j, box in enumerate(boxes.T): cls = int(classes[j]) - color = colors[cls % len(colors)] + color = colors(cls) cls = names[cls] if names else cls if labels or conf[j] > 0.25: # 0.25 conf thresh label = '%s' % cls if labels else '%s %.1f' % (cls, conf[j]) @@ -276,7 +285,6 @@ def plot_labels(labels, names=(), save_dir=Path(''), loggers=None): print('Plotting labels... ') c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes nc = int(c.max() + 1) # number of classes - colors = color_list() x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height']) # seaborn correlogram @@ -302,7 +310,7 @@ def plot_labels(labels, names=(), save_dir=Path(''), loggers=None): labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000 img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255) for cls, *box in labels[:1000]: - ImageDraw.Draw(img).rectangle(box, width=1, outline=colors[int(cls) % 10]) # plot + ImageDraw.Draw(img).rectangle(box, width=1, outline=colors(cls)) # plot ax[1].imshow(img) ax[1].axis('off') From 2c7c075fb10537fa5c0f8781701f81de99d8906d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 28 Apr 2021 17:50:07 +0200 Subject: [PATCH 0165/1976] Update restapi.py (#2967) --- utils/flask_rest_api/restapi.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/utils/flask_rest_api/restapi.py b/utils/flask_rest_api/restapi.py index 9d88f618905d..a54e2309715c 100644 --- a/utils/flask_rest_api/restapi.py +++ b/utils/flask_rest_api/restapi.py @@ -24,15 +24,14 @@ def predict(): img = Image.open(io.BytesIO(image_bytes)) - results = model(img, size=640) - data = results.pandas().xyxy[0].to_json(orient="records") - return data + results = model(img, size=640) # reduce size=320 for faster inference + return results.pandas().xyxy[0].to_json(orient="records") if __name__ == "__main__": - parser = argparse.ArgumentParser(description="Flask api exposing yolov5 model") + parser = argparse.ArgumentParser(description="Flask API exposing YOLOv5 model") parser.add_argument("--port", default=5000, type=int, help="port number") args = parser.parse_args() - model = torch.hub.load("ultralytics/yolov5", "yolov5s", force_reload=True).autoshape() # force_reload to recache + model = torch.hub.load("ultralytics/yolov5", "yolov5s", force_reload=True) # force_reload to recache app.run(host="0.0.0.0", port=args.port) # debug=True causes Restarting with stat From 33712d6dd0cc54e28b97d56cb999aa050a1c94ef Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 28 Apr 2021 
20:11:02 +0200 Subject: [PATCH 0166/1976] Global Wheat Detection 2020 Dataset Auto-Download (#2968) * Create GlobalWheat2020.yaml * Update and rename visdrone.yaml to VisDrone.yaml * Update GlobalWheat2020.yaml --- data/GlobalWheat2020.yaml | 55 +++++++++++++++++++++++++++ data/{visdrone.yaml => VisDrone.yaml} | 8 +--- 2 files changed, 57 insertions(+), 6 deletions(-) create mode 100644 data/GlobalWheat2020.yaml rename data/{visdrone.yaml => VisDrone.yaml} (95%) diff --git a/data/GlobalWheat2020.yaml b/data/GlobalWheat2020.yaml new file mode 100644 index 000000000000..b6f812d70383 --- /dev/null +++ b/data/GlobalWheat2020.yaml @@ -0,0 +1,55 @@ +# Global Wheat 2020 dataset http://www.global-wheat.com/ +# Train command: python train.py --data GlobalWheat2020.yaml +# Default dataset location is next to YOLOv5: +# /parent_folder +# /datasets/GlobalWheat2020 +# /yolov5 + + +# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/] +train: # 3422 images + - ../datasets/GlobalWheat2020/images/arvalis_1 + - ../datasets/GlobalWheat2020/images/arvalis_2 + - ../datasets/GlobalWheat2020/images/arvalis_3 + - ../datasets/GlobalWheat2020/images/ethz_1 + - ../datasets/GlobalWheat2020/images/rres_1 + - ../datasets/GlobalWheat2020/images/inrae_1 + - ../datasets/GlobalWheat2020/images/usask_1 + +val: # 748 images (WARNING: train set contains ethz_1) + - ../datasets/GlobalWheat2020/images/ethz_1 + +test: # 1276 + - ../datasets/GlobalWheat2020/images/utokyo_1 + - ../datasets/GlobalWheat2020/images/utokyo_2 + - ../datasets/GlobalWheat2020/images/nau_1 + - ../datasets/GlobalWheat2020/images/uq_1 + +# number of classes +nc: 1 + +# class names +names: [ 'wheat_head' ] + + +# download command/URL (optional) -------------------------------------------------------------------------------------- +download: | + from utils.general import download, Path + + # Download + dir = Path('../datasets/GlobalWheat2020') # dataset directory + urls = ['https://zenodo.org/record/4298502/files/global-wheat-codalab-official.zip', + 'https://github.com/ultralytics/yolov5/releases/download/v1.0/GlobalWheat2020_labels.zip'] + download(urls, dir=dir) + + # Make Directories + for p in 'annotations', 'images', 'labels': + (dir / p).mkdir(parents=True, exist_ok=True) + + # Move + for p in 'arvalis_1', 'arvalis_2', 'arvalis_3', 'ethz_1', 'rres_1', 'inrae_1', 'usask_1', \ + 'utokyo_1', 'utokyo_2', 'nau_1', 'uq_1': + (dir / p).rename(dir / 'images' / p) # move to /images + f = (dir / p).with_suffix('.json') # json file + if f.exists(): + f.rename((dir / 'annotations' / p).with_suffix('.json')) # move to /annotations diff --git a/data/visdrone.yaml b/data/VisDrone.yaml similarity index 95% rename from data/visdrone.yaml rename to data/VisDrone.yaml index c23e6bc286f8..c4603b200132 100644 --- a/data/visdrone.yaml +++ b/data/VisDrone.yaml @@ -1,5 +1,5 @@ # VisDrone2019-DET dataset https://github.com/VisDrone/VisDrone-Dataset -# Train command: python train.py --data visdrone.yaml +# Train command: python train.py --data VisDrone.yaml # Default dataset location is next to YOLOv5: # /parent_folder # /VisDrone @@ -20,11 +20,7 @@ names: [ 'pedestrian', 'people', 'bicycle', 'car', 'van', 'truck', 'tricycle', ' # download command/URL (optional) -------------------------------------------------------------------------------------- download: | - import os - from pathlib import Path - - from utils.general import download - + from utils.general import download, os, Path def visdrone2yolo(dir): 
from PIL import Image From dbce1bc54c321a97bb48f1d4ea7374882849ff47 Mon Sep 17 00:00:00 2001 From: Ferdinand Loesch Date: Thu, 29 Apr 2021 18:43:33 +0000 Subject: [PATCH 0167/1976] Objects365 Dataset (#2932) * add object365 * ADD CONVERSION SCRIPT * fix transcript * Reformat and simplify * spelling * Update get_objects365.py Co-authored-by: Glenn Jocher --- data/hyp.finetune_objects365.yaml | 28 +++++++++++++++ data/objects365.yaml | 57 +++++++++++++++++++++++++++++++ data/scripts/get_argoverse_hd.sh | 5 ++- data/scripts/get_objects365.py | 33 ++++++++++++++++++ 4 files changed, 120 insertions(+), 3 deletions(-) create mode 100644 data/hyp.finetune_objects365.yaml create mode 100644 data/objects365.yaml create mode 100644 data/scripts/get_objects365.py diff --git a/data/hyp.finetune_objects365.yaml b/data/hyp.finetune_objects365.yaml new file mode 100644 index 000000000000..2b104ef2d9bf --- /dev/null +++ b/data/hyp.finetune_objects365.yaml @@ -0,0 +1,28 @@ +lr0: 0.00258 +lrf: 0.17 +momentum: 0.779 +weight_decay: 0.00058 +warmup_epochs: 1.33 +warmup_momentum: 0.86 +warmup_bias_lr: 0.0711 +box: 0.0539 +cls: 0.299 +cls_pw: 0.825 +obj: 0.632 +obj_pw: 1.0 +iou_t: 0.2 +anchor_t: 3.44 +anchors: 3.2 +fl_gamma: 0.0 +hsv_h: 0.0188 +hsv_s: 0.704 +hsv_v: 0.36 +degrees: 0.0 +translate: 0.0902 +scale: 0.491 +shear: 0.0 +perspective: 0.0 +flipud: 0.0 +fliplr: 0.5 +mosaic: 1.0 +mixup: 0.0 diff --git a/data/objects365.yaml b/data/objects365.yaml new file mode 100644 index 000000000000..14464694f53a --- /dev/null +++ b/data/objects365.yaml @@ -0,0 +1,57 @@ +# Objects365 dataset https://www.objects365.org/ +# Train command: python train.py --data objects365.yaml +# Default dataset location is next to YOLOv5: +# /parent_folder +# /datasets/objects365 +# /yolov5 + +# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/] +train: ../datasets/objects365/images/train # 1.7 Million images +val: ../datasets/objects365/images/val # 5570 images + +# number of classes +nc: 365 + +# class names +names: [ 'Person', 'Sneakers', 'Chair', 'Other Shoes', 'Hat', 'Car', 'Lamp', 'Glasses', 'Bottle', 'Desk', 'Cup', + 'Street Lights', 'Cabinet/shelf', 'Handbag/Satchel', 'Bracelet', 'Plate', 'Picture/Frame', 'Helmet', 'Book', + 'Gloves', 'Storage box', 'Boat', 'Leather Shoes', 'Flower', 'Bench', 'Potted Plant', 'Bowl/Basin', 'Flag', + 'Pillow', 'Boots', 'Vase', 'Microphone', 'Necklace', 'Ring', 'SUV', 'Wine Glass', 'Belt', 'Monitor/TV', + 'Backpack', 'Umbrella', 'Traffic Light', 'Speaker', 'Watch', 'Tie', 'Trash bin Can', 'Slippers', 'Bicycle', + 'Stool', 'Barrel/bucket', 'Van', 'Couch', 'Sandals', 'Basket', 'Drum', 'Pen/Pencil', 'Bus', 'Wild Bird', + 'High Heels', 'Motorcycle', 'Guitar', 'Carpet', 'Cell Phone', 'Bread', 'Camera', 'Canned', 'Truck', + 'Traffic cone', 'Cymbal', 'Lifesaver', 'Towel', 'Stuffed Toy', 'Candle', 'Sailboat', 'Laptop', 'Awning', + 'Bed', 'Faucet', 'Tent', 'Horse', 'Mirror', 'Power outlet', 'Sink', 'Apple', 'Air Conditioner', 'Knife', + 'Hockey Stick', 'Paddle', 'Pickup Truck', 'Fork', 'Traffic Sign', 'Balloon', 'Tripod', 'Dog', 'Spoon', 'Clock', + 'Pot', 'Cow', 'Cake', 'Dinning Table', 'Sheep', 'Hanger', 'Blackboard/Whiteboard', 'Napkin', 'Other Fish', + 'Orange/Tangerine', 'Toiletry', 'Keyboard', 'Tomato', 'Lantern', 'Machinery Vehicle', 'Fan', + 'Green Vegetables', 'Banana', 'Baseball Glove', 'Airplane', 'Mouse', 'Train', 'Pumpkin', 'Soccer', 'Skiboard', + 'Luggage', 'Nightstand', 'Tea pot', 'Telephone', 'Trolley', 'Head Phone', 'Sports 
Car', 'Stop Sign', + 'Dessert', 'Scooter', 'Stroller', 'Crane', 'Remote', 'Refrigerator', 'Oven', 'Lemon', 'Duck', 'Baseball Bat', + 'Surveillance Camera', 'Cat', 'Jug', 'Broccoli', 'Piano', 'Pizza', 'Elephant', 'Skateboard', 'Surfboard', + 'Gun', 'Skating and Skiing shoes', 'Gas stove', 'Donut', 'Bow Tie', 'Carrot', 'Toilet', 'Kite', 'Strawberry', + 'Other Balls', 'Shovel', 'Pepper', 'Computer Box', 'Toilet Paper', 'Cleaning Products', 'Chopsticks', + 'Microwave', 'Pigeon', 'Baseball', 'Cutting/chopping Board', 'Coffee Table', 'Side Table', 'Scissors', + 'Marker', 'Pie', 'Ladder', 'Snowboard', 'Cookies', 'Radiator', 'Fire Hydrant', 'Basketball', 'Zebra', 'Grape', + 'Giraffe', 'Potato', 'Sausage', 'Tricycle', 'Violin', 'Egg', 'Fire Extinguisher', 'Candy', 'Fire Truck', + 'Billiards', 'Converter', 'Bathtub', 'Wheelchair', 'Golf Club', 'Briefcase', 'Cucumber', 'Cigar/Cigarette', + 'Paint Brush', 'Pear', 'Heavy Truck', 'Hamburger', 'Extractor', 'Extension Cord', 'Tong', 'Tennis Racket', + 'Folder', 'American Football', 'earphone', 'Mask', 'Kettle', 'Tennis', 'Ship', 'Swing', 'Coffee Machine', + 'Slide', 'Carriage', 'Onion', 'Green beans', 'Projector', 'Frisbee', 'Washing Machine/Drying Machine', + 'Chicken', 'Printer', 'Watermelon', 'Saxophone', 'Tissue', 'Toothbrush', 'Ice cream', 'Hot-air balloon', + 'Cello', 'French Fries', 'Scale', 'Trophy', 'Cabbage', 'Hot dog', 'Blender', 'Peach', 'Rice', 'Wallet/Purse', + 'Volleyball', 'Deer', 'Goose', 'Tape', 'Tablet', 'Cosmetics', 'Trumpet', 'Pineapple', 'Golf Ball', + 'Ambulance', 'Parking meter', 'Mango', 'Key', 'Hurdle', 'Fishing Rod', 'Medal', 'Flute', 'Brush', 'Penguin', + 'Megaphone', 'Corn', 'Lettuce', 'Garlic', 'Swan', 'Helicopter', 'Green Onion', 'Sandwich', 'Nuts', + 'Speed Limit Sign', 'Induction Cooker', 'Broom', 'Trombone', 'Plum', 'Rickshaw', 'Goldfish', 'Kiwi fruit', + 'Router/modem', 'Poker Card', 'Toaster', 'Shrimp', 'Sushi', 'Cheese', 'Notepaper', 'Cherry', 'Pliers', 'CD', + 'Pasta', 'Hammer', 'Cue', 'Avocado', 'Hamimelon', 'Flask', 'Mushroom', 'Screwdriver', 'Soap', 'Recorder', + 'Bear', 'Eggplant', 'Board Eraser', 'Coconut', 'Tape Measure/Ruler', 'Pig', 'Showerhead', 'Globe', 'Chips', + 'Steak', 'Crosswalk Sign', 'Stapler', 'Camel', 'Formula 1', 'Pomegranate', 'Dishwasher', 'Crab', + 'Hoverboard', 'Meat ball', 'Rice Cooker', 'Tuba', 'Calculator', 'Papaya', 'Antelope', 'Parrot', 'Seal', + 'Butterfly', 'Dumbbell', 'Donkey', 'Lion', 'Urinal', 'Dolphin', 'Electric Drill', 'Hair Dryer', 'Egg tart', + 'Jellyfish', 'Treadmill', 'Lighter', 'Grapefruit', 'Game board', 'Mop', 'Radish', 'Baozi', 'Target', 'French', + 'Spring Rolls', 'Monkey', 'Rabbit', 'Pencil Case', 'Yak', 'Red Cabbage', 'Binoculars', 'Asparagus', 'Barbell', + 'Scallop', 'Noddles', 'Comb', 'Dumpling', 'Oyster', 'Table Tennis paddle', 'Cosmetics Brush/Eyeliner Pencil', + 'Chainsaw', 'Eraser', 'Lobster', 'Durian', 'Okra', 'Lipstick', 'Cosmetics Mirror', 'Curling', 'Table Tennis' ] + diff --git a/data/scripts/get_argoverse_hd.sh b/data/scripts/get_argoverse_hd.sh index 18131a6764d6..331509914568 100644 --- a/data/scripts/get_argoverse_hd.sh +++ b/data/scripts/get_argoverse_hd.sh @@ -36,7 +36,7 @@ for val in annotation_files: img_name = a['images'][img_id]['name'] img_label_name = img_name[:-3] + "txt" - obj_class = annot['category_id'] + cls = annot['category_id'] # instance class id x_center, y_center, width, height = annot['bbox'] x_center = (x_center + width / 2) / 1920. # offset and scale y_center = (y_center + height / 2) / 1200. 
# offset and scale @@ -46,11 +46,10 @@ for val in annotation_files: img_dir = "./labels/" + a['seq_dirs'][a['images'][annot['image_id']]['sid']] Path(img_dir).mkdir(parents=True, exist_ok=True) - if img_dir + "/" + img_label_name not in label_dict: label_dict[img_dir + "/" + img_label_name] = [] - label_dict[img_dir + "/" + img_label_name].append(f"{obj_class} {x_center} {y_center} {width} {height}\n") + label_dict[img_dir + "/" + img_label_name].append(f"{cls} {x_center} {y_center} {width} {height}\n") for filename in label_dict: with open(filename, "w") as file: diff --git a/data/scripts/get_objects365.py b/data/scripts/get_objects365.py new file mode 100644 index 000000000000..309e6d3f2b64 --- /dev/null +++ b/data/scripts/get_objects365.py @@ -0,0 +1,33 @@ +# Objects365 https://www.objects365.org labels JSON to YOLO script +# 1. Download Object 365 from the Object 365 website And unpack all images in datasets/object365/images +# 2. Place this file and zhiyuan_objv2_train.json file in datasets/objects365 +# 3. Execute this file from datasets/object365 path +# /datasets +# /objects365 +# /images +# /labels + +from pycocotools.coco import COCO + +coco = COCO("zhiyuan_objv2_train.json") +cats = coco.loadCats(coco.getCatIds()) +nms = [cat["name"] for cat in cats] +print("COCO categories: \n{}\n".format(" ".join(nms))) +for categoryId, cat in enumerate(nms): + catIds = coco.getCatIds(catNms=[cat]) + imgIds = coco.getImgIds(catIds=catIds) + print(cat) + # Create a subfolder in this directory called "labels". This is where the annotations will be saved in YOLO format + for im in coco.loadImgs(imgIds): + width, height = im["width"], im["height"] + path = im["file_name"].split("/")[-1] # image filename + try: + with open("labels/train/" + path.replace(".jpg", ".txt"), "a+") as file: + annIds = coco.getAnnIds(imgIds=im["id"], catIds=catIds, iscrowd=None) + for a in coco.loadAnns(annIds): + x, y, w, h = a['bbox'] # bounding box in xywh (xy top-left corner) + x, y = x + w / 2, y + h / 2 # xy to center + file.write(f"{categoryId} {x / width:.5f} {y / height:.5f} {w / width:.5f} {h / height:.5f}\n") + + except Exception as e: + print(e) From a833ee2a466d939b8b09322edbf92233d4ceb0cd Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 29 Apr 2021 21:16:23 +0200 Subject: [PATCH 0168/1976] Update check_requirements() exclude list (#2974) --- detect.py | 2 +- hubconf.py | 2 +- test.py | 2 +- train.py | 2 +- utils/autoanchor.py | 3 ++- utils/plots.py | 3 ++- 6 files changed, 8 insertions(+), 6 deletions(-) diff --git a/detect.py b/detect.py index ba42f349dbaf..af1660b6608f 100644 --- a/detect.py +++ b/detect.py @@ -172,7 +172,7 @@ def detect(opt): parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences') opt = parser.parse_args() print(opt) - check_requirements(exclude=('pycocotools', 'thop')) + check_requirements(exclude=('tensorboard', 'pycocotools', 'thop')) with torch.no_grad(): if opt.update: # update all models (to fix SourceChangeWarning) diff --git a/hubconf.py b/hubconf.py index e42d0b59bd2a..747f7d41bcec 100644 --- a/hubconf.py +++ b/hubconf.py @@ -15,7 +15,7 @@ from utils.torch_utils import select_device dependencies = ['torch', 'yaml'] -check_requirements(Path(__file__).parent / 'requirements.txt', exclude=('pycocotools', 'thop')) +check_requirements(Path(__file__).parent / 'requirements.txt', exclude=('tensorboard', 'pycocotools', 'thop')) def create(name, pretrained, channels, classes, autoshape, verbose): diff --git a/test.py b/test.py index 
2b9e90c05367..0093b8c09f54 100644 --- a/test.py +++ b/test.py @@ -310,7 +310,7 @@ def test(data, opt.save_json |= opt.data.endswith('coco.yaml') opt.data = check_file(opt.data) # check file print(opt) - check_requirements() + check_requirements(exclude=('tensorboard', 'pycocotools', 'thop')) if opt.task in ('train', 'val', 'test'): # run normally test(opt.data, diff --git a/train.py b/train.py index acfc9ef5527b..5bee57fe0bbe 100644 --- a/train.py +++ b/train.py @@ -497,7 +497,7 @@ def train(hyp, opt, device, tb_writer=None): set_logging(opt.global_rank) if opt.global_rank in [-1, 0]: check_git_status() - check_requirements() + check_requirements(exclude=('pycocotools', 'thop')) # Resume wandb_run = check_wandb_resume(opt) diff --git a/utils/autoanchor.py b/utils/autoanchor.py index 75b350da729c..87dc394c832e 100644 --- a/utils/autoanchor.py +++ b/utils/autoanchor.py @@ -3,7 +3,6 @@ import numpy as np import torch import yaml -from scipy.cluster.vq import kmeans from tqdm import tqdm from utils.general import colorstr @@ -76,6 +75,8 @@ def kmean_anchors(path='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=10 Usage: from utils.autoanchor import *; _ = kmean_anchors() """ + from scipy.cluster.vq import kmeans + thr = 1. / thr prefix = colorstr('autoanchor: ') diff --git a/utils/plots.py b/utils/plots.py index ab6448aa96eb..e5a2d99e0abd 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -16,7 +16,6 @@ import torch import yaml from PIL import Image, ImageDraw, ImageFont -from scipy.signal import butter, filtfilt from utils.general import xywh2xyxy, xyxy2xywh from utils.metrics import fitness @@ -54,6 +53,8 @@ def hist2d(x, y, n=100): def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5): + from scipy.signal import butter, filtfilt + # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy def butter_lowpass(cutoff, fs, order): nyq = 0.5 * fs From 955eea8b96397b1f1f7666c4ee42d1c6411d0755 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 30 Apr 2021 00:56:44 +0200 Subject: [PATCH 0169/1976] Make cache saving optional (#2977) --- utils/datasets.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index 3fcdddd7c013..43e4e5973a25 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -502,8 +502,11 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): x['hash'] = get_hash(self.label_files + self.img_files) x['results'] = nf, nm, ne, nc, i + 1 x['version'] = 0.1 # cache version - torch.save(x, path) # save for next time - logging.info(f'{prefix}New cache created: {path}') + try: + torch.save(x, path) # save for next time + logging.info(f'{prefix}New cache created: {path}') + except Exception as e: + logging.info(f'{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}') # path not writeable return x def __len__(self): From 41f5cc563776853e167e3f95323cfa1ac22d6272 Mon Sep 17 00:00:00 2001 From: jluntamazon <73358667+jluntamazon@users.noreply.github.com> Date: Fri, 30 Apr 2021 03:54:48 -0700 Subject: [PATCH 0170/1976] YOLOv5 AWS Inferentia Inplace compatibility updates (#2953) * Added flag to enable/disable all inplace and assignment operations * Removed shape print statements * Scope Detect/Model import to avoid circular dependency * PEP8 * create _descale_pred() * replace lost space * replace list with tuple Co-authored-by: Glenn Jocher --- models/experimental.py | 8 +++--- models/yolo.py | 60 +++++++++++++++++++++++++++++------------- 2 files changed, 47 
insertions(+), 21 deletions(-) diff --git a/models/experimental.py b/models/experimental.py index 548353c93be0..85a22d42fa54 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -110,7 +110,9 @@ def forward(self, x, augment=False): return y, None # inference, train output -def attempt_load(weights, map_location=None): +def attempt_load(weights, map_location=None, inplace=True): + from models.yolo import Detect, Model + # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a model = Ensemble() for w in weights if isinstance(weights, list) else [weights]: @@ -120,8 +122,8 @@ def attempt_load(weights, map_location=None): # Compatibility updates for m in model.modules(): - if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]: - m.inplace = True # pytorch 1.7.0 compatibility + if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model]: + m.inplace = inplace # pytorch 1.7.0 compatibility elif type(m) is Conv: m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility diff --git a/models/yolo.py b/models/yolo.py index cbff70fc83d4..520047ff3a99 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -26,7 +26,7 @@ class Detect(nn.Module): stride = None # strides computed during build export = False # onnx export - def __init__(self, nc=80, anchors=(), ch=()): # detection layer + def __init__(self, nc=80, anchors=(), ch=(), inplace=True): # detection layer super(Detect, self).__init__() self.nc = nc # number of classes self.no = nc + 5 # number of outputs per anchor @@ -37,6 +37,7 @@ def __init__(self, nc=80, anchors=(), ch=()): # detection layer self.register_buffer('anchors', a) # shape(nl,na,2) self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2) self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv + self.inplace = inplace # use in-place ops (e.g. slice assignment) def forward(self, x): # x = x.copy() # for profiling @@ -52,8 +53,13 @@ def forward(self, x): self.grid[i] = self._make_grid(nx, ny).to(x[i].device) y = x[i].sigmoid() - y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy - y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh + if self.inplace: + y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy + y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh + else: # for YOLOv5 on AWS Inferentia https://github.com/ultralytics/yolov5/pull/2953 + xy = (y[..., 0:2] * 2. 
- 0.5 + self.grid[i]) * self.stride[i] # xy + wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh + y = torch.cat((xy, wh, y[..., 4:]), -1) z.append(y.view(bs, -1, self.no)) return x if self.training else (torch.cat(z, 1), x) @@ -85,12 +91,14 @@ def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, i self.yaml['anchors'] = round(anchors) # override yaml value self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist self.names = [str(i) for i in range(self.yaml['nc'])] # default names + self.inplace = self.yaml.get('inplace', True) # logger.info([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))]) # Build strides, anchors m = self.model[-1] # Detect() if isinstance(m, Detect): s = 256 # 2x min stride + m.inplace = self.inplace m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward m.anchors /= m.stride.view(-1, 1, 1) check_anchor_order(m) @@ -105,24 +113,23 @@ def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, i def forward(self, x, augment=False, profile=False): if augment: - img_size = x.shape[-2:] # height, width - s = [1, 0.83, 0.67] # scales - f = [None, 3, None] # flips (2-ud, 3-lr) - y = [] # outputs - for si, fi in zip(s, f): - xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max())) - yi = self.forward_once(xi)[0] # forward - # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1]) # save - yi[..., :4] /= si # de-scale - if fi == 2: - yi[..., 1] = img_size[0] - yi[..., 1] # de-flip ud - elif fi == 3: - yi[..., 0] = img_size[1] - yi[..., 0] # de-flip lr - y.append(yi) - return torch.cat(y, 1), None # augmented inference, train + return self.forward_augment(x) # augmented inference, None else: return self.forward_once(x, profile) # single-scale inference, train + def forward_augment(self, x): + img_size = x.shape[-2:] # height, width + s = [1, 0.83, 0.67] # scales + f = [None, 3, None] # flips (2-ud, 3-lr) + y = [] # outputs + for si, fi in zip(s, f): + xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max())) + yi = self.forward_once(xi)[0] # forward + # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1]) # save + yi = self._descale_pred(yi, fi, si, img_size) + y.append(yi) + return torch.cat(y, 1), None # augmented inference, train + def forward_once(self, x, profile=False): y, dt = [], [] # outputs for m in self.model: @@ -146,6 +153,23 @@ def forward_once(self, x, profile=False): logger.info('%.1fms total' % sum(dt)) return x + def _descale_pred(self, p, flips, scale, img_size): + # de-scale predictions following augmented inference (inverse operation) + if self.inplace: + p[..., :4] /= scale # de-scale + if flips == 2: + p[..., 1] = img_size[0] - p[..., 1] # de-flip ud + elif flips == 3: + p[..., 0] = img_size[1] - p[..., 0] # de-flip lr + else: + x, y, wh = p[..., 0:1] / scale, p[..., 1:2] / scale, p[..., 2:4] / scale # de-scale + if flips == 2: + y = img_size[0] - y # de-flip ud + elif flips == 3: + x = img_size[1] - x # de-flip lr + p = torch.cat((x, y, wh, p[..., 4:]), -1) + return p + def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency # https://arxiv.org/abs/1708.02002 section 3.3 # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1. 
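The inplace flag added in the patch above exists because slice assignment on tensors (y[..., 0:2] = ...) is a mutation that some export and compilation backends, such as the Neuron compiler used for AWS Inferentia, cannot trace. The two decode paths are numerically identical; the standalone sketch below demonstrates this in isolation, with tensor shapes and values chosen for illustration rather than taken from the diff:

import torch

y = torch.rand(1, 3, 4, 4, 85)           # assumed Detect() output after sigmoid: (bs, na, ny, nx, no)
grid = torch.zeros(1, 1, 4, 4, 2)        # assumed cell-offset grid
anchor_grid = torch.ones(1, 3, 1, 1, 2)  # assumed per-anchor wh multipliers
stride = 8.                              # assumed stride of this detection layer

# In-place path: slice assignment mutates y, which tracing compilers may reject
y1 = y.clone()
y1[..., 0:2] = (y1[..., 0:2] * 2. - 0.5 + grid) * stride  # xy
y1[..., 2:4] = (y1[..., 2:4] * 2) ** 2 * anchor_grid      # wh

# Out-of-place path: build new tensors and concatenate, no mutation
xy = (y[..., 0:2] * 2. - 0.5 + grid) * stride
wh = (y[..., 2:4] * 2) ** 2 * anchor_grid
y2 = torch.cat((xy, wh, y[..., 4:]), -1)

assert torch.allclose(y1, y2)  # both paths decode identical boxes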
From 9b91db6d1aab7267db5153f0be79c8c25e84f75a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 30 Apr 2021 13:15:31 +0200 Subject: [PATCH 0171/1976] Created using Colaboratory --- tutorial.ipynb | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 245b46aa7d9f..3954feadfcb2 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -563,7 +563,7 @@ "clear_output()\n", "print(f\"Setup complete. Using torch {torch.__version__} ({torch.cuda.get_device_properties(0).name if torch.cuda.is_available() else 'CPU'})\")" ], - "execution_count": 31, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -889,7 +889,7 @@ "id": "bOy5KI2ncnWd" }, "source": [ - "# Tensorboard (optional)\n", + "# Tensorboard (optional)\n", "%load_ext tensorboard\n", "%tensorboard --logdir runs/train" ], @@ -902,9 +902,10 @@ "id": "2fLAV42oNb7M" }, "source": [ - "# Weights & Biases (optional)\n", - "%pip install -q wandb \n", - "!wandb login # use 'wandb disabled' or 'wandb enabled' to disable or enable" + "# Weights & Biases (optional)\n", + "%pip install -q wandb\n", + "import wandb\n", + "wandb.login()" ], "execution_count": null, "outputs": [] From d08575ee5e5259b65e1f39ac6880cd33804c4d1a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 30 Apr 2021 14:59:51 +0200 Subject: [PATCH 0172/1976] PyTorch Hub load directly when possible (#2986) --- hubconf.py | 42 +++++++++++++++++++++++------------------- 1 file changed, 23 insertions(+), 19 deletions(-) diff --git a/hubconf.py b/hubconf.py index 747f7d41bcec..ee5a7d87224d 100644 --- a/hubconf.py +++ b/hubconf.py @@ -9,7 +9,7 @@ import torch -from models.yolo import Model +from models.yolo import Model, attempt_load from utils.general import check_requirements, set_logging from utils.google_utils import attempt_download from utils.torch_utils import select_device @@ -26,33 +26,37 @@ def create(name, pretrained, channels, classes, autoshape, verbose): pretrained (bool): load pretrained weights into the model channels (int): number of input channels classes (int): number of model classes + autoshape (bool): apply YOLOv5 .autoshape() wrapper to model + verbose (bool): print all information to screen Returns: - pytorch model + YOLOv5 pytorch model """ + set_logging(verbose=verbose) + fname = f'{name}.pt' # checkpoint filename try: - set_logging(verbose=verbose) - - cfg = list((Path(__file__).parent / 'models').rglob(f'{name}.yaml'))[0] # model.yaml path - model = Model(cfg, channels, classes) - if pretrained: - fname = f'{name}.pt' # checkpoint filename - attempt_download(fname) # download if not found locally - ckpt = torch.load(fname, map_location=torch.device('cpu')) # load - msd = model.state_dict() # model state_dict - csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 - csd = {k: v for k, v in csd.items() if msd[k].shape == v.shape} # filter - model.load_state_dict(csd, strict=False) # load - if len(ckpt['model'].names) == classes: - model.names = ckpt['model'].names # set class names attribute - if autoshape: - model = model.autoshape() # for file/URI/PIL/cv2/np inputs and NMS + if pretrained and channels == 3 and classes == 80: + model = attempt_load(fname, map_location=torch.device('cpu')) # download/load FP32 model + else: + cfg = list((Path(__file__).parent / 'models').rglob(f'{name}.yaml'))[0] # model.yaml path + model = Model(cfg, channels, classes) # create model + if pretrained: + attempt_download(fname) # download if not found locally + ckpt = 
torch.load(fname, map_location=torch.device('cpu')) # load + msd = model.state_dict() # model state_dict + csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 + csd = {k: v for k, v in csd.items() if msd[k].shape == v.shape} # filter + model.load_state_dict(csd, strict=False) # load + if len(ckpt['model'].names) == classes: + model.names = ckpt['model'].names # set class names attribute + if autoshape: + model = model.autoshape() # for file/URI/PIL/cv2/np inputs and NMS device = select_device('0' if torch.cuda.is_available() else 'cpu') # default to GPU if available return model.to(device) except Exception as e: help_url = 'https://github.com/ultralytics/yolov5/issues/36' - s = 'Cache maybe be out of date, try force_reload=True. See %s for help.' % help_url + s = 'Cache may be out of date, try `force_reload=True`. See %s for help.' % help_url raise Exception(s) from e From 801b4698784226adefa514182d96c703066375c1 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Sat, 1 May 2021 01:24:26 +0530 Subject: [PATCH 0173/1976] Improve performance of dataset Logger (#2943) * Improve performance of Dataset Logger * Fix scaling bug --- utils/wandb_logging/wandb_utils.py | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/utils/wandb_logging/wandb_utils.py b/utils/wandb_logging/wandb_utils.py index d8fbd1ef42aa..928f577fd460 100644 --- a/utils/wandb_logging/wandb_utils.py +++ b/utils/wandb_logging/wandb_utils.py @@ -196,9 +196,9 @@ def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config= nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names']) names = {k: v for k, v in enumerate(names)} # to index dictionary self.train_artifact = self.create_dataset_table(LoadImagesAndLabels( - data['train']), names, name='train') if data.get('train') else None + data['train'], rect=True, batch_size=1), names, name='train') if data.get('train') else None self.val_artifact = self.create_dataset_table(LoadImagesAndLabels( - data['val']), names, name='val') if data.get('val') else None + data['val'], rect=True, batch_size=1), names, name='val') if data.get('val') else None if data.get('train'): data['train'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'train') if data.get('val'): @@ -243,16 +243,12 @@ def create_dataset_table(self, dataset, class_to_id, name='dataset'): table = wandb.Table(columns=["id", "train_image", "Classes", "name"]) class_set = wandb.Classes([{'id': id, 'name': name} for id, name in class_to_id.items()]) for si, (img, labels, paths, shapes) in enumerate(tqdm(dataset)): - height, width = shapes[0] - labels[:, 2:] = (xywh2xyxy(labels[:, 2:].view(-1, 4))) * torch.Tensor([width, height, width, height]) box_data, img_classes = [], {} - for cls, *xyxy in labels[:, 1:].tolist(): + for cls, *xywh in labels[:, 1:].tolist(): cls = int(cls) - box_data.append({"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, + box_data.append({"position": {"middle": [xywh[0], xywh[1]], "width": xywh[2], "height": xywh[3]}, "class_id": cls, - "box_caption": "%s" % (class_to_id[cls]), - "scores": {"acc": 1}, - "domain": "pixel"}) + "box_caption": "%s" % (class_to_id[cls])}) img_classes[cls] = class_to_id[cls] boxes = {"ground_truth": {"box_data": box_data, "class_labels": class_to_id}} # inference-space table.add_data(si, wandb.Image(paths, classes=class_set, boxes=boxes), json.dumps(img_classes), From 54652fe3ff484c58614353020bd6b391fb931b76 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 1 
May 2021 00:16:58 +0200 Subject: [PATCH 0174/1976] Objects365 update --- data/scripts/get_objects365.py | 30 +++++++++++++++++++++--------- utils/general.py | 6 +++--- 2 files changed, 24 insertions(+), 12 deletions(-) diff --git a/data/scripts/get_objects365.py b/data/scripts/get_objects365.py index 309e6d3f2b64..cb5832cdc5b2 100644 --- a/data/scripts/get_objects365.py +++ b/data/scripts/get_objects365.py @@ -7,22 +7,34 @@ # /images # /labels + from pycocotools.coco import COCO -coco = COCO("zhiyuan_objv2_train.json") -cats = coco.loadCats(coco.getCatIds()) -nms = [cat["name"] for cat in cats] -print("COCO categories: \n{}\n".format(" ".join(nms))) -for categoryId, cat in enumerate(nms): +from utils.general import download, Path + +# Make Directories +dir = Path('../datasets/objects365') # dataset directory +for p in 'images', 'labels': + (dir / p).mkdir(parents=True, exist_ok=True) + for q in 'train', 'val': + (dir / p / q).mkdir(parents=True, exist_ok=True) + +# Download +url = "https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/train/" +download(url + 'zhiyuan_objv2_train.tar.gz', dir=dir, threads=8) # annotations json +download([url + f for f in [f'patch{i}.tar.gz' for i in range(51)]], dir=dir / 'images' / 'train', threads=8) + +# Labels +coco = COCO(dir / 'zhiyuan_objv2_train.json') +names = [x["name"] for x in coco.loadCats(coco.getCatIds())] +for categoryId, cat in enumerate(names): catIds = coco.getCatIds(catNms=[cat]) imgIds = coco.getImgIds(catIds=catIds) - print(cat) - # Create a subfolder in this directory called "labels". This is where the annotations will be saved in YOLO format for im in coco.loadImgs(imgIds): width, height = im["width"], im["height"] - path = im["file_name"].split("/")[-1] # image filename + path = Path(im["file_name"]) # image filename try: - with open("labels/train/" + path.replace(".jpg", ".txt"), "a+") as file: + with open(dir / 'labels' / 'train' / path.with_suffix('.txt').name, 'a') as file: annIds = coco.getAnnIds(imgIds=im["id"], catIds=catIds, iscrowd=None) for a in coco.loadAnns(annIds): x, y, w, h = a['bbox'] # bounding box in xywh (xy top-left corner) diff --git a/utils/general.py b/utils/general.py index fbb99b9e7f99..bad20bbedd44 100755 --- a/utils/general.py +++ b/utils/general.py @@ -183,7 +183,7 @@ def check_dataset(dict): raise Exception('Dataset not found.') -def download(url, dir='.', multi_thread=False): +def download(url, dir='.', threads=1): # Multi-threaded file download and unzip function def download_one(url, dir): # Download 1 file @@ -200,8 +200,8 @@ def download_one(url, dir): dir = Path(dir) dir.mkdir(parents=True, exist_ok=True) # make directory - if multi_thread: - ThreadPool(8).imap(lambda x: download_one(*x), zip(url, repeat(dir))) # 8 threads + if threads > 1: + ThreadPool(threads).imap(lambda x: download_one(*x), zip(url, repeat(dir))) # multi-threaded else: for u in tuple(url) if isinstance(url, str) else url: download_one(u, dir) From f55730ee9cd5d58e6f6443ce35e1995598e85665 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 1 May 2021 01:03:09 +0200 Subject: [PATCH 0175/1976] Objects365 bug fix --- data/scripts/get_objects365.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/scripts/get_objects365.py b/data/scripts/get_objects365.py index cb5832cdc5b2..307dfe0da599 100644 --- a/data/scripts/get_objects365.py +++ b/data/scripts/get_objects365.py @@ -21,7 +21,7 @@ # Download url = 
"https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/train/" -download(url + 'zhiyuan_objv2_train.tar.gz', dir=dir, threads=8) # annotations json +download([url + 'zhiyuan_objv2_train.tar.gz'], dir=dir) # annotations json download([url + f for f in [f'patch{i}.tar.gz' for i in range(51)]], dir=dir / 'images' / 'train', threads=8) # Labels From 4d7bca764860f5a03ed7eed10d8ccf59b73a1f16 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 1 May 2021 12:37:41 +0200 Subject: [PATCH 0176/1976] Add unzip flag to download() (#3002) --- utils/general.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/general.py b/utils/general.py index bad20bbedd44..7e50a7333dea 100755 --- a/utils/general.py +++ b/utils/general.py @@ -183,7 +183,7 @@ def check_dataset(dict): raise Exception('Dataset not found.') -def download(url, dir='.', threads=1): +def download(url, dir='.', unzip=True, threads=1): # Multi-threaded file download and unzip function def download_one(url, dir): # Download 1 file @@ -191,7 +191,7 @@ def download_one(url, dir): if not f.exists(): print(f'Downloading {url} to {f}...') torch.hub.download_url_to_file(url, f, progress=True) # download - if f.suffix in ('.zip', '.gz'): + if unzip and f.suffix in ('.zip', '.gz'): print(f'Unzipping {f}...') if f.suffix == '.zip': os.system(f'unzip -qo {f} -d {dir} && rm {f}') # unzip -quiet -overwrite From 37eaffec7d7a8d82d24082392dd5dc95e56881d3 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 1 May 2021 17:29:51 +0200 Subject: [PATCH 0177/1976] Curl update (#3004) * Curl update * Curl update --- data/scripts/get_objects365.py | 2 +- utils/general.py | 7 +++++-- utils/google_utils.py | 2 +- 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/data/scripts/get_objects365.py b/data/scripts/get_objects365.py index 307dfe0da599..d77b26d60691 100644 --- a/data/scripts/get_objects365.py +++ b/data/scripts/get_objects365.py @@ -22,7 +22,7 @@ # Download url = "https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/train/" download([url + 'zhiyuan_objv2_train.tar.gz'], dir=dir) # annotations json -download([url + f for f in [f'patch{i}.tar.gz' for i in range(51)]], dir=dir / 'images' / 'train', threads=8) +download([url + f for f in [f'patch{i}.tar.gz' for i in range(51)]], dir=dir / 'images' / 'train', curl=True, threads=8) # Labels coco = COCO(dir / 'zhiyuan_objv2_train.json') diff --git a/utils/general.py b/utils/general.py index 7e50a7333dea..0c5928d8d06f 100755 --- a/utils/general.py +++ b/utils/general.py @@ -183,14 +183,17 @@ def check_dataset(dict): raise Exception('Dataset not found.') -def download(url, dir='.', unzip=True, threads=1): +def download(url, dir='.', unzip=True, curl=False, threads=1): # Multi-threaded file download and unzip function def download_one(url, dir): # Download 1 file f = dir / Path(url).name # filename if not f.exists(): print(f'Downloading {url} to {f}...') - torch.hub.download_url_to_file(url, f, progress=True) # download + if curl: + os.system(f"curl -L '{url}' -o '{f}' --retry 9 -C -") # curl download, retry and resume on fail + else: + torch.hub.download_url_to_file(url, f, progress=True) # torch download if unzip and f.suffix in ('.zip', '.gz'): print(f'Unzipping {f}...') if f.suffix == '.zip': diff --git a/utils/google_utils.py b/utils/google_utils.py index 6a4660bad509..eae2d1b9ffcc 100644 --- a/utils/google_utils.py +++ b/utils/google_utils.py @@ -47,7 +47,7 @@ def attempt_download(file, 
repo='ultralytics/yolov5'): assert redundant, 'No secondary mirror' url = f'https://storage.googleapis.com/{repo}/ckpt/{name}' print(f'Downloading {url} to {file}...') - os.system(f'curl -L {url} -o {file}') # torch.hub.download_url_to_file(url, weights) + os.system(f"curl -L '{url}' -o '{file}' --retry 3 -C -") # curl download, retry and resume on fail finally: if not file.exists() or file.stat().st_size < 1E6: # check file.unlink(missing_ok=True) # remove partial downloads From c1a44ed9c5d03ec8a582bb96cf5f3f29a1e1ade6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 1 May 2021 17:35:02 +0200 Subject: [PATCH 0178/1976] Update hubconf.py for unified loading (#3005) --- hubconf.py | 34 +++++++--------------------------- 1 file changed, 7 insertions(+), 27 deletions(-) diff --git a/hubconf.py b/hubconf.py index ee5a7d87224d..7f897d15c314 100644 --- a/hubconf.py +++ b/hubconf.py @@ -18,7 +18,7 @@ check_requirements(Path(__file__).parent / 'requirements.txt', exclude=('tensorboard', 'pycocotools', 'thop')) -def create(name, pretrained, channels, classes, autoshape, verbose): +def create(name, pretrained, channels=3, classes=80, autoshape=True, verbose=True): """Creates a specified YOLOv5 model Arguments: @@ -33,7 +33,7 @@ def create(name, pretrained, channels, classes, autoshape, verbose): YOLOv5 pytorch model """ set_logging(verbose=verbose) - fname = f'{name}.pt' # checkpoint filename + fname = Path(name).with_suffix('.pt') # checkpoint filename try: if pretrained and channels == 3 and classes == 80: model = attempt_load(fname, map_location=torch.device('cpu')) # download/load FP32 model @@ -60,30 +60,9 @@ def create(name, pretrained, channels, classes, autoshape, verbose): raise Exception(s) from e -def custom(path_or_model='path/to/model.pt', autoshape=True, verbose=True): - """YOLOv5-custom model https://github.com/ultralytics/yolov5 - - Arguments (3 options): - path_or_model (str): 'path/to/model.pt' - path_or_model (dict): torch.load('path/to/model.pt') - path_or_model (nn.Module): torch.load('path/to/model.pt')['model'] - - Returns: - pytorch model - """ - set_logging(verbose=verbose) - - model = torch.load(path_or_model) if isinstance(path_or_model, str) else path_or_model # load checkpoint - if isinstance(model, dict): - model = model['ema' if model.get('ema') else 'model'] # load model - - hub_model = Model(model.yaml).to(next(model.parameters()).device) # create - hub_model.load_state_dict(model.float().state_dict()) # load state_dict - hub_model.names = model.names # class names - if autoshape: - hub_model = hub_model.autoshape() # for file/URI/PIL/cv2/np inputs and NMS - device = select_device('0' if torch.cuda.is_available() else 'cpu') # default to GPU if available - return hub_model.to(device) +def custom(path='path/to/model.pt', autoshape=True, verbose=True): + # YOLOv5 custom or local model + return create(path, autoshape, verbose) def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True): @@ -127,7 +106,8 @@ def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=Tr if __name__ == '__main__': - model = create(name='yolov5s', pretrained=True, channels=3, classes=80, autoshape=True, verbose=True) # pretrained + model = create(name='weights/yolov5s.pt', pretrained=True, channels=3, classes=80, autoshape=True, + verbose=True) # pretrained # model = custom(path_or_model='path/to/model.pt') # custom # Verify inference From 7c89c829e3e299fec1f8e6280f69474572a51359 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 1 May 2021 
17:48:12 +0200 Subject: [PATCH 0179/1976] cleanup --- hubconf.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/hubconf.py b/hubconf.py index 7f897d15c314..7359a58bd324 100644 --- a/hubconf.py +++ b/hubconf.py @@ -18,7 +18,7 @@ check_requirements(Path(__file__).parent / 'requirements.txt', exclude=('tensorboard', 'pycocotools', 'thop')) -def create(name, pretrained, channels=3, classes=80, autoshape=True, verbose=True): +def create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True): """Creates a specified YOLOv5 model Arguments: @@ -106,9 +106,8 @@ def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=Tr if __name__ == '__main__': - model = create(name='weights/yolov5s.pt', pretrained=True, channels=3, classes=80, autoshape=True, - verbose=True) # pretrained - # model = custom(path_or_model='path/to/model.pt') # custom + model = create(name='yolov5s', pretrained=True, channels=3, classes=80, autoshape=True, verbose=True) # pretrained + # model = custom(path='path/to/model.pt') # custom # Verify inference import cv2 From 31ee54c28ca088c6b59b6a36f9ccd71e35229e74 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 1 May 2021 18:15:17 +0200 Subject: [PATCH 0180/1976] Hubconf.py bug fix (#3007) --- hubconf.py | 2 +- utils/google_utils.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/hubconf.py b/hubconf.py index 7359a58bd324..898b63b5d0b1 100644 --- a/hubconf.py +++ b/hubconf.py @@ -62,7 +62,7 @@ def create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbos def custom(path='path/to/model.pt', autoshape=True, verbose=True): # YOLOv5 custom or local model - return create(path, autoshape, verbose) + return create(path, autoshape=autoshape, verbose=verbose) def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True): diff --git a/utils/google_utils.py b/utils/google_utils.py index eae2d1b9ffcc..63d3e5b212f3 100644 --- a/utils/google_utils.py +++ b/utils/google_utils.py @@ -21,6 +21,7 @@ def attempt_download(file, repo='ultralytics/yolov5'): file = Path(str(file).strip().replace("'", '')) if not file.exists(): + file.parent.mkdir(parents=True, exist_ok=True) # make parent dir (if required) try: response = requests.get(f'https://api.github.com/repos/{repo}/releases/latest').json() # github api assets = [x['name'] for x in response['assets']] # release assets, i.e. ['yolov5s.pt', 'yolov5m.pt', ...] From e2a80c6c0f46a571efd4ad06b34637339a2088dc Mon Sep 17 00:00:00 2001 From: Hodovo <83190024+hodovo@users.noreply.github.com> Date: Sun, 2 May 2021 13:42:33 -0700 Subject: [PATCH 0181/1976] Add support for FP16 (half) to export.py (#3010) * Added support for fp16 (half) to export.py * minimize code additions Co-authored-by: Glenn Jocher --- models/export.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/models/export.py b/models/export.py index da15079149a1..90855d2588da 100644 --- a/models/export.py +++ b/models/export.py @@ -28,6 +28,7 @@ parser.add_argument('--batch-size', type=int, default=1, help='batch size') parser.add_argument('--grid', action='store_true', help='export Detect() layer grid') parser.add_argument('--device', default='cpu', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') + parser.add_argument('--half', action='store_true', help='FP16 half-precision export') parser.add_argument('--dynamic', action='store_true', help='dynamic ONNX axes') # ONNX-only parser.add_argument('--simplify', action='store_true', help='simplify ONNX model') # ONNX-only opt = parser.parse_args() @@ -44,11 +45,14 @@ # Checks gs = int(max(model.stride)) # grid size (max stride) opt.img_size = [check_img_size(x, gs) for x in opt.img_size] # verify img_size are gs-multiples + assert not (opt.device.lower() == "cpu" and opt.half), '--half only compatible with GPU export, i.e. use --device 0' # Input img = torch.zeros(opt.batch_size, 3, *opt.img_size).to(device) # image size(1,3,320,192) iDetection # Update model + if opt.half: + img, model = img.half(), model.half() # to FP16 for k, m in model.named_modules(): m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility if isinstance(m, models.common.Conv): # assign export-friendly activations From fcd57028d2b0c73039e94b614b10b9bb7251315b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 3 May 2021 12:37:50 +0200 Subject: [PATCH 0182/1976] Add is_colab() function (#3018) --- utils/general.py | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/utils/general.py b/utils/general.py index 0c5928d8d06f..7c06965e3f41 100755 --- a/utils/general.py +++ b/utils/general.py @@ -51,11 +51,20 @@ def get_latest_run(search_dir='.'): return max(last_list, key=os.path.getctime) if last_list else '' -def isdocker(): +def is_docker(): # Is environment a Docker container return Path('/workspace').exists() # or Path('/.dockerenv').exists() +def is_colab(): + # Is environment a Google Colab instance + try: + import google.colab + return True + except Exception as e: + return False + + def emojis(str=''): # Return platform-dependent emoji-safe version of string return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str @@ -81,7 +90,7 @@ def check_git_status(): print(colorstr('github: '), end='') try: assert Path('.git').exists(), 'skipping check (not a git repository)' - assert not isdocker(), 'skipping check (Docker image)' + assert not is_docker(), 'skipping check (Docker image)' assert check_online(), 'skipping check (offline)' cmd = 'git fetch && git config --get remote.origin.url' @@ -138,7 +147,8 @@ def check_img_size(img_size, s=32): def check_imshow(): # Check if environment supports image displays try: - assert not isdocker(), 'cv2.imshow() is disabled in Docker environments' + assert not is_docker(), 'cv2.imshow() is disabled in Docker environments' + assert not is_colab(), 'cv2.imshow() is disabled in Google Colab environments' cv2.imshow('test', np.zeros((1, 1, 3))) cv2.waitKey(1) cv2.destroyAllWindows() From 41cc7caee64f78b8364db27b5326d1a53b91cb97 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 3 May 2021 15:07:20 +0200 Subject: [PATCH 0183/1976] Add NMS threshold checks (#3020) * Add NMS threshold checks * fix --- utils/general.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/utils/general.py b/utils/general.py index 7c06965e3f41..fd20a7e677e0 100755 --- a/utils/general.py +++ b/utils/general.py @@ -476,6 +476,10 @@ def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=Non nc = prediction.shape[2] - 5 # number of classes xc = prediction[..., 4] > conf_thres # candidates + # Checks + assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0' + assert 0 <= iou_thres 
<= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0' + # Settings min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height max_det = 300 # maximum number of detections per image From b292837e36e593df38340f087172bf7bef04cdfc Mon Sep 17 00:00:00 2001 From: jylink Date: Tue, 4 May 2021 01:01:29 +0800 Subject: [PATCH 0184/1976] Fix ONNX export using --grid --simplify --dynamic simultaneously (#2982) * Update yolo.py * Update export.py * fix export grid * Update export.py, remove detect export attribute * rearrange if order * remove --grid, default inplace=False * rename exp_dynamic to onnx_dynamic, comment * replace bs with 1 in anchor_grid[i] index 0 * Update export.py Co-authored-by: Glenn Jocher --- models/export.py | 10 ++++++---- models/yolo.py | 7 +++---- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/models/export.py b/models/export.py index 90855d2588da..6a9f1df57e8f 100644 --- a/models/export.py +++ b/models/export.py @@ -26,9 +26,9 @@ parser.add_argument('--weights', type=str, default='./yolov5s.pt', help='weights path') parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image size') # height, width parser.add_argument('--batch-size', type=int, default=1, help='batch size') - parser.add_argument('--grid', action='store_true', help='export Detect() layer grid') parser.add_argument('--device', default='cpu', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') parser.add_argument('--half', action='store_true', help='FP16 half-precision export') + parser.add_argument('--inplace', action='store_true', help='set YOLOv5 Detect() inplace=True') parser.add_argument('--dynamic', action='store_true', help='dynamic ONNX axes') # ONNX-only parser.add_argument('--simplify', action='store_true', help='simplify ONNX model') # ONNX-only opt = parser.parse_args() @@ -60,9 +60,11 @@ m.act = Hardswish() elif isinstance(m.act, nn.SiLU): m.act = SiLU() - # elif isinstance(m, models.yolo.Detect): - # m.forward = m.forward_export # assign forward (optional) - model.model[-1].export = not opt.grid # set Detect() layer grid export + elif isinstance(m, models.yolo.Detect): + m.inplace = opt.inplace + m.onnx_dynamic = opt.dynamic + # m.forward = m.forward_export # assign forward (optional) + for _ in range(2): y = model(img) # dry runs print(f"\n{colorstr('PyTorch:')} starting from {opt.weights} ({file_size(opt.weights):.1f} MB)") diff --git a/models/yolo.py b/models/yolo.py index 520047ff3a99..314fd806f5e7 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -24,7 +24,7 @@ class Detect(nn.Module): stride = None # strides computed during build - export = False # onnx export + onnx_dynamic = False # ONNX export parameter def __init__(self, nc=80, anchors=(), ch=(), inplace=True): # detection layer super(Detect, self).__init__() @@ -42,14 +42,13 @@ def __init__(self, nc=80, anchors=(), ch=(), inplace=True): # detection layer def forward(self, x): # x = x.copy() # for profiling z = [] # inference output - self.training |= self.export for i in range(self.nl): x[i] = self.m[i](x[i]) # conv bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() if not self.training: # inference - if self.grid[i].shape[2:4] != x[i].shape[2:4]: + if self.grid[i].shape[2:4] != x[i].shape[2:4] or self.onnx_dynamic: self.grid[i] = self._make_grid(nx, ny).to(x[i].device) y = x[i].sigmoid() @@ -58,7 +57,7 @@ def forward(self, x): y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * 
self.anchor_grid[i] # wh else: # for YOLOv5 on AWS Inferentia https://github.com/ultralytics/yolov5/pull/2953 xy = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy - wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh + wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i].view(1, self.na, 1, 1, 2) # wh y = torch.cat((xy, wh, y[..., 4:]), -1) z.append(y.view(bs, -1, self.no)) From 5189b3addb954e7fed4d68eb6ab5cd209665503e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 4 May 2021 02:25:14 +0200 Subject: [PATCH 0185/1976] Objects365 Update --- data/objects365.yaml | 40 ++++++++++++++++++++++++++++++ data/scripts/get_objects365.py | 45 ---------------------------------- utils/general.py | 9 ++++--- 3 files changed, 46 insertions(+), 48 deletions(-) delete mode 100644 data/scripts/get_objects365.py diff --git a/data/objects365.yaml b/data/objects365.yaml index 14464694f53a..5d19ab5cabbb 100644 --- a/data/objects365.yaml +++ b/data/objects365.yaml @@ -55,3 +55,43 @@ names: [ 'Person', 'Sneakers', 'Chair', 'Other Shoes', 'Hat', 'Car', 'Lamp', 'Gl 'Scallop', 'Noddles', 'Comb', 'Dumpling', 'Oyster', 'Table Tennis paddle', 'Cosmetics Brush/Eyeliner Pencil', 'Chainsaw', 'Eraser', 'Lobster', 'Durian', 'Okra', 'Lipstick', 'Cosmetics Mirror', 'Curling', 'Table Tennis' ] + +# download command/URL (optional) -------------------------------------------------------------------------------------- +download: | + from pycocotools.coco import COCO + from tqdm import tqdm + + from utils.general import download, Path + + # Make Directories + dir = Path('../datasets/objects365') # dataset directory + for p in 'images', 'labels': + (dir / p).mkdir(parents=True, exist_ok=True) + for q in 'train', 'val': + (dir / p / q).mkdir(parents=True, exist_ok=True) + + # Download + url = "https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/train/" + download([url + 'zhiyuan_objv2_train.tar.gz'], dir=dir) # annotations json + download([url + f for f in [f'patch{i}.tar.gz' for i in range(51)]], dir=dir / 'images' / 'train', + curl=True, delete=False, threads=8) + + # Labels + coco = COCO(dir / 'zhiyuan_objv2_train.json') + names = [x["name"] for x in coco.loadCats(coco.getCatIds())] + for cid, cat in enumerate(names): + catIds = coco.getCatIds(catNms=[cat]) + imgIds = coco.getImgIds(catIds=catIds) + for im in tqdm(coco.loadImgs(imgIds), desc=f'Class {cid}/{len(names)} {cat}'): + width, height = im["width"], im["height"] + path = Path(im["file_name"]) # image filename + try: + with open(dir / 'labels' / 'train' / path.with_suffix('.txt').name, 'a') as file: + annIds = coco.getAnnIds(imgIds=im["id"], catIds=catIds, iscrowd=None) + for a in coco.loadAnns(annIds): + x, y, w, h = a['bbox'] # bounding box in xywh (xy top-left corner) + x, y = x + w / 2, y + h / 2 # xy to center + file.write(f"{cid} {x / width:.5f} {y / height:.5f} {w / width:.5f} {h / height:.5f}\n") + + except Exception as e: + print(e) diff --git a/data/scripts/get_objects365.py b/data/scripts/get_objects365.py deleted file mode 100644 index d77b26d60691..000000000000 --- a/data/scripts/get_objects365.py +++ /dev/null @@ -1,45 +0,0 @@ -# Objects365 https://www.objects365.org labels JSON to YOLO script -# 1. Download Objects365 from the Objects365 website and unpack all images in datasets/objects365/images -# 2. Place this file and the zhiyuan_objv2_train.json file in datasets/objects365 -# 3.
Execute this file from the datasets/objects365 path -# /datasets -# /objects365 -# /images -# /labels - - -from pycocotools.coco import COCO - -from utils.general import download, Path - -# Make Directories -dir = Path('../datasets/objects365') # dataset directory -for p in 'images', 'labels': - (dir / p).mkdir(parents=True, exist_ok=True) - for q in 'train', 'val': - (dir / p / q).mkdir(parents=True, exist_ok=True) - -# Download -url = "https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/train/" -download([url + 'zhiyuan_objv2_train.tar.gz'], dir=dir) # annotations json -download([url + f for f in [f'patch{i}.tar.gz' for i in range(51)]], dir=dir / 'images' / 'train', curl=True, threads=8) - -# Labels -coco = COCO(dir / 'zhiyuan_objv2_train.json') -names = [x["name"] for x in coco.loadCats(coco.getCatIds())] -for categoryId, cat in enumerate(names): - catIds = coco.getCatIds(catNms=[cat]) - imgIds = coco.getImgIds(catIds=catIds) - for im in coco.loadImgs(imgIds): - width, height = im["width"], im["height"] - path = Path(im["file_name"]) # image filename - try: - with open(dir / 'labels' / 'train' / path.with_suffix('.txt').name, 'a') as file: - annIds = coco.getAnnIds(imgIds=im["id"], catIds=catIds, iscrowd=None) - for a in coco.loadAnns(annIds): - x, y, w, h = a['bbox'] # bounding box in xywh (xy top-left corner) - x, y = x + w / 2, y + h / 2 # xy to center - file.write(f"{categoryId} {x / width:.5f} {y / height:.5f} {w / width:.5f} {h / height:.5f}\n") - - except Exception as e: - print(e) diff --git a/utils/general.py b/utils/general.py index fd20a7e677e0..4de240a03c83 100755 --- a/utils/general.py +++ b/utils/general.py @@ -193,7 +193,7 @@ def check_dataset(dict): raise Exception('Dataset not found.') -def download(url, dir='.', unzip=True, curl=False, threads=1): +def download(url, dir='.', unzip=True, delete=True, curl=False, threads=1): # Multi-threaded file download and unzip function def download_one(url, dir): # Download 1 file @@ -207,9 +207,12 @@ def download_one(url, dir): if unzip and f.suffix in ('.zip', '.gz'): print(f'Unzipping {f}...') if f.suffix == '.zip': - os.system(f'unzip -qo {f} -d {dir} && rm {f}') # unzip -quiet -overwrite + s = f'unzip -qo {f} -d {dir} && rm {f}' # unzip -quiet -overwrite elif f.suffix == '.gz': - os.system(f'tar xfz {f} --directory {f.parent} && rm {f}') # unzip + s = f'tar xfz {f} --directory {f.parent}' # unzip + if delete: # delete zip file after unzip + s += f' && rm {f}' + os.system(s) dir = Path(dir) dir.mkdir(parents=True, exist_ok=True) # make directory From 8cab907f60b0debdde5107fae985d8b444db72af Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 4 May 2021 23:37:31 +0200 Subject: [PATCH 0186/1976] download() ThreadPool update (#3027) * download() ThreadPool update * update train image count * cid + 1 --- data/objects365.yaml | 11 ++++++++--- utils/general.py | 5 ++++- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/data/objects365.yaml b/data/objects365.yaml index 5d19ab5cabbb..eb99995903cf 100644 --- a/data/objects365.yaml +++ b/data/objects365.yaml @@ -6,7 +6,7 @@ # /yolov5 # train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/] -train: ../datasets/objects365/images/train # 1.7 Million images +train: ../datasets/objects365/images/train # 1742289 images val: ../datasets/objects365/images/val # 5570 images # number of classes @@ -72,17 +72,22 @@ download: | # Download url =
"https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/train/" - download([url + 'zhiyuan_objv2_train.tar.gz'], dir=dir) # annotations json + download([url + 'zhiyuan_objv2_train.tar.gz'], dir=dir, delete=False) # annotations json download([url + f for f in [f'patch{i}.tar.gz' for i in range(51)]], dir=dir / 'images' / 'train', curl=True, delete=False, threads=8) + # Move + train = dir / 'images' / 'train' + for f in tqdm(train.rglob('*.jpg'), desc=f'Moving images'): + f.rename(train / f.name) # move to /images/train + # Labels coco = COCO(dir / 'zhiyuan_objv2_train.json') names = [x["name"] for x in coco.loadCats(coco.getCatIds())] for cid, cat in enumerate(names): catIds = coco.getCatIds(catNms=[cat]) imgIds = coco.getImgIds(catIds=catIds) - for im in tqdm(coco.loadImgs(imgIds), desc=f'Class {cid}/{len(names)} {cat}'): + for im in tqdm(coco.loadImgs(imgIds), desc=f'Class {cid + 1}/{len(names)} {cat}'): width, height = im["width"], im["height"] path = Path(im["file_name"]) # image filename try: diff --git a/utils/general.py b/utils/general.py index 4de240a03c83..c47432b4529c 100755 --- a/utils/general.py +++ b/utils/general.py @@ -217,7 +217,10 @@ def download_one(url, dir): dir = Path(dir) dir.mkdir(parents=True, exist_ok=True) # make directory if threads > 1: - ThreadPool(threads).imap(lambda x: download_one(*x), zip(url, repeat(dir))) # multi-threaded + pool = ThreadPool(threads) + pool.imap(lambda x: download_one(*x), zip(url, repeat(dir))) # multi-threaded + pool.close() + pool.join() else: for u in tuple(url) if isinstance(url, str) else url: download_one(u, dir) From 4c7d686d7e524736c69b193441a64376140ba085 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 5 May 2021 00:16:35 +0200 Subject: [PATCH 0187/1976] FROM nvcr.io/nvidia/pytorch:21.04-py3 (#3030) --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index b47e5bbff194..423c9c5b37f9 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # Start FROM Nvidia PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch -FROM nvcr.io/nvidia/pytorch:21.03-py3 +FROM nvcr.io/nvidia/pytorch:21.04-py3 # Install linux packages RUN apt update && apt install -y zip htop screen libgl1-mesa-glx From 85b6fdda0f730c6b7b79744b0e5d83fc8e07a900 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 5 May 2021 01:00:12 +0200 Subject: [PATCH 0188/1976] FROM nvcr.io/nvidia/pytorch:21.03-py3 Issues with 21.04 --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 423c9c5b37f9..b47e5bbff194 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # Start FROM Nvidia PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch -FROM nvcr.io/nvidia/pytorch:21.04-py3 +FROM nvcr.io/nvidia/pytorch:21.03-py3 # Install linux packages RUN apt update && apt install -y zip htop screen libgl1-mesa-glx From b18ca31ff5412d30ee56eb9c4adbe731f9d2148e Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Wed, 5 May 2021 17:58:19 +0530 Subject: [PATCH 0189/1976] use check_file for --data (#3035) --- utils/wandb_logging/wandb_utils.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/utils/wandb_logging/wandb_utils.py b/utils/wandb_logging/wandb_utils.py index 928f577fd460..4a676551fdef 100644 --- a/utils/wandb_logging/wandb_utils.py +++ b/utils/wandb_logging/wandb_utils.py @@ -9,7 +9,7 @@ sys.path.append(str(Path(__file__).parent.parent.parent)) # add utils/ to path from 
utils.datasets import LoadImagesAndLabels from utils.datasets import img2label_paths -from utils.general import colorstr, xywh2xyxy, check_dataset +from utils.general import colorstr, xywh2xyxy, check_dataset, check_file try: import wandb @@ -54,7 +54,7 @@ def check_wandb_resume(opt): def process_wandb_config_ddp_mode(opt): - with open(opt.data) as f: + with open(check_file(opt.data)) as f: data_dict = yaml.safe_load(f) # data dict train_dir, val_dir = None, None if isinstance(data_dict['train'], str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX): @@ -115,7 +115,7 @@ def __init__(self, opt, name, run_id, data_dict, job_type='Training'): def check_and_upload_dataset(self, opt): assert wandb, 'Install wandb to upload dataset' check_dataset(self.data_dict) - config_path = self.log_dataset_artifact(opt.data, + config_path = self.log_dataset_artifact(check_file(opt.data), opt.single_cls, 'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem) print("Created dataset config file ", config_path) From b8be76f915207ef0759bfb0f1c0707c79877b763 Mon Sep 17 00:00:00 2001 From: Jiacong Fang Date: Thu, 6 May 2021 19:44:14 +0800 Subject: [PATCH 0190/1976] Add get_coco128.sh for downloading the coco128 dataset (#3047) * Add get_coco128.sh * Update get_coco128.sh Co-authored-by: Glenn Jocher --- data/scripts/get_coco128.sh | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) create mode 100644 data/scripts/get_coco128.sh diff --git a/data/scripts/get_coco128.sh b/data/scripts/get_coco128.sh new file mode 100644 index 000000000000..395043b5b2dc --- /dev/null +++ b/data/scripts/get_coco128.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# COCO128 dataset https://www.kaggle.com/ultralytics/coco128 +# Download command: bash data/scripts/get_coco128.sh +# Train command: python train.py --data coco128.yaml +# Default dataset location is next to /yolov5: +# /parent_folder +# /coco128 +# /yolov5 + +# Download/unzip images and labels +d='../' # unzip directory +url=https://github.com/ultralytics/yolov5/releases/download/v1.0/ +f='coco128.zip' # or 'coco2017labels-segments.zip', 68 MB +echo 'Downloading' $url$f ' ...' 
+curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & # download, unzip, remove in background + +wait # finish background tasks From 3ef3a95cfa536f3977676b6ed18f7bebf391fa2c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 6 May 2021 17:23:33 +0200 Subject: [PATCH 0191/1976] Do not optimize CoreML TorchScript model (#3055) --- models/export.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/models/export.py b/models/export.py index 6a9f1df57e8f..b6058158fa97 100644 --- a/models/export.py +++ b/models/export.py @@ -75,8 +75,7 @@ print(f'\n{prefix} starting export with torch {torch.__version__}...') f = opt.weights.replace('.pt', '.torchscript.pt') # filename ts = torch.jit.trace(model, img, strict=False) - ts = optimize_for_mobile(ts) # https://pytorch.org/tutorials/recipes/script_optimized.html - ts.save(f) + optimize_for_mobile(ts).save(f) # https://pytorch.org/tutorials/recipes/script_optimized.html print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') except Exception as e: print(f'{prefix} export failure: {e}') From f2de1ad2aa03542077143563eb32c4096b08a472 Mon Sep 17 00:00:00 2001 From: kepler62f Date: Fri, 7 May 2021 18:28:55 +0800 Subject: [PATCH 0192/1976] Comment fix (#3058) Co-authored-by: kepler62f <> --- test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test.py b/test.py index 0093b8c09f54..f8936d3b4f9d 100644 --- a/test.py +++ b/test.py @@ -188,8 +188,8 @@ def test(data, # Per target class for cls in torch.unique(tcls_tensor): - ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1) # prediction indices - pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1) # target indices + ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1) # target indices + pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1) # prediction indices # Search for detections if pi.shape[0]: From e97d129db40d3b3592fca7d1c8a6fe39133e7e47 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 7 May 2021 13:10:07 +0200 Subject: [PATCH 0193/1976] Update export.py with --train mode argument (#3066) --- models/export.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/models/export.py b/models/export.py index b6058158fa97..acf83622aa75 100644 --- a/models/export.py +++ b/models/export.py @@ -29,6 +29,7 @@ parser.add_argument('--device', default='cpu', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') parser.add_argument('--half', action='store_true', help='FP16 half-precision export') parser.add_argument('--inplace', action='store_true', help='set YOLOv5 Detect() inplace=True') + parser.add_argument('--train', action='store_true', help='model.train() mode') parser.add_argument('--dynamic', action='store_true', help='dynamic ONNX axes') # ONNX-only parser.add_argument('--simplify', action='store_true', help='simplify ONNX model') # ONNX-only opt = parser.parse_args() @@ -53,6 +54,8 @@ # Update model if opt.half: img, model = img.half(), model.half() # to FP16 + if opt.train: + model.train() # training mode (no grid construction in Detect layer) for k, m in model.named_modules(): m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility if isinstance(m, models.common.Conv): # assign export-friendly activations From d2a17289c99ad45cb901ea81db5932fa0ca9b711 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Fri, 7 May 2021 23:23:56 +0530 Subject: [PATCH 0194/1976] Explicitly convert artifact path to posix_path (#3067) * Explicitly convert artifact path to posix_path * Remove redudant str() casting --- utils/wandb_logging/wandb_utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/wandb_logging/wandb_utils.py b/utils/wandb_logging/wandb_utils.py index 4a676551fdef..72a11018b429 100644 --- a/utils/wandb_logging/wandb_utils.py +++ b/utils/wandb_logging/wandb_utils.py @@ -158,7 +158,8 @@ def setup_training(self, opt, data_dict): def download_dataset_artifact(self, path, alias): if isinstance(path, str) and path.startswith(WANDB_ARTIFACT_PREFIX): - dataset_artifact = wandb.use_artifact(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ":" + alias) + artifact_path = Path(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ":" + alias) + dataset_artifact = wandb.use_artifact(artifact_path.as_posix()) assert dataset_artifact is not None, "'Error: W&B dataset artifact doesn\'t exist'" datadir = dataset_artifact.download() return datadir, dataset_artifact From 251aeafcb16ebc4c9d9a6641b3677aaac2f2d2cb Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 8 May 2021 19:06:12 +0200 Subject: [PATCH 0195/1976] Update P5 + P6 model ensembling (#3082) --- models/experimental.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/models/experimental.py b/models/experimental.py index 85a22d42fa54..afa787907104 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -130,7 +130,8 @@ def attempt_load(weights, map_location=None, inplace=True): if len(model) == 1: return model[-1] # return model else: - print('Ensemble created with %s\n' % weights) - for k in ['names', 'stride']: + print(f'Ensemble created with {weights}\n') + for k in ['names']: setattr(model, k, getattr(model[-1], k)) + model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride # max stride return model # return ensemble From 91547edec1a33121b3607e8c946e051514182b4f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 9 May 2021 13:01:58 +0200 Subject: [PATCH 0196/1976] Update detect.py (#3087) * Update detect.py * Update detect.py --- detect.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/detect.py b/detect.py index af1660b6608f..0222a91253fa 100644 --- a/detect.py +++ b/detect.py @@ -5,7 +5,6 @@ import cv2 import torch import torch.backends.cudnn as cudnn -from numpy import random from models.experimental import attempt_load from utils.datasets import LoadStreams, LoadImages @@ -79,7 +78,7 @@ def 
detect(opt): # Process detections for i, det in enumerate(pred): # detections per image if webcam: # batch_size >= 1 - p, s, im0, frame = path[i], '%g: ' % i, im0s[i].copy(), dataset.count + p, s, im0, frame = path[i], f'{i}: ', im0s[i].copy(), dataset.count else: p, s, im0, frame = path, '', im0s.copy(), getattr(dataset, 'frame', 0) @@ -108,7 +107,6 @@ def detect(opt): if save_img or opt.save_crop or view_img: # Add bbox to image c = int(cls) # integer class label = None if opt.hide_labels else (names[c] if opt.hide_conf else f'{names[c]} {conf:.2f}') - plot_one_box(xyxy, im0, label=label, color=colors(c, True), line_thickness=opt.line_thickness) if opt.save_crop: save_one_box(xyxy, im0s, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) From 57b0d3a60593cb32530ee84c6c51189098b8bf74 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 9 May 2021 15:33:38 +0200 Subject: [PATCH 0197/1976] Add check_python() (#3088) * Add check_python() Checks python version against minimum version of 3.7.0. * remove packaging dependency * refactor import --- utils/general.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index c47432b4529c..a4c745d1dcaf 100755 --- a/utils/general.py +++ b/utils/general.py @@ -16,6 +16,7 @@ import cv2 import numpy as np import pandas as pd +import pkg_resources as pkg import torch import torchvision import yaml @@ -107,10 +108,19 @@ def check_git_status(): print(e) +def check_python(minimum='3.7.0', required=True): + # Check current python version vs. required python version + current = platform.python_version() + result = pkg.parse_version(current) >= pkg.parse_version(minimum) + if required: + assert result, f'Python {minimum} required by YOLOv5, but Python {current} is currently installed' + return result + + def check_requirements(requirements='requirements.txt', exclude=()): # Check installed dependencies meet requirements (pass *.txt file or list of packages) - import pkg_resources as pkg prefix = colorstr('red', 'bold', 'requirements:') + check_python() # check python version if isinstance(requirements, (str, Path)): # requirements.txt file file = Path(requirements) if not file.exists(): From 525f4f86a92a8037aec9af34460ffa2b83f1d404 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 10 May 2021 12:13:44 +0200 Subject: [PATCH 0198/1976] Add --optimize argument (#3093) Fix for c++ runtime errors in https://github.com/ultralytics/yolov5/issues/2973 --- models/export.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/models/export.py b/models/export.py index acf83622aa75..3415088c787a 100644 --- a/models/export.py +++ b/models/export.py @@ -30,6 +30,7 @@ parser.add_argument('--half', action='store_true', help='FP16 half-precision export') parser.add_argument('--inplace', action='store_true', help='set YOLOv5 Detect() inplace=True') parser.add_argument('--train', action='store_true', help='model.train() mode') + parser.add_argument('--optimize', action='store_true', help='optimize TorchScript for mobile') # TorchScript-only parser.add_argument('--dynamic', action='store_true', help='dynamic ONNX axes') # ONNX-only parser.add_argument('--simplify', action='store_true', help='simplify ONNX model') # ONNX-only opt = parser.parse_args() @@ -78,7 +79,7 @@ print(f'\n{prefix} starting export with torch {torch.__version__}...') f = opt.weights.replace('.pt', '.torchscript.pt') # filename ts = torch.jit.trace(model, img, strict=False) - optimize_for_mobile(ts).save(f) # 
https://pytorch.org/tutorials/recipes/script_optimized.html + (optimize_for_mobile(ts) if opt.optimize else ts).save(f) print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') except Exception as e: print(f'{prefix} export failure: {e}') @@ -123,7 +124,6 @@ import coremltools as ct print(f'{prefix} starting export with coremltools {ct.__version__}...') - # convert model from torchscript and apply pixel scaling as per detect.py model = ct.convert(ts, inputs=[ct.ImageType(name='image', shape=img.shape, scale=1 / 255.0, bias=[0, 0, 0])]) f = opt.weights.replace('.pt', '.mlmodel') # filename model.save(f) From 60fe54449d67827132c56d65db722525fe78f4c9 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 10 May 2021 15:26:23 +0200 Subject: [PATCH 0199/1976] Update train.py (#3099) --- train.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/train.py b/train.py index 5bee57fe0bbe..5330e4ac0ecd 100644 --- a/train.py +++ b/train.py @@ -367,8 +367,6 @@ def train(hyp, opt, device, tb_writer=None): # Write with open(results_file, 'a') as f: f.write(s + '%10.4g' * 7 % results + '\n') # append metrics, val_loss - if len(opt.name) and opt.bucket: - os.system('gsutil cp %s gs://%s/results/results%s.txt' % (results_file, opt.bucket, opt.name)) # Log tags = ['train/box_loss', 'train/obj_loss', 'train/cls_loss', # train loss From d6a71ae5f27c2b75c5a61846d5a756db534757ad Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 10 May 2021 16:04:10 +0200 Subject: [PATCH 0200/1976] Update GlobalWheat2020.yaml test: # 1276 images (#3101) --- data/GlobalWheat2020.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/GlobalWheat2020.yaml b/data/GlobalWheat2020.yaml index b6f812d70383..f45182b43e25 100644 --- a/data/GlobalWheat2020.yaml +++ b/data/GlobalWheat2020.yaml @@ -19,7 +19,7 @@ train: # 3422 images val: # 748 images (WARNING: train set contains ethz_1) - ../datasets/GlobalWheat2020/images/ethz_1 -test: # 1276 +test: # 1276 images - ../datasets/GlobalWheat2020/images/utokyo_1 - ../datasets/GlobalWheat2020/images/utokyo_2 - ../datasets/GlobalWheat2020/images/nau_1 From 25f8ab835ef82a5664f6434934c7f40088117f65 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 10 May 2021 16:39:37 +0200 Subject: [PATCH 0201/1976] detect.py streaming source `--save-crop` bug fix (#3102) * detect.py streaming source --save-crop bug fix Possible fix for #3100. * () parenthesis --- detect.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/detect.py b/detect.py index 0222a91253fa..0001f93704bf 100644 --- a/detect.py +++ b/detect.py @@ -87,6 +87,7 @@ def detect(opt): txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # img.txt s += '%gx%g ' % img.shape[2:] # print string gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh + imc = im0.copy() if opt.save_crop else im0 # for opt.save_crop if len(det): # Rescale boxes from img_size to im0 size det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round() @@ -109,7 +110,7 @@ def detect(opt): label = None if opt.hide_labels else (names[c] if opt.hide_conf else f'{names[c]} {conf:.2f}') plot_one_box(xyxy, im0, label=label, color=colors(c, True), line_thickness=opt.line_thickness) if opt.save_crop: - save_one_box(xyxy, im0s, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) + save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) # Print time (inference + NMS) print(f'{s}Done. 
({t2 - t1:.3f}s)') From abfcf9eb79877971acd238cafe6149711c5056ad Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 10 May 2021 17:07:16 +0200 Subject: [PATCH 0202/1976] Replace print() with logging.info() in trainloader (#3103) Might indirectly help #3095 by providing better visibility on source of corruption. --- utils/datasets.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index 43e4e5973a25..f380b20b97d4 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -490,14 +490,14 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): x[im_file] = [l, shape, segments] except Exception as e: nc += 1 - print(f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}') + logging.info(f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}') pbar.desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels... " \ f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted" pbar.close() if nf == 0: - print(f'{prefix}WARNING: No labels found in {path}. See {help_url}') + logging.info(f'{prefix}WARNING: No labels found in {path}. See {help_url}') x['hash'] = get_hash(self.label_files + self.img_files) x['results'] = nf, nm, ne, nc, i + 1 From 52c05707d6ef02ed34c154b84e0f6a5d1f36ae37 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 12 May 2021 17:41:11 +0200 Subject: [PATCH 0203/1976] Update Colors() (#3046) * Update Colors() * update colors * update colors --- utils/plots.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/utils/plots.py b/utils/plots.py index e5a2d99e0abd..cb180c2a928e 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -28,7 +28,10 @@ class Colors: # Ultralytics color palette https://ultralytics.com/ def __init__(self): - self.palette = [self.hex2rgb(c) for c in matplotlib.colors.TABLEAU_COLORS.values()] + # hex = matplotlib.colors.TABLEAU_COLORS.values() + hex = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB', + '2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7') + self.palette = [self.hex2rgb('#' + c) for c in hex] self.n = len(self.palette) def __call__(self, i, bgr=False): From c1c7eb023f4b55e063efc1e450928adb59189dd1 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 12 May 2021 18:23:50 +0200 Subject: [PATCH 0204/1976] Update JSON response (#3139) --- utils/flask_rest_api/README.md | 65 +++++++++++++++++++++------------- 1 file changed, 41 insertions(+), 24 deletions(-) diff --git a/utils/flask_rest_api/README.md b/utils/flask_rest_api/README.md index 0cdc51be692d..324c2416dcd9 100644 --- a/utils/flask_rest_api/README.md +++ b/utils/flask_rest_api/README.md @@ -1,5 +1,5 @@ # Flask REST API -[REST](https://en.wikipedia.org/wiki/Representational_state_transfer) [API](https://en.wikipedia.org/wiki/API)s are commonly used to expose Machine Learning (ML) models to other services. This folder contains an example REST API created using Flask to expose the `yolov5s` model from [PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5/). +[REST](https://en.wikipedia.org/wiki/Representational_state_transfer) [API](https://en.wikipedia.org/wiki/API)s are commonly used to expose Machine Learning (ML) models to other services. This folder contains an example REST API created using Flask to expose the YOLOv5s model from [PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5/). 
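At a high level the service is just a thin Flask wrapper around a PyTorch Hub model. The sketch below is illustrative only: the route and the `image` form field mirror the curl example further down, but the script shipped in this folder may differ in its details.

```python
# Minimal Flask wrapper around a PyTorch Hub YOLOv5 model (illustrative sketch)
import io

import torch
from flask import Flask, request
from PIL import Image

app = Flask(__name__)
model = torch.hub.load('ultralytics/yolov5', 'yolov5s')  # load pretrained YOLOv5s once at startup


@app.route('/v1/object-detection/yolov5s', methods=['POST'])
def predict():
    im = Image.open(io.BytesIO(request.files['image'].read()))  # decode the uploaded 'image' form field
    results = model(im, size=640)  # run inference at 640 pixels
    return results.pandas().xywhn[0].to_json(orient='records')  # normalized-xywh detections as JSON


if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)
```
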
## Requirements @@ -22,30 +22,47 @@ Then use [curl](https://curl.se/) to perform a request: $ curl -X POST -F image=@zidane.jpg 'http://localhost:5000/v1/object-detection/yolov5s'` ``` -The model inference results are returned: +The model inference results are returned as a JSON response: -```shell -[{'class': 0, - 'confidence': 0.8197850585, - 'name': 'person', - 'xmax': 1159.1403808594, - 'xmin': 750.912902832, - 'ymax': 711.2583007812, - 'ymin': 44.0350036621}, - {'class': 0, - 'confidence': 0.5667674541, - 'name': 'person', - 'xmax': 1065.5523681641, - 'xmin': 116.0448303223, - 'ymax': 713.8904418945, - 'ymin': 198.4603881836}, - {'class': 27, - 'confidence': 0.5661227107, - 'name': 'tie', - 'xmax': 516.7975463867, - 'xmin': 416.6880187988, - 'ymax': 717.0524902344, - 'ymin': 429.2020568848}] +```json +[ + { + "class": 0, + "confidence": 0.8900438547, + "height": 0.9318675399, + "name": "person", + "width": 0.3264600933, + "xcenter": 0.7438579798, + "ycenter": 0.5207948685 + }, + { + "class": 0, + "confidence": 0.8440024257, + "height": 0.7155083418, + "name": "person", + "width": 0.6546785235, + "xcenter": 0.427829951, + "ycenter": 0.6334488392 + }, + { + "class": 27, + "confidence": 0.3771208823, + "height": 0.3902671337, + "name": "tie", + "width": 0.0696444362, + "xcenter": 0.3675483763, + "ycenter": 0.7991207838 + }, + { + "class": 27, + "confidence": 0.3527112305, + "height": 0.1540903747, + "name": "tie", + "width": 0.0336618312, + "xcenter": 0.7814827561, + "ycenter": 0.5065554976 + } +] ``` An example python script to perform inference using [requests](https://docs.python-requests.org/en/master/) is given in `example_request.py` From 7918eed040e49a179228fe25043e95e8f2f82ce8 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 12 May 2021 18:30:31 +0200 Subject: [PATCH 0205/1976] Update https://ultralytics.com/images/zidane.jpg (#3140) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index d98bd7bfa7da..ba4424fa550c 100755 --- a/README.md +++ b/README.md @@ -130,7 +130,7 @@ import torch model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # Image -img = 'https://github.com/ultralytics/yolov5/raw/master/data/images/zidane.jpg' +img = 'https://ultralytics.com/images/zidane.jpg' # Inference results = model(img) From bd6f6a784a5d70a2da001bf4393a2a44f1c4d608 Mon Sep 17 00:00:00 2001 From: KC-Zhang Date: Wed, 12 May 2021 12:50:35 -0400 Subject: [PATCH 0206/1976] Add yolov5/__init__.py (#3127) --- __init__.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 __init__.py diff --git a/__init__.py b/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 From d9b4e6b748e370d9aaf06825e3ed1ddddf702cd7 Mon Sep 17 00:00:00 2001 From: Cristi Fati Date: Wed, 12 May 2021 20:46:32 +0300 Subject: [PATCH 0207/1976] Add `--include torchscript onnx coreml` argument (#3137) * Allow users to skip exporting in formats that they don't care about * Correct comments * Update export.py renamed --skip-format to --exclude * Switched format from exclude to include (as instructed by @glenn-jocher) * cleanup Co-authored-by: Glenn Jocher --- models/export.py | 116 +++++++++++++++++++++++++---------------------- 1 file changed, 61 insertions(+), 55 deletions(-) diff --git a/models/export.py b/models/export.py index 3415088c787a..662509b3ea78 100644 --- a/models/export.py +++ b/models/export.py @@ -1,7 +1,7 @@ -"""Exports a YOLOv5 *.pt model to ONNX and TorchScript formats +"""Exports a YOLOv5 *.pt model to TorchScript, ONNX, 
CoreML formats Usage: - $ export PYTHONPATH="$PWD" && python models/export.py --weights yolov5s.pt --img 640 --batch 1 + $ python path/to/models/export.py --weights yolov5s.pt --img 640 --batch 1 """ import argparse @@ -27,6 +27,7 @@ parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image size') # height, width parser.add_argument('--batch-size', type=int, default=1, help='batch size') parser.add_argument('--device', default='cpu', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + parser.add_argument('--include', nargs='+', default=['torchscript', 'onnx', 'coreml'], help='include formats') parser.add_argument('--half', action='store_true', help='FP16 half-precision export') parser.add_argument('--inplace', action='store_true', help='set YOLOv5 Detect() inplace=True') parser.add_argument('--train', action='store_true', help='model.train() mode') @@ -35,6 +36,7 @@ parser.add_argument('--simplify', action='store_true', help='simplify ONNX model') # ONNX-only opt = parser.parse_args() opt.img_size *= 2 if len(opt.img_size) == 1 else 1 # expand + opt.include = [x.lower() for x in opt.include] print(opt) set_logging() t = time.time() @@ -47,7 +49,7 @@ # Checks gs = int(max(model.stride)) # grid size (max stride) opt.img_size = [check_img_size(x, gs) for x in opt.img_size] # verify img_size are gs-multiples - assert not (opt.device.lower() == "cpu" and opt.half), '--half only compatible with GPU export, i.e. use --device 0' + assert not (opt.device.lower() == 'cpu' and opt.half), '--half only compatible with GPU export, i.e. use --device 0' # Input img = torch.zeros(opt.batch_size, 3, *opt.img_size).to(device) # image size(1,3,320,192) iDetection @@ -74,62 +76,66 @@ print(f"\n{colorstr('PyTorch:')} starting from {opt.weights} ({file_size(opt.weights):.1f} MB)") # TorchScript export ----------------------------------------------------------------------------------------------- - prefix = colorstr('TorchScript:') - try: - print(f'\n{prefix} starting export with torch {torch.__version__}...') - f = opt.weights.replace('.pt', '.torchscript.pt') # filename - ts = torch.jit.trace(model, img, strict=False) - (optimize_for_mobile(ts) if opt.optimize else ts).save(f) - print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - except Exception as e: - print(f'{prefix} export failure: {e}') + if 'torchscript' in opt.include or 'coreml' in opt.include: + prefix = colorstr('TorchScript:') + try: + print(f'\n{prefix} starting export with torch {torch.__version__}...') + f = opt.weights.replace('.pt', '.torchscript.pt') # filename + ts = torch.jit.trace(model, img, strict=False) + (optimize_for_mobile(ts) if opt.optimize else ts).save(f) + print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + except Exception as e: + print(f'{prefix} export failure: {e}') # ONNX export ------------------------------------------------------------------------------------------------------ - prefix = colorstr('ONNX:') - try: - import onnx - - print(f'{prefix} starting export with onnx {onnx.__version__}...') - f = opt.weights.replace('.pt', '.onnx') # filename - torch.onnx.export(model, img, f, verbose=False, opset_version=12, input_names=['images'], - dynamic_axes={'images': {0: 'batch', 2: 'height', 3: 'width'}, # size(1,3,640,640) - 'output': {0: 'batch', 2: 'y', 3: 'x'}} if opt.dynamic else None) - - # Checks - model_onnx = onnx.load(f) # load onnx model - onnx.checker.check_model(model_onnx) # check onnx model - # 
print(onnx.helper.printable_graph(model_onnx.graph)) # print - - # Simplify - if opt.simplify: - try: - check_requirements(['onnx-simplifier']) - import onnxsim - - print(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...') - model_onnx, check = onnxsim.simplify(model_onnx, - dynamic_input_shape=opt.dynamic, - input_shapes={'images': list(img.shape)} if opt.dynamic else None) - assert check, 'assert check failed' - onnx.save(model_onnx, f) - except Exception as e: - print(f'{prefix} simplifier failure: {e}') - print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - except Exception as e: - print(f'{prefix} export failure: {e}') + if 'onnx' in opt.include: + prefix = colorstr('ONNX:') + try: + import onnx + + print(f'{prefix} starting export with onnx {onnx.__version__}...') + f = opt.weights.replace('.pt', '.onnx') # filename + torch.onnx.export(model, img, f, verbose=False, opset_version=12, input_names=['images'], + dynamic_axes={'images': {0: 'batch', 2: 'height', 3: 'width'}, # size(1,3,640,640) + 'output': {0: 'batch', 2: 'y', 3: 'x'}} if opt.dynamic else None) + + # Checks + model_onnx = onnx.load(f) # load onnx model + onnx.checker.check_model(model_onnx) # check onnx model + # print(onnx.helper.printable_graph(model_onnx.graph)) # print + + # Simplify + if opt.simplify: + try: + check_requirements(['onnx-simplifier']) + import onnxsim + + print(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...') + model_onnx, check = onnxsim.simplify( + model_onnx, + dynamic_input_shape=opt.dynamic, + input_shapes={'images': list(img.shape)} if opt.dynamic else None) + assert check, 'assert check failed' + onnx.save(model_onnx, f) + except Exception as e: + print(f'{prefix} simplifier failure: {e}') + print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + except Exception as e: + print(f'{prefix} export failure: {e}') # CoreML export ---------------------------------------------------------------------------------------------------- - prefix = colorstr('CoreML:') - try: - import coremltools as ct - - print(f'{prefix} starting export with coremltools {ct.__version__}...') - model = ct.convert(ts, inputs=[ct.ImageType(name='image', shape=img.shape, scale=1 / 255.0, bias=[0, 0, 0])]) - f = opt.weights.replace('.pt', '.mlmodel') # filename - model.save(f) - print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - except Exception as e: - print(f'{prefix} export failure: {e}') + if 'coreml' in opt.include: + prefix = colorstr('CoreML:') + try: + import coremltools as ct + + print(f'{prefix} starting export with coremltools {ct.__version__}...') + model = ct.convert(ts, inputs=[ct.ImageType('image', shape=img.shape, scale=1 / 255.0, bias=[0, 0, 0])]) + f = opt.weights.replace('.pt', '.mlmodel') # filename + model.save(f) + print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + except Exception as e: + print(f'{prefix} export failure: {e}') # Finish print(f'\nExport complete ({time.time() - t:.2f}s). 
Visualize with https://github.com/lutzroeder/netron.') From f12cef85a3f97777d4394f4d1fc3bd0fb6bac61c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 12 May 2021 19:49:12 +0200 Subject: [PATCH 0208/1976] TorchScript, ONNX, CoreML Export tutorial title (#3142) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index ba4424fa550c..b25c6fca983c 100755 --- a/README.md +++ b/README.md @@ -72,7 +72,7 @@ $ pip install -r requirements.txt * [Supervisely Ecosystem](https://github.com/ultralytics/yolov5/issues/2518)  🌟 NEW * [Multi-GPU Training](https://github.com/ultralytics/yolov5/issues/475) * [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36)  ⭐ NEW -* [ONNX and TorchScript Export](https://github.com/ultralytics/yolov5/issues/251) +* [TorchScript, ONNX, CoreML Export](https://github.com/ultralytics/yolov5/issues/251) 🚀 * [Test-Time Augmentation (TTA)](https://github.com/ultralytics/yolov5/issues/303) * [Model Ensembling](https://github.com/ultralytics/yolov5/issues/318) * [Model Pruning/Sparsity](https://github.com/ultralytics/yolov5/issues/304) From b098600763b769b07a05001adf367dea6d7231fc Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 12 May 2021 19:56:12 +0200 Subject: [PATCH 0209/1976] Update requirements.txt `onnx>=1.9.0` (#3143) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index fd187eb56cfe..8b14f7421fc6 100755 --- a/requirements.txt +++ b/requirements.txt @@ -21,7 +21,7 @@ pandas # export -------------------------------------- # coremltools>=4.1 -# onnx>=1.8.1 +# onnx>=1.9.0 # scikit-learn==0.19.2 # for coreml quantization # extras -------------------------------------- From 193526695140662a9b2089ba30031cbc9f7104a4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 12 May 2021 20:18:32 +0200 Subject: [PATCH 0210/1976] Scope imports for torch.hub.list() improvement (#3144) --- hubconf.py | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/hubconf.py b/hubconf.py index 898b63b5d0b1..1876441d8a89 100644 --- a/hubconf.py +++ b/hubconf.py @@ -9,16 +9,13 @@ import torch -from models.yolo import Model, attempt_load from utils.general import check_requirements, set_logging -from utils.google_utils import attempt_download -from utils.torch_utils import select_device dependencies = ['torch', 'yaml'] check_requirements(Path(__file__).parent / 'requirements.txt', exclude=('tensorboard', 'pycocotools', 'thop')) -def create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True): +def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True): """Creates a specified YOLOv5 model Arguments: @@ -32,6 +29,10 @@ def create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbos Returns: YOLOv5 pytorch model """ + from models.yolo import Model, attempt_load + from utils.google_utils import attempt_download + from utils.torch_utils import select_device + set_logging(verbose=verbose) fname = Path(name).with_suffix('.pt') # checkpoint filename try: @@ -62,51 +63,51 @@ def create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbos def custom(path='path/to/model.pt', autoshape=True, verbose=True): # YOLOv5 custom or local model - return create(path, autoshape=autoshape, verbose=verbose) + return _create(path, autoshape=autoshape, verbose=verbose) def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True): # 
YOLOv5-small model https://github.com/ultralytics/yolov5 - return create('yolov5s', pretrained, channels, classes, autoshape, verbose) + return _create('yolov5s', pretrained, channels, classes, autoshape, verbose) def yolov5m(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True): # YOLOv5-medium model https://github.com/ultralytics/yolov5 - return create('yolov5m', pretrained, channels, classes, autoshape, verbose) + return _create('yolov5m', pretrained, channels, classes, autoshape, verbose) def yolov5l(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True): # YOLOv5-large model https://github.com/ultralytics/yolov5 - return create('yolov5l', pretrained, channels, classes, autoshape, verbose) + return _create('yolov5l', pretrained, channels, classes, autoshape, verbose) def yolov5x(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True): # YOLOv5-xlarge model https://github.com/ultralytics/yolov5 - return create('yolov5x', pretrained, channels, classes, autoshape, verbose) + return _create('yolov5x', pretrained, channels, classes, autoshape, verbose) def yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True): # YOLOv5-small-P6 model https://github.com/ultralytics/yolov5 - return create('yolov5s6', pretrained, channels, classes, autoshape, verbose) + return _create('yolov5s6', pretrained, channels, classes, autoshape, verbose) def yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True): # YOLOv5-medium-P6 model https://github.com/ultralytics/yolov5 - return create('yolov5m6', pretrained, channels, classes, autoshape, verbose) + return _create('yolov5m6', pretrained, channels, classes, autoshape, verbose) def yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True): # YOLOv5-large-P6 model https://github.com/ultralytics/yolov5 - return create('yolov5l6', pretrained, channels, classes, autoshape, verbose) + return _create('yolov5l6', pretrained, channels, classes, autoshape, verbose) def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True): # YOLOv5-xlarge-P6 model https://github.com/ultralytics/yolov5 - return create('yolov5x6', pretrained, channels, classes, autoshape, verbose) + return _create('yolov5x6', pretrained, channels, classes, autoshape, verbose) if __name__ == '__main__': - model = create(name='yolov5s', pretrained=True, channels=3, classes=80, autoshape=True, verbose=True) # pretrained + model = _create(name='yolov5s', pretrained=True, channels=3, classes=80, autoshape=True, verbose=True) # pretrained # model = custom(path='path/to/model.pt') # custom # Verify inference From 06372b1465f5a58463bf8c32bdf65fc679c17ebf Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 12 May 2021 20:28:26 +0200 Subject: [PATCH 0211/1976] Scope all hubconf.py imports for torch.hub.list() (#3145) * Scope all hubconf.py imports for torch.hub.list() * Update hubconf.py --- hubconf.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/hubconf.py b/hubconf.py index 1876441d8a89..3b3dfe0e9e23 100644 --- a/hubconf.py +++ b/hubconf.py @@ -5,15 +5,8 @@ model = torch.hub.load('ultralytics/yolov5', 'yolov5s') """ -from pathlib import Path - import torch -from utils.general import check_requirements, set_logging - -dependencies = ['torch', 'yaml'] -check_requirements(Path(__file__).parent / 'requirements.txt', exclude=('tensorboard', 'pycocotools', 'thop')) - def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, 
verbose=True): """Creates a specified YOLOv5 model @@ -29,11 +22,16 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo Returns: YOLOv5 pytorch model """ + from pathlib import Path + from models.yolo import Model, attempt_load + from utils.general import check_requirements, set_logging from utils.google_utils import attempt_download from utils.torch_utils import select_device + check_requirements(Path(__file__).parent / 'requirements.txt', exclude=('tensorboard', 'pycocotools', 'thop')) set_logging(verbose=verbose) + fname = Path(name).with_suffix('.pt') # checkpoint filename try: if pretrained and channels == 3 and classes == 80: From 17b0f71538d3e9990e0e6c4b5c7c48375956efa3 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 14 May 2021 17:05:26 +0200 Subject: [PATCH 0212/1976] SKU-110K CVPR2019 Dataset Auto-Download (#3167) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * SKU-110K CVPR2019 Dataset Auto-Download This PR adds the **SKU-110K CVPR2019 Dataset** https://github.com/eg4000/SKU110K_CVPR19 to the list of YOLOv5 🚀 auto-download list. - [x] [COCO](https://cocodataset.org/#home) - [x] [COCO128](https://www.kaggle.com/ultralytics/coco128) - [x] [VOC](http://host.robots.ox.ac.uk/pascal/VOC/) - [x] [Argoverse-HD](http://www.cs.cmu.edu/~mengtial/proj/streaming/) - [x] [VisDrone2019-DET](https://github.com/VisDrone/VisDrone-Dataset) - [x] [GlobalWheat-2020](http://www.global-wheat.com/) - [ ] [KITTI](https://www.kaggle.com/twaldo/kitti-object-detection) - [x] [SKU110](https://github.com/eg4000/SKU110K_CVPR19) (grocery store items) - [ ] [CityScapes](https://www.cityscapes-dataset.com/) - [x] [Objects365](https://www.objects365.org/overview.html) - [ ] [OpenImages](https://storage.googleapis.com/openimages/web/index.html) ```yaml # SKU-110K retail items dataset https://github.com/eg4000/SKU110K_CVPR19 # Train command: python train.py --data SKU-110K.yaml # Default dataset location is next to YOLOv5: # /parent_folder # /datasets/SKU-110K # /yolov5 ``` * Update SKU-110K.yaml * Update SKU-110K.yaml --- data/SKU-110K.yaml | 52 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) create mode 100644 data/SKU-110K.yaml diff --git a/data/SKU-110K.yaml b/data/SKU-110K.yaml new file mode 100644 index 000000000000..a8c1f25b385a --- /dev/null +++ b/data/SKU-110K.yaml @@ -0,0 +1,52 @@ +# SKU-110K retail items dataset https://github.com/eg4000/SKU110K_CVPR19 +# Train command: python train.py --data SKU-110K.yaml +# Default dataset location is next to YOLOv5: +# /parent_folder +# /datasets/SKU-110K +# /yolov5 + + +# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/] +train: ../datasets/SKU-110K/train.txt # 8219 images +val: ../datasets/SKU-110K/val.txt # 588 images +test: ../datasets/SKU-110K/test.txt # 2936 images + +# number of classes +nc: 1 + +# class names +names: [ 'object' ] + + +# download command/URL (optional) -------------------------------------------------------------------------------------- +download: | + import shutil + from tqdm import tqdm + from utils.general import np, pd, Path, download, xyxy2xywh + + # Download + datasets = Path('../datasets') # download directory + urls = ['http://trax-geometry.s3.amazonaws.com/cvpr_challenge/SKU110K_fixed.tar.gz'] + download(urls, dir=datasets, delete=False) + + # Rename directories + dir = (datasets / 'SKU-110K') + if dir.exists(): + shutil.rmtree(dir) + (datasets / 
'SKU110K_fixed').rename(dir) # rename dir + (dir / 'labels').mkdir(parents=True, exist_ok=True) # create labels dir + + # Convert labels + names = 'image', 'x1', 'y1', 'x2', 'y2', 'class', 'image_width', 'image_height' # column names + for d in 'annotations_train.csv', 'annotations_val.csv', 'annotations_test.csv': + x = pd.read_csv(dir / 'annotations' / d, names=names).values # annotations + images, unique_images = x[:, 0], np.unique(x[:, 0]) + with open((dir / d).with_suffix('.txt').__str__().replace('annotations_', ''), 'w') as f: + f.writelines(f'./images/{s}\n' for s in unique_images) + for im in tqdm(unique_images, desc=f'Converting {dir / d}'): + cls = 0 # single-class dataset + with open((dir / 'labels' / im).with_suffix('.txt'), 'a') as f: + for r in x[images == im]: + w, h = r[6], r[7] # image width, height + xywh = xyxy2xywh(np.array([[r[1] / w, r[2] / h, r[3] / w, r[4] / h]]))[0] # instance + f.write(f"{cls} {xywh[0]:.5f} {xywh[1]:.5f} {xywh[2]:.5f} {xywh[3]:.5f}\n") # write label From be86c21c73171d954a9ffe6c2d3ffff1b4e04afe Mon Sep 17 00:00:00 2001 From: Yonghye Kwon Date: Sun, 16 May 2021 22:46:45 +0900 Subject: [PATCH 0213/1976] rename class autoShape -> AutoShape (#3173) * rename class autoShape -> AutoShape follow other class naming convention * rename class autoShape -> AutoShape follow other classes' naming convention * rename class autoShape -> AutoShape --- models/common.py | 6 +++--- models/yolo.py | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/models/common.py b/models/common.py index 9764d4c3a6c0..689aa0f3ed7c 100644 --- a/models/common.py +++ b/models/common.py @@ -223,18 +223,18 @@ def forward(self, x): return non_max_suppression(x[0], conf_thres=self.conf, iou_thres=self.iou, classes=self.classes) -class autoShape(nn.Module): +class AutoShape(nn.Module): # input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS conf = 0.25 # NMS confidence threshold iou = 0.45 # NMS IoU threshold classes = None # (optional list) filter by class def __init__(self, model): - super(autoShape, self).__init__() + super(AutoShape, self).__init__() self.model = model.eval() def autoshape(self): - print('autoShape already enabled, skipping... ') # model already converted to model.autoshape() + print('AutoShape already enabled, skipping... ') # model already converted to model.autoshape() return self @torch.no_grad() diff --git a/models/yolo.py b/models/yolo.py index 314fd806f5e7..06b80032d3d3 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -215,9 +215,9 @@ def nms(self, mode=True): # add or remove NMS module self.model = self.model[:-1] # remove return self - def autoshape(self): # add autoShape module - logger.info('Adding autoShape... ') - m = autoShape(self) # wrap model + def autoshape(self): # add AutoShape module + logger.info('Adding AutoShape... 
') + m = AutoShape(self) # wrap model copy_attr(m, self, include=('yaml', 'nc', 'hyp', 'names', 'stride'), exclude=()) # copy attributes return m From 9ab561dbfce1441f4f0e44347bb02b9fe8b70d0e Mon Sep 17 00:00:00 2001 From: Cristi Fati Date: Sun, 16 May 2021 17:13:03 +0300 Subject: [PATCH 0214/1976] Parameterize ONNX `--opset-version` (#3154) --- models/export.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/models/export.py b/models/export.py index 662509b3ea78..de4adb6a7e24 100644 --- a/models/export.py +++ b/models/export.py @@ -34,6 +34,7 @@ parser.add_argument('--optimize', action='store_true', help='optimize TorchScript for mobile') # TorchScript-only parser.add_argument('--dynamic', action='store_true', help='dynamic ONNX axes') # ONNX-only parser.add_argument('--simplify', action='store_true', help='simplify ONNX model') # ONNX-only + parser.add_argument('--opset-version', type=int, default=12, help='ONNX opset version') # ONNX-only opt = parser.parse_args() opt.img_size *= 2 if len(opt.img_size) == 1 else 1 # expand opt.include = [x.lower() for x in opt.include] @@ -95,7 +96,7 @@ print(f'{prefix} starting export with onnx {onnx.__version__}...') f = opt.weights.replace('.pt', '.onnx') # filename - torch.onnx.export(model, img, f, verbose=False, opset_version=12, input_names=['images'], + torch.onnx.export(model, img, f, verbose=False, opset_version=opt.opset_version, input_names=['images'], dynamic_axes={'images': {0: 'batch', 2: 'height', 3: 'width'}, # size(1,3,640,640) 'output': {0: 'batch', 2: 'y', 3: 'x'}} if opt.dynamic else None) From b133baa33653419a75bb8a1577082c2739e5fb14 Mon Sep 17 00:00:00 2001 From: Christoph Gerum Date: Sun, 16 May 2021 17:41:26 +0200 Subject: [PATCH 0215/1976] Add `device` argument to PyTorch Hub models (#3104) * Allow to manual selection of device for torchhub models * single line device nested torch.device(torch.device(device)) ok Co-authored-by: Glenn Jocher --- hubconf.py | 41 +++++++++++++++++++++-------------------- 1 file changed, 21 insertions(+), 20 deletions(-) diff --git a/hubconf.py b/hubconf.py index 3b3dfe0e9e23..f74e70c85a65 100644 --- a/hubconf.py +++ b/hubconf.py @@ -8,7 +8,7 @@ import torch -def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True): +def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None): """Creates a specified YOLOv5 model Arguments: @@ -18,6 +18,7 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo classes (int): number of model classes autoshape (bool): apply YOLOv5 .autoshape() wrapper to model verbose (bool): print all information to screen + device (str, torch.device, None): device to use for model parameters Returns: YOLOv5 pytorch model @@ -50,7 +51,7 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo model.names = ckpt['model'].names # set class names attribute if autoshape: model = model.autoshape() # for file/URI/PIL/cv2/np inputs and NMS - device = select_device('0' if torch.cuda.is_available() else 'cpu') # default to GPU if available + device = select_device('0' if torch.cuda.is_available() else 'cpu') if device is None else torch.device(device) return model.to(device) except Exception as e: @@ -59,49 +60,49 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo raise Exception(s) from e -def custom(path='path/to/model.pt', autoshape=True, verbose=True): +def custom(path='path/to/model.pt', autoshape=True, 
verbose=True, device=None): # YOLOv5 custom or local model - return _create(path, autoshape=autoshape, verbose=verbose) + return _create(path, autoshape=autoshape, verbose=verbose, device=device) -def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True): +def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None): # YOLOv5-small model https://github.com/ultralytics/yolov5 - return _create('yolov5s', pretrained, channels, classes, autoshape, verbose) + return _create('yolov5s', pretrained, channels, classes, autoshape, verbose, device) -def yolov5m(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True): +def yolov5m(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None): # YOLOv5-medium model https://github.com/ultralytics/yolov5 - return _create('yolov5m', pretrained, channels, classes, autoshape, verbose) + return _create('yolov5m', pretrained, channels, classes, autoshape, verbose, device) -def yolov5l(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True): +def yolov5l(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None): # YOLOv5-large model https://github.com/ultralytics/yolov5 - return _create('yolov5l', pretrained, channels, classes, autoshape, verbose) + return _create('yolov5l', pretrained, channels, classes, autoshape, verbose, device) -def yolov5x(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True): +def yolov5x(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None): # YOLOv5-xlarge model https://github.com/ultralytics/yolov5 - return _create('yolov5x', pretrained, channels, classes, autoshape, verbose) + return _create('yolov5x', pretrained, channels, classes, autoshape, verbose, device) -def yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True): +def yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None): # YOLOv5-small-P6 model https://github.com/ultralytics/yolov5 - return _create('yolov5s6', pretrained, channels, classes, autoshape, verbose) + return _create('yolov5s6', pretrained, channels, classes, autoshape, verbose, device) -def yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True): +def yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None): # YOLOv5-medium-P6 model https://github.com/ultralytics/yolov5 - return _create('yolov5m6', pretrained, channels, classes, autoshape, verbose) + return _create('yolov5m6', pretrained, channels, classes, autoshape, verbose, device) -def yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True): +def yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None): # YOLOv5-large-P6 model https://github.com/ultralytics/yolov5 - return _create('yolov5l6', pretrained, channels, classes, autoshape, verbose) + return _create('yolov5l6', pretrained, channels, classes, autoshape, verbose, device) -def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True): +def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None): # YOLOv5-xlarge-P6 model https://github.com/ultralytics/yolov5 - return _create('yolov5x6', pretrained, channels, classes, autoshape, verbose) + return _create('yolov5x6', pretrained, channels, classes, autoshape, verbose, device) if __name__ == '__main__': From 
ae53f50ae75d83d792927b8196b70702218eb69e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 17 May 2021 11:44:22 +0200 Subject: [PATCH 0216/1976] Plot labels histogram colors (#3192) --- utils/plots.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/plots.py b/utils/plots.py index cb180c2a928e..22ccdcee8a21 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -299,7 +299,8 @@ def plot_labels(labels, names=(), save_dir=Path(''), loggers=None): # matplotlib labels matplotlib.use('svg') # faster ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel() - ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8) + y = ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8) + [y[2].patches[i].set_color([x / 255 for x in colors(i)]) for i in range(nc)] # update colors ax[0].set_ylabel('instances') if 0 < len(names) < 30: ax[0].set_xticks(range(len(names))) From 36b0a9e7d01637a71ef2050d16d945de5dad7929 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 17 May 2021 12:27:40 +0200 Subject: [PATCH 0217/1976] Add CAP_PROP_FRAME_COUNT for YouTube sources (#3193) --- utils/datasets.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index f380b20b97d4..56db9d2f13b5 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -172,7 +172,7 @@ def __next__(self): ret_val, img0 = self.cap.read() self.frame += 1 - print(f'video {self.count + 1}/{self.nf} ({self.frame}/{self.nframes}) {path}: ', end='') + print(f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: ', end='') else: # Read image @@ -193,7 +193,7 @@ def __next__(self): def new_video(self, path): self.frame = 0 self.cap = cv2.VideoCapture(path) - self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT)) + self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT)) def __len__(self): return self.nf # number of files @@ -285,10 +285,11 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32): w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) self.fps = cap.get(cv2.CAP_PROP_FPS) % 100 + self.frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) _, self.imgs[i] = cap.read() # guarantee first frame thread = Thread(target=self.update, args=([i, cap]), daemon=True) - print(f' success ({w}x{h} at {self.fps:.2f} FPS).') + print(f" success ({f'{self.frames} frames ' if self.frames else ''}{w}x{h} at {self.fps:.2f} FPS).") thread.start() print('') # newline From ffb47ffbebaef1d54d177bc339a108a7003357f8 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 17 May 2021 17:47:30 +0200 Subject: [PATCH 0218/1976] Silent Patches Bug Fix (#3214) --- utils/plots.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/plots.py b/utils/plots.py index 22ccdcee8a21..ade9322ce4ec 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -300,7 +300,7 @@ def plot_labels(labels, names=(), save_dir=Path(''), loggers=None): matplotlib.use('svg') # faster ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel() y = ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8) - [y[2].patches[i].set_color([x / 255 for x in colors(i)]) for i in range(nc)] # update colors + # [y[2].patches[i].set_color([x / 255 for x in colors(i)]) for i in range(nc)] # update colors bug #3195 ax[0].set_ylabel('instances') if 0 < len(names) < 30: ax[0].set_xticks(range(len(names))) From 13a1c726996a540fe66842ea67b05d3c0dd856f8 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 17 
May 2021 22:24:26 +0200 Subject: [PATCH 0219/1976] Update datasets.py (#3216) --- utils/datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/datasets.py b/utils/datasets.py index 56db9d2f13b5..a14099875455 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -284,7 +284,7 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32): assert cap.isOpened(), f'Failed to open {s}' w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) - self.fps = cap.get(cv2.CAP_PROP_FPS) % 100 + self.fps = (cap.get(cv2.CAP_PROP_FPS) % 100) or 30.0 # assume 30 FPS if cap gets 0 FPS self.frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) _, self.imgs[i] = cap.read() # guarantee first frame From 3f74cd9ed1a17de94ed4a19dcefbaa3b27d16a95 Mon Sep 17 00:00:00 2001 From: Adrian Holovaty Date: Mon, 17 May 2021 22:47:44 +0200 Subject: [PATCH 0220/1976] Parameterize max_det + inference default at 1000 (#3215) * Added max_det parameters in various places * 120 character line * PEP8 * 120 character line * Update inference default to 1000 instances * Update inference default to 1000 instances Co-authored-by: Glenn Jocher --- detect.py | 4 +++- models/common.py | 6 ++++-- utils/general.py | 3 +-- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/detect.py b/detect.py index 0001f93704bf..732fec698006 100644 --- a/detect.py +++ b/detect.py @@ -68,7 +68,8 @@ def detect(opt): pred = model(img, augment=opt.augment)[0] # Apply NMS - pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms) + pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, opt.classes, opt.agnostic_nms, + max_det=opt.max_det) t2 = time_synchronized() # Apply Classifier @@ -153,6 +154,7 @@ def detect(opt): parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)') parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold') parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS') + parser.add_argument('--max-det', type=int, default=1000, help='maximum number of detections per image') parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') parser.add_argument('--view-img', action='store_true', help='display results') parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') diff --git a/models/common.py b/models/common.py index 689aa0f3ed7c..4211db406c3d 100644 --- a/models/common.py +++ b/models/common.py @@ -215,12 +215,13 @@ class NMS(nn.Module): conf = 0.25 # confidence threshold iou = 0.45 # IoU threshold classes = None # (optional list) filter by class + max_det = 1000 # maximum number of detections per image def __init__(self): super(NMS, self).__init__() def forward(self, x): - return non_max_suppression(x[0], conf_thres=self.conf, iou_thres=self.iou, classes=self.classes) + return non_max_suppression(x[0], self.conf, iou_thres=self.iou, classes=self.classes, max_det=self.max_det) class AutoShape(nn.Module): @@ -228,6 +229,7 @@ class AutoShape(nn.Module): conf = 0.25 # NMS confidence threshold iou = 0.45 # NMS IoU threshold classes = None # (optional list) filter by class + max_det = 1000 # maximum number of detections per image def __init__(self, model): super(AutoShape, self).__init__() @@ -285,7 +287,7 @@ def forward(self, imgs, size=640, augment=False, profile=False): t.append(time_synchronized()) # Post-process - y = non_max_suppression(y, conf_thres=self.conf, iou_thres=self.iou, classes=self.classes) # NMS + y = non_max_suppression(y, self.conf, iou_thres=self.iou, classes=self.classes, max_det=self.max_det) # NMS for i in range(n): scale_coords(shape1, y[i][:, :4], shape0[i]) diff --git a/utils/general.py b/utils/general.py index a4c745d1dcaf..4b3d4ab3b189 100755 --- a/utils/general.py +++ b/utils/general.py @@ -482,7 +482,7 @@ def wh_iou(wh1, wh2): def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False, - labels=()): + labels=(), max_det=300): """Runs Non-Maximum Suppression (NMS) on inference results Returns: @@ -498,7 +498,6 @@ def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=Non # Settings min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height - max_det = 300 # maximum number of detections per image max_nms = 30000 # maximum number of boxes into torchvision.ops.nms() time_limit = 10.0 # seconds to quit after redundant = True # require redundant detections From b7cd1f540d5815b0a1cf2e23ce82a5fdb8f6b525 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 19 May 2021 13:51:55 +0200 Subject: [PATCH 0221/1976] TensorBoard add_graph() fix (#3236) --- train.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/train.py b/train.py index 5330e4ac0ecd..78d91fa37ba4 100644 --- a/train.py +++ b/train.py @@ -330,9 +330,9 @@ def train(hyp, opt, device, tb_writer=None): if plots and ni < 3: f = save_dir / f'train_batch{ni}.jpg' # filename Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start() - # if tb_writer: - # tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch) - # tb_writer.add_graph(torch.jit.trace(model, imgs, strict=False), []) # add model graph + if tb_writer: + tb_writer.add_graph(torch.jit.trace(model, imgs, strict=False), []) # add model graph + # tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch) elif plots and ni == 10 and wandb_logger.wandb: wandb_logger.log({"Mosaics": [wandb_logger.wandb.Image(str(x), caption=x.name) for x in save_dir.glob('train*.jpg') if x.exists()]}) From 7b0eb952cafa9f30710732c6351ac1123e36caae Mon Sep 17 00:00:00 2001 From: yeric1789 
<76454253+yeric1789@users.noreply.github.com> Date: Wed, 19 May 2021 13:47:36 -0400 Subject: [PATCH 0222/1976] `plot_one_box()` default `color=(128, 128, 128)` (#3240) * Color can be none by default * `plot_one_box()` default `color=(128, 128, 128)` Co-authored-by: Glenn Jocher --- utils/plots.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/utils/plots.py b/utils/plots.py index ade9322ce4ec..8313ef210f90 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -68,11 +68,10 @@ def butter_lowpass(cutoff, fs, order): return filtfilt(b, a, data) # forward-backward filter -def plot_one_box(x, im, color=None, label=None, line_thickness=3): +def plot_one_box(x, im, color=(128, 128, 128), label=None, line_thickness=3): # Plots one bounding box on image 'im' using OpenCV assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to plot_on_box() input image.' tl = line_thickness or round(0.002 * (im.shape[0] + im.shape[1]) / 2) + 1 # line/font thickness - color = color or [random.randint(0, 255) for _ in range(3)] c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3])) cv2.rectangle(im, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA) if label: @@ -83,17 +82,16 @@ def plot_one_box(x, im, color=None, label=None, line_thickness=3): cv2.putText(im, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA) -def plot_one_box_PIL(box, im, color=None, label=None, line_thickness=None): +def plot_one_box_PIL(box, im, color=(128, 128, 128), label=None, line_thickness=None): # Plots one bounding box on image 'im' using PIL im = Image.fromarray(im) draw = ImageDraw.Draw(im) line_thickness = line_thickness or max(int(min(im.size) / 200), 2) - draw.rectangle(box, width=line_thickness, outline=tuple(color)) # plot + draw.rectangle(box, width=line_thickness, outline=color) # plot if label: - fontsize = max(round(max(im.size) / 40), 12) - font = ImageFont.truetype("Arial.ttf", fontsize) + font = ImageFont.truetype("Arial.ttf", size=max(round(max(im.size) / 40), 12)) txt_width, txt_height = font.getsize(label) - draw.rectangle([box[0], box[1] - txt_height + 4, box[0] + txt_width, box[1]], fill=tuple(color)) + draw.rectangle([box[0], box[1] - txt_height + 4, box[0] + txt_width, box[1]], fill=color) draw.text((box[0], box[1] - txt_height + 1), label, fill=(255, 255, 255), font=font) return np.asarray(im) From 7b863a34dcacf8b310396d0bc1021f404ec85797 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 19 May 2021 19:58:53 +0200 Subject: [PATCH 0223/1976] Add Cython (#3217) Cython required by pycocotools in certain cases, i.e. 
https://github.com/cocodataset/cocoapi/issues/172 --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index 8b14f7421fc6..e0896648a3c8 100755 --- a/requirements.txt +++ b/requirements.txt @@ -26,4 +26,5 @@ pandas # extras -------------------------------------- thop # FLOPS computation +Cython # for pycocotools https://github.com/cocodataset/cocoapi/issues/172 pycocotools>=2.0 # COCO mAP From 7b36e38cf8f3d3c08e973b18913ae8e41ff970b2 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 20 May 2021 15:22:36 +0200 Subject: [PATCH 0224/1976] Check CoreML models.train() mode (#3262) * Check CoreML models.train() mode * Update export.py --- models/export.py | 1 + 1 file changed, 1 insertion(+) diff --git a/models/export.py b/models/export.py index de4adb6a7e24..65721f65d888 100644 --- a/models/export.py +++ b/models/export.py @@ -131,6 +131,7 @@ import coremltools as ct print(f'{prefix} starting export with coremltools {ct.__version__}...') + assert opt.train, 'CoreML exports should be placed in model.train() mode with `python export.py --train`' model = ct.convert(ts, inputs=[ct.ImageType('image', shape=img.shape, scale=1 / 255.0, bias=[0, 0, 0])]) f = opt.weights.replace('.pt', '.mlmodel') # filename model.save(f) From 10d56d784e7249aea0691b9c386c4828740dd9be Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 21 May 2021 14:46:42 +0200 Subject: [PATCH 0225/1976] Assert `--image-weights` not combined with DDP (#3275) --- train.py | 1 + 1 file changed, 1 insertion(+) diff --git a/train.py b/train.py index 78d91fa37ba4..3aad065b8f1b 100644 --- a/train.py +++ b/train.py @@ -525,6 +525,7 @@ def train(hyp, opt, device, tb_writer=None): device = torch.device('cuda', opt.local_rank) dist.init_process_group(backend='nccl', init_method='env://') # distributed backend assert opt.batch_size % opt.world_size == 0, '--batch-size must be multiple of CUDA device count' + assert not opt.image_weights, '--image-weights argument is not compatible with DDP training' opt.batch_size = opt.total_batch_size // opt.world_size # Hyperparameters From f3402353fb5566eacb612c1673f64aa48816d3b1 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 21 May 2021 15:20:03 +0200 Subject: [PATCH 0226/1976] check `batch_size % utilized_device_count` (#3276) Bug fix to check batch_size divisibility of utilized CUDA device count vs total system CUDA device count. --- utils/torch_utils.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 9991e5ec87d8..5074fa95ae4b 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -72,11 +72,12 @@ def select_device(device='', batch_size=None): cuda = not cpu and torch.cuda.is_available() if cuda: - n = torch.cuda.device_count() - if n > 1 and batch_size: # check that batch_size is compatible with device_count + devices = device.split(',') if device else range(torch.cuda.device_count()) # i.e. 
0,1,6,7 + n = len(devices) # device count + if n > 1 and batch_size: # check batch_size is divisible by device_count assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}' space = ' ' * len(s) - for i, d in enumerate(device.split(',') if device else range(n)): + for i, d in enumerate(devices): p = torch.cuda.get_device_properties(i) s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2}MB)\n" # bytes to MB else: From 683cefead4b9f2a8d062f953a912e46e456ed6ad Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 21 May 2021 16:51:07 +0200 Subject: [PATCH 0227/1976] YouTube stream ending fix (#3277) * YouTube stream ending fix Properly terminates YouTube streams on video end. Should resolve issues #2769 and #3220. * Update datasets.py --- utils/datasets.py | 31 +++++++++++++++---------------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index a14099875455..b05763386dc2 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -270,7 +270,7 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32): sources = [sources] n = len(sources) - self.imgs = [None] * n + self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n self.sources = [clean_str(x) for x in sources] # clean source names for later for i, s in enumerate(sources): # index, source # Start thread to read frames from video stream @@ -284,13 +284,13 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32): assert cap.isOpened(), f'Failed to open {s}' w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) - self.fps = (cap.get(cv2.CAP_PROP_FPS) % 100) or 30.0 # assume 30 FPS if cap gets 0 FPS - self.frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) + self.fps[i] = (cap.get(cv2.CAP_PROP_FPS) % 100) or 30.0 # assume 30 FPS if cap gets 0 FPS + self.frames[i] = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) or float('inf') # assume infinite stream if 0 len _, self.imgs[i] = cap.read() # guarantee first frame - thread = Thread(target=self.update, args=([i, cap]), daemon=True) - print(f" success ({f'{self.frames} frames ' if self.frames else ''}{w}x{h} at {self.fps:.2f} FPS).") - thread.start() + self.threads[i] = Thread(target=self.update, args=([i, cap]), daemon=True) + print(f" success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)") + self.threads[i].start() print('') # newline # check for common shapes @@ -299,18 +299,17 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32): if not self.rect: print('WARNING: Different stream shapes detected. 
For optimal performance supply similarly-shaped streams.') - def update(self, index, cap): - # Read next stream frame in a daemon thread - n = 0 - while cap.isOpened(): + def update(self, i, cap): + # Read stream `i` frames in daemon thread + n, f = 0, self.frames[i] + while cap.isOpened() and n < f: n += 1 # _, self.imgs[index] = cap.read() cap.grab() - if n == 4: # read every 4th frame + if n % 4: # read every 4th frame success, im = cap.retrieve() - self.imgs[index] = im if success else self.imgs[index] * 0 - n = 0 - time.sleep(1 / self.fps) # wait time + self.imgs[i] = im if success else self.imgs[i] * 0 + time.sleep(1 / self.fps[i]) # wait time def __iter__(self): self.count = -1 @@ -318,12 +317,12 @@ def __iter__(self): def __next__(self): self.count += 1 - img0 = self.imgs.copy() - if cv2.waitKey(1) == ord('q'): # q to quit + if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'): # q to quit cv2.destroyAllWindows() raise StopIteration # Letterbox + img0 = self.imgs.copy() img = [letterbox(x, self.img_size, auto=self.rect, stride=self.stride)[0] for x in img0] # Stack From dd7f0b7e05e7658e9cd4fc8f02de5b7df060785d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 21 May 2021 23:35:31 +0200 Subject: [PATCH 0228/1976] Fix TypeError: 'PosixPath' object is not iterable (#3285) --- train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/train.py b/train.py index 3aad065b8f1b..a3ae79a6cee7 100644 --- a/train.py +++ b/train.py @@ -419,7 +419,7 @@ def train(hyp, opt, device, tb_writer=None): # Test best.pt logger.info('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600)) if opt.data.endswith('coco.yaml') and nc == 80: # if COCO - for m in (last, best) if best.exists() else (last): # speed, mAP tests + for m in [last, best] if best.exists() else [last]: # speed, mAP tests results, _, _ = test.test(opt.data, batch_size=batch_size * 2, imgsz=imgsz_test, From 19100ba00709243976b1a31033be019816496cbd Mon Sep 17 00:00:00 2001 From: Charles Frye Date: Fri, 21 May 2021 14:42:53 -0700 Subject: [PATCH 0229/1976] Improves docs and handling of entities and resuming by WandbLogger (#3264) * adds latest tag to match wandb defaults * adds entity handling, 'last' tag * fixes bug causing finished runs to resume * removes redundant "last" tag for wandb artifact --- train.py | 2 +- utils/wandb_logging/wandb_utils.py | 33 ++++++++++++++++++++++-------- 2 files changed, 25 insertions(+), 10 deletions(-) diff --git a/train.py b/train.py index a3ae79a6cee7..c8d617fc228f 100644 --- a/train.py +++ b/train.py @@ -443,7 +443,7 @@ def train(hyp, opt, device, tb_writer=None): if wandb_logger.wandb and not opt.evolve: # Log the stripped model wandb_logger.wandb.log_artifact(str(final), type='model', name='run_' + wandb_logger.wandb_run.id + '_model', - aliases=['last', 'best', 'stripped']) + aliases=['latest', 'best', 'stripped']) wandb_logger.finish_run() else: dist.destroy_process_group() diff --git a/utils/wandb_logging/wandb_utils.py b/utils/wandb_logging/wandb_utils.py index 72a11018b429..57ce9035a777 100644 --- a/utils/wandb_logging/wandb_utils.py +++ b/utils/wandb_logging/wandb_utils.py @@ -1,3 +1,4 @@ +"""Utilities and tools for tracking runs with Weights & Biases.""" import json import sys from pathlib import Path @@ -35,8 +36,9 @@ def get_run_info(run_path): run_path = Path(remove_prefix(run_path, WANDB_ARTIFACT_PREFIX)) run_id = run_path.stem project = run_path.parent.stem + entity = run_path.parent.parent.stem 
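# e.g. assuming the usual 'wandb-artifact://' prefix, a hypothetical resume target
# 'wandb-artifact://my-team/YOLOv5/3aj7kq0s' parses to entity='my-team', project='YOLOv5',
# run_id='3aj7kq0s' and model_artifact_name='run_3aj7kq0s_model'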
model_artifact_name = 'run_' + run_id + '_model' - return run_id, project, model_artifact_name + return entity, project, run_id, model_artifact_name def check_wandb_resume(opt): @@ -44,9 +46,9 @@ def check_wandb_resume(opt): if isinstance(opt.resume, str): if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): if opt.global_rank not in [-1, 0]: # For resuming DDP runs - run_id, project, model_artifact_name = get_run_info(opt.resume) + entity, project, run_id, model_artifact_name = get_run_info(opt.resume) api = wandb.Api() - artifact = api.artifact(project + '/' + model_artifact_name + ':latest') + artifact = api.artifact(entity + '/' + project + '/' + model_artifact_name + ':latest') modeldir = artifact.download() opt.weights = str(Path(modeldir) / "last.pt") return True @@ -78,6 +80,18 @@ def process_wandb_config_ddp_mode(opt): class WandbLogger(): + """Log training runs, datasets, models, and predictions to Weights & Biases. + + This logger sends information to W&B at wandb.ai. By default, this information + includes hyperparameters, system configuration and metrics, model metrics, + and basic data metrics and analyses. + + By providing additional command line arguments to train.py, datasets, + models and predictions can also be logged. + + For more on how this logger is used, see the Weights & Biases documentation: + https://docs.wandb.com/guides/integrations/yolov5 + """ def __init__(self, opt, name, run_id, data_dict, job_type='Training'): # Pre-training routine -- self.job_type = job_type @@ -85,16 +99,17 @@ def __init__(self, opt, name, run_id, data_dict, job_type='Training'): # It's more elegant to stick to 1 wandb.init call, but useful config data is overwritten in the WandbLogger's wandb.init call if isinstance(opt.resume, str): # checks resume from artifact if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): - run_id, project, model_artifact_name = get_run_info(opt.resume) + entity, project, run_id, model_artifact_name = get_run_info(opt.resume) model_artifact_name = WANDB_ARTIFACT_PREFIX + model_artifact_name assert wandb, 'install wandb to resume wandb runs' # Resume wandb-artifact:// runs here| workaround for not overwriting wandb.config - self.wandb_run = wandb.init(id=run_id, project=project, resume='allow') + self.wandb_run = wandb.init(id=run_id, project=project, entity=entity, resume='allow') opt.resume = model_artifact_name elif self.wandb: self.wandb_run = wandb.init(config=opt, resume="allow", project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem, + entity=opt.entity, name=name, job_type=job_type, id=run_id) if not wandb.run else wandb.run @@ -172,8 +187,8 @@ def download_model_artifact(self, opt): modeldir = model_artifact.download() epochs_trained = model_artifact.metadata.get('epochs_trained') total_epochs = model_artifact.metadata.get('total_epochs') - assert epochs_trained < total_epochs, 'training to %g epochs is finished, nothing to resume.' % ( - total_epochs) + is_finished = total_epochs is None + assert not is_finished, 'training is finished, can only resume incomplete runs.' 
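# Note: 'total_epochs' is read from the checkpoint artifact's metadata above;
# its absence (None) is taken to mean the run already completed, so only
# incomplete runs that carry this metadata can be resumed.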
return modeldir, model_artifact return None, None @@ -188,7 +203,7 @@ def log_model(self, path, opt, epoch, fitness_score, best_model=False): }) model_artifact.add_file(str(path / 'last.pt'), name='last.pt') wandb.log_artifact(model_artifact, - aliases=['latest', 'epoch ' + str(self.current_epoch), 'best' if best_model else '']) + aliases=['latest', 'last', 'epoch ' + str(self.current_epoch), 'best' if best_model else '']) print("Saving model artifact on epoch ", epoch + 1) def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False): @@ -291,7 +306,7 @@ def end_epoch(self, best_result=False): if self.result_artifact: train_results = wandb.JoinedTable(self.val_table, self.result_table, "id") self.result_artifact.add(train_results, 'result') - wandb.log_artifact(self.result_artifact, aliases=['latest', 'epoch ' + str(self.current_epoch), + wandb.log_artifact(self.result_artifact, aliases=['latest', 'last', 'epoch ' + str(self.current_epoch), ('best' if best_result else '')]) self.result_table = wandb.Table(["epoch", "id", "prediction", "avg_confidence"]) self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") From 0e2f2cbb512fb2120fef7916ab9c2725d3c172f7 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 23 May 2021 14:55:42 +0200 Subject: [PATCH 0230/1976] Update LoadStreams init fallbacks (#3295) --- utils/datasets.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index b05763386dc2..36416b14e138 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -284,8 +284,8 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32): assert cap.isOpened(), f'Failed to open {s}' w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) - self.fps[i] = (cap.get(cv2.CAP_PROP_FPS) % 100) or 30.0 # assume 30 FPS if cap gets 0 FPS - self.frames[i] = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) or float('inf') # assume infinite stream if 0 len + self.fps[i] = max(cap.get(cv2.CAP_PROP_FPS) % 100, 0) or 30.0 # 30 FPS fallback + self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback _, self.imgs[i] = cap.read() # guarantee first frame self.threads[i] = Thread(target=self.update, args=([i, cap]), daemon=True) From a039b7796dc495a5e5ec5ac5ba60cbecfb8b3c60 Mon Sep 17 00:00:00 2001 From: yeric1789 <76454253+yeric1789@users.noreply.github.com> Date: Sun, 23 May 2021 10:00:55 -0400 Subject: [PATCH 0231/1976] PyTorch Hub `crops = results.crop()` return values (#3282) * Changing save_one_box Made to work with other changes to common.py * PEP8 and single line BGR Co-authored-by: Glenn Jocher --- utils/general.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/utils/general.py b/utils/general.py index 4b3d4ab3b189..47eea78eda30 100755 --- a/utils/general.py +++ b/utils/general.py @@ -657,8 +657,8 @@ def apply_classifier(x, model, img, im0): return x -def save_one_box(xyxy, im, file='image.jpg', gain=1.02, pad=10, square=False, BGR=False): - # Save an image crop as {file} with crop size multiplied by {gain} and padded by {pad} pixels +def save_one_box(xyxy, im, file='image.jpg', gain=1.02, pad=10, square=False, BGR=False, save=True): + # Save image crop as {file} with crop size multiple {gain} and {pad} pixels. 
Save and/or return crop xyxy = torch.tensor(xyxy).view(-1, 4) b = xyxy2xywh(xyxy) # boxes if square: @@ -666,8 +666,10 @@ def save_one_box(xyxy, im, file='image.jpg', gain=1.02, pad=10, square=False, BG b[:, 2:] = b[:, 2:] * gain + pad # box wh * gain + pad xyxy = xywh2xyxy(b).long() clip_coords(xyxy, im.shape) - crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2])] - cv2.imwrite(str(increment_path(file, mkdir=True).with_suffix('.jpg')), crop if BGR else crop[..., ::-1]) + crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2]), ::(1 if BGR else -1)] + if save: + cv2.imwrite(str(increment_path(file, mkdir=True).with_suffix('.jpg')), crop) + return crop def increment_path(path, exist_ok=False, sep='', mkdir=False): From ee24ae11eaced854cb111c0a5a5ae386eb5dc36f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 23 May 2021 17:53:54 +0200 Subject: [PATCH 0232/1976] Comment Cython (#3297) Ultralytics REST API fix. --- requirements.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index e0896648a3c8..1c07c651150e 100755 --- a/requirements.txt +++ b/requirements.txt @@ -25,6 +25,6 @@ pandas # scikit-learn==0.19.2 # for coreml quantization # extras -------------------------------------- -thop # FLOPS computation -Cython # for pycocotools https://github.com/cocodataset/cocoapi/issues/172 +# Cython # for pycocotools https://github.com/cocodataset/cocoapi/issues/172 pycocotools>=2.0 # COCO mAP +thop # FLOPS computation From 9f3a388cea66ca71b9bbe057929eed8fbc920a7e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 23 May 2021 18:00:23 +0200 Subject: [PATCH 0233/1976] Improved check_requirements() robustness (#3298) Add try: except clause on missing requirements install to catch install failures. Prompted by Ultralytics YOLOv5 API failure on Cython. --- utils/general.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index 47eea78eda30..9a882715f0ad 100755 --- a/utils/general.py +++ b/utils/general.py @@ -137,7 +137,10 @@ def check_requirements(requirements='requirements.txt', exclude=()): except Exception as e: # DistributionNotFound or VersionConflict if requirements not met n += 1 print(f"{prefix} {r} not found and is required by YOLOv5, attempting auto-update...") - print(subprocess.check_output(f"pip install '{r}'", shell=True).decode()) + try: + print(subprocess.check_output(f"pip install '{r}'", shell=True).decode()) + except Exception as e: + print(f'{prefix} {e}') if n: # if packages updated source = file.resolve() if 'file' in locals() else requirements From 73a92dc1b6cb04c8f56a0f458d9aaaacf26402c7 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 24 May 2021 12:42:36 +0200 Subject: [PATCH 0234/1976] Explicit `git clone` master (#3311) --- utils/aws/userdata.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/aws/userdata.sh b/utils/aws/userdata.sh index 890606b76a06..5846fedb16f9 100644 --- a/utils/aws/userdata.sh +++ b/utils/aws/userdata.sh @@ -7,7 +7,7 @@ cd home/ubuntu if [ ! -d yolov5 ]; then echo "Running first-time script." # install dependencies, download COCO, pull Docker - git clone https://github.com/ultralytics/yolov5 && sudo chmod -R 777 yolov5 + git clone https://github.com/ultralytics/yolov5 -b master && sudo chmod -R 777 yolov5 cd yolov5 bash data/scripts/get_coco.sh && echo "Data done." & sudo docker pull ultralytics/yolov5:latest && echo "Docker done." 
& From 61ea23c3fe9b86e476cc1c79a12c03ebb3636254 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 24 May 2021 13:23:09 +0200 Subject: [PATCH 0235/1976] Implement `@torch.no_grad()` decorator (#3312) * `@torch.no_grad()` decorator * Update detect.py --- detect.py | 12 ++++++------ test.py | 32 ++++++++++++++++---------------- 2 files changed, 22 insertions(+), 22 deletions(-) diff --git a/detect.py b/detect.py index 732fec698006..c6b76d981541 100644 --- a/detect.py +++ b/detect.py @@ -14,6 +14,7 @@ from utils.torch_utils import select_device, load_classifier, time_synchronized +@torch.no_grad() def detect(opt): source, weights, view_img, save_txt, imgsz = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size save_img = not opt.nosave and not source.endswith('.txt') # save inference images @@ -175,10 +176,9 @@ def detect(opt): print(opt) check_requirements(exclude=('tensorboard', 'pycocotools', 'thop')) - with torch.no_grad(): - if opt.update: # update all models (to fix SourceChangeWarning) - for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']: - detect(opt=opt) - strip_optimizer(opt.weights) - else: + if opt.update: # update all models (to fix SourceChangeWarning) + for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']: detect(opt=opt) + strip_optimizer(opt.weights) + else: + detect(opt=opt) diff --git a/test.py b/test.py index f8936d3b4f9d..0716c5d8b93c 100644 --- a/test.py +++ b/test.py @@ -18,6 +18,7 @@ from utils.torch_utils import select_device, time_synchronized +@torch.no_grad() def test(data, weights=None, batch_size=32, @@ -105,22 +106,21 @@ def test(data, targets = targets.to(device) nb, _, height, width = img.shape # batch size, channels, height, width - with torch.no_grad(): - # Run model - t = time_synchronized() - out, train_out = model(img, augment=augment) # inference and training outputs - t0 += time_synchronized() - t - - # Compute loss - if compute_loss: - loss += compute_loss([x.float() for x in train_out], targets)[1][:3] # box, obj, cls - - # Run NMS - targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels - lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling - t = time_synchronized() - out = non_max_suppression(out, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls) - t1 += time_synchronized() - t + # Run model + t = time_synchronized() + out, train_out = model(img, augment=augment) # inference and training outputs + t0 += time_synchronized() - t + + # Compute loss + if compute_loss: + loss += compute_loss([x.float() for x in train_out], targets)[1][:3] # box, obj, cls + + # Run NMS + targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels + lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling + t = time_synchronized() + out = non_max_suppression(out, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls) + t1 += time_synchronized() - t # Statistics per image for si, pred in enumerate(out): From 407dc5008e47b1aad5ce69f0c91b4f1ec321dd7f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 24 May 2021 17:17:32 +0200 Subject: [PATCH 0236/1976] Update README.md (#3320) --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index b25c6fca983c..a638657b313b 100755 --- a/README.md +++ b/README.md @@ -162,9 +162,9 @@ Ultralytics is a U.S.-based particle physics and 
AI startup with over 6 years of - **Edge AI** integrated into custom iOS and Android apps for realtime **30 FPS video inference.** - **Custom data training**, hyperparameter evolution, and model exportation to any destination. -For business inquiries and professional support requests please visit us at https://www.ultralytics.com. +For business inquiries and professional support requests please visit us at https://ultralytics.com. ## Contact -**Issues should be raised directly in the repository.** For business inquiries or professional support requests please visit https://www.ultralytics.com or email Glenn Jocher at glenn.jocher@ultralytics.com. +**Issues should be raised directly in the repository.** For business inquiries or professional support requests please visit https://ultralytics.com or email Glenn Jocher at glenn.jocher@ultralytics.com. From aad99b63d6ac63278021a66f8a096e5232ed24b0 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 25 May 2021 11:45:24 +0200 Subject: [PATCH 0237/1976] TensorBoard DP/DDP graph fix (#3325) --- train.py | 6 +++--- utils/torch_utils.py | 6 ++++++ 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/train.py b/train.py index c8d617fc228f..3e8d5075aef1 100644 --- a/train.py +++ b/train.py @@ -32,7 +32,7 @@ from utils.google_utils import attempt_download from utils.loss import ComputeLoss from utils.plots import plot_images, plot_labels, plot_results, plot_evolution -from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, is_parallel +from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, de_parallel from utils.wandb_logging.wandb_utils import WandbLogger, check_wandb_resume logger = logging.getLogger(__name__) @@ -331,7 +331,7 @@ def train(hyp, opt, device, tb_writer=None): f = save_dir / f'train_batch{ni}.jpg' # filename Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start() if tb_writer: - tb_writer.add_graph(torch.jit.trace(model, imgs, strict=False), []) # add model graph + tb_writer.add_graph(torch.jit.trace(de_parallel(model), imgs, strict=False), []) # model graph # tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch) elif plots and ni == 10 and wandb_logger.wandb: wandb_logger.log({"Mosaics": [wandb_logger.wandb.Image(str(x), caption=x.name) for x in @@ -390,7 +390,7 @@ def train(hyp, opt, device, tb_writer=None): ckpt = {'epoch': epoch, 'best_fitness': best_fitness, 'training_results': results_file.read_text(), - 'model': deepcopy(model.module if is_parallel(model) else model).half(), + 'model': deepcopy(de_parallel(model)).half(), 'ema': deepcopy(ema.ema).half(), 'updates': ema.updates, 'optimizer': optimizer.state_dict(), diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 5074fa95ae4b..aa54c3cf561e 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -134,9 +134,15 @@ def profile(x, ops, n=100, device=None): def is_parallel(model): + # Returns True if model is of type DP or DDP return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel) +def de_parallel(model): + # De-parallelize a model: returns single-GPU model if model is of type DP or DDP + return model.module if is_parallel(model) else model + + def intersect_dicts(da, db, exclude=()): # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape} 
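For reference, a minimal standalone sketch of the `is_parallel()` / `de_parallel()` helpers this commit adds, with a toy module standing in for a YOLOv5 model:
```python
import torch.nn as nn

def is_parallel(model):
    # True if model is wrapped in DataParallel or DistributedDataParallel
    return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)

def de_parallel(model):
    # unwrap to the underlying single-GPU module before tracing/saving
    return model.module if is_parallel(model) else model

net = nn.DataParallel(nn.Linear(10, 2))  # toy stand-in for a DP-wrapped model
assert isinstance(de_parallel(net), nn.Linear)  # tracing sees the bare module
```
Tracing the bare module is what lets `tb_writer.add_graph()` behave identically in single-GPU, DP, and DDP runs.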
From 1f8d716ec943d9265cd33422d29560716e8b483c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 26 May 2021 12:06:08 +0200 Subject: [PATCH 0238/1976] yolo.py header (#3347) --- models/yolo.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/models/yolo.py b/models/yolo.py index 06b80032d3d3..2844cd0410e0 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -1,4 +1,8 @@ -# YOLOv5 YOLO-specific modules +"""YOLOv5-specific modules + +Usage: + $ python path/to/models/yolo.py --cfg yolov5s.yaml +""" import argparse import logging From c6b5bfca8592c3426ce0b5f65e559c45d42ff378 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 26 May 2021 14:26:52 +0200 Subject: [PATCH 0239/1976] Updated cache v0.2 with `hashlib` (#3350) * Update cache v0.2 to include parent hash Possible fix for https://github.com/ultralytics/yolov5/issues/3349 * Update datasets.py --- utils/datasets.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index 36416b14e138..882c7764c4ab 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -1,6 +1,7 @@ # Dataset utils and dataloaders import glob +import hashlib import logging import math import os @@ -36,9 +37,12 @@ break -def get_hash(files): - # Returns a single hash value of a list of files - return sum(os.path.getsize(f) for f in files if os.path.isfile(f)) +def get_hash(paths): + # Returns a single hash value of a list of paths (files or dirs) + size = sum(os.path.getsize(p) for p in paths if os.path.exists(p)) # sizes + h = hashlib.md5(str(size).encode()) # hash sizes + h.update(''.join(paths).encode()) # hash paths + return h.hexdigest() # return hash def exif_size(img): @@ -383,7 +387,7 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') # cached labels if cache_path.is_file(): cache, exists = torch.load(cache_path), True # load - if cache['hash'] != get_hash(self.label_files + self.img_files) or 'version' not in cache: # changed + if cache['hash'] != get_hash(self.label_files + self.img_files): # changed cache, exists = self.cache_labels(cache_path, prefix), False # re-cache else: cache, exists = self.cache_labels(cache_path, prefix), False # cache @@ -501,9 +505,9 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): x['hash'] = get_hash(self.label_files + self.img_files) x['results'] = nf, nm, ne, nc, i + 1 - x['version'] = 0.1 # cache version + x['version'] = 0.2 # cache version try: - torch.save(x, path) # save for next time + torch.save(x, path) # save cache for next time logging.info(f'{prefix}New cache created: {path}') except Exception as e: logging.info(f'{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}') # path not writeable From 2435bfe8968cd80f3caa5ba46f4ec0fe3ad0aa2b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 26 May 2021 15:51:49 +0200 Subject: [PATCH 0240/1976] Add URL download to check_file() (#3330) * Add URL file download to check_file() * cleanup * pathlib bug fix --- utils/general.py | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/utils/general.py b/utils/general.py index 9a882715f0ad..006e64859f32 100755 --- a/utils/general.py +++ b/utils/general.py @@ -173,12 +173,19 @@ def check_imshow(): def check_file(file): - # Search for file if not found - if Path(file).is_file() or file == '': + # Search/download file (if necessary) and return path + file = 
str(file) # convert to str() + if Path(file).is_file() or file == '': # exists return file - else: + elif file.startswith(('http://', 'https://')): # download + url, file = file, Path(file).name + print(f'Downloading {url} to {file}...') + torch.hub.download_url_to_file(url, file) + assert Path(file).exists() and Path(file).stat().st_size > 0, f'File download failed: {url}' # check + return file + else: # search files = glob.glob('./**/' + file, recursive=True) # find file - assert len(files), f'File Not Found: {file}' # assert file was found + assert len(files), f'File not found: {file}' # assert file was found assert len(files) == 1, f"Multiple files match '{file}', specify exact path: {files}" # assert unique return files[0] # return file From ef4d53818d720f4c1d742125fbb48eafe481fd21 Mon Sep 17 00:00:00 2001 From: WangChaofeng Date: Thu, 27 May 2021 20:10:14 +0800 Subject: [PATCH 0241/1976] ONNX export in .train() mode fix (#3362) --- models/export.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/models/export.py b/models/export.py index 65721f65d888..0d1147938e37 100644 --- a/models/export.py +++ b/models/export.py @@ -97,6 +97,8 @@ print(f'{prefix} starting export with onnx {onnx.__version__}...') f = opt.weights.replace('.pt', '.onnx') # filename torch.onnx.export(model, img, f, verbose=False, opset_version=opt.opset_version, input_names=['images'], + training=torch.onnx.TrainingMode.TRAINING if opt.train else torch.onnx.TrainingMode.EVAL, + do_constant_folding=not opt.train, dynamic_axes={'images': {0: 'batch', 2: 'height', 3: 'width'}, # size(1,3,640,640) 'output': {0: 'batch', 2: 'y', 3: 'x'}} if opt.dynamic else None) From 4d4a2b05208ec82d11d43767a6e8df2c35de85ea Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 27 May 2021 14:31:26 +0200 Subject: [PATCH 0242/1976] Ignore blank lines in `*.txt` labels (#3366) Fix for https://github.com/ultralytics/yolov5/issues/958#issuecomment-849512083 --- utils/datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/datasets.py b/utils/datasets.py index 882c7764c4ab..7dd181400da5 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -474,7 +474,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): if os.path.isfile(lb_file): nf += 1 # label found with open(lb_file, 'r') as f: - l = [x.split() for x in f.read().strip().splitlines()] + l = [x.split() for x in f.read().strip().splitlines() if len(x)] if any([len(x) > 8 for x in l]): # is segment classes = np.array([x[0] for x in l], dtype=np.float32) segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in l] # (cls, xy1...) 
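The `if len(x)` filter above is the whole fix; a small self-contained illustration (label values made up) of the blank-line case it guards against:
```python
import numpy as np

text = '45 0.48 0.63 0.69 0.34\n\n50 0.5 0.5 0.1 0.1\n'  # label file with a blank line
l = [x.split() for x in text.strip().splitlines() if len(x)]  # blank line dropped
labels = np.array(l, dtype=np.float32)
print(labels.shape)  # (2, 5); without the filter the empty row makes the array ragged
```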
From bb131238aabf94fe619199bc2bee81be70989776 Mon Sep 17 00:00:00 2001 From: Piotr Skalski Date: Thu, 27 May 2021 17:01:36 +0200 Subject: [PATCH 0243/1976] update ci-testing.yml (#3322) * update ci-testing.yml * update greetings.yml * bring back os matrix --- .github/workflows/ci-testing.yml | 6 ++---- .github/workflows/greetings.yml | 2 +- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index df508474a955..bb8b173cdb31 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -2,12 +2,10 @@ name: CI CPU testing on: # https://help.github.com/en/actions/reference/events-that-trigger-workflows push: - branches: [ master ] + branches: [ master, develop ] pull_request: # The branches below must be a subset of the branches above - branches: [ master ] - schedule: - - cron: '0 0 * * *' # Runs at 00:00 UTC every day + branches: [ master, develop ] jobs: cpu-tests: diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index ee472297107e..4e502fe9af7b 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -16,7 +16,7 @@ jobs: git remote add upstream https://github.com/ultralytics/yolov5.git git fetch upstream git checkout feature # <----- replace 'feature' with local branch name - git rebase upstream/master + git rebase upstream/develop git push -u origin -f ``` - ✅ Verify all Continuous Integration (CI) **checks are passing**. From 3fea06838468a669d189c4498f999be2d4b3c0ce Mon Sep 17 00:00:00 2001 From: Piotr Skalski Date: Thu, 27 May 2021 17:01:36 +0200 Subject: [PATCH 0244/1976] update ci-testing.yml (#3322) * update ci-testing.yml * update greetings.yml * bring back os matrix --- .github/workflows/ci-testing.yml | 6 ++---- .github/workflows/greetings.yml | 2 +- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index df508474a955..bb8b173cdb31 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -2,12 +2,10 @@ name: CI CPU testing on: # https://help.github.com/en/actions/reference/events-that-trigger-workflows push: - branches: [ master ] + branches: [ master, develop ] pull_request: # The branches below must be a subset of the branches above - branches: [ master ] - schedule: - - cron: '0 0 * * *' # Runs at 00:00 UTC every day + branches: [ master, develop ] jobs: cpu-tests: diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index ee472297107e..4e502fe9af7b 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -16,7 +16,7 @@ jobs: git remote add upstream https://github.com/ultralytics/yolov5.git git fetch upstream git checkout feature # <----- replace 'feature' with local branch name - git rebase upstream/master + git rebase upstream/develop git push -u origin -f ``` - ✅ Verify all Continuous Integration (CI) **checks are passing**. From ba6f3f974bfc4a2968964dbe5eedea73c9f5efcb Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 28 May 2021 15:18:44 +0200 Subject: [PATCH 0245/1976] Enable direct `--weights URL` definition (#3373) * Enable direct `--weights URL` definition @KalenMike this PR will enable direct --weights URL definition. 
Example use case: ``` python train.py --weights https://storage.googleapis.com/bucket/dir/model.pt ``` * cleanup * bug fixes * weights = attempt_download(weights) * Update experimental.py * Update hubconf.py * return bug fix * comment mirror * min_bytes --- hubconf.py | 3 +-- models/experimental.py | 3 +-- train.py | 2 +- utils/google_utils.py | 53 ++++++++++++++++++++++++++---------------- 4 files changed, 36 insertions(+), 25 deletions(-) diff --git a/hubconf.py b/hubconf.py index f74e70c85a65..40bbb1ed0826 100644 --- a/hubconf.py +++ b/hubconf.py @@ -41,8 +41,7 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo cfg = list((Path(__file__).parent / 'models').rglob(f'{name}.yaml'))[0] # model.yaml path model = Model(cfg, channels, classes) # create model if pretrained: - attempt_download(fname) # download if not found locally - ckpt = torch.load(fname, map_location=torch.device('cpu')) # load + ckpt = torch.load(attempt_download(fname), map_location=torch.device('cpu')) # load msd = model.state_dict() # model state_dict csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 csd = {k: v for k, v in csd.items() if msd[k].shape == v.shape} # filter diff --git a/models/experimental.py b/models/experimental.py index afa787907104..d316b18373c3 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -116,8 +116,7 @@ def attempt_load(weights, map_location=None, inplace=True): # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a model = Ensemble() for w in weights if isinstance(weights, list) else [weights]: - attempt_download(w) - ckpt = torch.load(w, map_location=map_location) # load + ckpt = torch.load(attempt_download(w), map_location=map_location) # load model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval()) # FP32 model # Compatibility updates diff --git a/train.py b/train.py index 3e8d5075aef1..b74cdb28be66 100644 --- a/train.py +++ b/train.py @@ -83,7 +83,7 @@ def train(hyp, opt, device, tb_writer=None): pretrained = weights.endswith('.pt') if pretrained: with torch_distributed_zero_first(rank): - attempt_download(weights) # download if not found locally + weights = attempt_download(weights) # download if not found locally ckpt = torch.load(weights, map_location=device) # load checkpoint model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create exclude = ['anchor'] if (opt.cfg or hyp.get('anchors')) and not opt.resume else [] # exclude keys diff --git a/utils/google_utils.py b/utils/google_utils.py index 63d3e5b212f3..ac5c54dba97f 100644 --- a/utils/google_utils.py +++ b/utils/google_utils.py @@ -16,11 +16,37 @@ def gsutil_getsize(url=''): return eval(s.split(' ')[0]) if len(s) else 0 # bytes +def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''): + # Attempts to download file from url or url2, checks and removes incomplete downloads < min_bytes + file = Path(file) + try: # GitHub + print(f'Downloading {url} to {file}...') + torch.hub.download_url_to_file(url, str(file)) + assert file.exists() and file.stat().st_size > min_bytes # check + except Exception as e: # GCP + file.unlink(missing_ok=True) # remove partial downloads + print(f'Download error: {e}\nRe-attempting {url2 or url} to {file}...') + os.system(f"curl -L '{url2 or url}' -o '{file}' --retry 3 -C -") # curl download, retry and resume on fail + finally: + if not file.exists() or file.stat().st_size < min_bytes: # check + 
file.unlink(missing_ok=True) # remove partial downloads + print(f'ERROR: Download failure: {error_msg or url}') + print('') + + def attempt_download(file, repo='ultralytics/yolov5'): # Attempt file download if does not exist file = Path(str(file).strip().replace("'", '')) if not file.exists(): + # URL specified + name = file.name + if str(file).startswith(('http:/', 'https:/')): # download + url = str(file).replace(':/', '://') # Pathlib turns :// -> :/ + safe_download(file=name, url=url, min_bytes=1E5) + return name + + # GitHub assets file.parent.mkdir(parents=True, exist_ok=True) # make parent dir (if required) try: response = requests.get(f'https://api.github.com/repos/{repo}/releases/latest').json() # github api @@ -34,27 +60,14 @@ def attempt_download(file, repo='ultralytics/yolov5'): except: tag = 'v5.0' # current release - name = file.name if name in assets: - msg = f'{file} missing, try downloading from https://github.com/{repo}/releases/' - redundant = False # second download option - try: # GitHub - url = f'https://github.com/{repo}/releases/download/{tag}/{name}' - print(f'Downloading {url} to {file}...') - torch.hub.download_url_to_file(url, file) - assert file.exists() and file.stat().st_size > 1E6 # check - except Exception as e: # GCP - print(f'Download error: {e}') - assert redundant, 'No secondary mirror' - url = f'https://storage.googleapis.com/{repo}/ckpt/{name}' - print(f'Downloading {url} to {file}...') - os.system(f"curl -L '{url}' -o '{file}' --retry 3 -C -") # curl download, retry and resume on fail - finally: - if not file.exists() or file.stat().st_size < 1E6: # check - file.unlink(missing_ok=True) # remove partial downloads - print(f'ERROR: Download failure: {msg}') - print('') - return + safe_download(file, + url=f'https://github.com/{repo}/releases/download/{tag}/{name}', + # url2=f'https://storage.googleapis.com/{repo}/ckpt/{name}', # backup url (optional) + min_bytes=1E5, + error_msg=f'{file} missing, try downloading from https://github.com/{repo}/releases/') + + return str(file) def gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', file='tmp.zip'): From 57f773b0ae2a2d477c1f85d07d37827f3ba82c6e Mon Sep 17 00:00:00 2001 From: Peretz Cohen Date: Sat, 29 May 2021 11:49:24 -0700 Subject: [PATCH 0246/1976] Update tutorial.ipynb (#3368) add Open in Kaggle badge --- tutorial.ipynb | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 3954feadfcb2..1bc9a8cda032 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -517,7 +517,8 @@ "colab_type": "text" }, "source": [ - "\"Open" + "\"Open", + "\"Kaggle\"" ] }, { @@ -1260,4 +1261,4 @@ "outputs": [] } ] -} \ No newline at end of file +} From 21a9607e00f1365b21d8c4bd81bdbf5fc0efea24 Mon Sep 17 00:00:00 2001 From: tudoulei <34886368+tudoulei@users.noreply.github.com> Date: Sun, 30 May 2021 03:12:01 +0800 Subject: [PATCH 0247/1976] `cv2.imread(img, -1)` for IMREAD_UNCHANGED (#3379) * Update datasets.py * comment Co-authored-by: Glenn Jocher --- utils/datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/datasets.py b/utils/datasets.py index 7dd181400da5..331df8ffd047 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -181,7 +181,7 @@ def __next__(self): else: # Read image self.count += 1 - img0 = cv2.imread(path) # BGR + img0 = cv2.imread(path, -1) # BGR (-1 is IMREAD_UNCHANGED) assert img0 is not None, 'Image Not Found ' + path print(f'image {self.count}/{self.nf} {path}: ', end='') From 4b52e19a61a39870fc4234da8906daa495def792 
Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 29 May 2021 22:49:34 +0200 Subject: [PATCH 0248/1976] COCO evolution fix (#3388) * COCO evolution fix * cleanup * update print * print fix --- train.py | 58 +++++++++++++++++++++++++++----------------------------- 1 file changed, 28 insertions(+), 30 deletions(-) diff --git a/train.py b/train.py index b74cdb28be66..1041ec30c257 100644 --- a/train.py +++ b/train.py @@ -62,7 +62,6 @@ def train(hyp, opt, device, tb_writer=None): init_seeds(2 + rank) with open(opt.data) as f: data_dict = yaml.safe_load(f) # data dict - is_coco = opt.data.endswith('coco.yaml') # Logging- Doing this before checking the dataset. Might update data_dict loggers = {'wandb': None} # loggers dict @@ -78,6 +77,7 @@ def train(hyp, opt, device, tb_writer=None): nc = 1 if opt.single_cls else int(data_dict['nc']) # number of classes names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data) # check + is_coco = opt.data.endswith('coco.yaml') and nc == 80 # COCO dataset # Model pretrained = weights.endswith('.pt') @@ -358,6 +358,7 @@ def train(hyp, opt, device, tb_writer=None): single_cls=opt.single_cls, dataloader=testloader, save_dir=save_dir, + save_json=is_coco and final_epoch, verbose=nc < 50 and final_epoch, plots=plots and final_epoch, wandb_logger=wandb_logger, @@ -409,41 +410,38 @@ def train(hyp, opt, device, tb_writer=None): # end epoch ---------------------------------------------------------------------------------------------------- # end training if rank in [-1, 0]: - # Plots + logger.info(f'{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.\n') if plots: plot_results(save_dir=save_dir) # save as results.png if wandb_logger.wandb: files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]] wandb_logger.log({"Results": [wandb_logger.wandb.Image(str(save_dir / f), caption=f) for f in files if (save_dir / f).exists()]}) - # Test best.pt - logger.info('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600)) - if opt.data.endswith('coco.yaml') and nc == 80: # if COCO - for m in [last, best] if best.exists() else [last]: # speed, mAP tests - results, _, _ = test.test(opt.data, - batch_size=batch_size * 2, - imgsz=imgsz_test, - conf_thres=0.001, - iou_thres=0.7, - model=attempt_load(m, device).half(), - single_cls=opt.single_cls, - dataloader=testloader, - save_dir=save_dir, - save_json=True, - plots=False, - is_coco=is_coco) - - # Strip optimizers - final = best if best.exists() else last # final model - for f in last, best: - if f.exists(): - strip_optimizer(f) # strip optimizers - if opt.bucket: - os.system(f'gsutil cp {final} gs://{opt.bucket}/weights') # upload - if wandb_logger.wandb and not opt.evolve: # Log the stripped model - wandb_logger.wandb.log_artifact(str(final), type='model', - name='run_' + wandb_logger.wandb_run.id + '_model', - aliases=['latest', 'best', 'stripped']) + + if not opt.evolve: + if is_coco: # COCO dataset + for m in [last, best] if best.exists() else [last]: # speed, mAP tests + results, _, _ = test.test(opt.data, + batch_size=batch_size * 2, + imgsz=imgsz_test, + conf_thres=0.001, + iou_thres=0.7, + model=attempt_load(m, device).half(), + single_cls=opt.single_cls, + dataloader=testloader, + save_dir=save_dir, + save_json=True, + plots=False, + is_coco=is_coco) + + # Strip 
optimizers + for f in last, best: + if f.exists(): + strip_optimizer(f) # strip optimizers + if wandb_logger.wandb: # Log the stripped model + wandb_logger.wandb.log_artifact(str(best if best.exists() else last), type='model', + name='run_' + wandb_logger.wandb_run.id + '_model', + aliases=['latest', 'best', 'stripped']) wandb_logger.finish_run() else: dist.destroy_process_group() From d833ab3d2529626d4cc4c6ae28ce7858b9ca738f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 30 May 2021 20:52:42 +0200 Subject: [PATCH 0249/1976] Create `is_pip()` function (#3391) Returns `True` if file is part of pip package. Useful for contextual behavior modification. ```python def is_pip(): # Is file in a pip package? return 'site-packages' in Path(__file__).absolute().parts ``` --- utils/general.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/utils/general.py b/utils/general.py index 006e64859f32..1f805c56af29 100755 --- a/utils/general.py +++ b/utils/general.py @@ -53,12 +53,12 @@ def get_latest_run(search_dir='.'): def is_docker(): - # Is environment a Docker container + # Is environment a Docker container? return Path('/workspace').exists() # or Path('/.dockerenv').exists() def is_colab(): - # Is environment a Google Colab instance + # Is environment a Google Colab instance? try: import google.colab return True @@ -66,6 +66,11 @@ def is_colab(): return False +def is_pip(): + # Is file in a pip package? + return 'site-packages' in Path(__file__).absolute().parts + + def emojis(str=''): # Return platform-dependent emoji-safe version of string return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str From fdbe527dc02f6f8891a1fd0baa3c5638ed5f53a4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 31 May 2021 10:39:00 +0200 Subject: [PATCH 0250/1976] Revert "`cv2.imread(img, -1)` for IMREAD_UNCHANGED (#3379)" (#3395) This reverts commit 21a9607e00f1365b21d8c4bd81bdbf5fc0efea24. --- utils/datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/datasets.py b/utils/datasets.py index 331df8ffd047..7dd181400da5 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -181,7 +181,7 @@ def __next__(self): else: # Read image self.count += 1 - img0 = cv2.imread(path, -1) # BGR (-1 is IMREAD_UNCHANGED) + img0 = cv2.imread(path) # BGR assert img0 is not None, 'Image Not Found ' + path print(f'image {self.count}/{self.nf} {path}: ', end='') From 3cb9ad4fc49872cf21ea529277708f1707649cbb Mon Sep 17 00:00:00 2001 From: chocosaj Date: Thu, 3 Jun 2021 18:31:51 +0800 Subject: [PATCH 0251/1976] Update FLOPs description (#3422) * Update README.md * Changing FLOPS to FLOPs. Co-authored-by: BuildTools --- README.md | 4 ++-- models/yolo.py | 6 +++--- requirements.txt | 2 +- tutorial.ipynb | 6 +++--- utils/torch_utils.py | 12 ++++++------ 5 files changed, 15 insertions(+), 15 deletions(-) diff --git a/README.md b/README.md index a638657b313b..1601efdee3b7 100755 --- a/README.md +++ b/README.md @@ -30,7 +30,7 @@ This repository represents Ultralytics open-source research into future object d [assets]: https://github.com/ultralytics/yolov5/releases -Model |size
<br>(pixels) |mAPval<br>0.5:0.95 |mAPtest<br>0.5:0.95 |mAPval<br>0.5 |Speed<br>V100 (ms) | |params<br>(M) |FLOPS<br>640 (B)
+Model |size<br>(pixels) |mAPval<br>0.5:0.95 |mAPtest<br>0.5:0.95 |mAPval<br>0.5 |Speed<br>V100 (ms) | |params<br>(M) |<br>
640 (B) --- |--- |--- |--- |--- |--- |---|--- |--- [YOLOv5s][assets] |640 |36.7 |36.7 |55.4 |**2.0** | |7.3 |17.0 [YOLOv5m][assets] |640 |44.5 |44.5 |63.1 |2.7 | |21.4 |51.3 @@ -112,7 +112,7 @@ Namespace(agnostic_nms=False, augment=False, classes=None, conf_thres=0.25, devi YOLOv5 v4.0-96-g83dc1b4 torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB) Fusing layers... -Model Summary: 224 layers, 7266973 parameters, 0 gradients, 17.0 GFLOPS +Model Summary: 224 layers, 7266973 parameters, 0 gradients, 17.0 GFLOPs image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, Done. (0.010s) image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 1 tie, Done. (0.011s) Results saved to runs/detect/exp2 diff --git a/models/yolo.py b/models/yolo.py index 2844cd0410e0..1a7be913023c 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -21,7 +21,7 @@ select_device, copy_attr try: - import thop # for FLOPS computation + import thop # for FLOPs computation except ImportError: thop = None @@ -140,13 +140,13 @@ def forward_once(self, x, profile=False): x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers if profile: - o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPS + o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPs t = time_synchronized() for _ in range(10): _ = m(x) dt.append((time_synchronized() - t) * 100) if m == self.model[0]: - logger.info(f"{'time (ms)':>10s} {'GFLOPS':>10s} {'params':>10s} {'module'}") + logger.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s} {'module'}") logger.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f} {m.type}') x = m(x) # run diff --git a/requirements.txt b/requirements.txt index 1c07c651150e..a20fb6ad0ea5 100755 --- a/requirements.txt +++ b/requirements.txt @@ -27,4 +27,4 @@ pandas # extras -------------------------------------- # Cython # for pycocotools https://github.com/cocodataset/cocoapi/issues/172 pycocotools>=2.0 # COCO mAP -thop # FLOPS computation +thop # FLOPs computation diff --git a/tutorial.ipynb b/tutorial.ipynb index 1bc9a8cda032..97b128182d85 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -611,7 +611,7 @@ "YOLOv5 🚀 v5.0-1-g0f395b3 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", "\n", "Fusing layers... \n", - "Model Summary: 224 layers, 7266973 parameters, 0 gradients, 17.0 GFLOPS\n", + "Model Summary: 224 layers, 7266973 parameters, 0 gradients, 17.0 GFLOPs\n", "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, Done. (0.008s)\n", "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, Done. (0.008s)\n", "Results saved to runs/detect/exp\n", @@ -734,7 +734,7 @@ "100% 168M/168M [00:05<00:00, 32.3MB/s]\n", "\n", "Fusing layers... \n", - "Model Summary: 476 layers, 87730285 parameters, 0 gradients, 218.8 GFLOPS\n", + "Model Summary: 476 layers, 87730285 parameters, 0 gradients, 218.8 GFLOPs\n", "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco/val2017' images and labels... 
4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:01<00:00, 3102.29it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: ../coco/val2017.cache\n", " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:23<00:00, 1.87it/s]\n", @@ -964,7 +964,7 @@ " 22 [-1, 10] 1 0 models.common.Concat [1] \n", " 23 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n", " 24 [17, 20, 23] 1 229245 models.yolo.Detect [80, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n", - "Model Summary: 283 layers, 7276605 parameters, 7276605 gradients, 17.1 GFLOPS\n", + "Model Summary: 283 layers, 7276605 parameters, 7276605 gradients, 17.1 GFLOPs\n", "\n", "Transferred 362/362 items from yolov5s.pt\n", "Scaled weight_decay = 0.0005\n", diff --git a/utils/torch_utils.py b/utils/torch_utils.py index aa54c3cf561e..6a7d07634813 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -18,7 +18,7 @@ import torchvision try: - import thop # for FLOPS computation + import thop # for FLOPs computation except ImportError: thop = None logger = logging.getLogger(__name__) @@ -105,13 +105,13 @@ def profile(x, ops, n=100, device=None): x = x.to(device) x.requires_grad = True print(torch.__version__, device.type, torch.cuda.get_device_properties(0) if device.type == 'cuda' else '') - print(f"\n{'Params':>12s}{'GFLOPS':>12s}{'forward (ms)':>16s}{'backward (ms)':>16s}{'input':>24s}{'output':>24s}") + print(f"\n{'Params':>12s}{'GFLOPs':>12s}{'forward (ms)':>16s}{'backward (ms)':>16s}{'input':>24s}{'output':>24s}") for m in ops if isinstance(ops, list) else [ops]: m = m.to(device) if hasattr(m, 'to') else m # device m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m # type dtf, dtb, t = 0., 0., [0., 0., 0.] 
# dt forward, backward try: - flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # GFLOPS + flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # GFLOPs except: flops = 0 @@ -219,13 +219,13 @@ def model_info(model, verbose=False, img_size=640): print('%5g %40s %9s %12g %20s %10.3g %10.3g' % (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std())) - try: # FLOPS + try: # FLOPs from thop import profile stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32 img = torch.zeros((1, model.yaml.get('ch', 3), stride, stride), device=next(model.parameters()).device) # input - flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2 # stride GFLOPS + flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2 # stride GFLOPs img_size = img_size if isinstance(img_size, list) else [img_size, img_size] # expand if int/float - fs = ', %.1f GFLOPS' % (flops * img_size[0] / stride * img_size[1] / stride) # 640x640 GFLOPS + fs = ', %.1f GFLOPs' % (flops * img_size[0] / stride * img_size[1] / stride) # 640x640 GFLOPs except (ImportError, Exception): fs = '' From f8651c388fa7af3d32a4f7968da6afd4ebb0e533 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 3 Jun 2021 18:44:58 +0200 Subject: [PATCH 0252/1976] Parse URL authentication (#3424) * Parse URL authentication * urllib.parse.unquote() * improved error handling * improved error handling * remove %3F * update check_file() --- utils/general.py | 4 +++- utils/google_utils.py | 17 ++++++++++------- 2 files changed, 13 insertions(+), 8 deletions(-) diff --git a/utils/general.py b/utils/general.py index 1f805c56af29..546fccd84066 100755 --- a/utils/general.py +++ b/utils/general.py @@ -9,6 +9,7 @@ import re import subprocess import time +import urllib from itertools import repeat from multiprocessing.pool import ThreadPool from pathlib import Path @@ -183,7 +184,8 @@ def check_file(file): if Path(file).is_file() or file == '': # exists return file elif file.startswith(('http://', 'https://')): # download - url, file = file, Path(file).name + url, file = file, Path(urllib.parse.unquote(str(file))).name # url, file (decode '%2F' to '/' etc.) + file = file.split('?')[0] # parse authentication https://url.com/file.txt?auth... 
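# Note: e.g. a signed URL 'https://host/dir/model.pt?token=abc%3D' (illustrative)
# unquotes to '...model.pt?token=abc=', its basename is taken, and the split on
# '?' leaves the local filename 'model.pt'.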
print(f'Downloading {url} to {file}...') torch.hub.download_url_to_file(url, file) assert Path(file).exists() and Path(file).stat().st_size > 0, f'File download failed: {url}' # check diff --git a/utils/google_utils.py b/utils/google_utils.py index ac5c54dba97f..aefc7de2db2e 100644 --- a/utils/google_utils.py +++ b/utils/google_utils.py @@ -4,6 +4,7 @@ import platform import subprocess import time +import urllib from pathlib import Path import requests @@ -19,30 +20,32 @@ def gsutil_getsize(url=''): def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''): # Attempts to download file from url or url2, checks and removes incomplete downloads < min_bytes file = Path(file) - try: # GitHub + assert_msg = f"Downloaded file '{file}' does not exist or size is < min_bytes={min_bytes}" + try: # url1 print(f'Downloading {url} to {file}...') torch.hub.download_url_to_file(url, str(file)) - assert file.exists() and file.stat().st_size > min_bytes # check - except Exception as e: # GCP + assert file.exists() and file.stat().st_size > min_bytes, assert_msg # check + except Exception as e: # url2 file.unlink(missing_ok=True) # remove partial downloads - print(f'Download error: {e}\nRe-attempting {url2 or url} to {file}...') + print(f'ERROR: {e}\nRe-attempting {url2 or url} to {file}...') os.system(f"curl -L '{url2 or url}' -o '{file}' --retry 3 -C -") # curl download, retry and resume on fail finally: if not file.exists() or file.stat().st_size < min_bytes: # check file.unlink(missing_ok=True) # remove partial downloads - print(f'ERROR: Download failure: {error_msg or url}') + print(f"ERROR: {assert_msg}\n{error_msg}") print('') -def attempt_download(file, repo='ultralytics/yolov5'): +def attempt_download(file, repo='ultralytics/yolov5'): # from utils.google_utils import *; attempt_download() # Attempt file download if does not exist file = Path(str(file).strip().replace("'", '')) if not file.exists(): # URL specified - name = file.name + name = Path(urllib.parse.unquote(str(file))).name # decode '%2F' to '/' etc. if str(file).startswith(('http:/', 'https:/')): # download url = str(file).replace(':/', '://') # Pathlib turns :// -> :/ + name = name.split('?')[0] # parse authentication https://url.com/file.txt?auth... safe_download(file=name, url=url, min_bytes=1E5) return name From af2bc3a1c3414ce75e49f884f828be96be556e97 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 4 Jun 2021 11:46:40 +0200 Subject: [PATCH 0253/1976] Add FLOPs title to table (#3453) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 1601efdee3b7..cefb82b0e9fd 100755 --- a/README.md +++ b/README.md @@ -30,7 +30,7 @@ This repository represents Ultralytics open-source research into future object d [assets]: https://github.com/ultralytics/yolov5/releases -Model |size
<br>(pixels) |mAPval<br>0.5:0.95 |mAPtest<br>0.5:0.95 |mAPval<br>0.5 |Speed<br>V100 (ms) | |params<br>(M) |<br>640 (B)
+Model |size<br>(pixels) |mAPval<br>0.5:0.95 |mAPtest<br>0.5:0.95 |mAPval<br>0.5 |Speed<br>V100 (ms) | |params<br>(M) |FLOPs<br>
640 (B) --- |--- |--- |--- |--- |--- |---|--- |--- [YOLOv5s][assets] |640 |36.7 |36.7 |55.4 |**2.0** | |7.3 |17.0 [YOLOv5m][assets] |640 |44.5 |44.5 |63.1 |2.7 | |21.4 |51.3 From 4aa2959101dc42559104d3c5f5bf734b5c7fd40e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 4 Jun 2021 12:37:41 +0200 Subject: [PATCH 0254/1976] Suppress jit trace warning + graph once (#3454) * Suppress jit trace warning + graph once Suppress harmless jit trace warning on TensorBoard add_graph call. Also fix multiple add_graph() calls bug, now only on batch 0. * Update train.py --- train.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/train.py b/train.py index 1041ec30c257..093a6197ff06 100644 --- a/train.py +++ b/train.py @@ -4,6 +4,7 @@ import os import random import time +import warnings from copy import deepcopy from pathlib import Path from threading import Thread @@ -323,18 +324,19 @@ def train(hyp, opt, device, tb_writer=None): mloss = (mloss * i + loss_items) / (i + 1) # update mean losses mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0) # (GB) s = ('%10s' * 2 + '%10.4g' * 6) % ( - '%g/%g' % (epoch, epochs - 1), mem, *mloss, targets.shape[0], imgs.shape[-1]) + f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1]) pbar.set_description(s) # Plot if plots and ni < 3: f = save_dir / f'train_batch{ni}.jpg' # filename Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start() - if tb_writer: - tb_writer.add_graph(torch.jit.trace(de_parallel(model), imgs, strict=False), []) # model graph - # tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch) + if tb_writer and ni == 0: + with warnings.catch_warnings(): + warnings.simplefilter('ignore') # suppress jit trace warning + tb_writer.add_graph(torch.jit.trace(de_parallel(model), imgs, strict=False), []) # graph elif plots and ni == 10 and wandb_logger.wandb: - wandb_logger.log({"Mosaics": [wandb_logger.wandb.Image(str(x), caption=x.name) for x in + wandb_logger.log({'Mosaics': [wandb_logger.wandb.Image(str(x), caption=x.name) for x in save_dir.glob('train*.jpg') if x.exists()]}) # end batch ------------------------------------------------------------------------------------------------ From 8e3b4a0bf3be599ef7316059130547a1837a7030 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 4 Jun 2021 12:47:53 +0200 Subject: [PATCH 0255/1976] Update MixUp augmentation `alpha=beta=32.0` (#3455) Per VOC empirical results https://github.com/ultralytics/yolov5/issues/3380#issuecomment-853001307 by @developer0hye --- utils/datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/datasets.py b/utils/datasets.py index 7dd181400da5..350fa53cc443 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -535,7 +535,7 @@ def __getitem__(self, index): # MixUp https://arxiv.org/pdf/1710.09412.pdf if random.random() < hyp['mixup']: img2, labels2 = load_mosaic(self, random.randint(0, self.n - 1)) - r = np.random.beta(8.0, 8.0) # mixup ratio, alpha=beta=8.0 + r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0 img = (img * r + img2 * (1 - r)).astype(np.uint8) labels = np.concatenate((labels, labels2), 0) From d40481acc5f73a06fa5ced5fd2cfa8fce73a744d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 4 Jun 2021 15:23:33 +0200 Subject: [PATCH 0256/1976] Add `timeout()` class (#3460) * Add `timeout()` class * rearrange order --- utils/general.py | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 
deletion(-) diff --git a/utils/general.py b/utils/general.py index 546fccd84066..591fc8474339 100755 --- a/utils/general.py +++ b/utils/general.py @@ -1,5 +1,6 @@ # YOLOv5 general utils +import contextlib import glob import logging import math @@ -7,6 +8,7 @@ import platform import random import re +import signal import subprocess import time import urllib @@ -34,6 +36,26 @@ os.environ['NUMEXPR_MAX_THREADS'] = str(min(os.cpu_count(), 8)) # NumExpr max threads +class timeout(contextlib.ContextDecorator): + # Usage: @timeout(seconds) decorator or 'with timeout(seconds):' context manager + def __init__(self, seconds, *, timeout_message="", suppress_timeout_errors=True): + self.seconds = int(seconds) + self.timeout_message = timeout_message + self.suppress = bool(suppress_timeout_errors) + + def _timeout_handler(self, signum, frame): + raise TimeoutError(self.timeout_message) + + def __enter__(self): + signal.signal(signal.SIGALRM, self._timeout_handler) # Set handler for SIGALRM + signal.alarm(self.seconds) # start countdown for SIGALRM to be raised + + def __exit__(self, exc_type, exc_val, exc_tb): + signal.alarm(0) # Cancel SIGALRM if it's scheduled + if self.suppress and exc_type is TimeoutError: # Suppress TimeoutError + return True + + def set_logging(rank=-1, verbose=True): logging.basicConfig( format="%(message)s", @@ -86,7 +108,7 @@ def check_online(): # Check internet connectivity import socket try: - socket.create_connection(("1.1.1.1", 443), 5) # check host accesability + socket.create_connection(("1.1.1.1", 443), 5) # check host accessibility return True except OSError: return False From c37f072ba73a7b0286b041936a1ebf3d86beafa2 Mon Sep 17 00:00:00 2001 From: Yonghye Kwon Date: Sat, 5 Jun 2021 03:02:20 +0900 Subject: [PATCH 0257/1976] Faster HSV augmentation (#3462) remove datatype conversion process that can be skipped --- utils/datasets.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index 350fa53cc443..b6e43b94cfe9 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -655,12 +655,12 @@ def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5): hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV)) dtype = img.dtype # uint8 - x = np.arange(0, 256, dtype=np.int16) + x = np.arange(0, 256, dtype=r.dtype) lut_hue = ((x * r[0]) % 180).astype(dtype) lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) lut_val = np.clip(x * r[2], 0, 255).astype(dtype) - img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype) + img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))) cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed From 563ea9475a580b959bcddbb280261c41d80fd798 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 4 Jun 2021 21:17:36 +0200 Subject: [PATCH 0258/1976] Add `check_git_status()` 5 second timeout (#3464) * Add check_git_status() 5 second timeout This should prevent the SSH Git bug that we were discussing @KalenMike * cleanup * replace timeout with check_output built-in timeout --- utils/general.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/utils/general.py b/utils/general.py index 591fc8474339..d9ee432dcae3 100755 --- a/utils/general.py +++ b/utils/general.py @@ -9,12 +9,12 @@ import random import re import signal -import subprocess import time import urllib from itertools import repeat from multiprocessing.pool import ThreadPool from pathlib import Path +from 
subprocess import check_output import cv2 import numpy as np @@ -38,9 +38,9 @@ class timeout(contextlib.ContextDecorator): # Usage: @timeout(seconds) decorator or 'with timeout(seconds):' context manager - def __init__(self, seconds, *, timeout_message="", suppress_timeout_errors=True): + def __init__(self, seconds, *, timeout_msg='', suppress_timeout_errors=True): self.seconds = int(seconds) - self.timeout_message = timeout_message + self.timeout_message = timeout_msg self.suppress = bool(suppress_timeout_errors) def _timeout_handler(self, signum, frame): @@ -114,7 +114,7 @@ def check_online(): return False -def check_git_status(): +def check_git_status(err_msg=', for updates see https://github.com/ultralytics/yolov5'): # Recommend 'git pull' if code is out of date print(colorstr('github: '), end='') try: @@ -123,9 +123,9 @@ def check_git_status(): assert check_online(), 'skipping check (offline)' cmd = 'git fetch && git config --get remote.origin.url' - url = subprocess.check_output(cmd, shell=True).decode().strip().rstrip('.git') # github repo url - branch = subprocess.check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out - n = int(subprocess.check_output(f'git rev-list {branch}..origin/master --count', shell=True)) # commits behind + url = check_output(cmd, shell=True, timeout=5).decode().strip().rstrip('.git') # git fetch + branch = check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out + n = int(check_output(f'git rev-list {branch}..origin/master --count', shell=True)) # commits behind if n > 0: s = f"⚠️ WARNING: code is out of date by {n} commit{'s' * (n > 1)}. " \ f"Use 'git pull' to update or 'git clone {url}' to download latest." @@ -133,7 +133,7 @@ def check_git_status(): s = f'up to date with {url} ✅' print(emojis(s)) # emoji-safe except Exception as e: - print(e) + print(f'{e}{err_msg}') def check_python(minimum='3.7.0', required=True): @@ -166,7 +166,7 @@ def check_requirements(requirements='requirements.txt', exclude=()): n += 1 print(f"{prefix} {r} not found and is required by YOLOv5, attempting auto-update...") try: - print(subprocess.check_output(f"pip install '{r}'", shell=True).decode()) + print(check_output(f"pip install '{r}'", shell=True).decode()) except Exception as e: print(f'{prefix} {e}') From 317f2ccc9d4a16661cc102660cab54084421b516 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 4 Jun 2021 21:34:36 +0200 Subject: [PATCH 0259/1976] Improved `check_requirements()` offline-handling (#3466) Improve robustness of `check_requirements()` function to offline environments (do not attempt pip installs when offline). 
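For reference, a minimal usage sketch of the `timeout()` class added above. This is an annotation, not part of any patch: it assumes a Unix host (`signal.SIGALRM` does not exist on Windows, which is also why the follow-up commit moves `check_git_status()` to `check_output(..., timeout=5)`), it assumes the YOLOv5 repo root is on `sys.path`, and the `time.sleep()` calls are placeholders for real work.

```python
import time

from utils.general import timeout  # class added in the patch above

# Context-manager form: SIGALRM fires after 2 s, _timeout_handler raises
# TimeoutError, and __exit__ suppresses it (suppress_timeout_errors=True).
with timeout(2, timeout_msg='sleep cut short'):
    time.sleep(10)  # placeholder for a slow call; interrupted after ~2 s

# Decorator form, available because timeout subclasses contextlib.ContextDecorator:
@timeout(2)
def slow_op():
    time.sleep(10)  # likewise interrupted after ~2 s

slow_op()
print('continued past both timeouts')  # reached after ~4 s total
```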
--- utils/general.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index d9ee432dcae3..a12b0aafba0e 100755 --- a/utils/general.py +++ b/utils/general.py @@ -163,10 +163,11 @@ def check_requirements(requirements='requirements.txt', exclude=()): try: pkg.require(r) except Exception as e: # DistributionNotFound or VersionConflict if requirements not met - n += 1 print(f"{prefix} {r} not found and is required by YOLOv5, attempting auto-update...") try: + assert check_online(), f"'pip install {r}' skipped (offline)" print(check_output(f"pip install '{r}'", shell=True).decode()) + n += 1 except Exception as e: print(f'{prefix} {e}') From 044daafd9da4a14331a3885711695592a0317b39 Mon Sep 17 00:00:00 2001 From: Sam_S Date: Sat, 5 Jun 2021 00:01:58 +0400 Subject: [PATCH 0260/1976] Add `output_names` argument for ONNX export with dynamic axes (#3456) * Add output names & dynamic axes for onnx export Add output_names and dynamic_axes names for all outputs in torch.onnx.export. The first four outputs of the model will have names output0, output1, output2, output3 * use first output only + cleanup Co-authored-by: Samridha Shrestha Co-authored-by: Glenn Jocher --- models/export.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/models/export.py b/models/export.py index 0d1147938e37..2db6a7699953 100644 --- a/models/export.py +++ b/models/export.py @@ -96,11 +96,14 @@ print(f'{prefix} starting export with onnx {onnx.__version__}...') f = opt.weights.replace('.pt', '.onnx') # filename - torch.onnx.export(model, img, f, verbose=False, opset_version=opt.opset_version, input_names=['images'], + torch.onnx.export(model, img, f, verbose=False, opset_version=opt.opset_version, training=torch.onnx.TrainingMode.TRAINING if opt.train else torch.onnx.TrainingMode.EVAL, do_constant_folding=not opt.train, - dynamic_axes={'images': {0: 'batch', 2: 'height', 3: 'width'}, # size(1,3,640,640) - 'output': {0: 'batch', 2: 'y', 3: 'x'}} if opt.dynamic else None) + input_names=['images'], + output_names=['output'], + dynamic_axes={'images': {0: 'batch', 2: 'height', 3: 'width'}, # shape(1,3,640,640) + 'output': {0: 'batch', 1: 'anchors'} # shape(1,25200,85) + } if opt.dynamic else None) # Checks model_onnx = onnx.load(f) # load onnx model From b31229ae897f6a93438882b6d2f45607a86c9640 Mon Sep 17 00:00:00 2001 From: edificewang <609552430@qq.com> Date: Sat, 5 Jun 2021 04:28:34 +0800 Subject: [PATCH 0261/1976] Revert FP16 `test.py` and `detect.py` inference to FP32 default (#3423) * fixed inference bug ,while use half precision * replace --use-half with --half * replace space and PEP8 in detect.py * PEP8 detect.py * update --half help comment * Update test.py * revert space Co-authored-by: Glenn Jocher --- detect.py | 3 ++- test.py | 2 ++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/detect.py b/detect.py index c6b76d981541..aba87687e666 100644 --- a/detect.py +++ b/detect.py @@ -28,7 +28,7 @@ def detect(opt): # Initialize set_logging() device = select_device(opt.device) - half = device.type != 'cpu' # half precision only supported on CUDA + half = opt.half and device.type != 'cpu' # half precision only supported on CUDA # Load model model = attempt_load(weights, map_location=device) # load FP32 model @@ -172,6 +172,7 @@ def detect(opt): parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)') parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels') 
parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences') + parser.add_argument('--half', type=bool, default=False, help='use FP16 half-precision inference') opt = parser.parse_args() print(opt) check_requirements(exclude=('tensorboard', 'pycocotools', 'thop')) diff --git a/test.py b/test.py index 0716c5d8b93c..113316ff0b8b 100644 --- a/test.py +++ b/test.py @@ -306,6 +306,7 @@ def test(data, parser.add_argument('--project', default='runs/test', help='save to project/name') parser.add_argument('--name', default='exp', help='save to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--half', type=bool, default=False, help='use FP16 half-precision inference') opt = parser.parse_args() opt.save_json |= opt.data.endswith('coco.yaml') opt.data = check_file(opt.data) # check file @@ -326,6 +327,7 @@ def test(data, save_txt=opt.save_txt | opt.save_hybrid, save_hybrid=opt.save_hybrid, save_conf=opt.save_conf, + half_precision=opt.half, opt=opt ) From 739451da5a5d28e03f745175361f310bafd99707 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 4 Jun 2021 23:45:24 +0200 Subject: [PATCH 0262/1976] Add additional links/resources to stale.yml message (#3467) * Update stale.yml * cleanup * Update stale.yml * reformat --- .github/workflows/stale.yml | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 0a094e237b34..2332cf5d53db 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -10,8 +10,26 @@ jobs: - uses: actions/stale@v3 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - stale-issue-message: 'This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.' - stale-pr-message: 'This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.' + stale-issue-message: | + 👋 Hello @${{ github.actor }}, this issue has been automatically marked as stale because it has not had recent activity. Please note it will be closed if no further activity occurs. + + Access additional [YOLOv5](https://ultralytics.com/yolov5) 🚀 resources: + - **Wiki** – https://github.com/ultralytics/yolov5/wiki + - **Tutorials** – https://github.com/ultralytics/yolov5#tutorials + - **Docs** – https://docs.ultralytics.com + + Access additional [Ultralytics](https://ultralytics.com) ⚡ resources: + - **Ultralytics HUB** – https://hub.ultralytics.com + - **Vision API** – https://ultralytics.com/yolov5 + - **About Us** – https://ultralytics.com/about + - **Join Our Team** – https://ultralytics.com/work + - **Contact Us** – https://ultralytics.com/contact + + Feel free to inform us of any other **issues** you discover or **feature requests** that come to mind in the future. Pull Requests (PRs) are also always welcomed! + + Thank you for your contributions to YOLOv5 🚀 and Vision AI ⭐! + + stale-pr-message: 'This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions YOLOv5 🚀 and Vision AI ⭐.' 
days-before-stale: 30 days-before-close: 5 exempt-issue-labels: 'documentation,tutorial' From 3597d280eeee5cd2049999f7b1a5640cf0e1c89a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 4 Jun 2021 23:49:43 +0200 Subject: [PATCH 0263/1976] Update stale.yml HUB URL (#3468) --- .github/workflows/stale.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 2332cf5d53db..ec24517fd659 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -19,7 +19,7 @@ jobs: - **Docs** – https://docs.ultralytics.com Access additional [Ultralytics](https://ultralytics.com) ⚡ resources: - - **Ultralytics HUB** – https://hub.ultralytics.com + - **Ultralytics HUB** – https://ultralytics.com/pricing - **Vision API** – https://ultralytics.com/yolov5 - **About Us** – https://ultralytics.com/about - **Join Our Team** – https://ultralytics.com/work From cf4f95bc5f5ee2027e5819e5ec7c3f9ae822d433 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 6 Jun 2021 18:06:24 +0200 Subject: [PATCH 0264/1976] Stale `github.actor` bug fix (#3483) --- .github/workflows/stale.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index ec24517fd659..a81e4007cffb 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -11,7 +11,7 @@ jobs: with: repo-token: ${{ secrets.GITHUB_TOKEN }} stale-issue-message: | - 👋 Hello @${{ github.actor }}, this issue has been automatically marked as stale because it has not had recent activity. Please note it will be closed if no further activity occurs. + 👋 Hello, this issue has been automatically marked as stale because it has not had recent activity. Please note it will be closed if no further activity occurs. Access additional [YOLOv5](https://ultralytics.com/yolov5) 🚀 resources: - **Wiki** – https://github.com/ultralytics/yolov5/wiki From a1c3572bc9e0db60f9978dcf047435a703f58a93 Mon Sep 17 00:00:00 2001 From: Yonghye Kwon Date: Mon, 7 Jun 2021 01:39:36 +0900 Subject: [PATCH 0265/1976] Explicit `model.eval()` call `if opt.train=False` (#3475) * call model.eval() when opt.train is False call model.eval() when opt.train is False * single-line if statement * cleanup Co-authored-by: Glenn Jocher --- models/export.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/models/export.py b/models/export.py index 2db6a7699953..6e3e1207f659 100644 --- a/models/export.py +++ b/models/export.py @@ -58,8 +58,7 @@ # Update model if opt.half: img, model = img.half(), model.half() # to FP16 - if opt.train: - model.train() # training mode (no grid construction in Detect layer) + model.train() if opt.train else model.eval() # training mode = no Detect() layer grid construction for k, m in model.named_modules(): m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility if isinstance(m, models.common.Conv): # assign export-friendly activations From 90b7895d652c3bd3d361b2d6e9aee900fd67f5f7 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 7 Jun 2021 10:03:47 +0200 Subject: [PATCH 0266/1976] check_requirements() exclude `opencv-python` (#3495) Fix for 3rd party or contrib versions of installed OpenCV as in https://github.com/ultralytics/yolov5/issues/3494. 
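Why the exclusion works: `check_requirements()` resolves each requirement with `pkg_resources`, which matches on distribution name, so a machine with `opencv-python-headless` or `opencv-contrib-python` installed fails `pkg.require('opencv-python')` even though `import cv2` succeeds, and would trigger a needless pip auto-update. A hedged sketch mirroring the hubconf.py call in the diff that follows:

```python
from pathlib import Path

from utils.general import check_requirements

# Mirrors the hubconf.py call below: skipping 'opencv-python' keeps headless
# and contrib OpenCV installs from failing pkg.require() and being needlessly
# reinstalled over by the auto-update path.
check_requirements(requirements=Path(__file__).parent / 'requirements.txt',
                   exclude=('tensorboard', 'pycocotools', 'thop', 'opencv-python'))
```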
--- hubconf.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hubconf.py b/hubconf.py index 40bbb1ed0826..bedbee18f87f 100644 --- a/hubconf.py +++ b/hubconf.py @@ -30,7 +30,8 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo from utils.google_utils import attempt_download from utils.torch_utils import select_device - check_requirements(Path(__file__).parent / 'requirements.txt', exclude=('tensorboard', 'pycocotools', 'thop')) + check_requirements(requirements=Path(__file__).parent / 'requirements.txt', + exclude=('tensorboard', 'pycocotools', 'thop', 'opencv-python')) set_logging(verbose=verbose) fname = Path(name).with_suffix('.pt') # checkpoint filename From 3f03acb3dba3b4b3a4674fa9bdd6e73fbcbfae6c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 7 Jun 2021 15:38:51 +0200 Subject: [PATCH 0267/1976] check_requirements() exclude `opencv-python` (#3507) Duplicate of #3495 merged to `develop`. This PR will be merged to `master`. Fixes https://github.com/ultralytics/yolov5/issues/3494. --- hubconf.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hubconf.py b/hubconf.py index f74e70c85a65..a52aae9fd1b7 100644 --- a/hubconf.py +++ b/hubconf.py @@ -30,7 +30,8 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo from utils.google_utils import attempt_download from utils.torch_utils import select_device - check_requirements(Path(__file__).parent / 'requirements.txt', exclude=('tensorboard', 'pycocotools', 'thop')) + check_requirements(requirements=Path(__file__).parent / 'requirements.txt', + exclude=('tensorboard', 'pycocotools', 'thop', 'opencv-python')) set_logging(verbose=verbose) fname = Path(name).with_suffix('.pt') # checkpoint filename From 8d1ddc93c717c0708f9478636b7647a774e07521 Mon Sep 17 00:00:00 2001 From: Yonghye Kwon Date: Tue, 8 Jun 2021 01:56:41 +0900 Subject: [PATCH 0268/1976] Earlier `assert` for cpu and half option (#3508) * early assert for cpu and half option early assert for cpu and half option * Modified comment Modified comment --- models/export.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/models/export.py b/models/export.py index 6e3e1207f659..c03770178829 100644 --- a/models/export.py +++ b/models/export.py @@ -44,15 +44,13 @@ # Load PyTorch model device = select_device(opt.device) + assert not (opt.device.lower() == 'cpu' and opt.half), '--half only compatible with GPU export, i.e. use --device 0' model = attempt_load(opt.weights, map_location=device) # load FP32 model labels = model.names - # Checks + # Input gs = int(max(model.stride)) # grid size (max stride) opt.img_size = [check_img_size(x, gs) for x in opt.img_size] # verify img_size are gs-multiples - assert not (opt.device.lower() == 'cpu' and opt.half), '--half only compatible with GPU export, i.e. 
use --device 0' - - # Input img = torch.zeros(opt.batch_size, 3, *opt.img_size).to(device) # image size(1,3,320,192) iDetection # Update model From eede7dc48c8abd1b1ba7cae657e556a505e80549 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 7 Jun 2021 22:52:16 +0200 Subject: [PATCH 0269/1976] Update tutorial.ipynb (#3510) --- tutorial.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 97b128182d85..4e760b13bb41 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -530,7 +530,7 @@ "\n", "\n", "This is the **official YOLOv5 🚀 notebook** authored by **Ultralytics**, and is freely available for redistribution under the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/). \n", - "For more information please visit https://github.com/ultralytics/yolov5 and https://www.ultralytics.com. Thank you!" + "For more information please visit https://github.com/ultralytics/yolov5 and https://ultralytics.com. Thank you!" ] }, { From d986145b9a57b3c055e8cdea6b40cb979ebfe2e7 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 7 Jun 2021 23:21:34 +0200 Subject: [PATCH 0270/1976] Reduce test.py results spacing (#3511) --- test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test.py b/test.py index 113316ff0b8b..12141f71c2c1 100644 --- a/test.py +++ b/test.py @@ -95,7 +95,7 @@ def test(data, confusion_matrix = ConfusionMatrix(nc=nc) names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)} coco91class = coco80_to_coco91_class() - s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95') + s = ('%20s' + '%11s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95') p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0. loss = torch.zeros(3, device=device) jdict, stats, ap, ap_class, wandb_images = [], [], [], [], [] @@ -228,7 +228,7 @@ def test(data, nt = torch.zeros(1) # Print results - pf = '%20s' + '%12i' * 2 + '%12.3g' * 4 # print format + pf = '%20s' + '%11i' * 2 + '%11.3g' * 4 # print format print(pf % ('all', seen, nt.sum(), mp, mr, map50, map)) # Print results per class From abb2a96e91340df74b2526d925f2ecba24973dec Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 8 Jun 2021 00:39:02 +0200 Subject: [PATCH 0271/1976] Update README.md (#3512) * Update README.md Minor modifications * 850 width --- README.md | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index cefb82b0e9fd..3a785cc85003 100755 --- a/README.md +++ b/README.md @@ -1,5 +1,5 @@ - +   CI CPU testing @@ -30,19 +30,19 @@ This repository represents Ultralytics open-source research into future object d [assets]: https://github.com/ultralytics/yolov5/releases -Model |size
(pixels) |mAPval
0.5:0.95 |mAPtest
0.5:0.95 |mAPval
0.5 |Speed
V100 (ms) | |params
(M) |FLOPs
640 (B) ---- |--- |--- |--- |--- |--- |---|--- |--- -[YOLOv5s][assets] |640 |36.7 |36.7 |55.4 |**2.0** | |7.3 |17.0 -[YOLOv5m][assets] |640 |44.5 |44.5 |63.1 |2.7 | |21.4 |51.3 -[YOLOv5l][assets] |640 |48.2 |48.2 |66.9 |3.8 | |47.0 |115.4 -[YOLOv5x][assets] |640 |**50.4** |**50.4** |**68.8** |6.1 | |87.7 |218.8 +|Model |size
(pixels) |mAPval
0.5:0.95 |mAPtest
0.5:0.95 |mAPval
0.5 |Speed
V100 (ms) | |params
(M) |FLOPs
640 (B) +|--- |--- |--- |--- |--- |--- |---|--- |--- +|[YOLOv5s][assets] |640 |36.7 |36.7 |55.4 |**2.0** | |7.3 |17.0 +|[YOLOv5m][assets] |640 |44.5 |44.5 |63.1 |2.7 | |21.4 |51.3 +|[YOLOv5l][assets] |640 |48.2 |48.2 |66.9 |3.8 | |47.0 |115.4 +|[YOLOv5x][assets] |640 |**50.4** |**50.4** |**68.8** |6.1 | |87.7 |218.8 | | | | | | || | -[YOLOv5s6][assets] |1280 |43.3 |43.3 |61.9 |**4.3** | |12.7 |17.4 -[YOLOv5m6][assets] |1280 |50.5 |50.5 |68.7 |8.4 | |35.9 |52.4 -[YOLOv5l6][assets] |1280 |53.4 |53.4 |71.1 |12.3 | |77.2 |117.7 -[YOLOv5x6][assets] |1280 |**54.4** |**54.4** |**72.0** |22.4 | |141.8 |222.9 +|[YOLOv5s6][assets] |1280 |43.3 |43.3 |61.9 |**4.3** | |12.7 |17.4 +|[YOLOv5m6][assets] |1280 |50.5 |50.5 |68.7 |8.4 | |35.9 |52.4 +|[YOLOv5l6][assets] |1280 |53.4 |53.4 |71.1 |12.3 | |77.2 |117.7 +|[YOLOv5x6][assets] |1280 |**54.4** |**54.4** |**72.0** |22.4 | |141.8 |222.9 | | | | | | || | -[YOLOv5x6][assets] TTA |1280 |**55.0** |**55.0** |**72.0** |70.8 | |- |- +|[YOLOv5x6][assets] TTA |1280 |**55.0** |**55.0** |**72.0** |70.8 | |- |-
Table Notes (click to expand) From c058a61e3bb0e2ea4e862ee790afe709d86ca3d2 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 8 Jun 2021 10:11:06 +0200 Subject: [PATCH 0272/1976] Update greetings.yml revert greeting change as PRs will now merge to master. --- .github/workflows/greetings.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index 4e502fe9af7b..ee472297107e 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -16,7 +16,7 @@ jobs: git remote add upstream https://github.com/ultralytics/yolov5.git git fetch upstream git checkout feature # <----- replace 'feature' with local branch name - git rebase upstream/develop + git rebase upstream/master git push -u origin -f ``` - ✅ Verify all Continuous Integration (CI) **checks are passing**. From f3c3d2ce5d85ba77336a9d0a87c6a446732cdda6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 8 Jun 2021 10:22:10 +0200 Subject: [PATCH 0273/1976] Merge `develop` branch into `master` (#3518) * update ci-testing.yml (#3322) * update ci-testing.yml * update greetings.yml * bring back os matrix * update ci-testing.yml (#3322) * update ci-testing.yml * update greetings.yml * bring back os matrix * Enable direct `--weights URL` definition (#3373) * Enable direct `--weights URL` definition @KalenMike this PR will enable direct --weights URL definition. Example use case: ``` python train.py --weights https://storage.googleapis.com/bucket/dir/model.pt ``` * cleanup * bug fixes * weights = attempt_download(weights) * Update experimental.py * Update hubconf.py * return bug fix * comment mirror * min_bytes * Update tutorial.ipynb (#3368) add Open in Kaggle badge * `cv2.imread(img, -1)` for IMREAD_UNCHANGED (#3379) * Update datasets.py * comment Co-authored-by: Glenn Jocher * COCO evolution fix (#3388) * COCO evolution fix * cleanup * update print * print fix * Create `is_pip()` function (#3391) Returns `True` if file is part of pip package. Useful for contextual behavior modification. ```python def is_pip(): # Is file in a pip package? return 'site-packages' in Path(__file__).absolute().parts ``` * Revert "`cv2.imread(img, -1)` for IMREAD_UNCHANGED (#3379)" (#3395) This reverts commit 21a9607e00f1365b21d8c4bd81bdbf5fc0efea24. * Update FLOPs description (#3422) * Update README.md * Changing FLOPS to FLOPs. Co-authored-by: BuildTools * Parse URL authentication (#3424) * Parse URL authentication * urllib.parse.unquote() * improved error handling * improved error handling * remove %3F * update check_file() * Add FLOPs title to table (#3453) * Suppress jit trace warning + graph once (#3454) * Suppress jit trace warning + graph once Suppress harmless jit trace warning on TensorBoard add_graph call. Also fix multiple add_graph() calls bug, now only on batch 0. 
* Update train.py * Update MixUp augmentation `alpha=beta=32.0` (#3455) Per VOC empirical results https://github.com/ultralytics/yolov5/issues/3380#issuecomment-853001307 by @developer0hye * Add `timeout()` class (#3460) * Add `timeout()` class * rearrange order * Faster HSV augmentation (#3462) remove datatype conversion process that can be skipped * Add `check_git_status()` 5 second timeout (#3464) * Add check_git_status() 5 second timeout This should prevent the SSH Git bug that we were discussing @KalenMike * cleanup * replace timeout with check_output built-in timeout * Improved `check_requirements()` offline-handling (#3466) Improve robustness of `check_requirements()` function to offline environments (do not attempt pip installs when offline). * Add `output_names` argument for ONNX export with dynamic axes (#3456) * Add output names & dynamic axes for onnx export Add output_names and dynamic_axes names for all outputs in torch.onnx.export. The first four outputs of the model will have names output0, output1, output2, output3 * use first output only + cleanup Co-authored-by: Samridha Shrestha Co-authored-by: Glenn Jocher * Revert FP16 `test.py` and `detect.py` inference to FP32 default (#3423) * fixed inference bug ,while use half precision * replace --use-half with --half * replace space and PEP8 in detect.py * PEP8 detect.py * update --half help comment * Update test.py * revert space Co-authored-by: Glenn Jocher * Add additional links/resources to stale.yml message (#3467) * Update stale.yml * cleanup * Update stale.yml * reformat * Update stale.yml HUB URL (#3468) * Stale `github.actor` bug fix (#3483) * Explicit `model.eval()` call `if opt.train=False` (#3475) * call model.eval() when opt.train is False call model.eval() when opt.train is False * single-line if statement * cleanup Co-authored-by: Glenn Jocher * check_requirements() exclude `opencv-python` (#3495) Fix for 3rd party or contrib versions of installed OpenCV as in https://github.com/ultralytics/yolov5/issues/3494. * Earlier `assert` for cpu and half option (#3508) * early assert for cpu and half option early assert for cpu and half option * Modified comment Modified comment * Update tutorial.ipynb (#3510) * Reduce test.py results spacing (#3511) * Update README.md (#3512) * Update README.md Minor modifications * 850 width * Update greetings.yml revert greeting change as PRs will now merge to master. 
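One item in the summary above is easy to quantify: the MixUp change (#3455) raised the Beta parameters from 8.0 to 32.0. A standalone numeric sketch (an illustration, not part of any patch) of what that does to the sampled mixup ratio:

```python
import numpy as np

# Beta(a, a) has mean 0.5 and std sqrt(1 / (4 * (2a + 1))), so raising a
# from 8 to 32 concentrates the MixUp ratio r near 0.5, i.e. blends the
# two mosaics closer to 50/50.
rng = np.random.default_rng(0)
for a in (8.0, 32.0):
    r = rng.beta(a, a, 100_000)  # ratio samples, as drawn in __getitem__
    print(f'alpha=beta={a}: mean={r.mean():.3f} std={r.std():.3f}')
# expected: std ~0.121 for a=8.0 vs ~0.062 for a=32.0
```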
Co-authored-by: Piotr Skalski Co-authored-by: SkalskiP Co-authored-by: Peretz Cohen Co-authored-by: tudoulei <34886368+tudoulei@users.noreply.github.com> Co-authored-by: chocosaj Co-authored-by: BuildTools Co-authored-by: Yonghye Kwon Co-authored-by: Sam_S Co-authored-by: Samridha Shrestha Co-authored-by: edificewang <609552430@qq.com> --- .github/workflows/ci-testing.yml | 6 +-- .github/workflows/stale.yml | 22 +++++++++- README.md | 26 ++++++------ detect.py | 3 +- hubconf.py | 3 +- models/experimental.py | 3 +- models/export.py | 18 ++++---- models/yolo.py | 6 +-- requirements.txt | 2 +- test.py | 6 ++- train.py | 72 ++++++++++++++++---------------- tutorial.ipynb | 13 +++--- utils/datasets.py | 6 +-- utils/general.py | 54 ++++++++++++++++++------ utils/google_utils.py | 58 +++++++++++++++---------- utils/torch_utils.py | 12 +++--- 16 files changed, 187 insertions(+), 123 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index df508474a955..bb8b173cdb31 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -2,12 +2,10 @@ name: CI CPU testing on: # https://help.github.com/en/actions/reference/events-that-trigger-workflows push: - branches: [ master ] + branches: [ master, develop ] pull_request: # The branches below must be a subset of the branches above - branches: [ master ] - schedule: - - cron: '0 0 * * *' # Runs at 00:00 UTC every day + branches: [ master, develop ] jobs: cpu-tests: diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 0a094e237b34..a81e4007cffb 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -10,8 +10,26 @@ jobs: - uses: actions/stale@v3 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - stale-issue-message: 'This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.' - stale-pr-message: 'This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions.' + stale-issue-message: | + 👋 Hello, this issue has been automatically marked as stale because it has not had recent activity. Please note it will be closed if no further activity occurs. + + Access additional [YOLOv5](https://ultralytics.com/yolov5) 🚀 resources: + - **Wiki** – https://github.com/ultralytics/yolov5/wiki + - **Tutorials** – https://github.com/ultralytics/yolov5#tutorials + - **Docs** – https://docs.ultralytics.com + + Access additional [Ultralytics](https://ultralytics.com) ⚡ resources: + - **Ultralytics HUB** – https://ultralytics.com/pricing + - **Vision API** – https://ultralytics.com/yolov5 + - **About Us** – https://ultralytics.com/about + - **Join Our Team** – https://ultralytics.com/work + - **Contact Us** – https://ultralytics.com/contact + + Feel free to inform us of any other **issues** you discover or **feature requests** that come to mind in the future. Pull Requests (PRs) are also always welcomed! + + Thank you for your contributions to YOLOv5 🚀 and Vision AI ⭐! + + stale-pr-message: 'This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions YOLOv5 🚀 and Vision AI ⭐.' 
days-before-stale: 30 days-before-close: 5 exempt-issue-labels: 'documentation,tutorial' diff --git a/README.md b/README.md index a638657b313b..3a785cc85003 100755 --- a/README.md +++ b/README.md @@ -1,5 +1,5 @@ - +   CI CPU testing @@ -30,19 +30,19 @@ This repository represents Ultralytics open-source research into future object d [assets]: https://github.com/ultralytics/yolov5/releases -Model |size
(pixels) |mAPval
0.5:0.95 |mAPtest
0.5:0.95 |mAPval
0.5 |Speed
V100 (ms) | |params
(M) |FLOPS
640 (B) ---- |--- |--- |--- |--- |--- |---|--- |--- -[YOLOv5s][assets] |640 |36.7 |36.7 |55.4 |**2.0** | |7.3 |17.0 -[YOLOv5m][assets] |640 |44.5 |44.5 |63.1 |2.7 | |21.4 |51.3 -[YOLOv5l][assets] |640 |48.2 |48.2 |66.9 |3.8 | |47.0 |115.4 -[YOLOv5x][assets] |640 |**50.4** |**50.4** |**68.8** |6.1 | |87.7 |218.8 +|Model |size
(pixels) |mAPval
0.5:0.95 |mAPtest
0.5:0.95 |mAPval
0.5 |Speed
V100 (ms) | |params
(M) |FLOPs
640 (B) +|--- |--- |--- |--- |--- |--- |---|--- |--- +|[YOLOv5s][assets] |640 |36.7 |36.7 |55.4 |**2.0** | |7.3 |17.0 +|[YOLOv5m][assets] |640 |44.5 |44.5 |63.1 |2.7 | |21.4 |51.3 +|[YOLOv5l][assets] |640 |48.2 |48.2 |66.9 |3.8 | |47.0 |115.4 +|[YOLOv5x][assets] |640 |**50.4** |**50.4** |**68.8** |6.1 | |87.7 |218.8 | | | | | | || | -[YOLOv5s6][assets] |1280 |43.3 |43.3 |61.9 |**4.3** | |12.7 |17.4 -[YOLOv5m6][assets] |1280 |50.5 |50.5 |68.7 |8.4 | |35.9 |52.4 -[YOLOv5l6][assets] |1280 |53.4 |53.4 |71.1 |12.3 | |77.2 |117.7 -[YOLOv5x6][assets] |1280 |**54.4** |**54.4** |**72.0** |22.4 | |141.8 |222.9 +|[YOLOv5s6][assets] |1280 |43.3 |43.3 |61.9 |**4.3** | |12.7 |17.4 +|[YOLOv5m6][assets] |1280 |50.5 |50.5 |68.7 |8.4 | |35.9 |52.4 +|[YOLOv5l6][assets] |1280 |53.4 |53.4 |71.1 |12.3 | |77.2 |117.7 +|[YOLOv5x6][assets] |1280 |**54.4** |**54.4** |**72.0** |22.4 | |141.8 |222.9 | | | | | | || | -[YOLOv5x6][assets] TTA |1280 |**55.0** |**55.0** |**72.0** |70.8 | |- |- +|[YOLOv5x6][assets] TTA |1280 |**55.0** |**55.0** |**72.0** |70.8 | |- |-
Table Notes (click to expand) @@ -112,7 +112,7 @@ Namespace(agnostic_nms=False, augment=False, classes=None, conf_thres=0.25, devi YOLOv5 v4.0-96-g83dc1b4 torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB) Fusing layers... -Model Summary: 224 layers, 7266973 parameters, 0 gradients, 17.0 GFLOPS +Model Summary: 224 layers, 7266973 parameters, 0 gradients, 17.0 GFLOPs image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, Done. (0.010s) image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 1 tie, Done. (0.011s) Results saved to runs/detect/exp2 diff --git a/detect.py b/detect.py index c6b76d981541..aba87687e666 100644 --- a/detect.py +++ b/detect.py @@ -28,7 +28,7 @@ def detect(opt): # Initialize set_logging() device = select_device(opt.device) - half = device.type != 'cpu' # half precision only supported on CUDA + half = opt.half and device.type != 'cpu' # half precision only supported on CUDA # Load model model = attempt_load(weights, map_location=device) # load FP32 model @@ -172,6 +172,7 @@ def detect(opt): parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)') parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels') parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences') + parser.add_argument('--half', type=bool, default=False, help='use FP16 half-precision inference') opt = parser.parse_args() print(opt) check_requirements(exclude=('tensorboard', 'pycocotools', 'thop')) diff --git a/hubconf.py b/hubconf.py index a52aae9fd1b7..bedbee18f87f 100644 --- a/hubconf.py +++ b/hubconf.py @@ -42,8 +42,7 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo cfg = list((Path(__file__).parent / 'models').rglob(f'{name}.yaml'))[0] # model.yaml path model = Model(cfg, channels, classes) # create model if pretrained: - attempt_download(fname) # download if not found locally - ckpt = torch.load(fname, map_location=torch.device('cpu')) # load + ckpt = torch.load(attempt_download(fname), map_location=torch.device('cpu')) # load msd = model.state_dict() # model state_dict csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 csd = {k: v for k, v in csd.items() if msd[k].shape == v.shape} # filter diff --git a/models/experimental.py b/models/experimental.py index afa787907104..d316b18373c3 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -116,8 +116,7 @@ def attempt_load(weights, map_location=None, inplace=True): # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a model = Ensemble() for w in weights if isinstance(weights, list) else [weights]: - attempt_download(w) - ckpt = torch.load(w, map_location=map_location) # load + ckpt = torch.load(attempt_download(w), map_location=map_location) # load model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval()) # FP32 model # Compatibility updates diff --git a/models/export.py b/models/export.py index 0d1147938e37..c03770178829 100644 --- a/models/export.py +++ b/models/export.py @@ -44,22 +44,19 @@ # Load PyTorch model device = select_device(opt.device) + assert not (opt.device.lower() == 'cpu' and opt.half), '--half only compatible with GPU export, i.e. 
use --device 0' model = attempt_load(opt.weights, map_location=device) # load FP32 model labels = model.names - # Checks + # Input gs = int(max(model.stride)) # grid size (max stride) opt.img_size = [check_img_size(x, gs) for x in opt.img_size] # verify img_size are gs-multiples - assert not (opt.device.lower() == 'cpu' and opt.half), '--half only compatible with GPU export, i.e. use --device 0' - - # Input img = torch.zeros(opt.batch_size, 3, *opt.img_size).to(device) # image size(1,3,320,192) iDetection # Update model if opt.half: img, model = img.half(), model.half() # to FP16 - if opt.train: - model.train() # training mode (no grid construction in Detect layer) + model.train() if opt.train else model.eval() # training mode = no Detect() layer grid construction for k, m in model.named_modules(): m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility if isinstance(m, models.common.Conv): # assign export-friendly activations @@ -96,11 +93,14 @@ print(f'{prefix} starting export with onnx {onnx.__version__}...') f = opt.weights.replace('.pt', '.onnx') # filename - torch.onnx.export(model, img, f, verbose=False, opset_version=opt.opset_version, input_names=['images'], + torch.onnx.export(model, img, f, verbose=False, opset_version=opt.opset_version, training=torch.onnx.TrainingMode.TRAINING if opt.train else torch.onnx.TrainingMode.EVAL, do_constant_folding=not opt.train, - dynamic_axes={'images': {0: 'batch', 2: 'height', 3: 'width'}, # size(1,3,640,640) - 'output': {0: 'batch', 2: 'y', 3: 'x'}} if opt.dynamic else None) + input_names=['images'], + output_names=['output'], + dynamic_axes={'images': {0: 'batch', 2: 'height', 3: 'width'}, # shape(1,3,640,640) + 'output': {0: 'batch', 1: 'anchors'} # shape(1,25200,85) + } if opt.dynamic else None) # Checks model_onnx = onnx.load(f) # load onnx model diff --git a/models/yolo.py b/models/yolo.py index 2844cd0410e0..1a7be913023c 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -21,7 +21,7 @@ select_device, copy_attr try: - import thop # for FLOPS computation + import thop # for FLOPs computation except ImportError: thop = None @@ -140,13 +140,13 @@ def forward_once(self, x, profile=False): x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers if profile: - o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPS + o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPs t = time_synchronized() for _ in range(10): _ = m(x) dt.append((time_synchronized() - t) * 100) if m == self.model[0]: - logger.info(f"{'time (ms)':>10s} {'GFLOPS':>10s} {'params':>10s} {'module'}") + logger.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s} {'module'}") logger.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f} {m.type}') x = m(x) # run diff --git a/requirements.txt b/requirements.txt index 1c07c651150e..a20fb6ad0ea5 100755 --- a/requirements.txt +++ b/requirements.txt @@ -27,4 +27,4 @@ pandas # extras -------------------------------------- # Cython # for pycocotools https://github.com/cocodataset/cocoapi/issues/172 pycocotools>=2.0 # COCO mAP -thop # FLOPS computation +thop # FLOPs computation diff --git a/test.py b/test.py index 0716c5d8b93c..12141f71c2c1 100644 --- a/test.py +++ b/test.py @@ -95,7 +95,7 @@ def test(data, confusion_matrix = ConfusionMatrix(nc=nc) names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)} coco91class = coco80_to_coco91_class() - s = ('%20s' + '%12s' * 6) % ('Class', 
'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95') + s = ('%20s' + '%11s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95') p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0. loss = torch.zeros(3, device=device) jdict, stats, ap, ap_class, wandb_images = [], [], [], [], [] @@ -228,7 +228,7 @@ def test(data, nt = torch.zeros(1) # Print results - pf = '%20s' + '%12i' * 2 + '%12.3g' * 4 # print format + pf = '%20s' + '%11i' * 2 + '%11.3g' * 4 # print format print(pf % ('all', seen, nt.sum(), mp, mr, map50, map)) # Print results per class @@ -306,6 +306,7 @@ def test(data, parser.add_argument('--project', default='runs/test', help='save to project/name') parser.add_argument('--name', default='exp', help='save to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--half', type=bool, default=False, help='use FP16 half-precision inference') opt = parser.parse_args() opt.save_json |= opt.data.endswith('coco.yaml') opt.data = check_file(opt.data) # check file @@ -326,6 +327,7 @@ def test(data, save_txt=opt.save_txt | opt.save_hybrid, save_hybrid=opt.save_hybrid, save_conf=opt.save_conf, + half_precision=opt.half, opt=opt ) diff --git a/train.py b/train.py index 3e8d5075aef1..093a6197ff06 100644 --- a/train.py +++ b/train.py @@ -4,6 +4,7 @@ import os import random import time +import warnings from copy import deepcopy from pathlib import Path from threading import Thread @@ -62,7 +63,6 @@ def train(hyp, opt, device, tb_writer=None): init_seeds(2 + rank) with open(opt.data) as f: data_dict = yaml.safe_load(f) # data dict - is_coco = opt.data.endswith('coco.yaml') # Logging- Doing this before checking the dataset. Might update data_dict loggers = {'wandb': None} # loggers dict @@ -78,12 +78,13 @@ def train(hyp, opt, device, tb_writer=None): nc = 1 if opt.single_cls else int(data_dict['nc']) # number of classes names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data) # check + is_coco = opt.data.endswith('coco.yaml') and nc == 80 # COCO dataset # Model pretrained = weights.endswith('.pt') if pretrained: with torch_distributed_zero_first(rank): - attempt_download(weights) # download if not found locally + weights = attempt_download(weights) # download if not found locally ckpt = torch.load(weights, map_location=device) # load checkpoint model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create exclude = ['anchor'] if (opt.cfg or hyp.get('anchors')) and not opt.resume else [] # exclude keys @@ -323,18 +324,19 @@ def train(hyp, opt, device, tb_writer=None): mloss = (mloss * i + loss_items) / (i + 1) # update mean losses mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0) # (GB) s = ('%10s' * 2 + '%10.4g' * 6) % ( - '%g/%g' % (epoch, epochs - 1), mem, *mloss, targets.shape[0], imgs.shape[-1]) + f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1]) pbar.set_description(s) # Plot if plots and ni < 3: f = save_dir / f'train_batch{ni}.jpg' # filename Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start() - if tb_writer: - tb_writer.add_graph(torch.jit.trace(de_parallel(model), imgs, strict=False), []) # model graph - # tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch) + if tb_writer and 
ni == 0: + with warnings.catch_warnings(): + warnings.simplefilter('ignore') # suppress jit trace warning + tb_writer.add_graph(torch.jit.trace(de_parallel(model), imgs, strict=False), []) # graph elif plots and ni == 10 and wandb_logger.wandb: - wandb_logger.log({"Mosaics": [wandb_logger.wandb.Image(str(x), caption=x.name) for x in + wandb_logger.log({'Mosaics': [wandb_logger.wandb.Image(str(x), caption=x.name) for x in save_dir.glob('train*.jpg') if x.exists()]}) # end batch ------------------------------------------------------------------------------------------------ @@ -358,6 +360,7 @@ def train(hyp, opt, device, tb_writer=None): single_cls=opt.single_cls, dataloader=testloader, save_dir=save_dir, + save_json=is_coco and final_epoch, verbose=nc < 50 and final_epoch, plots=plots and final_epoch, wandb_logger=wandb_logger, @@ -409,41 +412,38 @@ def train(hyp, opt, device, tb_writer=None): # end epoch ---------------------------------------------------------------------------------------------------- # end training if rank in [-1, 0]: - # Plots + logger.info(f'{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.\n') if plots: plot_results(save_dir=save_dir) # save as results.png if wandb_logger.wandb: files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]] wandb_logger.log({"Results": [wandb_logger.wandb.Image(str(save_dir / f), caption=f) for f in files if (save_dir / f).exists()]}) - # Test best.pt - logger.info('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600)) - if opt.data.endswith('coco.yaml') and nc == 80: # if COCO - for m in [last, best] if best.exists() else [last]: # speed, mAP tests - results, _, _ = test.test(opt.data, - batch_size=batch_size * 2, - imgsz=imgsz_test, - conf_thres=0.001, - iou_thres=0.7, - model=attempt_load(m, device).half(), - single_cls=opt.single_cls, - dataloader=testloader, - save_dir=save_dir, - save_json=True, - plots=False, - is_coco=is_coco) - - # Strip optimizers - final = best if best.exists() else last # final model - for f in last, best: - if f.exists(): - strip_optimizer(f) # strip optimizers - if opt.bucket: - os.system(f'gsutil cp {final} gs://{opt.bucket}/weights') # upload - if wandb_logger.wandb and not opt.evolve: # Log the stripped model - wandb_logger.wandb.log_artifact(str(final), type='model', - name='run_' + wandb_logger.wandb_run.id + '_model', - aliases=['latest', 'best', 'stripped']) + + if not opt.evolve: + if is_coco: # COCO dataset + for m in [last, best] if best.exists() else [last]: # speed, mAP tests + results, _, _ = test.test(opt.data, + batch_size=batch_size * 2, + imgsz=imgsz_test, + conf_thres=0.001, + iou_thres=0.7, + model=attempt_load(m, device).half(), + single_cls=opt.single_cls, + dataloader=testloader, + save_dir=save_dir, + save_json=True, + plots=False, + is_coco=is_coco) + + # Strip optimizers + for f in last, best: + if f.exists(): + strip_optimizer(f) # strip optimizers + if wandb_logger.wandb: # Log the stripped model + wandb_logger.wandb.log_artifact(str(best if best.exists() else last), type='model', + name='run_' + wandb_logger.wandb_run.id + '_model', + aliases=['latest', 'best', 'stripped']) wandb_logger.finish_run() else: dist.destroy_process_group() diff --git a/tutorial.ipynb b/tutorial.ipynb index 3954feadfcb2..4e760b13bb41 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -517,7 +517,8 @@ "colab_type": "text" }, "source": [ - "\"Open" + "\"Open", + "\"Kaggle\"" ] }, { @@ 
-529,7 +530,7 @@ "\n", "\n", "This is the **official YOLOv5 🚀 notebook** authored by **Ultralytics**, and is freely available for redistribution under the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/). \n", - "For more information please visit https://github.com/ultralytics/yolov5 and https://www.ultralytics.com. Thank you!" + "For more information please visit https://github.com/ultralytics/yolov5 and https://ultralytics.com. Thank you!" ] }, { @@ -610,7 +611,7 @@ "YOLOv5 🚀 v5.0-1-g0f395b3 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", "\n", "Fusing layers... \n", - "Model Summary: 224 layers, 7266973 parameters, 0 gradients, 17.0 GFLOPS\n", + "Model Summary: 224 layers, 7266973 parameters, 0 gradients, 17.0 GFLOPs\n", "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, Done. (0.008s)\n", "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, Done. (0.008s)\n", "Results saved to runs/detect/exp\n", @@ -733,7 +734,7 @@ "100% 168M/168M [00:05<00:00, 32.3MB/s]\n", "\n", "Fusing layers... \n", - "Model Summary: 476 layers, 87730285 parameters, 0 gradients, 218.8 GFLOPS\n", + "Model Summary: 476 layers, 87730285 parameters, 0 gradients, 218.8 GFLOPs\n", "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco/val2017' images and labels... 4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:01<00:00, 3102.29it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: ../coco/val2017.cache\n", " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:23<00:00, 1.87it/s]\n", @@ -963,7 +964,7 @@ " 22 [-1, 10] 1 0 models.common.Concat [1] \n", " 23 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n", " 24 [17, 20, 23] 1 229245 models.yolo.Detect [80, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n", - "Model Summary: 283 layers, 7276605 parameters, 7276605 gradients, 17.1 GFLOPS\n", + "Model Summary: 283 layers, 7276605 parameters, 7276605 gradients, 17.1 GFLOPs\n", "\n", "Transferred 362/362 items from yolov5s.pt\n", "Scaled weight_decay = 0.0005\n", @@ -1260,4 +1261,4 @@ "outputs": [] } ] -} \ No newline at end of file +} diff --git a/utils/datasets.py b/utils/datasets.py index 7dd181400da5..b6e43b94cfe9 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -535,7 +535,7 @@ def __getitem__(self, index): # MixUp https://arxiv.org/pdf/1710.09412.pdf if random.random() < hyp['mixup']: img2, labels2 = load_mosaic(self, random.randint(0, self.n - 1)) - r = np.random.beta(8.0, 8.0) # mixup ratio, alpha=beta=8.0 + r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0 img = (img * r + img2 * (1 - r)).astype(np.uint8) labels = np.concatenate((labels, labels2), 0) @@ -655,12 +655,12 @@ def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5): hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV)) dtype = img.dtype # uint8 - x = np.arange(0, 256, dtype=np.int16) + x = np.arange(0, 256, dtype=r.dtype) lut_hue = ((x * r[0]) % 180).astype(dtype) lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) lut_val = np.clip(x * r[2], 0, 255).astype(dtype) - img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype) + img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))) cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed diff --git a/utils/general.py b/utils/general.py index 006e64859f32..a12b0aafba0e 100755 --- a/utils/general.py +++ b/utils/general.py @@ 
-1,5 +1,6 @@ # YOLOv5 general utils +import contextlib import glob import logging import math @@ -7,11 +8,13 @@ import platform import random import re -import subprocess +import signal import time +import urllib from itertools import repeat from multiprocessing.pool import ThreadPool from pathlib import Path +from subprocess import check_output import cv2 import numpy as np @@ -33,6 +36,26 @@ os.environ['NUMEXPR_MAX_THREADS'] = str(min(os.cpu_count(), 8)) # NumExpr max threads +class timeout(contextlib.ContextDecorator): + # Usage: @timeout(seconds) decorator or 'with timeout(seconds):' context manager + def __init__(self, seconds, *, timeout_msg='', suppress_timeout_errors=True): + self.seconds = int(seconds) + self.timeout_message = timeout_msg + self.suppress = bool(suppress_timeout_errors) + + def _timeout_handler(self, signum, frame): + raise TimeoutError(self.timeout_message) + + def __enter__(self): + signal.signal(signal.SIGALRM, self._timeout_handler) # Set handler for SIGALRM + signal.alarm(self.seconds) # start countdown for SIGALRM to be raised + + def __exit__(self, exc_type, exc_val, exc_tb): + signal.alarm(0) # Cancel SIGALRM if it's scheduled + if self.suppress and exc_type is TimeoutError: # Suppress TimeoutError + return True + + def set_logging(rank=-1, verbose=True): logging.basicConfig( format="%(message)s", @@ -53,12 +76,12 @@ def get_latest_run(search_dir='.'): def is_docker(): - # Is environment a Docker container + # Is environment a Docker container? return Path('/workspace').exists() # or Path('/.dockerenv').exists() def is_colab(): - # Is environment a Google Colab instance + # Is environment a Google Colab instance? try: import google.colab return True @@ -66,6 +89,11 @@ def is_colab(): return False +def is_pip(): + # Is file in a pip package? + return 'site-packages' in Path(__file__).absolute().parts + + def emojis(str=''): # Return platform-dependent emoji-safe version of string return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str @@ -80,13 +108,13 @@ def check_online(): # Check internet connectivity import socket try: - socket.create_connection(("1.1.1.1", 443), 5) # check host accesability + socket.create_connection(("1.1.1.1", 443), 5) # check host accessibility return True except OSError: return False -def check_git_status(): +def check_git_status(err_msg=', for updates see https://github.com/ultralytics/yolov5'): # Recommend 'git pull' if code is out of date print(colorstr('github: '), end='') try: @@ -95,9 +123,9 @@ def check_git_status(): assert check_online(), 'skipping check (offline)' cmd = 'git fetch && git config --get remote.origin.url' - url = subprocess.check_output(cmd, shell=True).decode().strip().rstrip('.git') # github repo url - branch = subprocess.check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out - n = int(subprocess.check_output(f'git rev-list {branch}..origin/master --count', shell=True)) # commits behind + url = check_output(cmd, shell=True, timeout=5).decode().strip().rstrip('.git') # git fetch + branch = check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out + n = int(check_output(f'git rev-list {branch}..origin/master --count', shell=True)) # commits behind if n > 0: s = f"⚠️ WARNING: code is out of date by {n} commit{'s' * (n > 1)}. " \ f"Use 'git pull' to update or 'git clone {url}' to download latest." 
@@ -105,7 +133,7 @@ def check_git_status(): s = f'up to date with {url} ✅' print(emojis(s)) # emoji-safe except Exception as e: - print(e) + print(f'{e}{err_msg}') def check_python(minimum='3.7.0', required=True): @@ -135,10 +163,11 @@ def check_requirements(requirements='requirements.txt', exclude=()): try: pkg.require(r) except Exception as e: # DistributionNotFound or VersionConflict if requirements not met - n += 1 print(f"{prefix} {r} not found and is required by YOLOv5, attempting auto-update...") try: - print(subprocess.check_output(f"pip install '{r}'", shell=True).decode()) + assert check_online(), f"'pip install {r}' skipped (offline)" + print(check_output(f"pip install '{r}'", shell=True).decode()) + n += 1 except Exception as e: print(f'{prefix} {e}') @@ -178,7 +207,8 @@ def check_file(file): if Path(file).is_file() or file == '': # exists return file elif file.startswith(('http://', 'https://')): # download - url, file = file, Path(file).name + url, file = file, Path(urllib.parse.unquote(str(file))).name # url, file (decode '%2F' to '/' etc.) + file = file.split('?')[0] # parse authentication https://url.com/file.txt?auth... print(f'Downloading {url} to {file}...') torch.hub.download_url_to_file(url, file) assert Path(file).exists() and Path(file).stat().st_size > 0, f'File download failed: {url}' # check diff --git a/utils/google_utils.py b/utils/google_utils.py index 63d3e5b212f3..aefc7de2db2e 100644 --- a/utils/google_utils.py +++ b/utils/google_utils.py @@ -4,6 +4,7 @@ import platform import subprocess import time +import urllib from pathlib import Path import requests @@ -16,11 +17,39 @@ def gsutil_getsize(url=''): return eval(s.split(' ')[0]) if len(s) else 0 # bytes -def attempt_download(file, repo='ultralytics/yolov5'): +def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''): + # Attempts to download file from url or url2, checks and removes incomplete downloads < min_bytes + file = Path(file) + assert_msg = f"Downloaded file '{file}' does not exist or size is < min_bytes={min_bytes}" + try: # url1 + print(f'Downloading {url} to {file}...') + torch.hub.download_url_to_file(url, str(file)) + assert file.exists() and file.stat().st_size > min_bytes, assert_msg # check + except Exception as e: # url2 + file.unlink(missing_ok=True) # remove partial downloads + print(f'ERROR: {e}\nRe-attempting {url2 or url} to {file}...') + os.system(f"curl -L '{url2 or url}' -o '{file}' --retry 3 -C -") # curl download, retry and resume on fail + finally: + if not file.exists() or file.stat().st_size < min_bytes: # check + file.unlink(missing_ok=True) # remove partial downloads + print(f"ERROR: {assert_msg}\n{error_msg}") + print('') + + +def attempt_download(file, repo='ultralytics/yolov5'): # from utils.google_utils import *; attempt_download() # Attempt file download if does not exist file = Path(str(file).strip().replace("'", '')) if not file.exists(): + # URL specified + name = Path(urllib.parse.unquote(str(file))).name # decode '%2F' to '/' etc. + if str(file).startswith(('http:/', 'https:/')): # download + url = str(file).replace(':/', '://') # Pathlib turns :// -> :/ + name = name.split('?')[0] # parse authentication https://url.com/file.txt?auth... 
+ safe_download(file=name, url=url, min_bytes=1E5) + return name + + # GitHub assets file.parent.mkdir(parents=True, exist_ok=True) # make parent dir (if required) try: response = requests.get(f'https://api.github.com/repos/{repo}/releases/latest').json() # github api @@ -34,27 +63,14 @@ def attempt_download(file, repo='ultralytics/yolov5'): except: tag = 'v5.0' # current release - name = file.name if name in assets: - msg = f'{file} missing, try downloading from https://github.com/{repo}/releases/' - redundant = False # second download option - try: # GitHub - url = f'https://github.com/{repo}/releases/download/{tag}/{name}' - print(f'Downloading {url} to {file}...') - torch.hub.download_url_to_file(url, file) - assert file.exists() and file.stat().st_size > 1E6 # check - except Exception as e: # GCP - print(f'Download error: {e}') - assert redundant, 'No secondary mirror' - url = f'https://storage.googleapis.com/{repo}/ckpt/{name}' - print(f'Downloading {url} to {file}...') - os.system(f"curl -L '{url}' -o '{file}' --retry 3 -C -") # curl download, retry and resume on fail - finally: - if not file.exists() or file.stat().st_size < 1E6: # check - file.unlink(missing_ok=True) # remove partial downloads - print(f'ERROR: Download failure: {msg}') - print('') - return + safe_download(file, + url=f'https://github.com/{repo}/releases/download/{tag}/{name}', + # url2=f'https://storage.googleapis.com/{repo}/ckpt/{name}', # backup url (optional) + min_bytes=1E5, + error_msg=f'{file} missing, try downloading from https://github.com/{repo}/releases/') + + return str(file) def gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', file='tmp.zip'): diff --git a/utils/torch_utils.py b/utils/torch_utils.py index aa54c3cf561e..6a7d07634813 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -18,7 +18,7 @@ import torchvision try: - import thop # for FLOPS computation + import thop # for FLOPs computation except ImportError: thop = None logger = logging.getLogger(__name__) @@ -105,13 +105,13 @@ def profile(x, ops, n=100, device=None): x = x.to(device) x.requires_grad = True print(torch.__version__, device.type, torch.cuda.get_device_properties(0) if device.type == 'cuda' else '') - print(f"\n{'Params':>12s}{'GFLOPS':>12s}{'forward (ms)':>16s}{'backward (ms)':>16s}{'input':>24s}{'output':>24s}") + print(f"\n{'Params':>12s}{'GFLOPs':>12s}{'forward (ms)':>16s}{'backward (ms)':>16s}{'input':>24s}{'output':>24s}") for m in ops if isinstance(ops, list) else [ops]: m = m.to(device) if hasattr(m, 'to') else m # device m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m # type dtf, dtb, t = 0., 0., [0., 0., 0.] 
# dt forward, backward try: - flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # GFLOPS + flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # GFLOPs except: flops = 0 @@ -219,13 +219,13 @@ def model_info(model, verbose=False, img_size=640): print('%5g %40s %9s %12g %20s %10.3g %10.3g' % (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std())) - try: # FLOPS + try: # FLOPs from thop import profile stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32 img = torch.zeros((1, model.yaml.get('ch', 3), stride, stride), device=next(model.parameters()).device) # input - flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2 # stride GFLOPS + flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2 # stride GFLOPs img_size = img_size if isinstance(img_size, list) else [img_size, img_size] # expand if int/float - fs = ', %.1f GFLOPS' % (flops * img_size[0] / stride * img_size[1] / stride) # 640x640 GFLOPS + fs = ', %.1f GFLOPs' % (flops * img_size[0] / stride * img_size[1] / stride) # 640x640 GFLOPs except (ImportError, Exception): fs = '' From 28bff22df8e45e60e37fbf3af2c508a9536a73c7 Mon Sep 17 00:00:00 2001 From: Dean Mark <2552482+deanmark@users.noreply.github.com> Date: Tue, 8 Jun 2021 19:00:21 +0300 Subject: [PATCH 0274/1976] Use multi-threading in cache_labels (#3505) * Use multi threading in cache_labels * PEP8 reformat * Add num_threads * changed ThreadPool.imap_unordered to Pool.imap_unordered * Remove inplace additions * Update datasets.py refactor initial desc Co-authored-by: Glenn Jocher --- utils/datasets.py | 99 +++++++++++++++++++++++++++-------------------- 1 file changed, 56 insertions(+), 43 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index b6e43b94cfe9..bda435776629 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -9,7 +9,7 @@ import shutil import time from itertools import repeat -from multiprocessing.pool import ThreadPool +from multiprocessing.pool import ThreadPool, Pool from pathlib import Path from threading import Thread @@ -29,6 +29,7 @@ help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo'] # acceptable image suffixes vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes +num_threads = min(8, os.cpu_count()) # number of multiprocessing threads logger = logging.getLogger(__name__) # Get orientation exif tag @@ -447,7 +448,7 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r if cache_images: gb = 0 # Gigabytes of cached images self.img_hw0, self.img_hw = [None] * n, [None] * n - results = ThreadPool(8).imap(lambda x: load_image(*x), zip(repeat(self), range(n))) # 8 threads + results = ThreadPool(num_threads).imap(lambda x: load_image(*x), zip(repeat(self), range(n))) pbar = tqdm(enumerate(results), total=n) for i, x in pbar: self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # img, hw_original, hw_resized = load_image(self, i) @@ -458,53 +459,24 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r def cache_labels(self, path=Path('./labels.cache'), prefix=''): # Cache dataset labels, check images and read shapes x = {} # dict - nm, nf, ne, nc = 0, 0, 0, 0 # number missing, found, empty, duplicate - pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files)) - for i, (im_file, lb_file) in 
enumerate(pbar): - try: - # verify images - im = Image.open(im_file) - im.verify() # PIL verify - shape = exif_size(im) # image size - segments = [] # instance segments - assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels' - assert im.format.lower() in img_formats, f'invalid image format {im.format}' - - # verify labels - if os.path.isfile(lb_file): - nf += 1 # label found - with open(lb_file, 'r') as f: - l = [x.split() for x in f.read().strip().splitlines() if len(x)] - if any([len(x) > 8 for x in l]): # is segment - classes = np.array([x[0] for x in l], dtype=np.float32) - segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in l] # (cls, xy1...) - l = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh) - l = np.array(l, dtype=np.float32) - if len(l): - assert l.shape[1] == 5, 'labels require 5 columns each' - assert (l >= 0).all(), 'negative labels' - assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels' - assert np.unique(l, axis=0).shape[0] == l.shape[0], 'duplicate labels' - else: - ne += 1 # label empty - l = np.zeros((0, 5), dtype=np.float32) - else: - nm += 1 # label missing - l = np.zeros((0, 5), dtype=np.float32) - x[im_file] = [l, shape, segments] - except Exception as e: - nc += 1 - logging.info(f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}') - - pbar.desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels... " \ - f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted" + nm, nf, ne, nc = 0, 0, 0, 0 # number missing, found, empty, corrupt + desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels..." + with Pool(num_threads) as pool: + pbar = tqdm(pool.imap_unordered(verify_image_label, + zip(self.img_files, self.label_files, repeat(prefix))), + desc=desc, total=len(self.img_files)) + for im_file, l, shape, segments, nm_f, nf_f, ne_f, nc_f in pbar: + if im_file: + x[im_file] = [l, shape, segments] + nm, nf, ne, nc = nm + nm_f, nf + nf_f, ne + ne_f, nc + nc_f + pbar.desc = f"{desc}{nf} found, {nm} missing, {ne} empty, {nc} corrupted" pbar.close() if nf == 0: logging.info(f'{prefix}WARNING: No labels found in {path}. 
See {help_url}') x['hash'] = get_hash(self.label_files + self.img_files) - x['results'] = nf, nm, ne, nc, i + 1 + x['results'] = nf, nm, ne, nc, len(self.img_files) x['version'] = 0.2 # cache version try: torch.save(x, path) # save cache for next time @@ -1069,3 +1041,44 @@ def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0), annotated_only=False): if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label with open(path / txt[i], 'a') as f: f.write(str(img) + '\n') # add image to txt file + + +def verify_image_label(params): + # Verify one image-label pair + im_file, lb_file, prefix = params + nm, nf, ne, nc = 0, 0, 0, 0 # number missing, found, empty, corrupt + try: + # verify images + im = Image.open(im_file) + im.verify() # PIL verify + shape = exif_size(im) # image size + segments = [] # instance segments + assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels' + assert im.format.lower() in img_formats, f'invalid image format {im.format}' + + # verify labels + if os.path.isfile(lb_file): + nf = 1 # label found + with open(lb_file, 'r') as f: + l = [x.split() for x in f.read().strip().splitlines() if len(x)] + if any([len(x) > 8 for x in l]): # is segment + classes = np.array([x[0] for x in l], dtype=np.float32) + segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in l] # (cls, xy1...) + l = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh) + l = np.array(l, dtype=np.float32) + if len(l): + assert l.shape[1] == 5, 'labels require 5 columns each' + assert (l >= 0).all(), 'negative labels' + assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels' + assert np.unique(l, axis=0).shape[0] == l.shape[0], 'duplicate labels' + else: + ne = 1 # label empty + l = np.zeros((0, 5), dtype=np.float32) + else: + nm = 1 # label missing + l = np.zeros((0, 5), dtype=np.float32) + return im_file, l, shape, segments, nm, nf, ne, nc + except Exception as e: + nc = 1 + logging.info(f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}') + return [None] * 4 + [nm, nf, ne, nc] From 8d52c1c5c58a4c5cf64a6fa718cfb4e5350a2045 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 8 Jun 2021 18:36:40 +0200 Subject: [PATCH 0275/1976] Update datasets.py (#3531) Minor updates to https://github.com/ultralytics/yolov5/pull/3505, inplace accumulation. --- utils/datasets.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index bda435776629..daaa8d24855e 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -462,19 +462,20 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): nm, nf, ne, nc = 0, 0, 0, 0 # number missing, found, empty, corrupt desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels..." with Pool(num_threads) as pool: - pbar = tqdm(pool.imap_unordered(verify_image_label, - zip(self.img_files, self.label_files, repeat(prefix))), + pbar = tqdm(pool.imap_unordered(verify_image_label, zip(self.img_files, self.label_files, repeat(prefix))), desc=desc, total=len(self.img_files)) for im_file, l, shape, segments, nm_f, nf_f, ne_f, nc_f in pbar: + nm += nm_f + nf += nf_f + ne += ne_f + nc += nc_f if im_file: x[im_file] = [l, shape, segments] - nm, nf, ne, nc = nm + nm_f, nf + nf_f, ne + ne_f, nc + nc_f pbar.desc = f"{desc}{nf} found, {nm} missing, {ne} empty, {nc} corrupted" - pbar.close() + pbar.close() if nf == 0: logging.info(f'{prefix}WARNING: No labels found in {path}. 
See {help_url}') - x['hash'] = get_hash(self.label_files + self.img_files) x['results'] = nf, nm, ne, nc, len(self.img_files) x['version'] = 0.2 # cache version From c6b51f4189efbda055a08709cc35fcf5743379fd Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 8 Jun 2021 18:47:13 +0200 Subject: [PATCH 0276/1976] Update FP16 `--half` argument for test.py and detect.py (#3532) * Update FP16 `--half` argument for test.py and detect.py * Update detect.py --- detect.py | 2 +- test.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/detect.py b/detect.py index aba87687e666..537f47dfafab 100644 --- a/detect.py +++ b/detect.py @@ -172,7 +172,7 @@ def detect(opt): parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)') parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels') parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences') - parser.add_argument('--half', type=bool, default=False, help='use FP16 half-precision inference') + parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') opt = parser.parse_args() print(opt) check_requirements(exclude=('tensorboard', 'pycocotools', 'thop')) diff --git a/test.py b/test.py index 12141f71c2c1..6a2a4e47c142 100644 --- a/test.py +++ b/test.py @@ -306,7 +306,7 @@ def test(data, parser.add_argument('--project', default='runs/test', help='save to project/name') parser.add_argument('--name', default='exp', help='save to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') - parser.add_argument('--half', type=bool, default=False, help='use FP16 half-precision inference') + parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') opt = parser.parse_args() opt.save_json |= opt.data.endswith('coco.yaml') opt.data = check_file(opt.data) # check file From 78cf4885565302603fd1b211d498160bdf88ad38 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 8 Jun 2021 18:54:33 +0200 Subject: [PATCH 0277/1976] Created using Colaboratory --- tutorial.ipynb | 259 +++++++++++++++++++++++++++++++------------------ 1 file changed, 164 insertions(+), 95 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 4e760b13bb41..4429c1044cfe 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -16,7 +16,7 @@ "accelerator": "GPU", "widgets": { "application/vnd.jupyter.widget-state+json": { - "8815626359d84416a2f44a95500580a4": { + "cef5e9351ca743bcba5febac0b096a30": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "state": { @@ -28,15 +28,15 @@ "_view_count": null, "_view_module_version": "1.5.0", "box_style": "", - "layout": "IPY_MODEL_3b85609c4ce94a74823f2cfe141ce68e", + "layout": "IPY_MODEL_ec326c52378f4410920c328f221e0514", "_model_module": "@jupyter-widgets/controls", "children": [ - "IPY_MODEL_876609753c2946248890344722963d44", - "IPY_MODEL_8abfdd8778e44b7ca0d29881cb1ada05" + "IPY_MODEL_83000c64a11c4ae8abd6f0ef2f108cef", + "IPY_MODEL_0f7899eb719f4a9c9852426551f97be9" ] } }, - "3b85609c4ce94a74823f2cfe141ce68e": { + "ec326c52378f4410920c328f221e0514": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -87,12 +87,12 @@ "left": null } }, - "876609753c2946248890344722963d44": { + "83000c64a11c4ae8abd6f0ef2f108cef": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "state": { "_view_name": "ProgressView", - 
"style": "IPY_MODEL_78c6c3d97c484916b8ee167c63556800", + "style": "IPY_MODEL_886ac5b18b3c4c82bf15ad5055f1e17e", "_dom_classes": [], "description": "100%", "_model_name": "FloatProgressModel", @@ -107,30 +107,30 @@ "min": 0, "description_tooltip": null, "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_9dd0f182db5d45378ceafb855e486eb8" + "layout": "IPY_MODEL_4e67b3c3a49849c7a7ba28b7eec96e7a" } }, - "8abfdd8778e44b7ca0d29881cb1ada05": { + "0f7899eb719f4a9c9852426551f97be9": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "state": { "_view_name": "HTMLView", - "style": "IPY_MODEL_a3dab28b45c247089a3d1b8b09f327de", + "style": "IPY_MODEL_62c3682ff1804571a483d46664533969", "_dom_classes": [], "description": "", "_model_name": "HTMLModel", "placeholder": "​", "_view_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", - "value": " 781M/781M [08:43<00:00, 1.56MB/s]", + "value": " 781M/781M [00:12<00:00, 67.1MB/s]", "_view_count": null, "_view_module_version": "1.5.0", "description_tooltip": null, "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_32451332b7a94ba9aacddeaa6ac94d50" + "layout": "IPY_MODEL_599dda3b608b432393760b2ca4ae7c7d" } }, - "78c6c3d97c484916b8ee167c63556800": { + "886ac5b18b3c4c82bf15ad5055f1e17e": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "state": { @@ -145,7 +145,7 @@ "_model_module": "@jupyter-widgets/controls" } }, - "9dd0f182db5d45378ceafb855e486eb8": { + "4e67b3c3a49849c7a7ba28b7eec96e7a": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -196,7 +196,7 @@ "left": null } }, - "a3dab28b45c247089a3d1b8b09f327de": { + "62c3682ff1804571a483d46664533969": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "state": { @@ -210,7 +210,7 @@ "_model_module": "@jupyter-widgets/controls" } }, - "32451332b7a94ba9aacddeaa6ac94d50": { + "599dda3b608b432393760b2ca4ae7c7d": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -261,7 +261,7 @@ "left": null } }, - "0fffa335322b41658508e06aed0acbf0": { + "217ca488c82a4b7a80318b70887a556e": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "state": { @@ -273,15 +273,15 @@ "_view_count": null, "_view_module_version": "1.5.0", "box_style": "", - "layout": "IPY_MODEL_a354c6f80ce347e5a3ef64af87c0eccb", + "layout": "IPY_MODEL_4e63af16f1084ca98a6fa5a282f2a81e", "_model_module": "@jupyter-widgets/controls", "children": [ - "IPY_MODEL_85823e71fea54c39bd11e2e972348836", - "IPY_MODEL_fb11acd663fa4e71b041d67310d045fd" + "IPY_MODEL_49f4b3c7f6ff42b4b9132a8550e12186", + "IPY_MODEL_8ec9e1a4883245daaf029458ee09721f" ] } }, - "a354c6f80ce347e5a3ef64af87c0eccb": { + "4e63af16f1084ca98a6fa5a282f2a81e": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -332,12 +332,12 @@ "left": null } }, - "85823e71fea54c39bd11e2e972348836": { + "49f4b3c7f6ff42b4b9132a8550e12186": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "state": { "_view_name": "ProgressView", - "style": "IPY_MODEL_8a919053b780449aae5523658ad611fa", + "style": "IPY_MODEL_9d3e775ee11e4cf4b587b64fbc3cc6f7", "_dom_classes": [], "description": "100%", "_model_name": "FloatProgressModel", @@ -352,30 +352,30 @@ "min": 0, "description_tooltip": null, "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_5bae9393a58b44f7b69fb04816f94f6f" + "layout": 
"IPY_MODEL_70f68a9a51ac46e6ab7e51fb4fc6bda3" } }, - "fb11acd663fa4e71b041d67310d045fd": { + "8ec9e1a4883245daaf029458ee09721f": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "state": { "_view_name": "HTMLView", - "style": "IPY_MODEL_d26c6d16c7f24030ab2da5285bf198ee", + "style": "IPY_MODEL_fdb8ab377c114bc3b862ba76eb93cef7", "_dom_classes": [], "description": "", "_model_name": "HTMLModel", "placeholder": "​", "_view_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", - "value": " 21.1M/21.1M [00:02<00:00, 9.36MB/s]", + "value": " 21.1M/21.1M [00:36<00:00, 605kB/s]", "_view_count": null, "_view_module_version": "1.5.0", "description_tooltip": null, "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_f7767886b2364c8d9efdc79e175ad8eb" + "layout": "IPY_MODEL_cd267c153c244621a1f50706d2ddc897" } }, - "8a919053b780449aae5523658ad611fa": { + "9d3e775ee11e4cf4b587b64fbc3cc6f7": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "state": { @@ -390,7 +390,7 @@ "_model_module": "@jupyter-widgets/controls" } }, - "5bae9393a58b44f7b69fb04816f94f6f": { + "70f68a9a51ac46e6ab7e51fb4fc6bda3": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -441,7 +441,7 @@ "left": null } }, - "d26c6d16c7f24030ab2da5285bf198ee": { + "fdb8ab377c114bc3b862ba76eb93cef7": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "state": { @@ -455,7 +455,7 @@ "_model_module": "@jupyter-widgets/controls" } }, - "f7767886b2364c8d9efdc79e175ad8eb": { + "cd267c153c244621a1f50706d2ddc897": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -517,8 +517,7 @@ "colab_type": "text" }, "source": [ - "\"Open", - "\"Kaggle\"" + "\"Open" ] }, { @@ -551,7 +550,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "9b022435-4197-41fc-abea-81f86ce857d0" + "outputId": "0cabe440-e06c-48b9-9180-4b4ea1790ff5" }, "source": [ "!git clone https://github.com/ultralytics/yolov5 # clone repo\n", @@ -564,7 +563,7 @@ "clear_output()\n", "print(f\"Setup complete. 
Using torch {torch.__version__} ({torch.cuda.get_device_properties(0).name if torch.cuda.is_available() else 'CPU'})\")" ], - "execution_count": null, + "execution_count": 1, "outputs": [ { "output_type": "stream", @@ -663,32 +662,32 @@ "id": "WQPtK1QYVaD_", "colab": { "base_uri": "https://localhost:8080/", - "height": 65, + "height": 66, "referenced_widgets": [ - "8815626359d84416a2f44a95500580a4", - "3b85609c4ce94a74823f2cfe141ce68e", - "876609753c2946248890344722963d44", - "8abfdd8778e44b7ca0d29881cb1ada05", - "78c6c3d97c484916b8ee167c63556800", - "9dd0f182db5d45378ceafb855e486eb8", - "a3dab28b45c247089a3d1b8b09f327de", - "32451332b7a94ba9aacddeaa6ac94d50" + "cef5e9351ca743bcba5febac0b096a30", + "ec326c52378f4410920c328f221e0514", + "83000c64a11c4ae8abd6f0ef2f108cef", + "0f7899eb719f4a9c9852426551f97be9", + "886ac5b18b3c4c82bf15ad5055f1e17e", + "4e67b3c3a49849c7a7ba28b7eec96e7a", + "62c3682ff1804571a483d46664533969", + "599dda3b608b432393760b2ca4ae7c7d" ] }, - "outputId": "81521192-cf67-4a47-a4cc-434cb0ebc363" + "outputId": "56b6402a-81d5-41d0-a3c8-8889db1fca6c" }, "source": [ "# Download COCO val2017\n", "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017val.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../ && rm tmp.zip" ], - "execution_count": null, + "execution_count": 2, "outputs": [ { "output_type": "display_data", "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "8815626359d84416a2f44a95500580a4", + "model_id": "cef5e9351ca743bcba5febac0b096a30", "version_minor": 0, "version_major": 2 }, @@ -716,45 +715,45 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "2340b131-9943-4cd6-fd3a-8272aeb0774f" + "outputId": "a5d41761-f1a0-41fe-d0bb-4cceebd7c4a6" }, "source": [ "# Run YOLOv5x on COCO val2017\n", - "!python test.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65" + "!python test.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65 --half" ], - "execution_count": null, + "execution_count": 3, "outputs": [ { "output_type": "stream", "text": [ - "Namespace(augment=False, batch_size=32, conf_thres=0.001, data='./data/coco.yaml', device='', exist_ok=False, img_size=640, iou_thres=0.65, name='exp', project='runs/test', save_conf=False, save_hybrid=False, save_json=True, save_txt=False, single_cls=False, task='val', verbose=False, weights=['yolov5x.pt'])\n", - "YOLOv5 🚀 v5.0-1-g0f395b3 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", + "Namespace(augment=False, batch_size=32, conf_thres=0.001, data='./data/coco.yaml', device='', exist_ok=False, half=True, img_size=640, iou_thres=0.65, name='exp', project='runs/test', save_conf=False, save_hybrid=False, save_json=True, save_txt=False, single_cls=False, task='val', verbose=False, weights=['yolov5x.pt'])\n", + "YOLOv5 🚀 v5.0-157-gc6b51f4 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", "\n", "Downloading https://github.com/ultralytics/yolov5/releases/download/v5.0/yolov5x.pt to yolov5x.pt...\n", - "100% 168M/168M [00:05<00:00, 32.3MB/s]\n", + "100% 168M/168M [00:01<00:00, 156MB/s]\n", "\n", "Fusing layers... \n", - "Model Summary: 476 layers, 87730285 parameters, 0 gradients, 218.8 GFLOPs\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco/val2017' images and labels... 
4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:01<00:00, 3102.29it/s]\n", + "Model Summary: 476 layers, 87730285 parameters, 0 gradients\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:01<00:00, 3008.87it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: ../coco/val2017.cache\n", - " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:23<00:00, 1.87it/s]\n", - " all 5000 36335 0.745 0.627 0.68 0.49\n", - "Speed: 5.3/1.6/6.9 ms inference/NMS/total per 640x640 image at batch-size 32\n", + " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:17<00:00, 2.02it/s]\n", + " all 5000 36335 0.746 0.626 0.68 0.49\n", + "Speed: 5.3/1.5/6.8 ms inference/NMS/total per 640x640 image at batch-size 32\n", "\n", "Evaluating pycocotools mAP... saving runs/test/exp/yolov5x_predictions.json...\n", "loading annotations into memory...\n", - "Done (t=0.48s)\n", + "Done (t=0.44s)\n", "creating index...\n", "index created!\n", "Loading and preparing results...\n", - "DONE (t=5.08s)\n", + "DONE (t=4.88s)\n", "creating index...\n", "index created!\n", "Running per image evaluation...\n", "Evaluate annotation type *bbox*\n", - "DONE (t=90.51s).\n", + "DONE (t=83.47s).\n", "Accumulating evaluation results...\n", - "DONE (t=15.16s).\n", + "DONE (t=12.96s).\n", " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.504\n", " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.688\n", " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.546\n", @@ -827,32 +826,32 @@ "id": "Knxi2ncxWffW", "colab": { "base_uri": "https://localhost:8080/", - "height": 65, + "height": 66, "referenced_widgets": [ - "0fffa335322b41658508e06aed0acbf0", - "a354c6f80ce347e5a3ef64af87c0eccb", - "85823e71fea54c39bd11e2e972348836", - "fb11acd663fa4e71b041d67310d045fd", - "8a919053b780449aae5523658ad611fa", - "5bae9393a58b44f7b69fb04816f94f6f", - "d26c6d16c7f24030ab2da5285bf198ee", - "f7767886b2364c8d9efdc79e175ad8eb" + "217ca488c82a4b7a80318b70887a556e", + "4e63af16f1084ca98a6fa5a282f2a81e", + "49f4b3c7f6ff42b4b9132a8550e12186", + "8ec9e1a4883245daaf029458ee09721f", + "9d3e775ee11e4cf4b587b64fbc3cc6f7", + "70f68a9a51ac46e6ab7e51fb4fc6bda3", + "fdb8ab377c114bc3b862ba76eb93cef7", + "cd267c153c244621a1f50706d2ddc897" ] }, - "outputId": "b41ac253-9e1b-4c26-d78b-700ea0154f43" + "outputId": "9e4788c2-e1d4-4a13-c3d2-984f5df7ffab" }, "source": [ "# Download COCO128\n", "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../ && rm tmp.zip" ], - "execution_count": null, + "execution_count": 2, "outputs": [ { "output_type": "display_data", "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "0fffa335322b41658508e06aed0acbf0", + "model_id": "217ca488c82a4b7a80318b70887a556e", "version_minor": 0, "version_major": 2 }, @@ -918,23 +917,93 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "e715d09c-5d93-4912-a0df-9da0893f2014" + "outputId": "70004839-0c90-4bc0-c0e5-9a92f3e65b01" }, "source": [ "# Train YOLOv5s on COCO128 for 3 epochs\n", - "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --nosave --cache" + "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache" ], - "execution_count": null, + "execution_count": 4, "outputs": [ { "output_type": "stream", 
"text": [ "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 🚀 v5.0-2-g54d6516 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", + "YOLOv5 🚀 v5.0-157-gc6b51f4 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", + "\n", + "Namespace(adam=False, artifact_alias='latest', batch_size=16, bbox_interval=-1, bucket='', cache_images=True, cfg='', data='./data/coco128.yaml', device='', entity=None, epochs=1, evolve=False, exist_ok=False, global_rank=-1, hyp='data/hyp.scratch.yaml', image_weights=False, img_size=[640, 640], label_smoothing=0.0, linear_lr=False, local_rank=-1, multi_scale=False, name='exp', noautoanchor=False, nosave=False, notest=False, project='runs/train', quad=False, rect=False, resume=False, save_dir='runs/train/exp', save_period=-1, single_cls=False, sync_bn=False, total_batch_size=16, upload_dataset=False, weights='yolov5s.pt', workers=8, world_size=1)\n", + "\u001b[34m\u001b[1mtensorboard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n", + "2021-06-08 16:52:25.719745: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcudart.so.11.0\n", + "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.2, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0\n", + "\u001b[34m\u001b[1mwandb: \u001b[0mInstall Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)\n", + "Downloading https://github.com/ultralytics/yolov5/releases/download/v5.0/yolov5s.pt to yolov5s.pt...\n", + "100% 14.1M/14.1M [00:00<00:00, 18.7MB/s]\n", + "\n", + "\n", + " from n params module arguments \n", + " 0 -1 1 3520 models.common.Focus [3, 32, 3] \n", + " 1 -1 1 18560 models.common.Conv [32, 64, 3, 2] \n", + " 2 -1 1 18816 models.common.C3 [64, 64, 1] \n", + " 3 -1 1 73984 models.common.Conv [64, 128, 3, 2] \n", + " 4 -1 1 156928 models.common.C3 [128, 128, 3] \n", + " 5 -1 1 295424 models.common.Conv [128, 256, 3, 2] \n", + " 6 -1 1 625152 models.common.C3 [256, 256, 3] \n", + " 7 -1 1 1180672 models.common.Conv [256, 512, 3, 2] \n", + " 8 -1 1 656896 models.common.SPP [512, 512, [5, 9, 13]] \n", + " 9 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n", + " 10 -1 1 131584 models.common.Conv [512, 256, 1, 1] \n", + " 11 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", + " 12 [-1, 6] 1 0 models.common.Concat [1] \n", + " 13 -1 1 361984 models.common.C3 [512, 256, 1, False] \n", + " 14 -1 1 33024 models.common.Conv [256, 128, 1, 1] \n", + " 15 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", + " 16 [-1, 4] 1 0 models.common.Concat [1] \n", + " 17 -1 1 90880 models.common.C3 [256, 128, 1, False] \n", + " 18 -1 1 147712 models.common.Conv [128, 128, 3, 2] \n", + " 19 [-1, 14] 1 0 models.common.Concat [1] \n", + " 20 -1 1 296448 models.common.C3 [256, 256, 1, False] \n", + " 21 -1 1 590336 models.common.Conv [256, 256, 3, 2] \n", + " 22 [-1, 10] 1 0 models.common.Concat [1] \n", + " 23 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n", + " 24 [17, 20, 23] 1 229245 models.yolo.Detect [80, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n", + 
"Model Summary: 283 layers, 7276605 parameters, 7276605 gradients, 17.1 GFLOPs\n", + "\n", + "Transferred 362/362 items from yolov5s.pt\n", + "\n", + "WARNING: Dataset not found, nonexistent paths: ['/content/coco128/images/train2017']\n", + "Downloading https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip ...\n", + "100% 21.1M/21.1M [00:00<00:00, 68.2MB/s]\n", + "Dataset autodownload success\n", + "\n", + "Scaled weight_decay = 0.0005\n", + "Optimizer groups: 62 .bias, 62 conv.weight, 59 other\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '../coco128/labels/train2017' images and labels...128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 2036.51it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: ../coco128/labels/train2017.cache\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 189.76it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 687414.74it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:01<00:00, 93.37it/s]\n", + "Plotting labels... \n", + "\n", + "\u001b[34m\u001b[1mautoanchor: \u001b[0mAnalyzing anchors... anchors/target = 4.26, Best Possible Recall (BPR) = 0.9946\n", + "Image sizes 640 train, 640 test\n", + "Using 2 dataloader workers\n", + "Logging results to runs/train/exp\n", + "Starting training for 1 epochs...\n", + "\n", + " Epoch gpu_mem box obj cls total labels img_size\n", + " 0/0 10.8G 0.04226 0.06068 0.02005 0.123 158 640: 100% 8/8 [00:05<00:00, 1.35it/s]\n", + " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:06<00:00, 1.53s/it]\n", + " all 128 929 0.633 0.641 0.668 0.439\n", + "1 epochs completed in 0.005 hours.\n", + "\n", + "Optimizer stripped from runs/train/exp/weights/last.pt, 14.8MB\n", + "Optimizer stripped from runs/train/exp/weights/best.pt, 14.8MB\n", + "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", + "YOLOv5 🚀 v5.0-157-gc6b51f4 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", "\n", - "Namespace(adam=False, artifact_alias='latest', batch_size=16, bbox_interval=-1, bucket='', cache_images=True, cfg='', data='./data/coco128.yaml', device='', entity=None, epochs=3, evolve=False, exist_ok=False, global_rank=-1, hyp='data/hyp.scratch.yaml', image_weights=False, img_size=[640, 640], label_smoothing=0.0, linear_lr=False, local_rank=-1, multi_scale=False, name='exp', noautoanchor=False, nosave=True, notest=False, project='runs/train', quad=False, rect=False, resume=False, save_dir='runs/train/exp', save_period=-1, single_cls=False, sync_bn=False, total_batch_size=16, upload_dataset=False, weights='yolov5s.pt', workers=8, world_size=1)\n", + "Namespace(adam=False, artifact_alias='latest', batch_size=16, bbox_interval=-1, bucket='', cache_images=True, cfg='', data='./data/coco128.yaml', device='', entity=None, epochs=3, evolve=False, exist_ok=False, global_rank=-1, hyp='data/hyp.scratch.yaml', image_weights=False, img_size=[640, 640], label_smoothing=0.0, linear_lr=False, local_rank=-1, multi_scale=False, name='exp', noautoanchor=False, nosave=False, notest=False, project='runs/train', quad=False, rect=False, resume=False, save_dir='runs/train/exp', save_period=-1, single_cls=False, sync_bn=False, total_batch_size=16, upload_dataset=False, weights='yolov5s.pt', workers=8, world_size=1)\n", "\u001b[34m\u001b[1mtensorboard: 
\u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n", - "2021-04-12 10:29:58.539457: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0\n", + "2021-06-08 16:53:03.275914: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcudart.so.11.0\n", "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.2, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0\n", "\u001b[34m\u001b[1mwandb: \u001b[0mInstall Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)\n", "\n", @@ -969,10 +1038,10 @@ "Transferred 362/362 items from yolov5s.pt\n", "Scaled weight_decay = 0.0005\n", "Optimizer groups: 62 .bias, 62 conv.weight, 59 other\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '../coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 796544.38it/s]\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 176.73it/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 500812.42it/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 134.10it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '../coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 824686.50it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 201.90it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 23766.92it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:01<00:00, 98.35it/s]\n", "Plotting labels... \n", "\n", "\u001b[34m\u001b[1mautoanchor: \u001b[0mAnalyzing anchors... 
anchors/target = 4.26, Best Possible Recall (BPR) = 0.9946\n", @@ -982,19 +1051,19 @@ "Starting training for 3 epochs...\n", "\n", " Epoch gpu_mem box obj cls total labels img_size\n", - " 0/2 3.29G 0.04368 0.065 0.02127 0.1299 183 640: 100% 8/8 [00:03<00:00, 2.21it/s]\n", - " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:04<00:00, 1.09s/it]\n", - " all 128 929 0.605 0.657 0.666 0.434\n", + " 0/2 10.8G 0.04226 0.06067 0.02005 0.123 158 640: 100% 8/8 [00:05<00:00, 1.41it/s]\n", + " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:04<00:00, 1.21s/it]\n", + " all 128 929 0.633 0.641 0.668 0.439\n", "\n", " Epoch gpu_mem box obj cls total labels img_size\n", - " 1/2 6.65G 0.04556 0.0651 0.01987 0.1305 166 640: 100% 8/8 [00:01<00:00, 5.18it/s]\n", - " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:01<00:00, 2.72it/s]\n", - " all 128 929 0.61 0.66 0.669 0.438\n", + " 1/2 8.29G 0.04571 0.06616 0.01952 0.1314 164 640: 100% 8/8 [00:01<00:00, 5.65it/s]\n", + " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:01<00:00, 3.21it/s]\n", + " all 128 929 0.613 0.659 0.669 0.438\n", "\n", " Epoch gpu_mem box obj cls total labels img_size\n", - " 2/2 6.65G 0.04624 0.06923 0.0196 0.1351 182 640: 100% 8/8 [00:01<00:00, 5.19it/s]\n", - " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:03<00:00, 1.27it/s]\n", - " all 128 929 0.618 0.659 0.671 0.438\n", + " 2/2 8.29G 0.04542 0.0718 0.01861 0.1358 191 640: 100% 8/8 [00:01<00:00, 4.89it/s]\n", + " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:02<00:00, 1.48it/s]\n", + " all 128 929 0.636 0.652 0.67 0.44\n", "3 epochs completed in 0.007 hours.\n", "\n", "Optimizer stripped from runs/train/exp/weights/last.pt, 14.8MB\n", @@ -1261,4 +1330,4 @@ "outputs": [] } ] -} +} \ No newline at end of file From ac8691e20827ec6103c6f521397bb9f699ac8a52 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 8 Jun 2021 19:01:52 +0200 Subject: [PATCH 0278/1976] Created using Colaboratory --- tutorial.ipynb | 104 ++++++++----------------------------------------- 1 file changed, 17 insertions(+), 87 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 4429c1044cfe..b6d672d10e52 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -917,93 +917,23 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "70004839-0c90-4bc0-c0e5-9a92f3e65b01" + "outputId": "c4dfc591-b6f9-4a60-9149-ee7eff970c90" }, "source": [ "# Train YOLOv5s on COCO128 for 3 epochs\n", "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache" ], - "execution_count": 4, + "execution_count": 9, "outputs": [ { "output_type": "stream", "text": [ "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 🚀 v5.0-157-gc6b51f4 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", - "\n", - "Namespace(adam=False, artifact_alias='latest', batch_size=16, bbox_interval=-1, bucket='', cache_images=True, cfg='', data='./data/coco128.yaml', device='', entity=None, epochs=1, evolve=False, exist_ok=False, global_rank=-1, hyp='data/hyp.scratch.yaml', image_weights=False, img_size=[640, 640], label_smoothing=0.0, linear_lr=False, local_rank=-1, multi_scale=False, name='exp', noautoanchor=False, nosave=False, notest=False, project='runs/train', quad=False, rect=False, resume=False, save_dir='runs/train/exp', save_period=-1, single_cls=False, sync_bn=False, total_batch_size=16, upload_dataset=False, weights='yolov5s.pt', workers=8, world_size=1)\n", - 
"\u001b[34m\u001b[1mtensorboard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n", - "2021-06-08 16:52:25.719745: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcudart.so.11.0\n", - "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.2, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0\n", - "\u001b[34m\u001b[1mwandb: \u001b[0mInstall Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)\n", - "Downloading https://github.com/ultralytics/yolov5/releases/download/v5.0/yolov5s.pt to yolov5s.pt...\n", - "100% 14.1M/14.1M [00:00<00:00, 18.7MB/s]\n", - "\n", - "\n", - " from n params module arguments \n", - " 0 -1 1 3520 models.common.Focus [3, 32, 3] \n", - " 1 -1 1 18560 models.common.Conv [32, 64, 3, 2] \n", - " 2 -1 1 18816 models.common.C3 [64, 64, 1] \n", - " 3 -1 1 73984 models.common.Conv [64, 128, 3, 2] \n", - " 4 -1 1 156928 models.common.C3 [128, 128, 3] \n", - " 5 -1 1 295424 models.common.Conv [128, 256, 3, 2] \n", - " 6 -1 1 625152 models.common.C3 [256, 256, 3] \n", - " 7 -1 1 1180672 models.common.Conv [256, 512, 3, 2] \n", - " 8 -1 1 656896 models.common.SPP [512, 512, [5, 9, 13]] \n", - " 9 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n", - " 10 -1 1 131584 models.common.Conv [512, 256, 1, 1] \n", - " 11 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", - " 12 [-1, 6] 1 0 models.common.Concat [1] \n", - " 13 -1 1 361984 models.common.C3 [512, 256, 1, False] \n", - " 14 -1 1 33024 models.common.Conv [256, 128, 1, 1] \n", - " 15 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", - " 16 [-1, 4] 1 0 models.common.Concat [1] \n", - " 17 -1 1 90880 models.common.C3 [256, 128, 1, False] \n", - " 18 -1 1 147712 models.common.Conv [128, 128, 3, 2] \n", - " 19 [-1, 14] 1 0 models.common.Concat [1] \n", - " 20 -1 1 296448 models.common.C3 [256, 256, 1, False] \n", - " 21 -1 1 590336 models.common.Conv [256, 256, 3, 2] \n", - " 22 [-1, 10] 1 0 models.common.Concat [1] \n", - " 23 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n", - " 24 [17, 20, 23] 1 229245 models.yolo.Detect [80, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n", - "Model Summary: 283 layers, 7276605 parameters, 7276605 gradients, 17.1 GFLOPs\n", - "\n", - "Transferred 362/362 items from yolov5s.pt\n", - "\n", - "WARNING: Dataset not found, nonexistent paths: ['/content/coco128/images/train2017']\n", - "Downloading https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip ...\n", - "100% 21.1M/21.1M [00:00<00:00, 68.2MB/s]\n", - "Dataset autodownload success\n", - "\n", - "Scaled weight_decay = 0.0005\n", - "Optimizer groups: 62 .bias, 62 conv.weight, 59 other\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '../coco128/labels/train2017' images and labels...128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 2036.51it/s]\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: ../coco128/labels/train2017.cache\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 189.76it/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning 
'../coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 687414.74it/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:01<00:00, 93.37it/s]\n", - "Plotting labels... \n", - "\n", - "\u001b[34m\u001b[1mautoanchor: \u001b[0mAnalyzing anchors... anchors/target = 4.26, Best Possible Recall (BPR) = 0.9946\n", - "Image sizes 640 train, 640 test\n", - "Using 2 dataloader workers\n", - "Logging results to runs/train/exp\n", - "Starting training for 1 epochs...\n", - "\n", - " Epoch gpu_mem box obj cls total labels img_size\n", - " 0/0 10.8G 0.04226 0.06068 0.02005 0.123 158 640: 100% 8/8 [00:05<00:00, 1.35it/s]\n", - " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:06<00:00, 1.53s/it]\n", - " all 128 929 0.633 0.641 0.668 0.439\n", - "1 epochs completed in 0.005 hours.\n", - "\n", - "Optimizer stripped from runs/train/exp/weights/last.pt, 14.8MB\n", - "Optimizer stripped from runs/train/exp/weights/best.pt, 14.8MB\n", - "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 🚀 v5.0-157-gc6b51f4 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", + "YOLOv5 🚀 v5.0-158-g78cf488 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", "\n", "Namespace(adam=False, artifact_alias='latest', batch_size=16, bbox_interval=-1, bucket='', cache_images=True, cfg='', data='./data/coco128.yaml', device='', entity=None, epochs=3, evolve=False, exist_ok=False, global_rank=-1, hyp='data/hyp.scratch.yaml', image_weights=False, img_size=[640, 640], label_smoothing=0.0, linear_lr=False, local_rank=-1, multi_scale=False, name='exp', noautoanchor=False, nosave=False, notest=False, project='runs/train', quad=False, rect=False, resume=False, save_dir='runs/train/exp', save_period=-1, single_cls=False, sync_bn=False, total_batch_size=16, upload_dataset=False, weights='yolov5s.pt', workers=8, world_size=1)\n", "\u001b[34m\u001b[1mtensorboard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n", - "2021-06-08 16:53:03.275914: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcudart.so.11.0\n", + "2021-06-08 17:00:55.016221: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcudart.so.11.0\n", "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.2, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0\n", "\u001b[34m\u001b[1mwandb: \u001b[0mInstall Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)\n", "\n", @@ -1038,10 +968,10 @@ "Transferred 362/362 items from yolov5s.pt\n", "Scaled weight_decay = 0.0005\n", "Optimizer groups: 62 .bias, 62 conv.weight, 59 other\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '../coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 824686.50it/s]\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 201.90it/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco128/labels/train2017.cache' images and labels... 
128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 23766.92it/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:01<00:00, 98.35it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '../coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 1503840.09it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 198.74it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 475107.00it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:01<00:00, 98.63it/s]\n", "Plotting labels... \n", "\n", "\u001b[34m\u001b[1mautoanchor: \u001b[0mAnalyzing anchors... anchors/target = 4.26, Best Possible Recall (BPR) = 0.9946\n", @@ -1051,19 +981,19 @@ "Starting training for 3 epochs...\n", "\n", " Epoch gpu_mem box obj cls total labels img_size\n", - " 0/2 10.8G 0.04226 0.06067 0.02005 0.123 158 640: 100% 8/8 [00:05<00:00, 1.41it/s]\n", - " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:04<00:00, 1.21s/it]\n", - " all 128 929 0.633 0.641 0.668 0.439\n", + " 0/2 10.8G 0.04226 0.06067 0.02005 0.123 158 640: 100% 8/8 [00:05<00:00, 1.45it/s]\n", + " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:04<00:00, 1.17s/it]\n", + " all 128 929 0.633 0.641 0.668 0.438\n", "\n", " Epoch gpu_mem box obj cls total labels img_size\n", - " 1/2 8.29G 0.04571 0.06616 0.01952 0.1314 164 640: 100% 8/8 [00:01<00:00, 5.65it/s]\n", - " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:01<00:00, 3.21it/s]\n", - " all 128 929 0.613 0.659 0.669 0.438\n", + " 1/2 6.66G 0.04571 0.06615 0.01952 0.1314 164 640: 100% 8/8 [00:01<00:00, 5.10it/s]\n", + " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:01<00:00, 3.88it/s]\n", + " all 128 929 0.614 0.661 0.67 0.438\n", "\n", " Epoch gpu_mem box obj cls total labels img_size\n", - " 2/2 8.29G 0.04542 0.0718 0.01861 0.1358 191 640: 100% 8/8 [00:01<00:00, 4.89it/s]\n", - " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:02<00:00, 1.48it/s]\n", - " all 128 929 0.636 0.652 0.67 0.44\n", + " 2/2 6.66G 0.04542 0.07179 0.01861 0.1358 191 640: 100% 8/8 [00:01<00:00, 5.40it/s]\n", + " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:02<00:00, 1.43it/s]\n", + " all 128 929 0.636 0.652 0.67 0.439\n", "3 epochs completed in 0.007 hours.\n", "\n", "Optimizer stripped from runs/train/exp/weights/last.pt, 14.8MB\n", From b6fdd2e5e54aa3464b360fe6d9c6f3cb216f3778 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 8 Jun 2021 23:09:45 +0200 Subject: [PATCH 0279/1976] Create `dataset_stats()` for HUB --- utils/datasets.py | 36 ++++++++++++++++++++++++++++++++++-- 1 file changed, 34 insertions(+), 2 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index daaa8d24855e..7c74d2c01322 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -17,12 +17,13 @@ import numpy as np import torch import torch.nn.functional as F +import yaml from PIL import Image, ExifTags from torch.utils.data import Dataset from tqdm import tqdm -from utils.general import check_requirements, xyxy2xywh, xywh2xyxy, xywhn2xyxy, xyn2xy, segment2box, segments2boxes, \ - resample_segments, clean_str +from utils.general import check_requirements, check_file, check_dataset, xyxy2xywh, xywh2xyxy, xywhn2xyxy, xyn2xy, \ + segment2box, segments2boxes, resample_segments, 
clean_str from utils.torch_utils import torch_distributed_zero_first # Parameters @@ -1083,3 +1084,34 @@ def verify_image_label(params): nc = 1 logging.info(f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}') return [None] * 4 + [nm, nf, ne, nc] + + +def dataset_stats(path='data/coco128.yaml', verbose=False): + """ Return dataset statistics dictionary with images and instances counts per split per class + Usage: from utils.datasets import *; dataset_stats('data/coco128.yaml') + Arguments + path: Path to data.yaml + verbose: Print stats dictionary + """ + path = check_file(Path(path)) + with open(path) as f: + data = yaml.safe_load(f) # data dict + check_dataset(data) # download dataset if missing + + nc = data['nc'] # number of classes + stats = {'nc': nc, 'names': data['names']} # statistics dictionary + for split in 'train', 'val', 'test': + if split not in data: + stats[split] = None # i.e. no test set + continue + x = [] + dataset = LoadImagesAndLabels(data[split], augment=False, rect=True) # load dataset + for label in tqdm(dataset.labels, total=dataset.n, desc='Statistics'): + x.append(np.bincount(label[:, 0].astype(int), minlength=nc)) + x = np.array(x) # shape(128x80) + stats[split] = {'instances': {'total': int(x.sum()), 'per_class': x.sum(0).tolist()}, + 'images': {'total': dataset.n, 'unlabelled': int(np.all(x == 0, 1).sum()), + 'per_class': (x > 0).sum(0).tolist()}} + if verbose: + print(yaml.dump([stats], sort_keys=False, default_flow_style=False)) + return stats From 1b5edb6f8eb14a12f21ed0370e9a0e74085424e3 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 9 Jun 2021 10:56:11 +0200 Subject: [PATCH 0280/1976] Update `dataset_stats()` for HUB (#3536) * Update `dataset_stats()` for HUB Cleanup of b6fdd2e * autodownload flag * Update general.py * cleanup --- utils/datasets.py | 11 +++++------ utils/general.py | 6 +++--- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index 7c74d2c01322..108005c8de65 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -1086,18 +1086,17 @@ def verify_image_label(params): return [None] * 4 + [nm, nf, ne, nc] -def dataset_stats(path='data/coco128.yaml', verbose=False): +def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False): """ Return dataset statistics dictionary with images and instances counts per split per class - Usage: from utils.datasets import *; dataset_stats('data/coco128.yaml') + Usage: from utils.datasets import *; dataset_stats('coco128.yaml', verbose=True) Arguments path: Path to data.yaml + autodownload: Attempt to download dataset if not found locally verbose: Print stats dictionary """ - path = check_file(Path(path)) - with open(path) as f: + with open(check_file(Path(path))) as f: data = yaml.safe_load(f) # data dict - check_dataset(data) # download dataset if missing - + check_dataset(data, autodownload) # download dataset if missing nc = data['nc'] # number of classes stats = {'nc': nc, 'names': data['names']} # statistics dictionary for split in 'train', 'val', 'test': diff --git a/utils/general.py b/utils/general.py index a12b0aafba0e..367f30b925f4 100755 --- a/utils/general.py +++ b/utils/general.py @@ -220,14 +220,14 @@ def check_file(file): return files[0] # return file -def check_dataset(dict): +def check_dataset(data, autodownload=True): # Download dataset if not found locally - val, s = dict.get('val'), dict.get('download') + val, s = data.get('val'), data.get('download') if val and len(val): val = [Path(x).resolve() 
for x in (val if isinstance(val, list) else [val])] # val path if not all(x.exists() for x in val): print('\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()]) - if s and len(s): # download script + if s and len(s) and autodownload: # download script if s.startswith('http') and s.endswith('.zip'): # URL f = Path(s).name # filename print(f'Downloading {s} ...') From ef0b5c9d29192ae1c4a931f9db808114bb486001 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 9 Jun 2021 11:22:21 +0200 Subject: [PATCH 0281/1976] On-demand `pycocotools` pip install (#3547) --- detect.py | 2 +- hubconf.py | 2 +- requirements.txt | 2 +- test.py | 3 ++- train.py | 2 +- 5 files changed, 6 insertions(+), 5 deletions(-) diff --git a/detect.py b/detect.py index 537f47dfafab..8dbb656ed95f 100644 --- a/detect.py +++ b/detect.py @@ -175,7 +175,7 @@ def detect(opt): parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') opt = parser.parse_args() print(opt) - check_requirements(exclude=('tensorboard', 'pycocotools', 'thop')) + check_requirements(exclude=('tensorboard', 'thop')) if opt.update: # update all models (to fix SourceChangeWarning) for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']: diff --git a/hubconf.py b/hubconf.py index bedbee18f87f..429e61bbab1b 100644 --- a/hubconf.py +++ b/hubconf.py @@ -31,7 +31,7 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo from utils.torch_utils import select_device check_requirements(requirements=Path(__file__).parent / 'requirements.txt', - exclude=('tensorboard', 'pycocotools', 'thop', 'opencv-python')) + exclude=('tensorboard', 'thop', 'opencv-python')) set_logging(verbose=verbose) fname = Path(name).with_suffix('.pt') # checkpoint filename diff --git a/requirements.txt b/requirements.txt index a20fb6ad0ea5..b413ec01b31c 100755 --- a/requirements.txt +++ b/requirements.txt @@ -26,5 +26,5 @@ pandas # extras -------------------------------------- # Cython # for pycocotools https://github.com/cocodataset/cocoapi/issues/172 -pycocotools>=2.0 # COCO mAP +# pycocotools>=2.0 # COCO mAP thop # FLOPs computation diff --git a/test.py b/test.py index 6a2a4e47c142..515b984bc7be 100644 --- a/test.py +++ b/test.py @@ -260,6 +260,7 @@ def test(data, json.dump(jdict, f) try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb + check_requirements(['pycocotools']) from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval @@ -311,7 +312,7 @@ def test(data, opt.save_json |= opt.data.endswith('coco.yaml') opt.data = check_file(opt.data) # check file print(opt) - check_requirements(exclude=('tensorboard', 'pycocotools', 'thop')) + check_requirements(exclude=('tensorboard', 'thop')) if opt.task in ('train', 'val', 'test'): # run normally test(opt.data, diff --git a/train.py b/train.py index 093a6197ff06..aad8ff258d6e 100644 --- a/train.py +++ b/train.py @@ -495,7 +495,7 @@ def train(hyp, opt, device, tb_writer=None): set_logging(opt.global_rank) if opt.global_rank in [-1, 0]: check_git_status() - check_requirements(exclude=('pycocotools', 'thop')) + check_requirements(exclude=['thop']) # Resume wandb_run = check_wandb_resume(opt) From f8ec71e1c2ca1c01763f754332eff393b24c23d7 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 9 Jun 2021 11:34:15 +0200 Subject: [PATCH 0282/1976] Update `check_python(minimum=3.6.2)` (#3548) --- utils/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/utils/general.py b/utils/general.py
index 367f30b925f4..1d61f16d7771 100755
--- a/utils/general.py
+++ b/utils/general.py
@@ -136,7 +136,7 @@ def check_git_status(err_msg=', for updates see https://github.com/ultralytics/y
         print(f'{e}{err_msg}')
 
 
-def check_python(minimum='3.7.0', required=True):
+def check_python(minimum='3.6.2', required=True):
     # Check current python version vs. required python version
     current = platform.python_version()
     result = pkg.parse_version(current) >= pkg.parse_version(minimum)

From 0cfc5b2c181fd02f5613227aa5ae31e29b99d6b4 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Wed, 9 Jun 2021 11:49:25 +0200
Subject: [PATCH 0283/1976] Update README.md (#3550)

Add permanent splash URL and update hyperlink from iOS landing page to
Ultralytics YOLOv5 landing page at https://ultralytics.com/yolov5
---
 README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 3a785cc85003..08a6eb272117 100755
--- a/README.md
+++ b/README.md
@@ -1,5 +1,5 @@
-[splash image link HTML stripped in extraction: old iOS landing-page URL]
+[splash image link HTML stripped in extraction: permanent https://ultralytics.com/yolov5 URL]
 [context: "CI CPU testing" badge HTML, stripped in extraction]

From 958ab92dc1a29f41f4c813937fda2bc99e1f147b Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Wed, 9 Jun 2021 13:14:56 +0200
Subject: [PATCH 0284/1976] Remove `opt` from `create_dataloader()` (#3552)

---
 test.py           |  2 +-
 train.py          | 17 +++++++++--------
 utils/datasets.py |  6 +++---
 3 files changed, 13 insertions(+), 12 deletions(-)

diff --git a/test.py b/test.py
index 515b984bc7be..16a31fd17a54 100644
--- a/test.py
+++ b/test.py
@@ -88,7 +88,7 @@ def test(data,
     if device.type != 'cpu':
         model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters())))  # run once
     task = opt.task if opt.task in ('train', 'val', 'test') else 'val'  # path to train/val/test images
-    dataloader = create_dataloader(data[task], imgsz, batch_size, gs, opt, pad=0.5, rect=True,
+    dataloader = create_dataloader(data[task], imgsz, batch_size, gs, single_cls, pad=0.5, rect=True,
                                    prefix=colorstr(f'{task}: '))[0]
 
     seen = 0
diff --git a/train.py b/train.py
index aad8ff258d6e..2465a8c22a37 100644
--- a/train.py
+++ b/train.py
@@ -41,8 +41,9 @@ def train(hyp, opt, device, tb_writer=None):
     logger.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items()))
-    save_dir, epochs, batch_size, total_batch_size, weights, rank = \
-        Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank
+    save_dir, epochs, batch_size, total_batch_size, weights, rank, single_cls = \
+        Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank, \
+        opt.single_cls
 
     # Directories
     wdir = save_dir / 'weights'
@@ -75,8 +76,8 @@ def train(hyp, opt, device, tb_writer=None):
     if wandb_logger.wandb:
         weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp  # WandbLogger might update weights, epochs if resuming
 
-    nc = 1 if opt.single_cls else int(data_dict['nc'])  # number of classes
-    names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names']  # class names
+    nc = 1 if single_cls else int(data_dict['nc'])  # number of classes
+    names = ['item'] if single_cls and len(data_dict['names']) != 1 else data_dict['names']  # class names
     assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data)  # check
     is_coco = opt.data.endswith('coco.yaml') and nc == 80  # COCO dataset
 
@@ -187,7 +188,7 @@ def train(hyp, opt, device, tb_writer=None):
         logger.info('Using SyncBatchNorm()')
 
     # Trainloader
-    dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs,
opt, + dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, single_cls, hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=rank, world_size=opt.world_size, workers=opt.workers, image_weights=opt.image_weights, quad=opt.quad, prefix=colorstr('train: ')) @@ -197,7 +198,7 @@ def train(hyp, opt, device, tb_writer=None): # Process 0 if rank in [-1, 0]: - testloader = create_dataloader(test_path, imgsz_test, batch_size * 2, gs, opt, # testloader + testloader = create_dataloader(test_path, imgsz_test, batch_size * 2, gs, single_cls, hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True, rank=-1, world_size=opt.world_size, workers=opt.workers, pad=0.5, prefix=colorstr('val: '))[0] @@ -357,7 +358,7 @@ def train(hyp, opt, device, tb_writer=None): batch_size=batch_size * 2, imgsz=imgsz_test, model=ema.ema, - single_cls=opt.single_cls, + single_cls=single_cls, dataloader=testloader, save_dir=save_dir, save_json=is_coco and final_epoch, @@ -429,7 +430,7 @@ def train(hyp, opt, device, tb_writer=None): conf_thres=0.001, iou_thres=0.7, model=attempt_load(m, device).half(), - single_cls=opt.single_cls, + single_cls=single_cls, dataloader=testloader, save_dir=save_dir, save_json=True, diff --git a/utils/datasets.py b/utils/datasets.py index 108005c8de65..444b3ff2f60c 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -62,8 +62,8 @@ def exif_size(img): return s -def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False, - rank=-1, world_size=1, workers=8, image_weights=False, quad=False, prefix=''): +def create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=None, augment=False, cache=False, pad=0.0, + rect=False, rank=-1, world_size=1, workers=8, image_weights=False, quad=False, prefix=''): # Make sure only the first process in DDP process the dataset first, and the following others can use the cache with torch_distributed_zero_first(rank): dataset = LoadImagesAndLabels(path, imgsz, batch_size, @@ -71,7 +71,7 @@ def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=Fa hyp=hyp, # augmentation hyperparameters rect=rect, # rectangular training cache_images=cache, - single_cls=opt.single_cls, + single_cls=single_cls, stride=int(stride), pad=pad, image_weights=image_weights, From 63157d214d09ab9c3b4588347dcf3307d85d4410 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 9 Jun 2021 15:09:51 +0200 Subject: [PATCH 0285/1976] Remove `is_coco` argument from `test()` (#3553) --- test.py | 3 +-- train.py | 6 ++---- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/test.py b/test.py index 16a31fd17a54..b17415431615 100644 --- a/test.py +++ b/test.py @@ -39,7 +39,6 @@ def test(data, wandb_logger=None, compute_loss=None, half_precision=True, - is_coco=False, opt=None): # Initialize/load model and set device training = model is not None @@ -71,10 +70,10 @@ def test(data, # Configure model.eval() if isinstance(data, str): - is_coco = data.endswith('coco.yaml') with open(data) as f: data = yaml.safe_load(f) check_dataset(data) # check + is_coco = data['val'].endswith('coco/val2017.txt') # COCO dataset nc = 1 if single_cls else int(data['nc']) # number of classes iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95 niou = iouv.numel() diff --git a/train.py b/train.py index 2465a8c22a37..43c63b6ff3c1 100644 --- a/train.py +++ b/train.py @@ -365,8 +365,7 @@ def train(hyp, opt, device, tb_writer=None): verbose=nc < 50 and final_epoch, 
plots=plots and final_epoch, wandb_logger=wandb_logger, - compute_loss=compute_loss, - is_coco=is_coco) + compute_loss=compute_loss) # Write with open(results_file, 'a') as f: @@ -434,8 +433,7 @@ def train(hyp, opt, device, tb_writer=None): dataloader=testloader, save_dir=save_dir, save_json=True, - plots=False, - is_coco=is_coco) + plots=False) # Strip optimizers for f in last, best: From 8b5086c21ba227c0257d94ea34cb46124a9c559a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 9 Jun 2021 15:31:47 +0200 Subject: [PATCH 0286/1976] Multi-GPU default to single device 0 (#3554) * Multi-GPU default to single device 0 * Multi-GPU default to single device 0 * add space --- utils/torch_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 6a7d07634813..b690dbe96700 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -72,11 +72,11 @@ def select_device(device='', batch_size=None): cuda = not cpu and torch.cuda.is_available() if cuda: - devices = device.split(',') if device else range(torch.cuda.device_count()) # i.e. 0,1,6,7 + devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e. 0,1,6,7 n = len(devices) # device count if n > 1 and batch_size: # check batch_size is divisible by device_count assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}' - space = ' ' * len(s) + space = ' ' * (len(s) + 1) for i, d in enumerate(devices): p = torch.cuda.get_device_properties(i) s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2}MB)\n" # bytes to MB From 5948f20a3d29fa3e0589538650afc17431420e28 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 9 Jun 2021 16:25:17 +0200 Subject: [PATCH 0287/1976] Update test.py profiling (#3555) * Update test.py profiling * half_precision to half * inplace --- test.py | 30 +++++++++++++++++------------- train.py | 26 +++++++++++++------------- utils/plots.py | 17 +++++++++-------- 3 files changed, 39 insertions(+), 34 deletions(-) diff --git a/test.py b/test.py index b17415431615..4e554cf1fe43 100644 --- a/test.py +++ b/test.py @@ -38,7 +38,7 @@ def test(data, plots=True, wandb_logger=None, compute_loss=None, - half_precision=True, + half=True, opt=None): # Initialize/load model and set device training = model is not None @@ -63,7 +63,7 @@ def test(data, # model = nn.DataParallel(model) # Half - half = device.type != 'cpu' and half_precision # half precision only supported on CUDA + half &= device.type != 'cpu' # half precision only supported on CUDA if half: model.half() @@ -95,20 +95,22 @@ def test(data, names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)} coco91class = coco80_to_coco91_class() s = ('%20s' + '%11s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95') - p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0. + p, r, f1, mp, mr, map50, map, t0, t1, t2 = 0., 0., 0., 0., 0., 0., 0., 0., 0., 0. 
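# Aside: a minimal, standalone sketch of the three-stage timing pattern
# (pre-process / inference / NMS) that this patch introduces with the t0,
# t1, t2 accumulators above. time.time() stands in for the repository's
# time_synchronized() helper, and sleep() stands in for real work:
import time

t0 = t1 = t2 = 0.0
for _ in range(3):  # stand-in for the dataloader loop
    t_ = time.time()
    time.sleep(0.001)  # pre-process batch (placeholder)
    t = time.time()
    t0 += t - t_  # accumulate pre-process time
    time.sleep(0.002)  # model inference (placeholder)
    t1 += time.time() - t  # accumulate inference time
    t = time.time()
    time.sleep(0.001)  # non-max suppression (placeholder)
    t2 += time.time() - t  # accumulate NMS time
print(f'totals (s): pre-process {t0:.3f}, inference {t1:.3f}, NMS {t2:.3f}')
# End of aside.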
loss = torch.zeros(3, device=device) jdict, stats, ap, ap_class, wandb_images = [], [], [], [], [] for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)): + t_ = time_synchronized() img = img.to(device, non_blocking=True) img = img.half() if half else img.float() # uint8 to fp16/32 img /= 255.0 # 0 - 255 to 0.0 - 1.0 targets = targets.to(device) nb, _, height, width = img.shape # batch size, channels, height, width + t = time_synchronized() + t0 += t - t_ # Run model - t = time_synchronized() out, train_out = model(img, augment=augment) # inference and training outputs - t0 += time_synchronized() - t + t1 += time_synchronized() - t # Compute loss if compute_loss: @@ -119,7 +121,7 @@ def test(data, lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling t = time_synchronized() out = non_max_suppression(out, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls) - t1 += time_synchronized() - t + t2 += time_synchronized() - t # Statistics per image for si, pred in enumerate(out): @@ -236,9 +238,10 @@ def test(data, print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i])) # Print speeds - t = tuple(x / seen * 1E3 for x in (t0, t1, t0 + t1)) + (imgsz, imgsz, batch_size) # tuple + t = tuple(x / seen * 1E3 for x in (t0, t1, t2)) # speeds per image if not training: - print('Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g' % t) + shape = (batch_size, 3, imgsz, imgsz) + print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t) # Plots if plots: @@ -327,24 +330,25 @@ def test(data, save_txt=opt.save_txt | opt.save_hybrid, save_hybrid=opt.save_hybrid, save_conf=opt.save_conf, - half_precision=opt.half, + half=opt.half, opt=opt ) elif opt.task == 'speed': # speed benchmarks - for w in opt.weights: - test(opt.data, w, opt.batch_size, opt.img_size, 0.25, 0.45, save_json=False, plots=False, opt=opt) + for w in opt.weights if isinstance(opt.weights, list) else [opt.weights]: + test(opt.data, w, opt.batch_size, opt.img_size, 0.25, 0.45, save_json=False, plots=False, half=True, + opt=opt) elif opt.task == 'study': # run over a range of settings and save/plot # python test.py --task study --data coco.yaml --iou 0.7 --weights yolov5s.pt yolov5m.pt yolov5l.pt yolov5x.pt x = list(range(256, 1536 + 128, 128)) # x axis (image sizes) - for w in opt.weights: + for w in opt.weights if isinstance(opt.weights, list) else [opt.weights]: f = f'study_{Path(opt.data).stem}_{Path(w).stem}.txt' # filename to save to y = [] # y axis for i in x: # img-size print(f'\nRunning {f} point {i}...') r, _, t = test(opt.data, w, opt.batch_size, i, opt.conf_thres, opt.iou_thres, opt.save_json, - plots=False, opt=opt) + plots=False, half=True, opt=opt) y.append(r + t) # results and times np.savetxt(f, y, fmt='%10.4g') # save os.system('zip -r study.zip study_*.txt') diff --git a/train.py b/train.py index 43c63b6ff3c1..b92936d762b5 100644 --- a/train.py +++ b/train.py @@ -74,7 +74,7 @@ def train(hyp, opt, device, tb_writer=None): loggers['wandb'] = wandb_logger.wandb data_dict = wandb_logger.data_dict if wandb_logger.wandb: - weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp # WandbLogger might update weights, epochs if resuming + weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp # may update weights, epochs if resuming nc = 1 if single_cls else int(data_dict['nc']) # number of classes names = ['item'] if single_cls and len(data_dict['names']) != 1 else 
data_dict['names'] # class names @@ -354,18 +354,18 @@ def train(hyp, opt, device, tb_writer=None): final_epoch = epoch + 1 == epochs if not opt.notest or final_epoch: # Calculate mAP wandb_logger.current_epoch = epoch + 1 - results, maps, times = test.test(data_dict, - batch_size=batch_size * 2, - imgsz=imgsz_test, - model=ema.ema, - single_cls=single_cls, - dataloader=testloader, - save_dir=save_dir, - save_json=is_coco and final_epoch, - verbose=nc < 50 and final_epoch, - plots=plots and final_epoch, - wandb_logger=wandb_logger, - compute_loss=compute_loss) + results, maps, _ = test.test(data_dict, + batch_size=batch_size * 2, + imgsz=imgsz_test, + model=ema.ema, + single_cls=single_cls, + dataloader=testloader, + save_dir=save_dir, + save_json=is_coco and final_epoch, + verbose=nc < 50 and final_epoch, + plots=plots and final_epoch, + wandb_logger=wandb_logger, + compute_loss=compute_loss) # Write with open(results_file, 'a') as f: diff --git a/utils/plots.py b/utils/plots.py index 8313ef210f90..973b9ae19b54 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -3,7 +3,6 @@ import glob import math import os -import random from copy import copy from pathlib import Path @@ -252,21 +251,23 @@ def plot_targets_txt(): # from utils.plots import *; plot_targets_txt() def plot_study_txt(path='', x=None): # from utils.plots import *; plot_study_txt() # Plot study.txt generated by test.py - fig, ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True) - # ax = ax.ravel() + plot2 = False # plot additional results + if plot2: + ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)[1].ravel() fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True) # for f in [Path(path) / f'study_coco_{x}.txt' for x in ['yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]: for f in sorted(Path(path).glob('study*.txt')): y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T x = np.arange(y.shape[1]) if x is None else np.array(x) - s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_inference (ms/img)', 't_NMS (ms/img)', 't_total (ms/img)'] - # for i in range(7): - # ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8) - # ax[i].set_title(s[i]) + if plot2: + s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_preprocess (ms/img)', 't_inference (ms/img)', 't_NMS (ms/img)'] + for i in range(7): + ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8) + ax[i].set_title(s[i]) j = y[3].argmax() + 1 - ax2.plot(y[6, 1:j], y[3, 1:j] * 1E2, '.-', linewidth=2, markersize=8, + ax2.plot(y[5, 1:j], y[3, 1:j] * 1E2, '.-', linewidth=2, markersize=8, label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO')) ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5], From 4ef92618700cb1a4fc54de970f05e3126283d0da Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 9 Jun 2021 16:30:01 +0200 Subject: [PATCH 0288/1976] Remove redundant speed/study `half` argument (#3557) --- test.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/test.py b/test.py index 4e554cf1fe43..971c4b005ca1 100644 --- a/test.py +++ b/test.py @@ -336,8 +336,7 @@ def test(data, elif opt.task == 'speed': # speed benchmarks for w in opt.weights if isinstance(opt.weights, list) else [opt.weights]: - test(opt.data, w, opt.batch_size, opt.img_size, 0.25, 0.45, save_json=False, plots=False, half=True, - opt=opt) + test(opt.data, w, opt.batch_size, opt.img_size, 0.25, 0.45, save_json=False, plots=False, opt=opt) elif opt.task == 'study': # run over a range of settings and save/plot # python 
test.py --task study --data coco.yaml --iou 0.7 --weights yolov5s.pt yolov5m.pt yolov5l.pt yolov5x.pt @@ -348,7 +347,7 @@ def test(data, for i in x: # img-size print(f'\nRunning {f} point {i}...') r, _, t = test(opt.data, w, opt.batch_size, i, opt.conf_thres, opt.iou_thres, opt.save_json, - plots=False, half=True, opt=opt) + plots=False, opt=opt) y.append(r + t) # results and times np.savetxt(f, y, fmt='%10.4g') # save os.system('zip -r study.zip study_*.txt') From c6deb73a895bd09b6110236cf29594211a2a42f9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 9 Jun 2021 21:14:09 +0200 Subject: [PATCH 0289/1976] Bump pip from 18.1 to 19.2 in /utils/google_app_engine (#3561) Bumps [pip](https://github.com/pypa/pip) from 18.1 to 19.2. - [Release notes](https://github.com/pypa/pip/releases) - [Changelog](https://github.com/pypa/pip/blob/main/NEWS.rst) - [Commits](https://github.com/pypa/pip/compare/18.1...19.2) --- updated-dependencies: - dependency-name: pip dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- utils/google_app_engine/additional_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/google_app_engine/additional_requirements.txt b/utils/google_app_engine/additional_requirements.txt index 5fcc30524a59..2f81c8b40056 100644 --- a/utils/google_app_engine/additional_requirements.txt +++ b/utils/google_app_engine/additional_requirements.txt @@ -1,4 +1,4 @@ # add these requirements in your app on top of the existing ones -pip==18.1 +pip==19.2 Flask==1.0.2 gunicorn==19.9.0 From a9553c04a7d32d5c8f29b0917fbeb6b1ef6cfe5f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 9 Jun 2021 21:36:10 +0200 Subject: [PATCH 0290/1976] Refactor test.py arguments (#3558) * remove opt from test() * pass kwargs * update comments * revert accidental default change * multiple --img options * add comments --- detect.py | 2 +- test.py | 46 ++++++++++++++++++---------------------------- 2 files changed, 19 insertions(+), 29 deletions(-) diff --git a/detect.py b/detect.py index 8dbb656ed95f..5551824a4110 100644 --- a/detect.py +++ b/detect.py @@ -33,7 +33,7 @@ def detect(opt): # Load model model = attempt_load(weights, map_location=device) # load FP32 model stride = int(model.stride.max()) # model stride - imgsz = check_img_size(imgsz, s=stride) # check img_size + imgsz = check_img_size(imgsz, s=stride) # check image size names = model.module.names if hasattr(model, 'module') else model.names # get class names if half: model.half() # to FP16 diff --git a/test.py b/test.py index 971c4b005ca1..a10f0f88f8e6 100644 --- a/test.py +++ b/test.py @@ -22,9 +22,9 @@ def test(data, weights=None, batch_size=32, - imgsz=640, - conf_thres=0.001, - iou_thres=0.6, # for NMS + imgsz=640, # image size + conf_thres=0.001, # confidence threshold + iou_thres=0.6, # NMS IoU threshold save_json=False, single_cls=False, augment=False, @@ -38,8 +38,12 @@ def test(data, plots=True, wandb_logger=None, compute_loss=None, - half=True, - opt=None): + half=True, # FP16 half-precision inference + project='runs/test', + name='exp', + exist_ok=False, + task='val', + device=''): # Initialize/load model and set device training = model is not None if training: # called by train.py @@ -47,16 +51,16 @@ def test(data, else: # called directly set_logging() - device = select_device(opt.device, batch_size=batch_size) + device = select_device(device, 
batch_size=batch_size) # Directories - save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok) # increment run + save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model model = attempt_load(weights, map_location=device) # load FP32 model gs = max(int(model.stride.max()), 32) # grid size (max stride) - imgsz = check_img_size(imgsz, s=gs) # check img_size + imgsz = check_img_size(imgsz, s=gs) # check image size # Multi-GPU disabled, incompatible with .half() https://github.com/ultralytics/yolov5/issues/99 # if device.type != 'cpu' and torch.cuda.device_count() > 1: @@ -86,7 +90,7 @@ def test(data, if not training: if device.type != 'cpu': model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once - task = opt.task if opt.task in ('train', 'val', 'test') else 'val' # path to train/val/test images + task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images dataloader = create_dataloader(data[task], imgsz, batch_size, gs, single_cls, pad=0.5, rect=True, prefix=colorstr(f'{task}: '))[0] @@ -294,7 +298,7 @@ def test(data, parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)') parser.add_argument('--data', type=str, default='data/coco128.yaml', help='*.data path') parser.add_argument('--batch-size', type=int, default=32, help='size of each image batch') - parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)') + parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)') parser.add_argument('--conf-thres', type=float, default=0.001, help='object confidence threshold') parser.add_argument('--iou-thres', type=float, default=0.6, help='IOU threshold for NMS') parser.add_argument('--task', default='val', help='train, val, test, speed or study') @@ -312,31 +316,17 @@ def test(data, parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') opt = parser.parse_args() opt.save_json |= opt.data.endswith('coco.yaml') + opt.save_txt |= opt.save_hybrid opt.data = check_file(opt.data) # check file print(opt) check_requirements(exclude=('tensorboard', 'thop')) if opt.task in ('train', 'val', 'test'): # run normally - test(opt.data, - opt.weights, - opt.batch_size, - opt.img_size, - opt.conf_thres, - opt.iou_thres, - opt.save_json, - opt.single_cls, - opt.augment, - opt.verbose, - save_txt=opt.save_txt | opt.save_hybrid, - save_hybrid=opt.save_hybrid, - save_conf=opt.save_conf, - half=opt.half, - opt=opt - ) + test(**vars(opt)) elif opt.task == 'speed': # speed benchmarks for w in opt.weights if isinstance(opt.weights, list) else [opt.weights]: - test(opt.data, w, opt.batch_size, opt.img_size, 0.25, 0.45, save_json=False, plots=False, opt=opt) + test(opt.data, w, opt.batch_size, opt.imgsz, 0.25, 0.45, save_json=False, plots=False) elif opt.task == 'study': # run over a range of settings and save/plot # python test.py --task study --data coco.yaml --iou 0.7 --weights yolov5s.pt yolov5m.pt yolov5l.pt yolov5x.pt @@ -347,7 +337,7 @@ def test(data, for i in x: # img-size print(f'\nRunning {f} point {i}...') r, _, t = test(opt.data, w, opt.batch_size, i, opt.conf_thres, opt.iou_thres, opt.save_json, - plots=False, opt=opt) + plots=False) y.append(r + t) # results and times np.savetxt(f, y, fmt='%10.4g') # save os.system('zip -r study.zip 
study_*.txt') From 66cf5c28c1c9c593532b71610c81b7292af2bebd Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 9 Jun 2021 22:19:34 +0200 Subject: [PATCH 0291/1976] Refactor detect.py arguments (#3559) * Refactor detect.py arguments @SkalskiP @KalenMike * unused ok * comment arguments --- detect.py | 73 +++++++++++++++++++++++++++++++++++-------------------- 1 file changed, 46 insertions(+), 27 deletions(-) diff --git a/detect.py b/detect.py index 5551824a4110..5a13b5303238 100644 --- a/detect.py +++ b/detect.py @@ -15,20 +15,42 @@ @torch.no_grad() -def detect(opt): - source, weights, view_img, save_txt, imgsz = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size - save_img = not opt.nosave and not source.endswith('.txt') # save inference images +def detect(weights='yolov5s.pt', # model.pt path(s) + source='data/images', # file/dir/URL/glob, 0 for webcam + imgsz=640, # inference size (pixels) + conf_thres=0.25, # confidence threshold + iou_thres=0.45, # NMS IOU threshold + max_det=1000, # maximum detections per image + device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu + view_img=False, # show results + save_txt=False, # save results to *.txt + save_conf=False, # save confidences in --save-txt labels + save_crop=False, # save cropped prediction boxes + nosave=False, # do not save images/videos + classes=None, # filter by class: --class 0, or --class 0 2 3 + agnostic_nms=False, # class-agnostic NMS + augment=False, # augmented inference + update=False, # update all models + project='runs/detect', # save results to project/name + name='exp', # save results to project/name + exist_ok=False, # existing project/name ok, do not increment + line_thickness=3, # bounding box thickness (pixels) + hide_labels=False, # hide labels + hide_conf=False, # hide confidences + half=False, # use FP16 half-precision inference + ): + save_img = not nosave and not source.endswith('.txt') # save inference images webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith( ('rtsp://', 'rtmp://', 'http://', 'https://')) # Directories - save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok) # increment run + save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Initialize set_logging() - device = select_device(opt.device) - half = opt.half and device.type != 'cpu' # half precision only supported on CUDA + device = select_device(device) + half &= device.type != 'cpu' # half precision only supported on CUDA # Load model model = attempt_load(weights, map_location=device) # load FP32 model @@ -66,11 +88,10 @@ def detect(opt): # Inference t1 = time_synchronized() - pred = model(img, augment=opt.augment)[0] + pred = model(img, augment=augment)[0] # Apply NMS - pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, opt.classes, opt.agnostic_nms, - max_det=opt.max_det) + pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det) t2 = time_synchronized() # Apply Classifier @@ -89,7 +110,7 @@ def detect(opt): txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # img.txt s += '%gx%g ' % img.shape[2:] # print string gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh - imc = im0.copy() if opt.save_crop else im0 # for opt.save_crop + imc = im0.copy() if save_crop else im0 # for save_crop if len(det): # Rescale boxes from img_size to im0 
size det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round() @@ -103,15 +124,15 @@ def detect(opt): for *xyxy, conf, cls in reversed(det): if save_txt: # Write to file xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh - line = (cls, *xywh, conf) if opt.save_conf else (cls, *xywh) # label format + line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format with open(txt_path + '.txt', 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') - if save_img or opt.save_crop or view_img: # Add bbox to image + if save_img or save_crop or view_img: # Add bbox to image c = int(cls) # integer class - label = None if opt.hide_labels else (names[c] if opt.hide_conf else f'{names[c]} {conf:.2f}') - plot_one_box(xyxy, im0, label=label, color=colors(c, True), line_thickness=opt.line_thickness) - if opt.save_crop: + label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') + plot_one_box(xyxy, im0, label=label, color=colors(c, True), line_thickness=line_thickness) + if save_crop: save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) # Print time (inference + NMS) @@ -145,19 +166,22 @@ def detect(opt): s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' print(f"Results saved to {save_dir}{s}") + if update: + strip_optimizer(weights) # update model (to fix SourceChangeWarning) + print(f'Done. ({time.time() - t0:.3f}s)') if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)') - parser.add_argument('--source', type=str, default='data/images', help='source') # file/folder, 0 for webcam - parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)') - parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold') - parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS') - parser.add_argument('--max-det', type=int, default=1000, help='maximum number of detections per image') + parser.add_argument('--source', type=str, default='data/images', help='file/dir/URL/glob, 0 for webcam') + parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)') + parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold') + parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IOU threshold') + parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image') parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') - parser.add_argument('--view-img', action='store_true', help='display results') + parser.add_argument('--view-img', action='store_true', help='show results') parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes') @@ -177,9 +201,4 @@ def detect(opt): print(opt) check_requirements(exclude=('tensorboard', 'thop')) - if opt.update: # update all models (to fix SourceChangeWarning) - for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']: - detect(opt=opt) - strip_optimizer(opt.weights) - else: - detect(opt=opt) + detect(**vars(opt)) From 0e5cfdbea756716d5bbdfe6f3b26b2731e2facc4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 9 Jun 2021 22:43:46 +0200 Subject: [PATCH 0292/1976] Refactor models/export.py arguments (#3564) * Refactor models/export.py arguments * cleanup * cleanup --- models/export.py | 108 +++++++++++++++++++++++++++-------------------- 1 file changed, 63 insertions(+), 45 deletions(-) diff --git a/models/export.py b/models/export.py index c03770178829..6f8799e55593 100644 --- a/models/export.py +++ b/models/export.py @@ -1,4 +1,4 @@ -"""Exports a YOLOv5 *.pt model to TorchScript, ONNX, CoreML formats +"""Export a YOLOv5 *.pt model to TorchScript, ONNX, CoreML formats Usage: $ python path/to/models/export.py --weights yolov5s.pt --img 640 --batch 1 @@ -21,42 +21,39 @@ from utils.general import colorstr, check_img_size, check_requirements, file_size, set_logging from utils.torch_utils import select_device -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--weights', type=str, default='./yolov5s.pt', help='weights path') - parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image size') # height, width - parser.add_argument('--batch-size', type=int, default=1, help='batch size') - parser.add_argument('--device', default='cpu', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') - parser.add_argument('--include', nargs='+', default=['torchscript', 'onnx', 'coreml'], help='include formats') - parser.add_argument('--half', action='store_true', help='FP16 half-precision export') - parser.add_argument('--inplace', action='store_true', help='set YOLOv5 Detect() inplace=True') - parser.add_argument('--train', action='store_true', help='model.train() mode') - parser.add_argument('--optimize', action='store_true', help='optimize TorchScript for mobile') # TorchScript-only - parser.add_argument('--dynamic', action='store_true', help='dynamic ONNX axes') # ONNX-only - parser.add_argument('--simplify', action='store_true', help='simplify ONNX model') # ONNX-only - parser.add_argument('--opset-version', type=int, default=12, help='ONNX opset version') # ONNX-only - opt = parser.parse_args() - opt.img_size *= 2 if len(opt.img_size) == 1 else 1 # expand - opt.include = [x.lower() for x in opt.include] - print(opt) - set_logging() + +def export(weights='./yolov5s.pt', # weights path + img_size=(640, 640), # image (height, width) + batch_size=1, # batch size + device='cpu', # cuda device, i.e. 
0 or 0,1,2,3 or cpu + include=('torchscript', 'onnx', 'coreml'), # include formats + half=False, # FP16 half-precision export + inplace=False, # set YOLOv5 Detect() inplace=True + train=False, # model.train() mode + optimize=False, # TorchScript: optimize for mobile + dynamic=False, # ONNX: dynamic axes + simplify=False, # ONNX: simplify model + opset_version=12, # ONNX: opset version + ): t = time.time() + include = [x.lower() for x in include] + img_size *= 2 if len(img_size) == 1 else 1 # expand # Load PyTorch model - device = select_device(opt.device) - assert not (opt.device.lower() == 'cpu' and opt.half), '--half only compatible with GPU export, i.e. use --device 0' - model = attempt_load(opt.weights, map_location=device) # load FP32 model + device = select_device(device) + assert not (device.type == 'cpu' and opt.half), '--half only compatible with GPU export, i.e. use --device 0' + model = attempt_load(weights, map_location=device) # load FP32 model labels = model.names # Input gs = int(max(model.stride)) # grid size (max stride) - opt.img_size = [check_img_size(x, gs) for x in opt.img_size] # verify img_size are gs-multiples - img = torch.zeros(opt.batch_size, 3, *opt.img_size).to(device) # image size(1,3,320,192) iDetection + img_size = [check_img_size(x, gs) for x in img_size] # verify img_size are gs-multiples + img = torch.zeros(batch_size, 3, *img_size).to(device) # image size(1,3,320,192) iDetection # Update model - if opt.half: + if half: img, model = img.half(), model.half() # to FP16 - model.train() if opt.train else model.eval() # training mode = no Detect() layer grid construction + model.train() if train else model.eval() # training mode = no Detect() layer grid construction for k, m in model.named_modules(): m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility if isinstance(m, models.common.Conv): # assign export-friendly activations @@ -65,42 +62,42 @@ elif isinstance(m.act, nn.SiLU): m.act = SiLU() elif isinstance(m, models.yolo.Detect): - m.inplace = opt.inplace - m.onnx_dynamic = opt.dynamic + m.inplace = inplace + m.onnx_dynamic = dynamic # m.forward = m.forward_export # assign forward (optional) for _ in range(2): y = model(img) # dry runs - print(f"\n{colorstr('PyTorch:')} starting from {opt.weights} ({file_size(opt.weights):.1f} MB)") + print(f"\n{colorstr('PyTorch:')} starting from {weights} ({file_size(weights):.1f} MB)") # TorchScript export ----------------------------------------------------------------------------------------------- - if 'torchscript' in opt.include or 'coreml' in opt.include: + if 'torchscript' in include or 'coreml' in include: prefix = colorstr('TorchScript:') try: print(f'\n{prefix} starting export with torch {torch.__version__}...') - f = opt.weights.replace('.pt', '.torchscript.pt') # filename + f = weights.replace('.pt', '.torchscript.pt') # filename ts = torch.jit.trace(model, img, strict=False) - (optimize_for_mobile(ts) if opt.optimize else ts).save(f) + (optimize_for_mobile(ts) if optimize else ts).save(f) print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') except Exception as e: print(f'{prefix} export failure: {e}') # ONNX export ------------------------------------------------------------------------------------------------------ - if 'onnx' in opt.include: + if 'onnx' in include: prefix = colorstr('ONNX:') try: import onnx print(f'{prefix} starting export with onnx {onnx.__version__}...') - f = opt.weights.replace('.pt', '.onnx') # filename - torch.onnx.export(model, img, f, 
verbose=False, opset_version=opt.opset_version, - training=torch.onnx.TrainingMode.TRAINING if opt.train else torch.onnx.TrainingMode.EVAL, - do_constant_folding=not opt.train, + f = weights.replace('.pt', '.onnx') # filename + torch.onnx.export(model, img, f, verbose=False, opset_version=opset_version, + training=torch.onnx.TrainingMode.TRAINING if train else torch.onnx.TrainingMode.EVAL, + do_constant_folding=not train, input_names=['images'], output_names=['output'], dynamic_axes={'images': {0: 'batch', 2: 'height', 3: 'width'}, # shape(1,3,640,640) 'output': {0: 'batch', 1: 'anchors'} # shape(1,25200,85) - } if opt.dynamic else None) + } if dynamic else None) # Checks model_onnx = onnx.load(f) # load onnx model @@ -108,7 +105,7 @@ # print(onnx.helper.printable_graph(model_onnx.graph)) # print # Simplify - if opt.simplify: + if simplify: try: check_requirements(['onnx-simplifier']) import onnxsim @@ -116,8 +113,8 @@ print(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...') model_onnx, check = onnxsim.simplify( model_onnx, - dynamic_input_shape=opt.dynamic, - input_shapes={'images': list(img.shape)} if opt.dynamic else None) + dynamic_input_shape=dynamic, + input_shapes={'images': list(img.shape)} if dynamic else None) assert check, 'assert check failed' onnx.save(model_onnx, f) except Exception as e: @@ -127,15 +124,15 @@ print(f'{prefix} export failure: {e}') # CoreML export ---------------------------------------------------------------------------------------------------- - if 'coreml' in opt.include: + if 'coreml' in include: prefix = colorstr('CoreML:') try: import coremltools as ct print(f'{prefix} starting export with coremltools {ct.__version__}...') - assert opt.train, 'CoreML exports should be placed in model.train() mode with `python export.py --train`' + assert train, 'CoreML exports should be placed in model.train() mode with `python export.py --train`' model = ct.convert(ts, inputs=[ct.ImageType('image', shape=img.shape, scale=1 / 255.0, bias=[0, 0, 0])]) - f = opt.weights.replace('.pt', '.mlmodel') # filename + f = weights.replace('.pt', '.mlmodel') # filename model.save(f) print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') except Exception as e: @@ -143,3 +140,24 @@ # Finish print(f'\nExport complete ({time.time() - t:.2f}s). Visualize with https://github.com/lutzroeder/netron.') + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--weights', type=str, default='./yolov5s.pt', help='weights path') + parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image (height, width)') + parser.add_argument('--batch-size', type=int, default=1, help='batch size') + parser.add_argument('--device', default='cpu', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') + parser.add_argument('--include', nargs='+', default=['torchscript', 'onnx', 'coreml'], help='include formats') + parser.add_argument('--half', action='store_true', help='FP16 half-precision export') + parser.add_argument('--inplace', action='store_true', help='set YOLOv5 Detect() inplace=True') + parser.add_argument('--train', action='store_true', help='model.train() mode') + parser.add_argument('--optimize', action='store_true', help='TorchScript: optimize for mobile') + parser.add_argument('--dynamic', action='store_true', help='ONNX: dynamic axes') + parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model') + parser.add_argument('--opset-version', type=int, default=12, help='ONNX: opset version') + opt = parser.parse_args() + print(opt) + set_logging() + + export(**vars(opt)) From 4695ca8314269c9a9f4b8cf89c7962205f27fdad Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 9 Jun 2021 22:50:27 +0200 Subject: [PATCH 0293/1976] Refactoring cleanup (#3565) * Refactoring cleanup * Update test.py --- detect.py | 2 +- test.py | 52 +++++++++++++++++++++++++++------------------------- train.py | 2 +- 3 files changed, 29 insertions(+), 27 deletions(-) diff --git a/detect.py b/detect.py index 5a13b5303238..67916c652742 100644 --- a/detect.py +++ b/detect.py @@ -178,7 +178,7 @@ def detect(weights='yolov5s.pt', # model.pt path(s) parser.add_argument('--source', type=str, default='data/images', help='file/dir/URL/glob, 0 for webcam') parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)') parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold') - parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IOU threshold') + parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold') parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image') parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') parser.add_argument('--view-img', action='store_true', help='show results') diff --git a/test.py b/test.py index a10f0f88f8e6..cbc97b420155 100644 --- a/test.py +++ b/test.py @@ -20,30 +20,31 @@ @torch.no_grad() def test(data, - weights=None, - batch_size=32, - imgsz=640, # image size + weights=None, # model.pt path(s) + batch_size=32, # batch size + imgsz=640, # inference size (pixels) conf_thres=0.001, # confidence threshold iou_thres=0.6, # NMS IoU threshold - save_json=False, - single_cls=False, - augment=False, - verbose=False, + task='val', # train, val, test, speed or study + device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu + single_cls=False, # treat as single-class dataset + augment=False, # augmented inference + verbose=False, # verbose output + save_txt=False, # save results to *.txt + save_hybrid=False, # save label+prediction hybrid results to *.txt + save_conf=False, # save confidences in --save-txt labels + save_json=False, # save a cocoapi-compatible JSON results file + project='runs/test', # save to project/name + name='exp', # save to project/name + exist_ok=False, # existing project/name ok, do not increment + half=True, # use FP16 half-precision inference model=None, dataloader=None, - save_dir=Path(''), # for saving images - save_txt=False, # for auto-labelling - save_hybrid=False, # for hybrid auto-labelling - save_conf=False, # save auto-label confidences + save_dir=Path(''), plots=True, wandb_logger=None, compute_loss=None, - half=True, # FP16 half-precision inference - project='runs/test', - name='exp', - exist_ok=False, - task='val', - device=''): + ): # Initialize/load model and set device training = model is not None if training: # called by train.py @@ -155,7 +156,7 @@ def test(data, with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') - # W&B logging - Media Panel Plots + # W&B logging - Media Panel plots if len(wandb_images) < log_imgs and wandb_logger.current_epoch > 0: # Check for test operation if wandb_logger.current_epoch % wandb_logger.bbox_interval == 0: box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, @@ -295,12 +296,12 @@ def test(data, if __name__ == '__main__': parser = argparse.ArgumentParser(prog='test.py') + parser.add_argument('--data', type=str, default='data/coco128.yaml', help='dataset.yaml path') parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)') - parser.add_argument('--data', type=str, default='data/coco128.yaml', help='*.data path') - parser.add_argument('--batch-size', type=int, default=32, help='size of each image batch') + parser.add_argument('--batch-size', type=int, default=32, help='batch size') parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)') - parser.add_argument('--conf-thres', type=float, default=0.001, help='object confidence threshold') - parser.add_argument('--iou-thres', type=float, default=0.6, help='IOU threshold for NMS') + parser.add_argument('--conf-thres', type=float, default=0.001, help='confidence threshold') + parser.add_argument('--iou-thres', type=float, default=0.6, help='NMS IoU threshold') parser.add_argument('--task', default='val', help='train, val, test, speed or study') parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset') @@ -326,7 +327,8 @@ def test(data, elif opt.task == 'speed': # speed benchmarks for w in opt.weights if isinstance(opt.weights, list) else [opt.weights]: - test(opt.data, w, opt.batch_size, opt.imgsz, 0.25, 0.45, save_json=False, plots=False) + test(opt.data, weights=w, batch_size=opt.batch_size, imgsz=opt.imgsz, conf_thres=.25, iou_thres=.45, + save_json=False, plots=False) elif opt.task == 'study': # run over a range of settings and save/plot # python test.py --task study --data coco.yaml --iou 0.7 --weights yolov5s.pt yolov5m.pt yolov5l.pt yolov5x.pt @@ -336,8 +338,8 @@ def test(data, y = [] # y axis for i in x: # img-size print(f'\nRunning {f} point {i}...') - r, _, t = test(opt.data, w, opt.batch_size, i, opt.conf_thres, opt.iou_thres, opt.save_json, - plots=False) + r, _, t = test(opt.data, weights=w, batch_size=opt.batch_size, imgsz=i, conf_thres=opt.conf_thres, + iou_thres=opt.iou_thres, save_json=opt.save_json, plots=False) y.append(r + t) # results and times np.savetxt(f, y, fmt='%10.4g') # save os.system('zip -r study.zip study_*.txt') diff --git a/train.py b/train.py index b92936d762b5..505556075af5 100644 --- a/train.py +++ b/train.py @@ -454,7 +454,7 @@ def train(hyp, opt, device, tb_writer=None): parser = argparse.ArgumentParser() parser.add_argument('--weights', type=str, default='yolov5s.pt', help='initial weights path') parser.add_argument('--cfg', type=str, default='', help='model.yaml path') - parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path') + parser.add_argument('--data', type=str, default='data/coco128.yaml', help='dataset.yaml path') parser.add_argument('--hyp', type=str, default='data/hyp.scratch.yaml', help='hyperparameters path') parser.add_argument('--epochs', type=int, default=300) parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs') From 095197bd4a011b867f1bb7118d1735dd84ac5ee6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 10 Jun 2021 13:51:29 +0200 Subject: [PATCH 0294/1976] Ignore Seaborn plot warnings (#3576) * Ignore Seaborn plot warnings * Update plots.py * Update metrics.py --- utils/metrics.py | 9 ++++++--- utils/plots.py | 8 ++++---- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/utils/metrics.py b/utils/metrics.py index 323c84b6c873..6b61d6d6ef02 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -1,5 +1,6 @@ # Model validation metrics +import warnings from pathlib import Path import matplotlib.pyplot as plt @@ -167,9 +168,11 @@ def plot(self, save_dir='', names=()): fig = plt.figure(figsize=(12, 9), tight_layout=True) sn.set(font_scale=1.0 if self.nc < 50 else 0.8) # for label size labels = (0 < len(names) < 99) and len(names) == self.nc # apply names to ticklabels - sn.heatmap(array, annot=self.nc < 30, annot_kws={"size": 8}, cmap='Blues', fmt='.2f', square=True, - xticklabels=names + ['background FP'] if labels else "auto", - yticklabels=names + ['background FN'] if labels else "auto").set_facecolor((1, 1, 1)) + with warnings.catch_warnings(): + warnings.simplefilter('ignore') # suppress empty matrix RuntimeWarning: All-NaN slice encountered + sn.heatmap(array, annot=self.nc < 30, annot_kws={"size": 8}, cmap='Blues', fmt='.2f', square=True, + xticklabels=names + ['background FP'] if labels else "auto", + yticklabels=names + ['background FN'] if labels else "auto").set_facecolor((1, 1, 1)) fig.axes[0].set_xlabel('True') 
fig.axes[0].set_ylabel('Predicted') fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250) diff --git a/utils/plots.py b/utils/plots.py index 973b9ae19b54..66a30918190e 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -11,7 +11,7 @@ import matplotlib.pyplot as plt import numpy as np import pandas as pd -import seaborn as sns +import seaborn as sn import torch import yaml from PIL import Image, ImageDraw, ImageFont @@ -291,7 +291,7 @@ def plot_labels(labels, names=(), save_dir=Path(''), loggers=None): x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height']) # seaborn correlogram - sns.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9)) + sn.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9)) plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200) plt.close() @@ -306,8 +306,8 @@ def plot_labels(labels, names=(), save_dir=Path(''), loggers=None): ax[0].set_xticklabels(names, rotation=90, fontsize=10) else: ax[0].set_xlabel('classes') - sns.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9) - sns.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9) + sn.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9) + sn.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9) # rectangles labels[:, 1:3] = 0.5 # center From 53ed872c282fea6d909d2052b25be53c9c05cfb6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 10 Jun 2021 15:35:22 +0200 Subject: [PATCH 0295/1976] Update export.py, yolo.py `sys.path.append()` (#3579) --- models/export.py | 12 +++++++----- models/yolo.py | 6 ++++-- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/models/export.py b/models/export.py index 6f8799e55593..3c04b07fdc95 100644 --- a/models/export.py +++ b/models/export.py @@ -9,13 +9,15 @@ import time from pathlib import Path -sys.path.append(Path(__file__).parent.parent.absolute().__str__()) # to run '$ python *.py' files in subdirectories - import torch import torch.nn as nn from torch.utils.mobile_optimizer import optimize_for_mobile -import models +FILE = Path(__file__).absolute() +sys.path.append(FILE.parents[1].as_posix()) # add yolov5/ to path + +from models.common import Conv +from models.yolo import Detect from models.experimental import attempt_load from utils.activations import Hardswish, SiLU from utils.general import colorstr, check_img_size, check_requirements, file_size, set_logging @@ -56,12 +58,12 @@ def export(weights='./yolov5s.pt', # weights path model.train() if train else model.eval() # training mode = no Detect() layer grid construction for k, m in model.named_modules(): m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility - if isinstance(m, models.common.Conv): # assign export-friendly activations + if isinstance(m, Conv): # assign export-friendly activations if isinstance(m.act, nn.Hardswish): m.act = Hardswish() elif isinstance(m.act, nn.SiLU): m.act = SiLU() - elif isinstance(m, models.yolo.Detect): + elif isinstance(m, Detect): m.inplace = inplace m.onnx_dynamic = dynamic # m.forward = m.forward_export # assign forward (optional) diff --git a/models/yolo.py b/models/yolo.py index 1a7be913023c..4a2514edd295 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -10,8 +10,8 @@ from copy import deepcopy from pathlib import Path -sys.path.append(Path(__file__).parent.parent.absolute().__str__()) # to run '$ python *.py' files in subdirectories -logger = logging.getLogger(__name__) +FILE = Path(__file__).absolute() 
+sys.path.append(FILE.parents[1].as_posix()) # add yolov5/ to path from models.common import * from models.experimental import * @@ -25,6 +25,8 @@ except ImportError: thop = None +logger = logging.getLogger(__name__) + class Detect(nn.Module): stride = None # strides computed during build From 5c32bd3080c8643aed9c167bb2fc655f502facaf Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 10 Jun 2021 16:01:34 +0200 Subject: [PATCH 0296/1976] Created using Colaboratory --- tutorial.ipynb | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index b6d672d10e52..48780f94c856 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -523,10 +523,11 @@ { "cell_type": "markdown", "metadata": { - "id": "HvhYZrIZCEyo" + "id": "t6MPjfT5NrKQ" }, "source": [ - "\n", + "\n", + "\n", "\n", "This is the **official YOLOv5 🚀 notebook** authored by **Ultralytics**, and is freely available for redistribution under the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/). \n", "For more information please visit https://github.com/ultralytics/yolov5 and https://ultralytics.com. Thank you!" @@ -563,7 +564,7 @@ "clear_output()\n", "print(f\"Setup complete. Using torch {torch.__version__} ({torch.cuda.get_device_properties(0).name if torch.cuda.is_available() else 'CPU'})\")" ], - "execution_count": 1, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -681,7 +682,7 @@ "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017val.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../ && rm tmp.zip" ], - "execution_count": 2, + "execution_count": null, "outputs": [ { "output_type": "display_data", @@ -721,7 +722,7 @@ "# Run YOLOv5x on COCO val2017\n", "!python test.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65 --half" ], - "execution_count": 3, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -845,7 +846,7 @@ "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../ && rm tmp.zip" ], - "execution_count": 2, + "execution_count": null, "outputs": [ { "output_type": "display_data", @@ -923,7 +924,7 @@ "# Train YOLOv5s on COCO128 for 3 epochs\n", "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache" ], - "execution_count": 9, + "execution_count": null, "outputs": [ { "output_type": "stream", From 46e1fdfbc65c450c7bac9f7f0438a6b542dbe2ec Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 11 Jun 2021 11:23:39 +0200 Subject: [PATCH 0297/1976] Update stale.yml (#3585) --- .github/workflows/stale.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index a81e4007cffb..d620e540706a 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -19,7 +19,7 @@ jobs: - **Docs** – https://docs.ultralytics.com Access additional [Ultralytics](https://ultralytics.com) ⚡ resources: - - **Ultralytics HUB** – https://ultralytics.com/pricing + - **Ultralytics HUB** – https://ultralytics.com - **Vision API** – https://ultralytics.com/yolov5 - **About Us** – https://ultralytics.com/about - **Join Our Team** – https://ultralytics.com/work From ec2da4a82c92dc594d8d05112cbded1d8576bdd2 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 11 Jun 2021 11:37:08 +0200 Subject: [PATCH 0298/1976] Add ConfusionMatrix `normalize=True` flag (#3586) --- 
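To make the effect of `normalize=True` concrete, here is a minimal NumPy sketch of column normalization (an illustration, not the repository code; ConfusionMatrix holds an (nc+1) x (nc+1) array with an extra background row/column). Each column is divided by its sum, so every entry becomes the fraction of predictions for that true class; the follow-up patch (#3587) just below refines this into a one-liner with `reshape(1, -1)`.

```python
import numpy as np

def normalize_columns(matrix, eps=1e-6):
    # divide each column by its (stabilized) sum so each column sums to ~1
    return matrix / (matrix.sum(0).reshape(1, -1) + eps)

m = np.array([[8., 1.],
              [2., 9.]])
print(normalize_columns(m))  # first column -> [0.8, 0.2]
```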
utils/metrics.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/utils/metrics.py b/utils/metrics.py index 6b61d6d6ef02..09b994414ffc 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -158,11 +158,12 @@ def process_batch(self, detections, labels): def matrix(self): return self.matrix - def plot(self, save_dir='', names=()): + def plot(self, normalize=True, save_dir='', names=()): try: import seaborn as sn - - array = self.matrix / (self.matrix.sum(0).reshape(1, self.nc + 1) + 1E-6) # normalize + + if normalize: + array = self.matrix / (self.matrix.sum(0).reshape(1, self.nc + 1) + 1E-6) # normalize columns array[array < 0.005] = np.nan # don't annotate (would appear as 0.00) fig = plt.figure(figsize=(12, 9), tight_layout=True) From e8c52374035fd2fb5a0b0029eaa5e5705186df17 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 11 Jun 2021 11:46:05 +0200 Subject: [PATCH 0299/1976] ConfusionMatrix `normalize=True` fix (#3587) --- utils/metrics.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/utils/metrics.py b/utils/metrics.py index 09b994414ffc..8512197956e7 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -161,9 +161,8 @@ def matrix(self): def plot(self, normalize=True, save_dir='', names=()): try: import seaborn as sn - - if normalize: - array = self.matrix / (self.matrix.sum(0).reshape(1, self.nc + 1) + 1E-6) # normalize columns + + array = self.matrix / ((self.matrix.sum(0).reshape(1, -1) + 1E-6) if normalize else 1) # normalize columns array[array < 0.005] = np.nan # don't annotate (would appear as 0.00) fig = plt.figure(figsize=(12, 9), tight_layout=True) @@ -178,7 +177,7 @@ def plot(self, normalize=True, save_dir='', names=()): fig.axes[0].set_ylabel('Predicted') fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250) except Exception as e: - pass + print(f'WARNING: ConfusionMatrix plot failure: {e}') def print(self): for i in range(self.nc + 1): From 4984cf54be4eb88f00ccf33a05f57681b2a770ab Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 11 Jun 2021 20:24:03 +0200 Subject: [PATCH 0300/1976] train.py GPU memory fix (#3590) * train.py GPU memory fix * ema * cuda * cuda * zeros input * to device * batch index 0 --- train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/train.py b/train.py index 505556075af5..142268b273f1 100644 --- a/train.py +++ b/train.py @@ -335,7 +335,7 @@ def train(hyp, opt, device, tb_writer=None): if tb_writer and ni == 0: with warnings.catch_warnings(): warnings.simplefilter('ignore') # suppress jit trace warning - tb_writer.add_graph(torch.jit.trace(de_parallel(model), imgs, strict=False), []) # graph + tb_writer.add_graph(torch.jit.trace(de_parallel(model), imgs[0:1], strict=False), []) elif plots and ni == 10 and wandb_logger.wandb: wandb_logger.log({'Mosaics': [wandb_logger.wandb.Image(str(x), caption=x.name) for x in save_dir.glob('train*.jpg') if x.exists()]}) From 53d4fc2e265c469112e86f3ed1dec9817a7c9936 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Sat, 12 Jun 2021 02:48:57 +0530 Subject: [PATCH 0301/1976] W&B: Allow changed in config variable #3588 --- utils/wandb_logging/wandb_utils.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/utils/wandb_logging/wandb_utils.py b/utils/wandb_logging/wandb_utils.py index 57ce9035a777..9975af63d02c 100644 --- a/utils/wandb_logging/wandb_utils.py +++ b/utils/wandb_logging/wandb_utils.py @@ -103,7 +103,11 @@ def __init__(self, opt, name, run_id, data_dict, job_type='Training'): 
model_artifact_name = WANDB_ARTIFACT_PREFIX + model_artifact_name assert wandb, 'install wandb to resume wandb runs' # Resume wandb-artifact:// runs here| workaround for not overwriting wandb.config - self.wandb_run = wandb.init(id=run_id, project=project, entity=entity, resume='allow') + self.wandb_run = wandb.init(id=run_id, + project=project, + entity=entity, + resume='allow', + allow_val_change=True) opt.resume = model_artifact_name elif self.wandb: self.wandb_run = wandb.init(config=opt, @@ -112,7 +116,8 @@ def __init__(self, opt, name, run_id, data_dict, job_type='Training'): entity=opt.entity, name=name, job_type=job_type, - id=run_id) if not wandb.run else wandb.run + id=run_id, + allow_val_change=True) if not wandb.run else wandb.run if self.wandb_run: if self.job_type == 'Training': if not opt.resume: From 7a565f130a257aed46a0cac77cca945b489696bf Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 12 Jun 2021 13:26:41 +0200 Subject: [PATCH 0302/1976] Update `dataset_stats()` (#3593) @KalenMike this is a PR to add image filenames and labels to our stats dictionary and to save the dictionary to JSON. Save location is next to the train labels.cache file. The single JSON contains all stats for entire dataset. Usage example: ```python from utils.datasets import * dataset_stats('coco128.yaml', verbose=True) ``` --- utils/datasets.py | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index 444b3ff2f60c..f18569a7665b 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -2,6 +2,7 @@ import glob import hashlib +import json import logging import math import os @@ -1105,12 +1106,20 @@ def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False): continue x = [] dataset = LoadImagesAndLabels(data[split], augment=False, rect=True) # load dataset + if split == 'train': + cache_path = Path(dataset.label_files[0]).parent.with_suffix('.cache') # *.cache path for label in tqdm(dataset.labels, total=dataset.n, desc='Statistics'): x.append(np.bincount(label[:, 0].astype(int), minlength=nc)) x = np.array(x) # shape(128x80) - stats[split] = {'instances': {'total': int(x.sum()), 'per_class': x.sum(0).tolist()}, - 'images': {'total': dataset.n, 'unlabelled': int(np.all(x == 0, 1).sum()), - 'per_class': (x > 0).sum(0).tolist()}} + stats[split] = {'instance_stats': {'total': int(x.sum()), 'per_class': x.sum(0).tolist()}, + 'image_stats': {'total': dataset.n, 'unlabelled': int(np.all(x == 0, 1).sum()), + 'per_class': (x > 0).sum(0).tolist()}, + 'labels': {str(Path(k).name): v.tolist() for k, v in zip(dataset.img_files, dataset.labels)}} + + # Save, print and return + with open(cache_path.with_suffix('.json'), 'w') as f: + json.dump(stats, f) # save stats *.json if verbose: print(yaml.dump([stats], sort_keys=False, default_flow_style=False)) + # print(json.dumps(stats, indent=2, sort_keys=False)) return stats From 88b1945241dd0ef491da2ae0ce89f15ab67733e9 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 12 Jun 2021 15:21:37 +0200 Subject: [PATCH 0303/1976] Delete __init__.py (#3596) --- __init__.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 __init__.py diff --git a/__init__.py b/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 From 31336075609a3fbcb4afe398eba2967b22056bfa Mon Sep 17 00:00:00 2001 From: Kalen Michael Date: Sun, 13 Jun 2021 02:37:20 +0200 Subject: [PATCH 0304/1976] Simplify README.md (#3530) * Update README.md * added hosted images * added new logo * 
testing image hosting * changed svgs to pngs * removed old header * Update README.md * correct colab image source * splash.jpg * rocket and W&B fix * added contributing template * added social media to top section * increased size of top social media * cleanup and updates * rearrange quickstarts * API cleanup * PyTorch Hub cleanup * Add tutorials * cleanup * update CONTRIBUTING.md * Update README.md * update wandb link * Update README.md * remove tutorials header * update environments and integrations * Comment API image * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * double spaces after section * Update README.md * Update README.md Co-authored-by: Glenn Jocher --- CONTRIBUTING.md | 49 +++++++ README.md | 337 +++++++++++++++++++++++++++++++----------------- 2 files changed, 268 insertions(+), 118 deletions(-) create mode 100644 CONTRIBUTING.md diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 000000000000..acf74448c1fd --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,49 @@ +## Contributing to YOLOv5 🚀 + +We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible, whether it's: + +- Reporting a bug +- Discussing the current state of the code +- Submitting a fix +- Proposing a new feature +- Becoming a maintainer + +YOLOv5 works so well due to our combined community effort, and for every small improvement you contribute you will be helping push the frontiers of what's possible in AI 😃! + + +## Submitting a Pull Request (PR) 🛠️ + +To allow your work to be integrated as seamlessly as possible, we advise you to: +- ✅ Verify your PR is **up-to-date with origin/master.** If your PR is behind origin/master an automatic [GitHub actions](https://github.com/ultralytics/yolov5/blob/master/.github/workflows/rebase.yml) rebase may be attempted by including the /rebase command in a comment body, or by running the following code, replacing 'feature' with the name of your local branch: +```bash +git remote add upstream https://github.com/ultralytics/yolov5.git +git fetch upstream +git checkout feature # <----- replace 'feature' with local branch name +git merge upstream/master +git push -u origin -f +``` +- ✅ Verify all Continuous Integration (CI) **checks are passing**. +- ✅ Reduce changes to the absolute **minimum** required for your bug fix or feature addition. _"It is not daily increase but daily decrease, hack away the unessential. The closer to the source, the less wastage there is."_ -Bruce Lee + + +## Submitting a Bug Report 🐛 + +For us to investigate an issue we would need to be able to reproduce it ourselves first. We've created a few short guidelines below to help users provide what we need in order to get started investigating a possible problem. + +When asking a question, people will be better able to provide help if you provide **code** that they can easily understand and use to **reproduce** the problem. This is referred to by community members as creating a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example). 
Your code that reproduces the problem should be: + +* ✅ **Minimal** – Use as little code as possible that still produces the same problem +* ✅ **Complete** – Provide **all** parts someone else needs to reproduce your problem in the question itself +* ✅ **Reproducible** – Test the code you're about to provide to make sure it reproduces the problem + +In addition to the above requirements, for [Ultralytics](https://ultralytics.com/) to provide assistance your code should be: + +* ✅ **Current** – Verify that your code is up-to-date with current GitHub [master](https://github.com/ultralytics/yolov5/tree/master), and if necessary `git pull` or `git clone` a new copy to ensure your problem has not already been resolved by previous commits. +* ✅ **Unmodified** – Your problem must be reproducible without any modifications to the codebase in this repository. [Ultralytics](https://ultralytics.com/) does not provide support for custom code ⚠️. + +If you believe your problem meets all of the above criteria, please close this issue and raise a new one using the 🐛 **Bug Report** [template](https://github.com/ultralytics/yolov5/issues/new/choose) and providing a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example) to help us better understand and diagnose your problem. + + +## License + +By contributing, you agree that your contributions will be licensed under the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/) diff --git a/README.md b/README.md index 08a6eb272117..e3caa6d77f14 100755 --- a/README.md +++ b/README.md @@ -1,70 +1,136 @@ +
+[YOLOv5 logo]
+[Badges: CI CPU testing, Open In Colab, Open In Kaggle, Docker Pulls]
+YOLOv5 🚀 is a family of object detection architectures and models pretrained on the COCO dataset, and represents Ultralytics + open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development. +

+
+## Documentation
+
+See the [YOLOv5 Docs](https://docs.ultralytics.com) for full documentation on training, testing and deployment.
+
+## Quick Start Examples
+
+ +Install + + +Python >= 3.6.0 required with all [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) dependencies installed: + +```bash +$ git clone https://github.com/ultralytics/yolov5 +$ pip install -r requirements.txt +``` +
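A quick post-install sanity check can catch environment problems early. This is a minimal sketch, not part of the repo, assuming only that requirements.txt pulled in PyTorch:

```python
# Verify the freshly installed environment (illustrative only, not repo code)
import torch

print(torch.__version__)          # the repo expects torch>=1.7 at this point in its history
print(torch.cuda.is_available())  # False on CPU-only machines; CPU inference still works
```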
-This repository represents Ultralytics open-source research into future object detection methods, and incorporates lessons learned and best practices evolved over thousands of hours of training and evolution on anonymized client datasets. **All code and models are under active development, and are subject to modification or deletion without notice.** Use at your own risk. +
+Inference -

-
-YOLOv5-P5 640 Figure (click to expand)
-[YOLOv5-P5 640 comparison figure]
-
- Figure Notes (click to expand) - - * GPU Speed measures end-to-end time per image averaged over 5000 COCO val2017 images using a V100 GPU with batch size 32, and includes image preprocessing, PyTorch FP16 inference, postprocessing and NMS. - * EfficientDet data from [google/automl](https://github.com/google/automl) at batch size 8. - * **Reproduce** by `python test.py --task study --data coco.yaml --iou 0.7 --weights yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt` -
+Inference with YOLOv5 and [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36). Models automatically download from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). -- **April 11, 2021**: [v5.0 release](https://github.com/ultralytics/yolov5/releases/tag/v5.0): YOLOv5-P6 1280 models, [AWS](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart), [Supervise.ly](https://github.com/ultralytics/yolov5/issues/2518) and [YouTube](https://github.com/ultralytics/yolov5/pull/2752) integrations. -- **January 5, 2021**: [v4.0 release](https://github.com/ultralytics/yolov5/releases/tag/v4.0): nn.SiLU() activations, [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme) logging, [PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5/) integration. -- **August 13, 2020**: [v3.0 release](https://github.com/ultralytics/yolov5/releases/tag/v3.0): nn.Hardswish() activations, data autodownload, native AMP. -- **July 23, 2020**: [v2.0 release](https://github.com/ultralytics/yolov5/releases/tag/v2.0): improved model definition, training and mAP. +```python +import torch +# Model +model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # or yolov5m, yolov5x, custom -## Pretrained Checkpoints +# Images +img = 'https://ultralytics.com/images/zidane.jpg' # or file, PIL, OpenCV, numpy, multiple -[assets]: https://github.com/ultralytics/yolov5/releases +# Inference +results = model(img) -|Model |size
(pixels) |mAPval 0.5:0.95 |mAPtest 0.5:0.95 |mAPval 0.5 |Speed V100 (ms) | |params (M) |FLOPs 640 (B)
-|--- |--- |--- |--- |--- |--- |---|--- |---
-|[YOLOv5s][assets] |640 |36.7 |36.7 |55.4 |**2.0** | |7.3 |17.0
-|[YOLOv5m][assets] |640 |44.5 |44.5 |63.1 |2.7 | |21.4 |51.3
-|[YOLOv5l][assets] |640 |48.2 |48.2 |66.9 |3.8 | |47.0 |115.4
-|[YOLOv5x][assets] |640 |**50.4** |**50.4** |**68.8** |6.1 | |87.7 |218.8
-| | | | | | || |
-|[YOLOv5s6][assets] |1280 |43.3 |43.3 |61.9 |**4.3** | |12.7 |17.4
-|[YOLOv5m6][assets] |1280 |50.5 |50.5 |68.7 |8.4 | |35.9 |52.4
-|[YOLOv5l6][assets] |1280 |53.4 |53.4 |71.1 |12.3 | |77.2 |117.7
-|[YOLOv5x6][assets] |1280 |**54.4** |**54.4** |**72.0** |22.4 | |141.8 |222.9
-| | | | | | || |
-|[YOLOv5x6][assets] TTA |1280 |**55.0** |**55.0** |**72.0** |70.8 | |- |-
- Table Notes (click to expand) - - * APtest denotes COCO [test-dev2017](http://cocodataset.org/#upload) server results, all other AP results denote val2017 accuracy. - * AP values are for single-model single-scale unless otherwise noted. **Reproduce mAP** by `python test.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` - * SpeedGPU averaged over 5000 COCO val2017 images using a GCP [n1-standard-16](https://cloud.google.com/compute/docs/machine-types#n1_standard_machine_types) V100 instance, and includes FP16 inference, postprocessing and NMS. **Reproduce speed** by `python test.py --data coco.yaml --img 640 --conf 0.25 --iou 0.45` - * All checkpoints are trained to 300 epochs with default settings and hyperparameters (no autoaugmentation). - * Test Time Augmentation ([TTA](https://github.com/ultralytics/yolov5/issues/303)) includes reflection and scale augmentation. **Reproduce TTA** by `python test.py --data coco.yaml --img 1536 --iou 0.7 --augment`
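For reference, the `Detections` object returned by the hub model above can also be consumed programmatically. A hedged sketch follows; the `(n, 6)` layout of `results.xyxy` (x1, y1, x2, y2, confidence, class) is assumed from this era of the hub API and is worth verifying against the `Detections` class before relying on it:

```python
# Hedged sketch: read raw hub predictions (tensor layout assumed, verify before use)
import torch

model = torch.hub.load('ultralytics/yolov5', 'yolov5s')
results = model('https://ultralytics.com/images/zidane.jpg')

for *box, conf, cls in results.xyxy[0].tolist():  # one (n, 6) tensor per input image
    print(f'class {int(cls)} conf {conf:.2f} box {[round(v, 1) for v in box]}')
```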
-## Requirements -Python 3.8 or later with all [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) dependencies installed, including `torch>=1.7`. To install run: - +
+Inference with detect.py + +`detect.py` runs inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases) and saving results to `runs/detect`. ```bash -$ pip install -r requirements.txt +$ python detect.py --source 0 # webcam + file.jpg # image + file.mp4 # video + path/ # directory + path/*.jpg # glob + 'https://youtu.be/NUsoVlDFqZg' # YouTube video + 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream ``` +
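`detect.py` is a command-line script rather than an importable API at this stage, so the simplest way to drive it from Python is a subprocess call; a later patch in this log (#3687) adds a `parse_opt()`/`main()` split for exactly this use case. A sketch using only flags shown elsewhere in this log:

```python
# Run detect.py programmatically (sketch; assumes the repo root is the working directory)
import subprocess

subprocess.run(
    ['python', 'detect.py', '--source', 'data/images', '--weights', 'yolov5s.pt', '--conf', '0.25'],
    check=True,  # raise CalledProcessError if detection exits non-zero
)
```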
+ +
+Training + +Run commands below to reproduce results on [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) dataset (dataset auto-downloads on first use). Training times for YOLOv5s/m/l/x are 2/4/6/8 days on a single V100 (multi-GPU times faster). Use the largest `--batch-size` your GPU allows (batch sizes shown for 16 GB devices). +```bash +$ python train.py --data coco.yaml --cfg yolov5s.yaml --weights '' --batch-size 64 + yolov5m 40 + yolov5l 24 + yolov5x 16 +``` + + +
-## Tutorials +
+Tutorials * [Train Custom Data](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data)  🚀 RECOMMENDED * [Tips for Best Training Results](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)  ☘️ RECOMMENDED @@ -80,91 +146,126 @@ $ pip install -r requirements.txt * [Transfer Learning with Frozen Layers](https://github.com/ultralytics/yolov5/issues/1314)  ⭐ NEW * [TensorRT Deployment](https://github.com/wang-xinyu/tensorrtx) +
-## Environments - -YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled): - -- **Google Colab and Kaggle** notebooks with free GPU: Open In Colab Open In Kaggle -- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart) -- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart) -- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) Docker Pulls - - -## Inference - -`detect.py` runs inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases) and saving results to `runs/detect`. -```bash -$ python detect.py --source 0 # webcam - file.jpg # image - file.mp4 # video - path/ # directory - path/*.jpg # glob - 'https://youtu.be/NUsoVlDFqZg' # YouTube video - 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream -``` - -To run inference on example images in `data/images`: -```bash -$ python detect.py --source data/images --weights yolov5s.pt --conf 0.25 - -Namespace(agnostic_nms=False, augment=False, classes=None, conf_thres=0.25, device='', exist_ok=False, img_size=640, iou_thres=0.45, name='exp', project='runs/detect', save_conf=False, save_txt=False, source='data/images/', update=False, view_img=False, weights=['yolov5s.pt']) -YOLOv5 v4.0-96-g83dc1b4 torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB) - -Fusing layers... -Model Summary: 224 layers, 7266973 parameters, 0 gradients, 17.0 GFLOPs -image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, Done. (0.010s) -image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 1 tie, Done. (0.011s) -Results saved to runs/detect/exp2 -Done. (0.103s) -``` - - -### PyTorch Hub -Inference with YOLOv5 and [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36): -```python -import torch +##
Environments and Integrations
-# Model -model = torch.hub.load('ultralytics/yolov5', 'yolov5s') +Get started in seconds with our verified environments and integrations, including [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme) for automatic YOLOv5 experiment logging. Click each icon below for details. -# Image -img = 'https://ultralytics.com/images/zidane.jpg' + -# Inference -results = model(img) -results.print() # or .show(), .save() -``` +##
Compete and Win
-## Training +We are super excited about our first-ever Ultralytics YOLOv5 🚀 EXPORT Competition with **$10,000** in cash prizes! -Run commands below to reproduce results on [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) dataset (dataset auto-downloads on first use). Training times for YOLOv5s/m/l/x are 2/4/6/8 days on a single V100 (multi-GPU times faster). Use the largest `--batch-size` your GPU allows (batch sizes shown for 16 GB devices). -```bash -$ python train.py --data coco.yaml --cfg yolov5s.yaml --weights '' --batch-size 64 - yolov5m 40 - yolov5l 24 - yolov5x 16 -``` - +
+ + + +
-## Citation +##
Why YOLOv5
-[![DOI](https://zenodo.org/badge/264818686.svg)](https://zenodo.org/badge/latestdoi/264818686) +

+
+YOLOv5-P5 640 Figure (click to expand)
+[YOLOv5-P5 640 comparison figure]
+
+ Figure Notes (click to expand) + + * GPU Speed measures end-to-end time per image averaged over 5000 COCO val2017 images using a V100 GPU with batch size 32, and includes image preprocessing, PyTorch FP16 inference, postprocessing and NMS. + * EfficientDet data from [google/automl](https://github.com/google/automl) at batch size 8. + * **Reproduce** by `python test.py --task study --data coco.yaml --iou 0.7 --weights yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt` +
-## About Us +### Pretrained Checkpoints -Ultralytics is a U.S.-based particle physics and AI startup with over 6 years of expertise supporting government, academic and business clients. We offer a wide range of vision AI services, spanning from simple expert advice up to delivery of fully customized, end-to-end production solutions, including: -- **Cloud-based AI** systems operating on **hundreds of HD video streams in realtime.** -- **Edge AI** integrated into custom iOS and Android apps for realtime **30 FPS video inference.** -- **Custom data training**, hyperparameter evolution, and model exportation to any destination. +[assets]: https://github.com/ultralytics/yolov5/releases -For business inquiries and professional support requests please visit us at https://ultralytics.com. +|Model |size
(pixels) |mAPval 0.5:0.95 |mAPtest 0.5:0.95 |mAPval 0.5 |Speed V100 (ms) | |params (M) |FLOPs 640 (B)
+|--- |--- |--- |--- |--- |--- |---|--- |---
+|[YOLOv5s][assets] |640 |36.7 |36.7 |55.4 |**2.0** | |7.3 |17.0
+|[YOLOv5m][assets] |640 |44.5 |44.5 |63.1 |2.7 | |21.4 |51.3
+|[YOLOv5l][assets] |640 |48.2 |48.2 |66.9 |3.8 | |47.0 |115.4
+|[YOLOv5x][assets] |640 |**50.4** |**50.4** |**68.8** |6.1 | |87.7 |218.8
+| | | | | | | | |
+|[YOLOv5s6][assets] |1280 |43.3 |43.3 |61.9 |**4.3** | |12.7 |17.4
+|[YOLOv5m6][assets] |1280 |50.5 |50.5 |68.7 |8.4 | |35.9 |52.4
+|[YOLOv5l6][assets] |1280 |53.4 |53.4 |71.1 |12.3 | |77.2 |117.7
+|[YOLOv5x6][assets] |1280 |**54.4** |**54.4** |**72.0** |22.4 | |141.8 |222.9
+| | | | | | | | |
+|[YOLOv5x6][assets] TTA |1280 |**55.0** |**55.0** |**72.0** |70.8 | |- |-
+ Table Notes (click to expand) + + * APtest denotes COCO [test-dev2017](http://cocodataset.org/#upload) server results, all other AP results denote val2017 accuracy. + * AP values are for single-model single-scale unless otherwise noted. **Reproduce mAP** by `python test.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` + * SpeedGPU averaged over 5000 COCO val2017 images using a GCP [n1-standard-16](https://cloud.google.com/compute/docs/machine-types#n1_standard_machine_types) V100 instance, and includes FP16 inference, postprocessing and NMS. **Reproduce speed** by `python test.py --data coco.yaml --img 640 --conf 0.25 --iou 0.45` + * All checkpoints are trained to 300 epochs with default settings and hyperparameters (no autoaugmentation). + * Test Time Augmentation ([TTA](https://github.com/ultralytics/yolov5/issues/303)) includes reflection and scale augmentation. **Reproduce TTA** by `python test.py --data coco.yaml --img 1536 --iou 0.7 --augment` +
-
+## Contribute
+
+We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible. Please see our [Contributing Guide](CONTRIBUTING.md) to get started.
+
+## Contact
+ +For issues running YOLOv5 please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues). For business or professional support requests please visit +[https://ultralytics.com/contact](https://ultralytics.com/contact). + +
+ + From 6062319ec3415573f280ece31cdb0d5585e032c4 Mon Sep 17 00:00:00 2001 From: masood azhar Date: Mon, 14 Jun 2021 03:28:18 -0700 Subject: [PATCH 0305/1976] Update datasets.py (#3591) * 'changes-in_dataset' * Update datasets.py Co-authored-by: Glenn Jocher From 239a11c19777d8b5d4e2a69aac2cc83796313fd3 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 14 Jun 2021 13:58:54 +0200 Subject: [PATCH 0306/1976] Download COCO and VOC by default (#3608) --- utils/aws/userdata.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/aws/userdata.sh b/utils/aws/userdata.sh index 5846fedb16f9..52c0fe33d90f 100644 --- a/utils/aws/userdata.sh +++ b/utils/aws/userdata.sh @@ -9,7 +9,8 @@ if [ ! -d yolov5 ]; then echo "Running first-time script." # install dependencies, download COCO, pull Docker git clone https://github.com/ultralytics/yolov5 -b master && sudo chmod -R 777 yolov5 cd yolov5 - bash data/scripts/get_coco.sh && echo "Data done." & + bash data/scripts/get_coco.sh && echo "COCO done." & + bash data/scripts/get_voc.sh && echo "VOC done." & sudo docker pull ultralytics/yolov5:latest && echo "Docker done." & python -m pip install --upgrade pip && pip install -r requirements.txt && python detect.py && echo "Requirements done." & wait && echo "All tasks done." # finish background tasks From daab682b06f8416319c99bdf25aec56616bf6ac1 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Mon, 14 Jun 2021 22:24:58 +0530 Subject: [PATCH 0307/1976] Suppress wandb images size mismatch warning (#3611) * supress wandb images size mismatch warning * supress wandb images size mismatch warning * PEP8 reformat and optimize imports Co-authored-by: Glenn Jocher --- utils/wandb_logging/wandb_utils.py | 32 +++++++++++++++++++++++------- 1 file changed, 25 insertions(+), 7 deletions(-) diff --git a/utils/wandb_logging/wandb_utils.py b/utils/wandb_logging/wandb_utils.py index 9975af63d02c..7652f964f2c0 100644 --- a/utils/wandb_logging/wandb_utils.py +++ b/utils/wandb_logging/wandb_utils.py @@ -1,16 +1,16 @@ """Utilities and tools for tracking runs with Weights & Biases.""" -import json +import logging import sys +from contextlib import contextmanager from pathlib import Path -import torch import yaml from tqdm import tqdm sys.path.append(str(Path(__file__).parent.parent.parent)) # add utils/ to path from utils.datasets import LoadImagesAndLabels from utils.datasets import img2label_paths -from utils.general import colorstr, xywh2xyxy, check_dataset, check_file +from utils.general import colorstr, check_dataset, check_file try: import wandb @@ -92,6 +92,7 @@ class WandbLogger(): For more on how this logger is used, see the Weights & Biases documentation: https://docs.wandb.com/guides/integrations/yolov5 """ + def __init__(self, opt, name, run_id, data_dict, job_type='Training'): # Pre-training routine -- self.job_type = job_type @@ -272,7 +273,7 @@ def create_dataset_table(self, dataset, class_to_id, name='dataset'): "box_caption": "%s" % (class_to_id[cls])}) img_classes[cls] = class_to_id[cls] boxes = {"ground_truth": {"box_data": box_data, "class_labels": class_to_id}} # inference-space - table.add_data(si, wandb.Image(paths, classes=class_set, boxes=boxes), json.dumps(img_classes), + table.add_data(si, wandb.Image(paths, classes=class_set, boxes=boxes), list(img_classes.values()), Path(paths).name) artifact.add(table, name) return artifact @@ -306,8 +307,9 @@ def log(self, log_dict): def end_epoch(self, best_result=False): if self.wandb_run: - wandb.log(self.log_dict) - self.log_dict = {} + 
with all_logging_disabled(): + wandb.log(self.log_dict) + self.log_dict = {} if self.result_artifact: train_results = wandb.JoinedTable(self.val_table, self.result_table, "id") self.result_artifact.add(train_results, 'result') @@ -319,5 +321,21 @@ def end_epoch(self, best_result=False): def finish_run(self): if self.wandb_run: if self.log_dict: - wandb.log(self.log_dict) + with all_logging_disabled(): + wandb.log(self.log_dict) wandb.run.finish() + + +@contextmanager +def all_logging_disabled(highest_level=logging.CRITICAL): + """ source - https://gist.github.com/simon-weber/7853144 + A context manager that will prevent any logging messages triggered during the body from being processed. + :param highest_level: the maximum logging level in use. + This would only need to be changed if a custom level greater than CRITICAL is defined. + """ + previous_level = logging.root.manager.disable + logging.disable(highest_level) + try: + yield + finally: + logging.disable(previous_level) From 4c5d9bff80526b1120b2f78ce81bd20ec1a50b4e Mon Sep 17 00:00:00 2001 From: Wei Quan Date: Tue, 15 Jun 2021 05:24:56 -0400 Subject: [PATCH 0308/1976] Fix incorrect end epoch comment (#3612) --- train.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/train.py b/train.py index 142268b273f1..85bdf1bf9a1f 100644 --- a/train.py +++ b/train.py @@ -341,8 +341,7 @@ def train(hyp, opt, device, tb_writer=None): save_dir.glob('train*.jpg') if x.exists()]}) # end batch ------------------------------------------------------------------------------------------------ - # end epoch ---------------------------------------------------------------------------------------------------- - + # Scheduler lr = [x['lr'] for x in optimizer.param_groups] # for tensorboard scheduler.step() From 7d3686a686478c78beb2b32cf8a35c1a5dbe81b8 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 15 Jun 2021 13:21:04 +0200 Subject: [PATCH 0309/1976] Update `check_file()` (#3622) * Update `check_file()` * Update datasets.py --- utils/datasets.py | 2 +- utils/general.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index f18569a7665b..0bb657f30414 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -1095,7 +1095,7 @@ def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False): autodownload: Attempt to download dataset if not found locally verbose: Print stats dictionary """ - with open(check_file(Path(path))) as f: + with open(check_file(path)) as f: data = yaml.safe_load(f) # data dict check_dataset(data, autodownload) # download dataset if missing nc = data['nc'] # number of classes diff --git a/utils/general.py b/utils/general.py index 1d61f16d7771..3e3bd6997a7c 100755 --- a/utils/general.py +++ b/utils/general.py @@ -206,9 +206,9 @@ def check_file(file): file = str(file) # convert to str() if Path(file).is_file() or file == '': # exists return file - elif file.startswith(('http://', 'https://')): # download - url, file = file, Path(urllib.parse.unquote(str(file))).name # url, file (decode '%2F' to '/' etc.) - file = file.split('?')[0] # parse authentication https://url.com/file.txt?auth... 
+ elif file.startswith(('http:/', 'https:/')): # download + url = str(Path(file)).replace(':/', '://') # Pathlib turns :// -> :/ + file = Path(urllib.parse.unquote(file)).name.split('?')[0] # '%2F' to '/', split https://url.com/file.txt?auth print(f'Downloading {url} to {file}...') torch.hub.download_url_to_file(url, file) assert Path(file).exists() and Path(file).stat().st_size > 0, f'File download failed: {url}' # check From 30e4c4f09297b67afedf8b2bcd851833ddc9dead Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 15 Jun 2021 15:44:10 +0200 Subject: [PATCH 0310/1976] Update README.md (#3624) --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index e3caa6d77f14..57188f687cc1 100755 --- a/README.md +++ b/README.md @@ -70,6 +70,7 @@ Python >= 3.6.0 required with all [requirements.txt](https://github.com/ultralyt ```bash $ git clone https://github.com/ultralytics/yolov5 +$ cd yolov5 $ pip install -r requirements.txt ```
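The `check_file()` rewrite above works around a genuine `pathlib` behavior: constructing a `Path` from a URL collapses the `//` after the scheme, which is why the patch restores it with a string replace before downloading. A small standard-library sketch of what the new code relies on (POSIX paths assumed):

```python
# Demonstrate the pathlib quirk that check_file() compensates for
from pathlib import Path
from urllib.parse import unquote

url = 'https://url.com/file%2Fname.txt?auth=123'
print(str(Path(url)))                         # 'https:/url.com/file%2Fname.txt?auth=123' ('//' collapsed)
print(str(Path(url)).replace(':/', '://'))    # restores the scheme, mirroring the patch

file = Path(unquote(url)).name.split('?')[0]  # decode '%2F' to '/', then drop the query string
print(file)                                   # 'name.txt'
```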
From de56813ba8165fdbcaad2618beea693bd02ea6a5 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 16 Jun 2021 10:40:31 +0200 Subject: [PATCH 0311/1976] FROM nvcr.io/nvidia/pytorch:21.05-py3 (#3633) --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index b47e5bbff194..be19e3036187 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # Start FROM Nvidia PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch -FROM nvcr.io/nvidia/pytorch:21.03-py3 +FROM nvcr.io/nvidia/pytorch:21.05-py3 # Install linux packages RUN apt update && apt install -y zip htop screen libgl1-mesa-glx From 65f81bfefa7ea1f4fdd019dae9b675b7914e0c21 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 16 Jun 2021 10:49:50 +0200 Subject: [PATCH 0312/1976] Add `**/*.torchscript.pt` (#3634) --- .dockerignore | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.dockerignore b/.dockerignore index 3c6b6ab02e03..9c9663f006ca 100644 --- a/.dockerignore +++ b/.dockerignore @@ -12,12 +12,12 @@ data/samples/* *.jpg # Neural Network weights ----------------------------------------------------------------------------------------------- -**/*.weights **/*.pt **/*.pth **/*.onnx **/*.mlmodel **/*.torchscript +**/*.torchscript.pt # Below Copied From .gitignore ----------------------------------------------------------------------------------------- From 6c0e1d9fd7fe83a28972d4f35b2111553de0fcb6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 16 Jun 2021 11:12:15 +0200 Subject: [PATCH 0313/1976] Update `verify_image_label()` (#3635) --- utils/datasets.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index 0bb657f30414..20109e739c02 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -1046,20 +1046,20 @@ def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0), annotated_only=False): f.write(str(img) + '\n') # add image to txt file -def verify_image_label(params): +def verify_image_label(args): # Verify one image-label pair - im_file, lb_file, prefix = params + im_file, lb_file, prefix = args nm, nf, ne, nc = 0, 0, 0, 0 # number missing, found, empty, corrupt try: # verify images im = Image.open(im_file) im.verify() # PIL verify shape = exif_size(im) # image size - segments = [] # instance segments assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels' assert im.format.lower() in img_formats, f'invalid image format {im.format}' # verify labels + segments = [] # instance segments if os.path.isfile(lb_file): nf = 1 # label found with open(lb_file, 'r') as f: @@ -1084,7 +1084,7 @@ def verify_image_label(params): except Exception as e: nc = 1 logging.info(f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}') - return [None] * 4 + [nm, nf, ne, nc] + return [None, None, None, None, nm, nf, ne, nc] def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False): From bb79e13d521c54b20b06555fe79cdff055f28721 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 16 Jun 2021 12:35:33 +0200 Subject: [PATCH 0314/1976] RUN pip install --no-cache -U torch torchvision (#3637) --- Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/Dockerfile b/Dockerfile index be19e3036187..ecf6d1e3723c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -9,6 +9,7 @@ COPY requirements.txt . 
RUN python -m pip install --upgrade pip RUN pip uninstall -y nvidia-tensorboard nvidia-tensorboard-plugin-dlprof RUN pip install --no-cache -r requirements.txt coremltools onnx gsutil notebook +RUN pip install --no-cache -U torch torchvision # Create working directory RUN mkdir -p /usr/src/app From d808855f7703f12025c0a169136c624397add112 Mon Sep 17 00:00:00 2001 From: xiaowk5516 <59595896+xiaowk5516@users.noreply.github.com> Date: Wed, 16 Jun 2021 19:31:26 +0800 Subject: [PATCH 0315/1976] Assert non-premature end of JPEG images (#3638) * premature end of JPEG images * PEP8 reformat Co-authored-by: Glenn Jocher --- utils/datasets.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/utils/datasets.py b/utils/datasets.py index 20109e739c02..a1a8fa8f32a9 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -1057,6 +1057,10 @@ def verify_image_label(args): shape = exif_size(im) # image size assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels' assert im.format.lower() in img_formats, f'invalid image format {im.format}' + if im.format.lower() in ('jpg', 'jpeg'): + with open(im_file, 'rb') as f: + f.seek(-2, 2) + assert f.read() == b'\xff\xd9', 'corrupted JPEG' # verify labels segments = [] # instance segments From 3ce0db89b05e62b352befdadc33a148088a33e03 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 16 Jun 2021 19:48:30 +0200 Subject: [PATCH 0316/1976] Update CONTRIBUTING.md (#3645) * Update CONTRIBUTING.md * Update CONTRIBUTING.md * Update CONTRIBUTING.md * Update CONTRIBUTING.md --- CONTRIBUTING.md | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index acf74448c1fd..09d93b0573ba 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -12,6 +12,25 @@ YOLOv5 works so well due to our combined community effort, and for every small i ## Submitting a Pull Request (PR) 🛠️ +Submitting a PR is easy! This example shows how to submit a PR for updating `requirements.txt` in 4 steps: + +### 1. Select File to Update +Select `requirements.txt` to update by clicking on it in GitHub. +PR_step1 + +### 2. Click 'Edit this file' +Button is in top-right corner. +PR_step2 + +### 3. Make Changes +Change `matplotlib` version from `3.2.2` to `3.3`. +PR_step3 + +### 4. Preview Changes and Submit PR +Click on the **Preview changes** tab to verify your updates. At the bottom of the screen select 'Create a **new branch** for this commit', assign your branch a descriptive name such as `fix/matplotlib_version` and click the green **Propose changes** button. All done, your PR is now submitted to YOLOv5 for review and approval 😃! +PR_step4 + +### PR recommendations To allow your work to be integrated as seamlessly as possible, we advise you to: - ✅ Verify your PR is **up-to-date with origin/master.** If your PR is behind origin/master an automatic [GitHub actions](https://github.com/ultralytics/yolov5/blob/master/.github/workflows/rebase.yml) rebase may be attempted by including the /rebase command in a comment body, or by running the following code, replacing 'feature' with the name of your local branch: @@ -28,7 +47,9 @@ git push -u origin -f ## Submitting a Bug Report 🐛 -For us to investigate an issue we would need to be able to reproduce it ourselves first. We've created a few short guidelines below to help users provide what we need in order to get started investigating a possible problem. +If you spot a problem with YOLOv5 please submit a Bug Report! 
+
+For us to start investigating a possible problem we need to be able to reproduce it ourselves first. We've created a few short guidelines below to help users provide what we need in order to get started.

When asking a question, people will be better able to provide help if you provide **code** that they can easily understand and use to **reproduce** the problem. This is referred to by community members as creating a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example). Your code that reproduces the problem should be:

From 6187edcb53eb7982a23c5b0d3f1ab35d5d906ba6 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Wed, 16 Jun 2021 19:57:11 +0200
Subject: [PATCH 0317/1976] Update CONTRIBUTING.md (#3647)

---
 CONTRIBUTING.md | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 09d93b0573ba..7c0ba3ae9f18 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -16,19 +16,19 @@ Submitting a PR is easy! This example shows how to submit a PR for updating `req

### 1. Select File to Update
Select `requirements.txt` to update by clicking on it in GitHub.
-PR_step1
+

+[PR_step1 screenshot]
### 2. Click 'Edit this file' Button is in top-right corner. -PR_step2 +

+[PR_step2 screenshot]
### 3. Make Changes Change `matplotlib` version from `3.2.2` to `3.3`. -PR_step3 +

+[PR_step3 screenshot]
### 4. Preview Changes and Submit PR Click on the **Preview changes** tab to verify your updates. At the bottom of the screen select 'Create a **new branch** for this commit', assign your branch a descriptive name such as `fix/matplotlib_version` and click the green **Propose changes** button. All done, your PR is now submitted to YOLOv5 for review and approval 😃! -PR_step4 +

+[PR_step4 screenshot]
### PR recommendations From fa29125f1816e87b44763675bc661452868fdced Mon Sep 17 00:00:00 2001 From: Mai Thanh Minh Date: Thu, 17 Jun 2021 03:56:16 +0700 Subject: [PATCH 0318/1976] `is_coco` list fix (#3646) --- test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test.py b/test.py index cbc97b420155..a38298da54da 100644 --- a/test.py +++ b/test.py @@ -78,7 +78,7 @@ def test(data, with open(data) as f: data = yaml.safe_load(f) check_dataset(data) # check - is_coco = data['val'].endswith('coco/val2017.txt') # COCO dataset + is_coco = type(data['val']) is str and data['val'].endswith('coco/val2017.txt') # COCO dataset nc = 1 if single_cls else int(data['nc']) # number of classes iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95 niou = iouv.numel() From df7706d8026c8277fa47ab04a89238f16c9a19b7 Mon Sep 17 00:00:00 2001 From: SpongeBab <2078825250@qq.com> Date: Thu, 17 Jun 2021 18:37:53 +0800 Subject: [PATCH 0319/1976] Update README.md (#3650) Be more user-friendly to new users --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 57188f687cc1..ab8f60169947 100755 --- a/README.md +++ b/README.md @@ -130,7 +130,7 @@ $ python train.py --data coco.yaml --cfg yolov5s.yaml --weights '' --batch-size
-
+
Tutorials * [Train Custom Data](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data)  🚀 RECOMMENDED From 9b6dba6207182f5b1bca596a947fc32d4150db2f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 17 Jun 2021 13:59:52 +0200 Subject: [PATCH 0320/1976] Update `dataset_stats()` to list of dicts (#3657) * Update `dataset_stats()` to list of dicts @KalenMike * Update datasets.py --- utils/datasets.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index a1a8fa8f32a9..bcb8c36e0e64 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -1099,6 +1099,11 @@ def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False): autodownload: Attempt to download dataset if not found locally verbose: Print stats dictionary """ + + def round_labels(labels): + # Update labels to integer class and 6 decimal place floats + return [[int(c), *[round(x, 6) for x in points]] for c, *points in labels] + with open(check_file(path)) as f: data = yaml.safe_load(f) # data dict check_dataset(data, autodownload) # download dataset if missing @@ -1118,12 +1123,13 @@ def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False): stats[split] = {'instance_stats': {'total': int(x.sum()), 'per_class': x.sum(0).tolist()}, 'image_stats': {'total': dataset.n, 'unlabelled': int(np.all(x == 0, 1).sum()), 'per_class': (x > 0).sum(0).tolist()}, - 'labels': {str(Path(k).name): v.tolist() for k, v in zip(dataset.img_files, dataset.labels)}} + 'labels': [{str(Path(k).name): round_labels(v.tolist())} for k, v in + zip(dataset.img_files, dataset.labels)]} # Save, print and return with open(cache_path.with_suffix('.json'), 'w') as f: json.dump(stats, f) # save stats *.json if verbose: - print(yaml.dump([stats], sort_keys=False, default_flow_style=False)) - # print(json.dumps(stats, indent=2, sort_keys=False)) + print(json.dumps(stats, indent=2, sort_keys=False)) + # print(yaml.dump([stats], sort_keys=False, default_flow_style=False)) return stats From 2754adad463e6f097521946f20e601c8370a6728 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 17 Jun 2021 14:44:12 +0200 Subject: [PATCH 0321/1976] Remove `/weights` directory (#3659) * Remove `/weights` directory * cleanup --- .github/workflows/ci-testing.yml | 10 +++++----- Dockerfile | 3 --- {weights => data/scripts}/download_weights.sh | 0 detect.py | 4 ++-- 4 files changed, 7 insertions(+), 10 deletions(-) rename {weights => data/scripts}/download_weights.sh (100%) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index bb8b173cdb31..36318f6ae562 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -64,15 +64,15 @@ jobs: di=cpu # inference devices # define device # train - python train.py --img 128 --batch 16 --weights weights/${{ matrix.model }}.pt --cfg models/${{ matrix.model }}.yaml --epochs 1 --device $di + python train.py --img 128 --batch 16 --weights ${{ matrix.model }}.pt --cfg ${{ matrix.model }}.yaml --epochs 1 --device $di # detect - python detect.py --weights weights/${{ matrix.model }}.pt --device $di + python detect.py --weights ${{ matrix.model }}.pt --device $di python detect.py --weights runs/train/exp/weights/last.pt --device $di # test - python test.py --img 128 --batch 16 --weights weights/${{ matrix.model }}.pt --device $di + python test.py --img 128 --batch 16 --weights ${{ matrix.model }}.pt --device $di python test.py --img 128 --batch 16 --weights runs/train/exp/weights/last.pt --device $di 
python hubconf.py # hub - python models/yolo.py --cfg models/${{ matrix.model }}.yaml # inspect - python models/export.py --img 128 --batch 1 --weights weights/${{ matrix.model }}.pt # export + python models/yolo.py --cfg ${{ matrix.model }}.yaml # inspect + python models/export.py --img 128 --batch 1 --weights ${{ matrix.model }}.pt # export shell: bash diff --git a/Dockerfile b/Dockerfile index ecf6d1e3723c..d32e3960046b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -46,8 +46,5 @@ ENV HOME=/usr/src/app # Bash into stopped container # id=$(sudo docker ps -qa) && sudo docker start $id && sudo docker exec -it $id bash -# Send weights to GCP -# python -c "from utils.general import *; strip_optimizer('runs/train/exp0_*/weights/best.pt', 'tmp.pt')" && gsutil cp tmp.pt gs://*.pt - # Clean up # docker system prune -a --volumes diff --git a/weights/download_weights.sh b/data/scripts/download_weights.sh similarity index 100% rename from weights/download_weights.sh rename to data/scripts/download_weights.sh diff --git a/detect.py b/detect.py index 67916c652742..7daa87436daa 100644 --- a/detect.py +++ b/detect.py @@ -63,8 +63,8 @@ def detect(weights='yolov5s.pt', # model.pt path(s) # Second-stage classifier classify = False if classify: - modelc = load_classifier(name='resnet101', n=2) # initialize - modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model']).to(device).eval() + modelc = load_classifier(name='resnet50', n=2) # initialize + modelc.load_state_dict(torch.load('resnet50.pt', map_location=device)['model']).to(device).eval() # Set Dataloader vid_path, vid_writer = None, None From ac34834563cfc90f499248fc14b11812da5f14af Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 17 Jun 2021 16:21:10 +0200 Subject: [PATCH 0322/1976] Update download_weights.sh comment (#3662) --- data/scripts/download_weights.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/scripts/download_weights.sh b/data/scripts/download_weights.sh index 43c8e31d80fd..6a279f1636fc 100755 --- a/data/scripts/download_weights.sh +++ b/data/scripts/download_weights.sh @@ -1,7 +1,7 @@ #!/bin/bash # Download latest models from https://github.com/ultralytics/yolov5/releases # Usage: -# $ bash weights/download_weights.sh +# $ bash path/to/download_weights.sh python - < Date: Thu, 17 Jun 2021 21:32:39 +0200 Subject: [PATCH 0323/1976] Update train.py (#3667) --- train.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/train.py b/train.py index 85bdf1bf9a1f..27f42c9a9c1d 100644 --- a/train.py +++ b/train.py @@ -22,7 +22,7 @@ from torch.utils.tensorboard import SummaryWriter from tqdm import tqdm -import test # import test.py to get mAP after each epoch +import test # for end-of-epoch mAP from models.experimental import attempt_load from models.yolo import Model from utils.autoanchor import check_anchors @@ -39,7 +39,11 @@ logger = logging.getLogger(__name__) -def train(hyp, opt, device, tb_writer=None): +def train(hyp, + opt, + device, + tb_writer=None + ): logger.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items())) save_dir, epochs, batch_size, total_batch_size, weights, rank, single_cls = \ Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank, \ @@ -341,7 +345,7 @@ def train(hyp, opt, device, tb_writer=None): save_dir.glob('train*.jpg') if x.exists()]}) # end batch ------------------------------------------------------------------------------------------------ - + # 
Scheduler lr = [x['lr'] for x in optimizer.param_groups] # for tensorboard scheduler.step() @@ -404,12 +408,11 @@ def train(hyp, opt, device, tb_writer=None): torch.save(ckpt, best) if wandb_logger.wandb: if ((epoch + 1) % opt.save_period == 0 and not final_epoch) and opt.save_period != -1: - wandb_logger.log_model( - last.parent, opt, epoch, fi, best_model=best_fitness == fi) + wandb_logger.log_model(last.parent, opt, epoch, fi, best_model=best_fitness == fi) del ckpt # end epoch ---------------------------------------------------------------------------------------------------- - # end training + # end training ----------------------------------------------------------------------------------------------------- if rank in [-1, 0]: logger.info(f'{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.\n') if plots: From fa201f968ecb552774c37da01b8ea1ac01f3d261 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 17 Jun 2021 22:03:25 +0200 Subject: [PATCH 0324/1976] Update `train(hyp, *args)` to accept `hyp` file or dict (#3668) --- train.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/train.py b/train.py index 27f42c9a9c1d..113d084336ad 100644 --- a/train.py +++ b/train.py @@ -39,12 +39,11 @@ logger = logging.getLogger(__name__) -def train(hyp, +def train(hyp, # path/to/hyp.yaml or hyp dictionary opt, device, tb_writer=None ): - logger.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items())) save_dir, epochs, batch_size, total_batch_size, weights, rank, single_cls = \ Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank, \ opt.single_cls @@ -56,6 +55,12 @@ def train(hyp, best = wdir / 'best.pt' results_file = save_dir / 'results.txt' + # Hyperparameters + if isinstance(hyp, str): + with open(hyp) as f: + hyp = yaml.safe_load(f) # load hyps dict + logger.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items())) + # Save run settings with open(save_dir / 'hyp.yaml', 'w') as f: yaml.safe_dump(hyp, f, sort_keys=False) @@ -529,10 +534,6 @@ def train(hyp, assert not opt.image_weights, '--image-weights argument is not compatible with DDP training' opt.batch_size = opt.total_batch_size // opt.world_size - # Hyperparameters - with open(opt.hyp) as f: - hyp = yaml.safe_load(f) # load hyps - # Train logger.info(opt) if not opt.evolve: @@ -541,7 +542,7 @@ def train(hyp, prefix = colorstr('tensorboard: ') logger.info(f"{prefix}Start with 'tensorboard --logdir {opt.project}', view at http://localhost:6006/") tb_writer = SummaryWriter(opt.save_dir) # Tensorboard - train(hyp, opt, device, tb_writer) + train(opt.hyp, opt, device, tb_writer) # Evolve hyperparameters (optional) else: @@ -575,6 +576,8 @@ def train(hyp, 'mosaic': (1, 0.0, 1.0), # image mixup (probability) 'mixup': (1, 0.0, 1.0)} # image mixup (probability) + with open(opt.hyp) as f: + hyp = yaml.safe_load(f) # load hyps dict assert opt.local_rank == -1, 'DDP mode not implemented for --evolve' opt.notest, opt.nosave = True, True # only test/save final epoch # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices From 045d5d86299a4a724fca40faaf0225ded91a68b4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 17 Jun 2021 22:12:42 +0200 Subject: [PATCH 0325/1976] Update TensorBoard (#3669) --- train.py | 33 +++++++++++++++++---------------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/train.py b/train.py index 113d084336ad..9d71e7056800 
100644 --- a/train.py +++ b/train.py @@ -42,7 +42,6 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary opt, device, - tb_writer=None ): save_dir, epochs, batch_size, total_batch_size, weights, rank, single_cls = \ Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank, \ @@ -74,9 +73,16 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary with open(opt.data) as f: data_dict = yaml.safe_load(f) # data dict - # Logging- Doing this before checking the dataset. Might update data_dict - loggers = {'wandb': None} # loggers dict + # Loggers + loggers = {'wandb': None, 'tb': None} # loggers dict if rank in [-1, 0]: + # TensorBoard + if not opt.evolve: + prefix = colorstr('tensorboard: ') + logger.info(f"{prefix}Start with 'tensorboard --logdir {opt.project}', view at http://localhost:6006/") + loggers['tb'] = SummaryWriter(opt.save_dir) + + # W&B opt.hyp = hyp # add hyperparameters run_id = torch.load(weights).get('wandb_id') if weights.endswith('.pt') and os.path.isfile(weights) else None wandb_logger = WandbLogger(opt, save_dir.stem, run_id, data_dict) @@ -219,8 +225,8 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # model._initialize_biases(cf.to(device)) if plots: plot_labels(labels, names, save_dir, loggers) - if tb_writer: - tb_writer.add_histogram('classes', c, 0) + if loggers['tb']: + loggers['tb'].add_histogram('classes', c, 0) # TensorBoard # Anchors if not opt.noautoanchor: @@ -341,10 +347,10 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary if plots and ni < 3: f = save_dir / f'train_batch{ni}.jpg' # filename Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start() - if tb_writer and ni == 0: + if loggers['tb'] and ni == 0: # TensorBoard with warnings.catch_warnings(): warnings.simplefilter('ignore') # suppress jit trace warning - tb_writer.add_graph(torch.jit.trace(de_parallel(model), imgs[0:1], strict=False), []) + loggers['tb'].add_graph(torch.jit.trace(de_parallel(model), imgs[0:1], strict=False), []) elif plots and ni == 10 and wandb_logger.wandb: wandb_logger.log({'Mosaics': [wandb_logger.wandb.Image(str(x), caption=x.name) for x in save_dir.glob('train*.jpg') if x.exists()]}) @@ -352,7 +358,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # end batch ------------------------------------------------------------------------------------------------ # Scheduler - lr = [x['lr'] for x in optimizer.param_groups] # for tensorboard + lr = [x['lr'] for x in optimizer.param_groups] # for loggers scheduler.step() # DDP process 0 or single-GPU @@ -385,8 +391,8 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary 'val/box_loss', 'val/obj_loss', 'val/cls_loss', # val loss 'x/lr0', 'x/lr1', 'x/lr2'] # params for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags): - if tb_writer: - tb_writer.add_scalar(tag, x, epoch) # tensorboard + if loggers['tb']: + loggers['tb'].add_scalar(tag, x, epoch) # TensorBoard if wandb_logger.wandb: wandb_logger.log({tag: x}) # W&B @@ -537,12 +543,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Train logger.info(opt) if not opt.evolve: - tb_writer = None # init loggers - if opt.global_rank in [-1, 0]: - prefix = colorstr('tensorboard: ') - logger.info(f"{prefix}Start with 'tensorboard --logdir {opt.project}', view at http://localhost:6006/") - tb_writer = SummaryWriter(opt.save_dir) # Tensorboard - train(opt.hyp, opt, device, tb_writer) + train(opt.hyp, opt, device) # Evolve hyperparameters (optional) else: From 2296f1546fe252d7293b48ffb8e192d1e5f2a85b 
Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 17 Jun 2021 23:24:30 +0200 Subject: [PATCH 0326/1976] Update `WORLD_SIZE` and `RANK` retrieval (#3670) --- train.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/train.py b/train.py index 9d71e7056800..3eb866345d47 100644 --- a/train.py +++ b/train.py @@ -502,8 +502,8 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary opt = parser.parse_args() # Set DDP variables - opt.world_size = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1 - opt.global_rank = int(os.environ['RANK']) if 'RANK' in os.environ else -1 + opt.world_size = int(getattr(os.environ, 'WORLD_SIZE', 1)) + opt.global_rank = int(getattr(os.environ, 'RANK', -1)) set_logging(opt.global_rank) if opt.global_rank in [-1, 0]: check_git_status() From f527704cd32c42bc0bba9cce04601783b8563204 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 18 Jun 2021 10:21:47 +0200 Subject: [PATCH 0327/1976] Cache v0.3: improved corrupt image/label reporting (#3676) * Cache v0.3: improved corrupt image/label reporting Fix for https://github.com/ultralytics/yolov5/issues/3656#issuecomment-863660899 * cleanup --- utils/datasets.py | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index bcb8c36e0e64..f927abb20f5a 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -390,7 +390,7 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') # cached labels if cache_path.is_file(): cache, exists = torch.load(cache_path), True # load - if cache['hash'] != get_hash(self.label_files + self.img_files): # changed + if cache['hash'] != get_hash(self.label_files + self.img_files) or cache['version'] != 0.3: cache, exists = self.cache_labels(cache_path, prefix), False # re-cache else: cache, exists = self.cache_labels(cache_path, prefix), False # cache @@ -400,11 +400,12 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r if exists: d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted" tqdm(None, desc=prefix + d, total=n, initial=n) # display cache results + if cache['msgs']: + logging.info('\n'.join(cache['msgs'])) # display warnings assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {help_url}' # Read cache - cache.pop('hash') # remove hash - cache.pop('version') # remove version + [cache.pop(k) for k in ('hash', 'version', 'msgs')] # remove items labels, shapes, self.segments = zip(*cache.values()) self.labels = list(labels) self.shapes = np.array(shapes, dtype=np.float64) @@ -461,26 +462,31 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r def cache_labels(self, path=Path('./labels.cache'), prefix=''): # Cache dataset labels, check images and read shapes x = {} # dict - nm, nf, ne, nc = 0, 0, 0, 0 # number missing, found, empty, corrupt + nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels..." 
with Pool(num_threads) as pool: pbar = tqdm(pool.imap_unordered(verify_image_label, zip(self.img_files, self.label_files, repeat(prefix))), desc=desc, total=len(self.img_files)) - for im_file, l, shape, segments, nm_f, nf_f, ne_f, nc_f in pbar: + for im_file, l, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar: nm += nm_f nf += nf_f ne += ne_f nc += nc_f if im_file: x[im_file] = [l, shape, segments] + if msg: + msgs.append(msg) pbar.desc = f"{desc}{nf} found, {nm} missing, {ne} empty, {nc} corrupted" pbar.close() + if msgs: + logging.info('\n'.join(msgs)) if nf == 0: logging.info(f'{prefix}WARNING: No labels found in {path}. See {help_url}') x['hash'] = get_hash(self.label_files + self.img_files) x['results'] = nf, nm, ne, nc, len(self.img_files) - x['version'] = 0.2 # cache version + x['msgs'] = msgs # warnings + x['version'] = 0.3 # cache version try: torch.save(x, path) # save cache for next time logging.info(f'{prefix}New cache created: {path}') @@ -1084,11 +1090,11 @@ def verify_image_label(args): else: nm = 1 # label missing l = np.zeros((0, 5), dtype=np.float32) - return im_file, l, shape, segments, nm, nf, ne, nc + return im_file, l, shape, segments, nm, nf, ne, nc, '' except Exception as e: nc = 1 - logging.info(f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}') - return [None, None, None, None, nm, nf, ne, nc] + msg = f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}' + return [None, None, None, None, nm, nf, ne, nc, msg] def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False): From 2729761458cdf868af3dad496a0dfcb2fd1d5aa4 Mon Sep 17 00:00:00 2001 From: ZouJiu1 <34758215+ZouJiu1@users.noreply.github.com> Date: Fri, 18 Jun 2021 22:26:52 +0800 Subject: [PATCH 0328/1976] EMA changes for pre-model's batch_size (#3681) * EMA changes for pre-model's batch_size * Update train.py * Update torch_utils.py Co-authored-by: Glenn Jocher From 463628a4d88d2375d7b4c4556f77a24d44332772 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 18 Jun 2021 17:12:42 +0200 Subject: [PATCH 0329/1976] Update README.md (#3684) --- README.md | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/README.md b/README.md index ab8f60169947..82d408ef5cde 100755 --- a/README.md +++ b/README.md @@ -62,9 +62,7 @@ See the [YOLOv5 Docs](https://docs.ultralytics.com) for full documentation on tr
- -Install - +Install Python >= 3.6.0 required with all [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) dependencies installed: From 814806c61de06525dc7346334a08a2024272799c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 19 Jun 2021 11:22:09 +0200 Subject: [PATCH 0330/1976] Update cache check (#3691) Swapped order of operations for faster first per https://github.com/ultralytics/yolov5/commit/f527704cd32c42bc0bba9cce04601783b8563204#r52362419 --- utils/datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/datasets.py b/utils/datasets.py index f927abb20f5a..8fce61bb08a2 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -390,7 +390,7 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') # cached labels if cache_path.is_file(): cache, exists = torch.load(cache_path), True # load - if cache['hash'] != get_hash(self.label_files + self.img_files) or cache['version'] != 0.3: + if cache['version'] != 0.3 or cache['hash'] != get_hash(self.label_files + self.img_files): cache, exists = self.cache_labels(cache_path, prefix), False # re-cache else: cache, exists = self.cache_labels(cache_path, prefix), False # cache From bf209f6fe92abc4c0ea66a549f6527c7fe8ec20a Mon Sep 17 00:00:00 2001 From: Mai Thanh Minh Date: Sat, 19 Jun 2021 16:51:21 +0700 Subject: [PATCH 0331/1976] Skip HSV augmentation when hyperparameters are [0, 0, 0] (#3686) * Create shortcircuit in augment_hsv when hyperparameter are zero * implement faster opt-in Co-authored-by: Glenn Jocher --- utils/datasets.py | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index 8fce61bb08a2..21388db7ff46 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -632,17 +632,18 @@ def load_image(self, index): def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5): - r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains - hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV)) - dtype = img.dtype # uint8 - - x = np.arange(0, 256, dtype=r.dtype) - lut_hue = ((x * r[0]) % 180).astype(dtype) - lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) - lut_val = np.clip(x * r[2], 0, 255).astype(dtype) - - img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))) - cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed + if hgain or sgain or vgain: + r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains + hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV)) + dtype = img.dtype # uint8 + + x = np.arange(0, 256, dtype=r.dtype) + lut_hue = ((x * r[0]) % 180).astype(dtype) + lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) + lut_val = np.clip(x * r[2], 0, 255).astype(dtype) + + img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))) + cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed def hist_equalize(img, clahe=True, bgr=False): From bfb2276b1d32b5360312802fd6065661f3ea6b00 Mon Sep 17 00:00:00 2001 From: lb-desupervised <86119248+lb-desupervised@users.noreply.github.com> Date: Sat, 19 Jun 2021 12:06:59 +0200 Subject: [PATCH 0332/1976] Slightly modify CLI execution (#3687) * Slightly modify CLI execution This simple change makes it easier to run the primary functions of this repo (train/detect/test) from within Python. 
An object which represents `opt` can be constructed and fed to the `main` function of each of these modules, rather than having to call the lower level functions directly, or run the module as a script. * Update export.py Add CLI parsing update for more convenient module usage within Python. Co-authored-by: Lewis Belcher --- detect.py | 12 ++++++++++-- models/export.py | 12 ++++++++++-- test.py | 11 ++++++++++- train.py | 12 +++++++++++- 4 files changed, 41 insertions(+), 6 deletions(-) diff --git a/detect.py b/detect.py index 7daa87436daa..c51c6fa4e0c1 100644 --- a/detect.py +++ b/detect.py @@ -172,7 +172,7 @@ def detect(weights='yolov5s.pt', # model.pt path(s) print(f'Done. ({time.time() - t0:.3f}s)') -if __name__ == '__main__': +def parse_opt(): parser = argparse.ArgumentParser() parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)') parser.add_argument('--source', type=str, default='data/images', help='file/dir/URL/glob, 0 for webcam') @@ -198,7 +198,15 @@ def detect(weights='yolov5s.pt', # model.pt path(s) parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences') parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') opt = parser.parse_args() + return opt + + +def main(opt): print(opt) check_requirements(exclude=('tensorboard', 'thop')) - detect(**vars(opt)) + + +if __name__ == "__main__": + opt = parse_opt() + main(opt) diff --git a/models/export.py b/models/export.py index 3c04b07fdc95..8c491dabddc0 100644 --- a/models/export.py +++ b/models/export.py @@ -144,7 +144,7 @@ def export(weights='./yolov5s.pt', # weights path print(f'\nExport complete ({time.time() - t:.2f}s). Visualize with https://github.com/lutzroeder/netron.') -if __name__ == '__main__': +def parse_opt(): parser = argparse.ArgumentParser() parser.add_argument('--weights', type=str, default='./yolov5s.pt', help='weights path') parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image (height, width)') @@ -159,7 +159,15 @@ def export(weights='./yolov5s.pt', # weights path parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model') parser.add_argument('--opset-version', type=int, default=12, help='ONNX: opset version') opt = parser.parse_args() + return opt + + +def main(opt): print(opt) set_logging() - export(**vars(opt)) + + +if __name__ == "__main__": + opt = parse_opt() + main(opt) diff --git a/test.py b/test.py index a38298da54da..5ebfb36509ea 100644 --- a/test.py +++ b/test.py @@ -294,7 +294,7 @@ def test(data, return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t -if __name__ == '__main__': +def parse_opt(): parser = argparse.ArgumentParser(prog='test.py') parser.add_argument('--data', type=str, default='data/coco128.yaml', help='dataset.yaml path') parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)') @@ -319,6 +319,10 @@ def test(data, opt.save_json |= opt.data.endswith('coco.yaml') opt.save_txt |= opt.save_hybrid opt.data = check_file(opt.data) # check file + return opt + + +def main(opt): print(opt) check_requirements(exclude=('tensorboard', 'thop')) @@ -344,3 +348,8 @@ def test(data, np.savetxt(f, y, fmt='%10.4g') # save os.system('zip -r study.zip study_*.txt') plot_study_txt(x=x) # plot + + +if __name__ == "__main__": + opt = parse_opt() + main(opt) diff --git a/train.py b/train.py index 3eb866345d47..8056183242a6 100644 --- a/train.py +++ b/train.py @@ -463,7 
+463,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary return results -if __name__ == '__main__': +def parse_opt(): parser = argparse.ArgumentParser() parser.add_argument('--weights', type=str, default='yolov5s.pt', help='initial weights path') parser.add_argument('--cfg', type=str, default='', help='model.yaml path') @@ -504,6 +504,11 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Set DDP variables opt.world_size = int(getattr(os.environ, 'WORLD_SIZE', 1)) opt.global_rank = int(getattr(os.environ, 'RANK', -1)) + return opt + + +def main(opt): + print(opt) set_logging(opt.global_rank) if opt.global_rank in [-1, 0]: check_git_status() @@ -628,3 +633,8 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary plot_evolution(yaml_file) print(f'Hyperparameter evolution complete. Best results saved as: {yaml_file}\n' f'Command to train a new model with these hyperparameters: $ python train.py --hyp {yaml_file}') + + +if __name__ == "__main__": + opt = parse_opt() + main(opt) From 5bab9a28e45f119839c14d91dc93bbdedadaf7de Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 19 Jun 2021 14:21:28 +0200 Subject: [PATCH 0333/1976] Reformat (#3694) --- .github/ISSUE_TEMPLATE/feature-request.md | 1 + .github/dependabot.yml | 20 ++++---- .github/workflows/ci-testing.yml | 8 +-- .github/workflows/codeql-analysis.yml | 62 +++++++++++------------ .github/workflows/greetings.yml | 14 ++--- .github/workflows/rebase.yml | 2 +- 6 files changed, 54 insertions(+), 53 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/feature-request.md b/.github/ISSUE_TEMPLATE/feature-request.md index 87db3eacbf02..02320771b5f5 100644 --- a/.github/ISSUE_TEMPLATE/feature-request.md +++ b/.github/ISSUE_TEMPLATE/feature-request.md @@ -8,6 +8,7 @@ assignees: '' --- ## 🚀 Feature + ## Motivation diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 9910689197f5..c489a753aa95 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -1,12 +1,12 @@ version: 2 updates: -- package-ecosystem: pip - directory: "/" - schedule: - interval: weekly - time: "04:00" - open-pull-requests-limit: 10 - reviewers: - - glenn-jocher - labels: - - dependencies + - package-ecosystem: pip + directory: "/" + schedule: + interval: weekly + time: "04:00" + open-pull-requests-limit: 10 + reviewers: + - glenn-jocher + labels: + - dependencies diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 36318f6ae562..956199314726 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -1,6 +1,6 @@ name: CI CPU testing -on: # https://help.github.com/en/actions/reference/events-that-trigger-workflows +on: # https://help.github.com/en/actions/reference/events-that-trigger-workflows push: branches: [ master, develop ] pull_request: @@ -14,9 +14,9 @@ jobs: strategy: fail-fast: false matrix: - os: [ubuntu-latest, macos-latest, windows-latest] - python-version: [3.8] - model: ['yolov5s'] # models to test + os: [ ubuntu-latest, macos-latest, windows-latest ] + python-version: [ 3.8 ] + model: [ 'yolov5s' ] # models to test # Timeout: https://stackoverflow.com/a/59076067/4521646 timeout-minutes: 50 diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 1f07888509f8..458465d90eef 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -21,34 +21,34 @@ jobs: # 
https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed steps: - - name: Checkout repository - uses: actions/checkout@v2 - - # Initializes the CodeQL tools for scanning. - - name: Initialize CodeQL - uses: github/codeql-action/init@v1 - with: - languages: ${{ matrix.language }} - # If you wish to specify custom queries, you can do so here or in a config file. - # By default, queries listed here will override any specified in a config file. - # Prefix the list here with "+" to use these queries and those in the config file. - # queries: ./path/to/local/query, your-org/your-repo/queries@main - - # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). - # If this step fails, then you should remove it and run the build manually (see below) - - name: Autobuild - uses: github/codeql-action/autobuild@v1 - - # ℹ️ Command-line programs to run using the OS shell. - # 📚 https://git.io/JvXDl - - # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines - # and modify them (or add more) to build your code if your project - # uses a compiled language - - #- run: | - # make bootstrap - # make release - - - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v1 + - name: Checkout repository + uses: actions/checkout@v2 + + # Initializes the CodeQL tools for scanning. + - name: Initialize CodeQL + uses: github/codeql-action/init@v1 + with: + languages: ${{ matrix.language }} + # If you wish to specify custom queries, you can do so here or in a config file. + # By default, queries listed here will override any specified in a config file. + # Prefix the list here with "+" to use these queries and those in the config file. + # queries: ./path/to/local/query, your-org/your-repo/queries@main + + # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). + # If this step fails, then you should remove it and run the build manually (see below) + - name: Autobuild + uses: github/codeql-action/autobuild@v1 + + # ℹ️ Command-line programs to run using the OS shell. + # 📚 https://git.io/JvXDl + + # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines + # and modify them (or add more) to build your code if your project + # uses a compiled language + + #- run: | + # make bootstrap + # make release + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v1 diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index ee472297107e..bbbe8e676f82 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -1,6 +1,6 @@ name: Greetings -on: [pull_request_target, issues] +on: [ pull_request_target, issues ] jobs: greeting: @@ -39,18 +39,18 @@ jobs: ``` ## Environments - + YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled): - + - **Google Colab and Kaggle** notebooks with free GPU: Open In Colab Open In Kaggle - **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart) - **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart) - **Docker Image**. 
See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) Docker Pulls - - + + ## Status - + ![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg) - + If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([test.py](https://github.com/ultralytics/yolov5/blob/master/test.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/models/export.py)) on MacOS, Windows, and Ubuntu every 24 hours and on every commit. diff --git a/.github/workflows/rebase.yml b/.github/workflows/rebase.yml index e86c57744b84..38e14578216c 100644 --- a/.github/workflows/rebase.yml +++ b/.github/workflows/rebase.yml @@ -3,7 +3,7 @@ name: Automatic Rebase on: issue_comment: - types: [created] + types: [ created ] jobs: rebase: From fad27c004661692d715b31e8830122f93a09347f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 19 Jun 2021 16:30:25 +0200 Subject: [PATCH 0334/1976] Update DDP for `torch.distributed.run` with `gloo` backend (#3680) * Update DDP for `torch.distributed.run` * Add LOCAL_RANK * remove opt.local_rank * backend="gloo|nccl" * print * print * debug * debug * os.getenv * gloo * gloo * gloo * cleanup * fix getenv * cleanup * cleanup destroy * try nccl * return opt * add --local_rank * add timeout * add init_method * gloo * move destroy * move destroy * move print(opt) under if RANK * destroy only RANK 0 * move destroy inside train() * restore destroy outside train() * update print(opt) * cleanup * nccl * gloo with 60 second timeout * update namespace printing --- detect.py | 6 +- models/export.py | 2 +- test.py | 4 +- train.py | 95 +++++++++++++++--------------- utils/datasets.py | 4 +- utils/torch_utils.py | 5 +- utils/wandb_logging/wandb_utils.py | 6 +- 7 files changed, 61 insertions(+), 61 deletions(-) diff --git a/detect.py b/detect.py index c51c6fa4e0c1..fb2d2702234d 100644 --- a/detect.py +++ b/detect.py @@ -8,8 +8,8 @@ from models.experimental import attempt_load from utils.datasets import LoadStreams, LoadImages -from utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier, \ - scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path, save_one_box +from utils.general import check_img_size, check_requirements, check_imshow, colorstr, non_max_suppression, \ + apply_classifier, scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path, save_one_box from utils.plots import colors, plot_one_box from utils.torch_utils import select_device, load_classifier, time_synchronized @@ -202,7 +202,7 @@ def parse_opt(): def main(opt): - print(opt) + print(colorstr('detect: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items())) check_requirements(exclude=('tensorboard', 'thop')) detect(**vars(opt)) diff --git a/models/export.py b/models/export.py index 8c491dabddc0..15d6a87ecea6 100644 --- a/models/export.py +++ b/models/export.py @@ -163,8 +163,8 @@ def parse_opt(): def main(opt): - print(opt) set_logging() + print(colorstr('export: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items())) export(**vars(opt)) diff --git a/test.py b/test.py index 5ebfb36509ea..1e82fd2d1611 100644 --- a/test.py +++ b/test.py @@ 
-51,7 +51,6 @@ def test(data, device = next(model.parameters()).device # get model device else: # called directly - set_logging() device = select_device(device, batch_size=batch_size) # Directories @@ -323,7 +322,8 @@ def parse_opt(): def main(opt): - print(opt) + set_logging() + print(colorstr('test: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items())) check_requirements(exclude=('tensorboard', 'thop')) if opt.task in ('train', 'val', 'test'): # run normally diff --git a/train.py b/train.py index 8056183242a6..8f206a9401c5 100644 --- a/train.py +++ b/train.py @@ -37,15 +37,17 @@ from utils.wandb_logging.wandb_utils import WandbLogger, check_wandb_resume logger = logging.getLogger(__name__) +LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html +RANK = int(os.getenv('RANK', -1)) +WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) def train(hyp, # path/to/hyp.yaml or hyp dictionary opt, device, ): - save_dir, epochs, batch_size, total_batch_size, weights, rank, single_cls = \ - Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank, \ - opt.single_cls + save_dir, epochs, batch_size, total_batch_size, weights, single_cls = \ + Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.single_cls # Directories wdir = save_dir / 'weights' @@ -69,13 +71,13 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Configure plots = not opt.evolve # create plots cuda = device.type != 'cpu' - init_seeds(2 + rank) + init_seeds(2 + RANK) with open(opt.data) as f: data_dict = yaml.safe_load(f) # data dict # Loggers loggers = {'wandb': None, 'tb': None} # loggers dict - if rank in [-1, 0]: + if RANK in [-1, 0]: # TensorBoard if not opt.evolve: prefix = colorstr('tensorboard: ') @@ -99,7 +101,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Model pretrained = weights.endswith('.pt') if pretrained: - with torch_distributed_zero_first(rank): + with torch_distributed_zero_first(RANK): weights = attempt_download(weights) # download if not found locally ckpt = torch.load(weights, map_location=device) # load checkpoint model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create @@ -110,7 +112,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights)) # report else: model = Model(opt.cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create - with torch_distributed_zero_first(rank): + with torch_distributed_zero_first(RANK): check_dataset(data_dict) # check train_path = data_dict['train'] test_path = data_dict['val'] @@ -158,7 +160,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # plot_lr_scheduler(optimizer, scheduler, epochs) # EMA - ema = ModelEMA(model) if rank in [-1, 0] else None + ema = ModelEMA(model) if RANK in [-1, 0] else None # Resume start_epoch, best_fitness = 0, 0.0 @@ -194,28 +196,28 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size] # verify imgsz are gs-multiples # DP mode - if cuda and rank == -1 and torch.cuda.device_count() > 1: + if cuda and RANK == -1 and torch.cuda.device_count() > 1: model = torch.nn.DataParallel(model) # SyncBatchNorm - if opt.sync_bn and cuda and rank != -1: + if opt.sync_bn and cuda and RANK != -1: model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device) logger.info('Using SyncBatchNorm()') # 
Trainloader dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, single_cls, - hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=rank, - world_size=opt.world_size, workers=opt.workers, + hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=RANK, + workers=opt.workers, image_weights=opt.image_weights, quad=opt.quad, prefix=colorstr('train: ')) mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class nb = len(dataloader) # number of batches assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % (mlc, nc, opt.data, nc - 1) # Process 0 - if rank in [-1, 0]: + if RANK in [-1, 0]: testloader = create_dataloader(test_path, imgsz_test, batch_size * 2, gs, single_cls, hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True, rank=-1, - world_size=opt.world_size, workers=opt.workers, + workers=opt.workers, pad=0.5, prefix=colorstr('val: '))[0] if not opt.resume: @@ -234,8 +236,8 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary model.half().float() # pre-reduce anchor precision # DDP mode - if cuda and rank != -1: - model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank, + if cuda and RANK != -1: + model = DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK, # nn.MultiheadAttention incompatibility with DDP https://github.com/pytorch/pytorch/issues/26698 find_unused_parameters=any(isinstance(layer, nn.MultiheadAttention) for layer in model.modules())) @@ -269,15 +271,15 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Update image weights (optional) if opt.image_weights: # Generate indices - if rank in [-1, 0]: + if RANK in [-1, 0]: cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx # Broadcast if DDP - if rank != -1: - indices = (torch.tensor(dataset.indices) if rank == 0 else torch.zeros(dataset.n)).int() + if RANK != -1: + indices = (torch.tensor(dataset.indices) if RANK == 0 else torch.zeros(dataset.n)).int() dist.broadcast(indices, 0) - if rank != 0: + if RANK != 0: dataset.indices = indices.cpu().numpy() # Update mosaic border @@ -285,11 +287,11 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # dataset.mosaic_border = [b - imgsz, -b] # height, width borders mloss = torch.zeros(4, device=device) # mean losses - if rank != -1: + if RANK != -1: dataloader.sampler.set_epoch(epoch) pbar = enumerate(dataloader) logger.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'labels', 'img_size')) - if rank in [-1, 0]: + if RANK in [-1, 0]: pbar = tqdm(pbar, total=nb) # progress bar optimizer.zero_grad() for i, (imgs, targets, paths, _) in pbar: # batch ------------------------------------------------------------- @@ -319,8 +321,8 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary with amp.autocast(enabled=cuda): pred = model(imgs) # forward loss, loss_items = compute_loss(pred, targets.to(device)) # loss scaled by batch_size - if rank != -1: - loss *= opt.world_size # gradient averaged between devices in DDP mode + if RANK != -1: + loss *= WORLD_SIZE # gradient averaged between devices in DDP mode if opt.quad: loss *= 4. 
@@ -336,7 +338,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary ema.update(model) # Print - if rank in [-1, 0]: + if RANK in [-1, 0]: mloss = (mloss * i + loss_items) / (i + 1) # update mean losses mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0) # (GB) s = ('%10s' * 2 + '%10.4g' * 6) % ( @@ -362,7 +364,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary scheduler.step() # DDP process 0 or single-GPU - if rank in [-1, 0]: + if RANK in [-1, 0]: # mAP ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride', 'class_weights']) final_epoch = epoch + 1 == epochs @@ -424,7 +426,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # end epoch ---------------------------------------------------------------------------------------------------- # end training ----------------------------------------------------------------------------------------------------- - if rank in [-1, 0]: + if RANK in [-1, 0]: logger.info(f'{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.\n') if plots: plot_results(save_dir=save_dir) # save as results.png @@ -457,8 +459,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary name='run_' + wandb_logger.wandb_run.id + '_model', aliases=['latest', 'best', 'stripped']) wandb_logger.finish_run() - else: - dist.destroy_process_group() + torch.cuda.empty_cache() return results @@ -486,7 +487,6 @@ def parse_opt(): parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class') parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer') parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode') - parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify') parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers') parser.add_argument('--project', default='runs/train', help='save to project/name') parser.add_argument('--entity', default=None, help='W&B entity') @@ -499,18 +499,15 @@ def parse_opt(): parser.add_argument('--bbox_interval', type=int, default=-1, help='Set bounding-box image logging interval for W&B') parser.add_argument('--save_period', type=int, default=-1, help='Log model after every "save_period" epoch') parser.add_argument('--artifact_alias', type=str, default="latest", help='version of dataset artifact to be used') + parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify') opt = parser.parse_args() - - # Set DDP variables - opt.world_size = int(getattr(os.environ, 'WORLD_SIZE', 1)) - opt.global_rank = int(getattr(os.environ, 'RANK', -1)) return opt def main(opt): - print(opt) - set_logging(opt.global_rank) - if opt.global_rank in [-1, 0]: + set_logging(RANK) + if RANK in [-1, 0]: + print(colorstr('train: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items())) check_git_status() check_requirements(exclude=['thop']) @@ -519,11 +516,9 @@ def main(opt): if opt.resume and not wandb_run: # resume an interrupted run ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run() # specified or most recent path assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist' - apriori = opt.global_rank, opt.local_rank with open(Path(ckpt).parent.parent / 'opt.yaml') as f: opt = argparse.Namespace(**yaml.safe_load(f)) # replace - opt.cfg, opt.weights, opt.resume, opt.batch_size, opt.global_rank, opt.local_rank = \ - 
'', ckpt, True, opt.total_batch_size, *apriori # reinstate + opt.cfg, opt.weights, opt.resume, opt.batch_size = '', ckpt, True, opt.total_batch_size # reinstate logger.info('Resuming training from %s' % ckpt) else: # opt.hyp = opt.hyp or ('hyp.finetune.yaml' if opt.weights else 'hyp.scratch.yaml') @@ -536,19 +531,21 @@ def main(opt): # DDP mode opt.total_batch_size = opt.batch_size device = select_device(opt.device, batch_size=opt.batch_size) - if opt.local_rank != -1: - assert torch.cuda.device_count() > opt.local_rank - torch.cuda.set_device(opt.local_rank) - device = torch.device('cuda', opt.local_rank) - dist.init_process_group(backend='nccl', init_method='env://') # distributed backend - assert opt.batch_size % opt.world_size == 0, '--batch-size must be multiple of CUDA device count' + if LOCAL_RANK != -1: + from datetime import timedelta + assert torch.cuda.device_count() > LOCAL_RANK, 'too few GPUS for DDP command' + torch.cuda.set_device(LOCAL_RANK) + device = torch.device('cuda', LOCAL_RANK) + dist.init_process_group(backend="gloo", timeout=timedelta(seconds=60)) + assert opt.batch_size % WORLD_SIZE == 0, '--batch-size must be multiple of CUDA device count' assert not opt.image_weights, '--image-weights argument is not compatible with DDP training' - opt.batch_size = opt.total_batch_size // opt.world_size + opt.batch_size = opt.total_batch_size // WORLD_SIZE # Train - logger.info(opt) if not opt.evolve: train(opt.hyp, opt, device) + if WORLD_SIZE > 1 and RANK == 0: + _ = [print('Destroying process group... ', end=''), dist.destroy_process_group(), print('Done.')] # Evolve hyperparameters (optional) else: @@ -584,7 +581,7 @@ def main(opt): with open(opt.hyp) as f: hyp = yaml.safe_load(f) # load hyps dict - assert opt.local_rank == -1, 'DDP mode not implemented for --evolve' + assert LOCAL_RANK == -1, 'DDP mode not implemented for --evolve' opt.notest, opt.nosave = True, True # only test/save final epoch # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices yaml_file = Path(opt.save_dir) / 'hyp_evolved.yaml' # save best result here diff --git a/utils/datasets.py b/utils/datasets.py index 21388db7ff46..93d6511ac658 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -64,7 +64,7 @@ def exif_size(img): def create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=None, augment=False, cache=False, pad=0.0, - rect=False, rank=-1, world_size=1, workers=8, image_weights=False, quad=False, prefix=''): + rect=False, rank=-1, workers=8, image_weights=False, quad=False, prefix=''): # Make sure only the first process in DDP process the dataset first, and the following others can use the cache with torch_distributed_zero_first(rank): dataset = LoadImagesAndLabels(path, imgsz, batch_size, @@ -79,7 +79,7 @@ def create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=Non prefix=prefix) batch_size = min(batch_size, len(dataset)) - nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers]) # number of workers + nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, workers]) # number of workers sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None loader = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader # Use torch.utils.data.DataLoader() if dataset.properties will update during training else InfiniteDataLoader() diff --git a/utils/torch_utils.py b/utils/torch_utils.py index b690dbe96700..2d5382471e3c 100644 --- a/utils/torch_utils.py +++ 
b/utils/torch_utils.py @@ -13,6 +13,7 @@ import torch import torch.backends.cudnn as cudnn +import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F import torchvision @@ -30,10 +31,10 @@ def torch_distributed_zero_first(local_rank: int): Decorator to make all processes in distributed training wait for each local_master to do something. """ if local_rank not in [-1, 0]: - torch.distributed.barrier() + dist.barrier() yield if local_rank == 0: - torch.distributed.barrier() + dist.barrier() def init_torch_seeds(seed=0): diff --git a/utils/wandb_logging/wandb_utils.py b/utils/wandb_logging/wandb_utils.py index 7652f964f2c0..43b4c3d04e8e 100644 --- a/utils/wandb_logging/wandb_utils.py +++ b/utils/wandb_logging/wandb_utils.py @@ -1,5 +1,6 @@ """Utilities and tools for tracking runs with Weights & Biases.""" import logging +import os import sys from contextlib import contextmanager from pathlib import Path @@ -18,6 +19,7 @@ except ImportError: wandb = None +RANK = int(os.getenv('RANK', -1)) WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' @@ -42,10 +44,10 @@ def get_run_info(run_path): def check_wandb_resume(opt): - process_wandb_config_ddp_mode(opt) if opt.global_rank not in [-1, 0] else None + process_wandb_config_ddp_mode(opt) if RANK not in [-1, 0] else None if isinstance(opt.resume, str): if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): - if opt.global_rank not in [-1, 0]: # For resuming DDP runs + if RANK not in [-1, 0]: # For resuming DDP runs entity, project, run_id, model_artifact_name = get_run_info(opt.resume) api = wandb.Api() artifact = api.artifact(entity + '/' + project + '/' + model_artifact_name + ':latest') From b3e2f4e08d6a16bf153c9b56bbc0001a52dd24e3 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 19 Jun 2021 19:14:59 +0200 Subject: [PATCH 0335/1976] Eliminate `total_batch_size` variable (#3697) * Eliminate `total_batch_size` variable * cleanup * Update train.py --- train.py | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/train.py b/train.py index 8f206a9401c5..5ad47fe9ea6a 100644 --- a/train.py +++ b/train.py @@ -46,10 +46,11 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary opt, device, ): - save_dir, epochs, batch_size, total_batch_size, weights, single_cls = \ - Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.single_cls + save_dir, epochs, batch_size, weights, single_cls = \ + opt.save_dir, opt.epochs, opt.batch_size, opt.weights, opt.single_cls # Directories + save_dir = Path(save_dir) wdir = save_dir / 'weights' wdir.mkdir(parents=True, exist_ok=True) # make dir last = wdir / 'last.pt' @@ -127,8 +128,8 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Optimizer nbs = 64 # nominal batch size - accumulate = max(round(nbs / total_batch_size), 1) # accumulate loss before optimizing - hyp['weight_decay'] *= total_batch_size * accumulate / nbs # scale weight_decay + accumulate = max(round(nbs / batch_size), 1) # accumulate loss before optimizing + hyp['weight_decay'] *= batch_size * accumulate / nbs # scale weight_decay logger.info(f"Scaled weight_decay = {hyp['weight_decay']}") pg0, pg1, pg2 = [], [], [] # optimizer parameter groups @@ -205,7 +206,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary logger.info('Using SyncBatchNorm()') # Trainloader - dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, single_cls, + dataloader, dataset = create_dataloader(train_path, imgsz, batch_size // WORLD_SIZE, gs, single_cls, 
hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=RANK, workers=opt.workers, image_weights=opt.image_weights, quad=opt.quad, prefix=colorstr('train: ')) @@ -215,7 +216,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Process 0 if RANK in [-1, 0]: - testloader = create_dataloader(test_path, imgsz_test, batch_size * 2, gs, single_cls, + testloader = create_dataloader(test_path, imgsz_test, batch_size // WORLD_SIZE * 2, gs, single_cls, hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True, rank=-1, workers=opt.workers, pad=0.5, prefix=colorstr('val: '))[0] @@ -302,7 +303,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary if ni <= nw: xi = [0, nw] # x interp # model.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou) - accumulate = max(1, np.interp(ni, xi, [1, nbs / total_batch_size]).round()) + accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round()) for j, x in enumerate(optimizer.param_groups): # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0 x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch)]) @@ -371,7 +372,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary if not opt.notest or final_epoch: # Calculate mAP wandb_logger.current_epoch = epoch + 1 results, maps, _ = test.test(data_dict, - batch_size=batch_size * 2, + batch_size=batch_size // WORLD_SIZE * 2, imgsz=imgsz_test, model=ema.ema, single_cls=single_cls, @@ -439,7 +440,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary if is_coco: # COCO dataset for m in [last, best] if best.exists() else [last]: # speed, mAP tests results, _, _ = test.test(opt.data, - batch_size=batch_size * 2, + batch_size=batch_size // WORLD_SIZE * 2, imgsz=imgsz_test, conf_thres=0.001, iou_thres=0.7, @@ -518,7 +519,7 @@ def main(opt): assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist' with open(Path(ckpt).parent.parent / 'opt.yaml') as f: opt = argparse.Namespace(**yaml.safe_load(f)) # replace - opt.cfg, opt.weights, opt.resume, opt.batch_size = '', ckpt, True, opt.total_batch_size # reinstate + opt.cfg, opt.weights, opt.resume = '', ckpt, True # reinstate logger.info('Resuming training from %s' % ckpt) else: # opt.hyp = opt.hyp or ('hyp.finetune.yaml' if opt.weights else 'hyp.scratch.yaml') @@ -529,17 +530,15 @@ def main(opt): opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok | opt.evolve)) # DDP mode - opt.total_batch_size = opt.batch_size device = select_device(opt.device, batch_size=opt.batch_size) if LOCAL_RANK != -1: from datetime import timedelta - assert torch.cuda.device_count() > LOCAL_RANK, 'too few GPUS for DDP command' + assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' torch.cuda.set_device(LOCAL_RANK) device = torch.device('cuda', LOCAL_RANK) dist.init_process_group(backend="gloo", timeout=timedelta(seconds=60)) assert opt.batch_size % WORLD_SIZE == 0, '--batch-size must be multiple of CUDA device count' assert not opt.image_weights, '--image-weights argument is not compatible with DDP training' - opt.batch_size = opt.total_batch_size // WORLD_SIZE # Train if not opt.evolve: From c1af67dcd4372ac230e9dafe6d4c4023b59a3ceb Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 19 Jun 2021 19:50:46 +0200 Subject: [PATCH 0336/1976] Add torch DP warning (#3698) --- train.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/train.py b/train.py index 5ad47fe9ea6a..68cd7fab574c 100644 --- a/train.py +++ 
b/train.py @@ -198,6 +198,8 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # DP mode if cuda and RANK == -1 and torch.cuda.device_count() > 1: + logging.warning('DP not recommended, instead use torch.distributed.run for best DDP Multi-GPU results.\n' + 'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.') model = torch.nn.DataParallel(model) # SyncBatchNorm From fbf41e09134b113f8e79ae01b4eee40d00797b2d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 20 Jun 2021 15:06:58 +0200 Subject: [PATCH 0337/1976] Add `train.run()` method (#3700) * Update train.py explicit arguments * Update train.py * Add run method --- train.py | 81 +++++++++++++++++++++++++++++++------------------------- 1 file changed, 45 insertions(+), 36 deletions(-) diff --git a/train.py b/train.py index 68cd7fab574c..fbda7320839a 100644 --- a/train.py +++ b/train.py @@ -46,8 +46,9 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary opt, device, ): - save_dir, epochs, batch_size, weights, single_cls = \ - opt.save_dir, opt.epochs, opt.batch_size, opt.weights, opt.single_cls + save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, notest, nosave, workers, = \ + opt.save_dir, opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \ + opt.resume, opt.notest, opt.nosave, opt.workers # Directories save_dir = Path(save_dir) @@ -70,34 +71,34 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary yaml.safe_dump(vars(opt), f, sort_keys=False) # Configure - plots = not opt.evolve # create plots + plots = not evolve # create plots cuda = device.type != 'cpu' init_seeds(2 + RANK) - with open(opt.data) as f: + with open(data) as f: data_dict = yaml.safe_load(f) # data dict # Loggers loggers = {'wandb': None, 'tb': None} # loggers dict if RANK in [-1, 0]: # TensorBoard - if not opt.evolve: + if not evolve: prefix = colorstr('tensorboard: ') logger.info(f"{prefix}Start with 'tensorboard --logdir {opt.project}', view at http://localhost:6006/") - loggers['tb'] = SummaryWriter(opt.save_dir) + loggers['tb'] = SummaryWriter(str(save_dir)) # W&B opt.hyp = hyp # add hyperparameters run_id = torch.load(weights).get('wandb_id') if weights.endswith('.pt') and os.path.isfile(weights) else None wandb_logger = WandbLogger(opt, save_dir.stem, run_id, data_dict) loggers['wandb'] = wandb_logger.wandb - data_dict = wandb_logger.data_dict - if wandb_logger.wandb: + if loggers['wandb']: + data_dict = wandb_logger.data_dict weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp # may update weights, epochs if resuming nc = 1 if single_cls else int(data_dict['nc']) # number of classes names = ['item'] if single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names - assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data) # check - is_coco = opt.data.endswith('coco.yaml') and nc == 80 # COCO dataset + assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, data) # check + is_coco = data.endswith('coco.yaml') and nc == 80 # COCO dataset # Model pretrained = weights.endswith('.pt') @@ -105,14 +106,14 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary with torch_distributed_zero_first(RANK): weights = attempt_download(weights) # download if not found locally ckpt = torch.load(weights, map_location=device) # load checkpoint - model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create - exclude = ['anchor'] if 
(opt.cfg or hyp.get('anchors')) and not opt.resume else [] # exclude keys + model = Model(cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create + exclude = ['anchor'] if (cfg or hyp.get('anchors')) and not resume else [] # exclude keys state_dict = ckpt['model'].float().state_dict() # to FP32 state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude) # intersect model.load_state_dict(state_dict, strict=False) # load logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights)) # report else: - model = Model(opt.cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create + model = Model(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create with torch_distributed_zero_first(RANK): check_dataset(data_dict) # check train_path = data_dict['train'] @@ -182,7 +183,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Epochs start_epoch = ckpt['epoch'] + 1 - if opt.resume: + if resume: assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (weights, epochs) if epochs < start_epoch: logger.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' % @@ -210,20 +211,20 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Trainloader dataloader, dataset = create_dataloader(train_path, imgsz, batch_size // WORLD_SIZE, gs, single_cls, hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=RANK, - workers=opt.workers, + workers=workers, image_weights=opt.image_weights, quad=opt.quad, prefix=colorstr('train: ')) mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class nb = len(dataloader) # number of batches - assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % (mlc, nc, opt.data, nc - 1) + assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % (mlc, nc, data, nc - 1) # Process 0 if RANK in [-1, 0]: testloader = create_dataloader(test_path, imgsz_test, batch_size // WORLD_SIZE * 2, gs, single_cls, - hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True, rank=-1, - workers=opt.workers, + hyp=hyp, cache=opt.cache_images and not notest, rect=True, rank=-1, + workers=workers, pad=0.5, prefix=colorstr('val: '))[0] - if not opt.resume: + if not resume: labels = np.concatenate(dataset.labels, 0) c = torch.tensor(labels[:, 0]) # classes # cf = torch.bincount(c.long(), minlength=nc) + 1. 
# frequency @@ -356,8 +357,8 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary with warnings.catch_warnings(): warnings.simplefilter('ignore') # suppress jit trace warning loggers['tb'].add_graph(torch.jit.trace(de_parallel(model), imgs[0:1], strict=False), []) - elif plots and ni == 10 and wandb_logger.wandb: - wandb_logger.log({'Mosaics': [wandb_logger.wandb.Image(str(x), caption=x.name) for x in + elif plots and ni == 10 and loggers['wandb']: + wandb_logger.log({'Mosaics': [loggers['wandb'].Image(str(x), caption=x.name) for x in save_dir.glob('train*.jpg') if x.exists()]}) # end batch ------------------------------------------------------------------------------------------------ @@ -371,7 +372,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # mAP ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride', 'class_weights']) final_epoch = epoch + 1 == epochs - if not opt.notest or final_epoch: # Calculate mAP + if not notest or final_epoch: # Calculate mAP wandb_logger.current_epoch = epoch + 1 results, maps, _ = test.test(data_dict, batch_size=batch_size // WORLD_SIZE * 2, @@ -398,7 +399,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags): if loggers['tb']: loggers['tb'].add_scalar(tag, x, epoch) # TensorBoard - if wandb_logger.wandb: + if loggers['wandb']: wandb_logger.log({tag: x}) # W&B # Update best mAP @@ -408,7 +409,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary wandb_logger.end_epoch(best_result=best_fitness == fi) # Save model - if (not opt.nosave) or (final_epoch and not opt.evolve): # if save + if (not nosave) or (final_epoch and not evolve): # if save ckpt = {'epoch': epoch, 'best_fitness': best_fitness, 'training_results': results_file.read_text(), @@ -416,13 +417,13 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary 'ema': deepcopy(ema.ema).half(), 'updates': ema.updates, 'optimizer': optimizer.state_dict(), - 'wandb_id': wandb_logger.wandb_run.id if wandb_logger.wandb else None} + 'wandb_id': wandb_logger.wandb_run.id if loggers['wandb'] else None} # Save last, best and delete torch.save(ckpt, last) if best_fitness == fi: torch.save(ckpt, best) - if wandb_logger.wandb: + if loggers['wandb']: if ((epoch + 1) % opt.save_period == 0 and not final_epoch) and opt.save_period != -1: wandb_logger.log_model(last.parent, opt, epoch, fi, best_model=best_fitness == fi) del ckpt @@ -433,15 +434,15 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary logger.info(f'{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.\n') if plots: plot_results(save_dir=save_dir) # save as results.png - if wandb_logger.wandb: + if loggers['wandb']: files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]] - wandb_logger.log({"Results": [wandb_logger.wandb.Image(str(save_dir / f), caption=f) for f in files + wandb_logger.log({"Results": [loggers['wandb'].Image(str(save_dir / f), caption=f) for f in files if (save_dir / f).exists()]}) - if not opt.evolve: + if not evolve: if is_coco: # COCO dataset for m in [last, best] if best.exists() else [last]: # speed, mAP tests - results, _, _ = test.test(opt.data, + results, _, _ = test.test(data, batch_size=batch_size // WORLD_SIZE * 2, imgsz=imgsz_test, conf_thres=0.001, @@ -457,17 +458,17 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary for f in last, best: if f.exists(): strip_optimizer(f) # strip optimizers - if wandb_logger.wandb: # Log the stripped model - 
wandb_logger.wandb.log_artifact(str(best if best.exists() else last), type='model',
-                                              name='run_' + wandb_logger.wandb_run.id + '_model',
-                                              aliases=['latest', 'best', 'stripped'])
+            if loggers['wandb']:  # Log the stripped model
+                loggers['wandb'].log_artifact(str(best if best.exists() else last), type='model',
+                                              name='run_' + wandb_logger.wandb_run.id + '_model',
+                                              aliases=['latest', 'best', 'stripped'])
        wandb_logger.finish_run()

    torch.cuda.empty_cache()
    return results

-def parse_opt():
+def parse_opt(known=False):
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', type=str, default='yolov5s.pt', help='initial weights path')
    parser.add_argument('--cfg', type=str, default='', help='model.yaml path')
@@ -503,7 +504,7 @@ def parse_opt():
    parser.add_argument('--save_period', type=int, default=-1, help='Log model after every "save_period" epoch')
    parser.add_argument('--artifact_alias', type=str, default="latest", help='version of dataset artifact to be used')
    parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify')
-    opt = parser.parse_args()
+    opt = parser.parse_known_args()[0] if known else parser.parse_args()
    return opt

@@ -633,6 +634,14 @@ def main(opt):
            f'Command to train a new model with these hyperparameters: $ python train.py --hyp {yaml_file}')
+
+
+def run(**kwargs):
+    # Usage: import train; train.run(imgsz=320, weights='yolov5m.pt')
+    opt = parse_opt(True)
+    for k, v in kwargs.items():
+        setattr(opt, k, v)
+    main(opt)
+
+
 if __name__ == "__main__":
     opt = parse_opt()
     main(opt)

From e8810a53e83ddb5dd6bf8e871c2ede701007047c Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Sun, 20 Jun 2021 17:15:42 +0200
Subject: [PATCH 0338/1976] Update DDP backend `if dist.is_nccl_available()` (#3705)

---
 train.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/train.py b/train.py
index fbda7320839a..19bd97faca1f 100644
--- a/train.py
+++ b/train.py
@@ -539,7 +539,7 @@ def main(opt):
        assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command'
        torch.cuda.set_device(LOCAL_RANK)
        device = torch.device('cuda', LOCAL_RANK)
-        dist.init_process_group(backend="gloo", timeout=timedelta(seconds=60))
+        dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo", timeout=timedelta(seconds=60))
        assert opt.batch_size % WORLD_SIZE == 0, '--batch-size must be multiple of CUDA device count'
        assert not opt.image_weights, '--image-weights argument is not compatible with DDP training'

From 75c0ff43af18d9d90b32ccfadd6029573b2a502a Mon Sep 17 00:00:00 2001
From: Ayush Chaurasia
Date: Mon, 21 Jun 2021 17:30:25 +0530
Subject: [PATCH 0339/1976] [x]W&B: Don't resume transfer learning runs (#3604)

* Allow config change
* Allow val change in wandb config
* Don't resume transfer learning runs
* Add entity in log dataset

---
 train.py                           | 1 +
 utils/wandb_logging/log_dataset.py | 2 ++
 utils/wandb_logging/wandb_utils.py | 3 +--
 3 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/train.py b/train.py
index 19bd97faca1f..67d835d60691 100644
--- a/train.py
+++ b/train.py
@@ -89,6 +89,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary
    # W&B
    opt.hyp = hyp  # add hyperparameters
    run_id = torch.load(weights).get('wandb_id') if weights.endswith('.pt') and os.path.isfile(weights) else None
+    run_id = run_id if opt.resume else None  # start fresh run if transfer learning
    wandb_logger = WandbLogger(opt, save_dir.stem, run_id, data_dict)
    loggers['wandb'] = wandb_logger.wandb
    if loggers['wandb']:
diff --git
a/utils/wandb_logging/log_dataset.py b/utils/wandb_logging/log_dataset.py index f45a23011f15..3a9a3d79fe01 100644 --- a/utils/wandb_logging/log_dataset.py +++ b/utils/wandb_logging/log_dataset.py @@ -18,6 +18,8 @@ def create_dataset_artifact(opt): parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path') parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset') parser.add_argument('--project', type=str, default='YOLOv5', help='name of W&B Project') + parser.add_argument('--entity', default=None, help='W&B entity') + opt = parser.parse_args() opt.resume = False # Explicitly disallow resume check for dataset upload job diff --git a/utils/wandb_logging/wandb_utils.py b/utils/wandb_logging/wandb_utils.py index 43b4c3d04e8e..d82633c7e2f6 100644 --- a/utils/wandb_logging/wandb_utils.py +++ b/utils/wandb_logging/wandb_utils.py @@ -126,8 +126,7 @@ def __init__(self, opt, name, run_id, data_dict, job_type='Training'): if not opt.resume: wandb_data_dict = self.check_and_upload_dataset(opt) if opt.upload_dataset else data_dict # Info useful for resuming from artifacts - self.wandb_run.config.opt = vars(opt) - self.wandb_run.config.data_dict = wandb_data_dict + self.wandb_run.config.update({'opt': vars(opt), 'data_dict': data_dict}, allow_val_change=True) self.data_dict = self.setup_training(opt, data_dict) if self.job_type == 'Dataset Creation': self.data_dict = self.check_and_upload_dataset(opt) From 1f69d1259183321205dcd7b6d884e798e8bdcf61 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 21 Jun 2021 17:25:04 +0200 Subject: [PATCH 0340/1976] Update 4 main ops for paths and .run() (#3715) * Add yolov5/ to path * rename functions to run() * cleanup * rename fix * CI fix * cleanup find models/export.py --- .github/workflows/ci-testing.yml | 2 +- .github/workflows/greetings.yml | 2 +- detect.py | 60 +++++++++++++++----------- models/export.py => export.py | 32 +++++++------- test.py | 72 ++++++++++++++++++-------------- train.py | 58 ++++++++++++++----------- tutorial.ipynb | 4 +- 7 files changed, 130 insertions(+), 100 deletions(-) rename models/export.py => export.py (89%) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 956199314726..20c1d5b026b0 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -74,5 +74,5 @@ jobs: python hubconf.py # hub python models/yolo.py --cfg ${{ matrix.model }}.yaml # inspect - python models/export.py --img 128 --batch 1 --weights ${{ matrix.model }}.pt # export + python export.py --img 128 --batch 1 --weights ${{ matrix.model }}.pt # export shell: bash diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index bbbe8e676f82..fdf1cfae8df5 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -52,5 +52,5 @@ jobs: ![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg) - If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. 
CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([test.py](https://github.com/ultralytics/yolov5/blob/master/test.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/models/export.py)) on MacOS, Windows, and Ubuntu every 24 hours and on every commit. + If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([test.py](https://github.com/ultralytics/yolov5/blob/master/test.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on MacOS, Windows, and Ubuntu every 24 hours and on every commit. diff --git a/detect.py b/detect.py index fb2d2702234d..808f3584c93d 100644 --- a/detect.py +++ b/detect.py @@ -1,4 +1,11 @@ +"""Run inference with a YOLOv5 model on images, videos, directories, streams + +Usage: + $ python path/to/detect.py --source path/to/img.jpg --weights yolov5s.pt --img 640 +""" + import argparse +import sys import time from pathlib import Path @@ -6,6 +13,9 @@ import torch import torch.backends.cudnn as cudnn +FILE = Path(__file__).absolute() +sys.path.append(FILE.parents[0].as_posix()) # add yolov5/ to path + from models.experimental import attempt_load from utils.datasets import LoadStreams, LoadImages from utils.general import check_img_size, check_requirements, check_imshow, colorstr, non_max_suppression, \ @@ -15,30 +25,30 @@ @torch.no_grad() -def detect(weights='yolov5s.pt', # model.pt path(s) - source='data/images', # file/dir/URL/glob, 0 for webcam - imgsz=640, # inference size (pixels) - conf_thres=0.25, # confidence threshold - iou_thres=0.45, # NMS IOU threshold - max_det=1000, # maximum detections per image - device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu - view_img=False, # show results - save_txt=False, # save results to *.txt - save_conf=False, # save confidences in --save-txt labels - save_crop=False, # save cropped prediction boxes - nosave=False, # do not save images/videos - classes=None, # filter by class: --class 0, or --class 0 2 3 - agnostic_nms=False, # class-agnostic NMS - augment=False, # augmented inference - update=False, # update all models - project='runs/detect', # save results to project/name - name='exp', # save results to project/name - exist_ok=False, # existing project/name ok, do not increment - line_thickness=3, # bounding box thickness (pixels) - hide_labels=False, # hide labels - hide_conf=False, # hide confidences - half=False, # use FP16 half-precision inference - ): +def run(weights='yolov5s.pt', # model.pt path(s) + source='data/images', # file/dir/URL/glob, 0 for webcam + imgsz=640, # inference size (pixels) + conf_thres=0.25, # confidence threshold + iou_thres=0.45, # NMS IOU threshold + max_det=1000, # maximum detections per image + device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu + view_img=False, # show results + save_txt=False, # save results to *.txt + save_conf=False, # save confidences in --save-txt labels + save_crop=False, # save cropped prediction boxes + nosave=False, # do not save images/videos + classes=None, # filter by class: --class 0, or --class 0 2 3 + agnostic_nms=False, # class-agnostic NMS + augment=False, # augmented inference + update=False, # update all models + project='runs/detect', # save results to project/name + name='exp', # save results to project/name + exist_ok=False, # existing project/name ok, do not increment + line_thickness=3, # bounding box thickness (pixels) + hide_labels=False, # hide labels + hide_conf=False, # hide confidences + half=False, # use FP16 half-precision inference + ): save_img = not nosave and not source.endswith('.txt') # save inference images webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith( ('rtsp://', 'rtmp://', 'http://', 'https://')) @@ -204,7 +214,7 @@ def parse_opt(): def main(opt): print(colorstr('detect: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items())) check_requirements(exclude=('tensorboard', 'thop')) - detect(**vars(opt)) + run(**vars(opt)) if __name__ == "__main__": diff --git a/models/export.py b/export.py similarity index 89% rename from models/export.py rename to export.py index 15d6a87ecea6..8f4000cdad39 100644 --- a/models/export.py +++ b/export.py @@ -1,7 +1,7 @@ """Export a YOLOv5 *.pt model to TorchScript, ONNX, CoreML formats Usage: - $ python path/to/models/export.py --weights yolov5s.pt --img 640 --batch 1 + $ python path/to/export.py --weights yolov5s.pt --img 640 --batch 1 """ import argparse @@ -14,7 +14,7 @@ from torch.utils.mobile_optimizer import optimize_for_mobile FILE = Path(__file__).absolute() -sys.path.append(FILE.parents[1].as_posix()) # add yolov5/ to path +sys.path.append(FILE.parents[0].as_posix()) # add yolov5/ to path from models.common import Conv from models.yolo import Detect @@ -24,19 +24,19 @@ from utils.torch_utils import select_device -def export(weights='./yolov5s.pt', # weights path - img_size=(640, 640), # image (height, width) - batch_size=1, # batch size - device='cpu', # cuda device, i.e. 0 or 0,1,2,3 or cpu - include=('torchscript', 'onnx', 'coreml'), # include formats - half=False, # FP16 half-precision export - inplace=False, # set YOLOv5 Detect() inplace=True - train=False, # model.train() mode - optimize=False, # TorchScript: optimize for mobile - dynamic=False, # ONNX: dynamic axes - simplify=False, # ONNX: simplify model - opset_version=12, # ONNX: opset version - ): +def run(weights='./yolov5s.pt', # weights path + img_size=(640, 640), # image (height, width) + batch_size=1, # batch size + device='cpu', # cuda device, i.e. 
0 or 0,1,2,3 or cpu + include=('torchscript', 'onnx', 'coreml'), # include formats + half=False, # FP16 half-precision export + inplace=False, # set YOLOv5 Detect() inplace=True + train=False, # model.train() mode + optimize=False, # TorchScript: optimize for mobile + dynamic=False, # ONNX: dynamic axes + simplify=False, # ONNX: simplify model + opset_version=12, # ONNX: opset version + ): t = time.time() include = [x.lower() for x in include] img_size *= 2 if len(img_size) == 1 else 1 # expand @@ -165,7 +165,7 @@ def parse_opt(): def main(opt): set_logging() print(colorstr('export: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items())) - export(**vars(opt)) + run(**vars(opt)) if __name__ == "__main__": diff --git a/test.py b/test.py index 1e82fd2d1611..0e0f01efa531 100644 --- a/test.py +++ b/test.py @@ -1,6 +1,13 @@ +"""Test a trained YOLOv5 model accuracy on a custom dataset + +Usage: + $ python path/to/test.py --data coco128.yaml --weights yolov5s.pt --img 640 +""" + import argparse import json import os +import sys from pathlib import Path from threading import Thread @@ -9,6 +16,9 @@ import yaml from tqdm import tqdm +FILE = Path(__file__).absolute() +sys.path.append(FILE.parents[0].as_posix()) # add yolov5/ to path + from models.experimental import attempt_load from utils.datasets import create_dataloader from utils.general import coco80_to_coco91_class, check_dataset, check_file, check_img_size, check_requirements, \ @@ -19,32 +29,32 @@ @torch.no_grad() -def test(data, - weights=None, # model.pt path(s) - batch_size=32, # batch size - imgsz=640, # inference size (pixels) - conf_thres=0.001, # confidence threshold - iou_thres=0.6, # NMS IoU threshold - task='val', # train, val, test, speed or study - device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu - single_cls=False, # treat as single-class dataset - augment=False, # augmented inference - verbose=False, # verbose output - save_txt=False, # save results to *.txt - save_hybrid=False, # save label+prediction hybrid results to *.txt - save_conf=False, # save confidences in --save-txt labels - save_json=False, # save a cocoapi-compatible JSON results file - project='runs/test', # save to project/name - name='exp', # save to project/name - exist_ok=False, # existing project/name ok, do not increment - half=True, # use FP16 half-precision inference - model=None, - dataloader=None, - save_dir=Path(''), - plots=True, - wandb_logger=None, - compute_loss=None, - ): +def run(data, + weights=None, # model.pt path(s) + batch_size=32, # batch size + imgsz=640, # inference size (pixels) + conf_thres=0.001, # confidence threshold + iou_thres=0.6, # NMS IoU threshold + task='val', # train, val, test, speed or study + device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu + single_cls=False, # treat as single-class dataset + augment=False, # augmented inference + verbose=False, # verbose output + save_txt=False, # save results to *.txt + save_hybrid=False, # save label+prediction hybrid results to *.txt + save_conf=False, # save confidences in --save-txt labels + save_json=False, # save a cocoapi-compatible JSON results file + project='runs/test', # save to project/name + name='exp', # save to project/name + exist_ok=False, # existing project/name ok, do not increment + half=True, # use FP16 half-precision inference + model=None, + dataloader=None, + save_dir=Path(''), + plots=True, + wandb_logger=None, + compute_loss=None, + ): # Initialize/load model and set device training = model is not None if training: # called by train.py @@ -327,12 +337,12 @@ def main(opt): check_requirements(exclude=('tensorboard', 'thop')) if opt.task in ('train', 'val', 'test'): # run normally - test(**vars(opt)) + run(**vars(opt)) elif opt.task == 'speed': # speed benchmarks for w in opt.weights if isinstance(opt.weights, list) else [opt.weights]: - test(opt.data, weights=w, batch_size=opt.batch_size, imgsz=opt.imgsz, conf_thres=.25, iou_thres=.45, - save_json=False, plots=False) + run(opt.data, weights=w, batch_size=opt.batch_size, imgsz=opt.imgsz, conf_thres=.25, iou_thres=.45, + save_json=False, plots=False) elif opt.task == 'study': # run over a range of settings and save/plot # python test.py --task study --data coco.yaml --iou 0.7 --weights yolov5s.pt yolov5m.pt yolov5l.pt yolov5x.pt @@ -342,8 +352,8 @@ def main(opt): y = [] # y axis for i in x: # img-size print(f'\nRunning {f} point {i}...') - r, _, t = test(opt.data, weights=w, batch_size=opt.batch_size, imgsz=i, conf_thres=opt.conf_thres, - iou_thres=opt.iou_thres, save_json=opt.save_json, plots=False) + r, _, t = run(opt.data, weights=w, batch_size=opt.batch_size, imgsz=i, conf_thres=opt.conf_thres, + iou_thres=opt.iou_thres, save_json=opt.save_json, plots=False) y.append(r + t) # results and times np.savetxt(f, y, fmt='%10.4g') # save os.system('zip -r study.zip study_*.txt') diff --git a/train.py b/train.py index 67d835d60691..05542b48bb59 100644 --- a/train.py +++ b/train.py @@ -1,8 +1,15 @@ +"""Train a YOLOv5 model on a custom dataset + +Usage: + $ python path/to/train.py --data coco128.yaml --weights yolov5s.pt --img 640 +""" + import argparse import logging import math import os import random +import sys import time import warnings from copy import deepcopy @@ -22,6 +29,9 @@ from torch.utils.tensorboard import SummaryWriter from tqdm import tqdm +FILE = Path(__file__).absolute() +sys.path.append(FILE.parents[0].as_posix()) # add yolov5/ to path + import test # for end-of-epoch mAP from models.experimental import attempt_load from models.yolo import Model @@ -89,7 +99,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # W&B opt.hyp = hyp # add hyperparameters run_id = torch.load(weights).get('wandb_id') if weights.endswith('.pt') and os.path.isfile(weights) else None - run_id = run_id if opt.resume else None # start fresh run if transfer learning + run_id = run_id if opt.resume else None # start fresh run if transfer learning wandb_logger = WandbLogger(opt, save_dir.stem, run_id, data_dict) loggers['wandb'] = wandb_logger.wandb if loggers['wandb']: @@ -375,18 +385,18 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary final_epoch = epoch + 1 == epochs if not notest or final_epoch: # Calculate mAP wandb_logger.current_epoch = epoch + 1 - results, maps, _ = test.test(data_dict, - 
batch_size=batch_size // WORLD_SIZE * 2, - imgsz=imgsz_test, - model=ema.ema, - single_cls=single_cls, - dataloader=testloader, - save_dir=save_dir, - save_json=is_coco and final_epoch, - verbose=nc < 50 and final_epoch, - plots=plots and final_epoch, - wandb_logger=wandb_logger, - compute_loss=compute_loss) + results, maps, _ = test.run(data_dict, + batch_size=batch_size // WORLD_SIZE * 2, + imgsz=imgsz_test, + model=ema.ema, + single_cls=single_cls, + dataloader=testloader, + save_dir=save_dir, + save_json=is_coco and final_epoch, + verbose=nc < 50 and final_epoch, + plots=plots and final_epoch, + wandb_logger=wandb_logger, + compute_loss=compute_loss) # Write with open(results_file, 'a') as f: @@ -443,17 +453,17 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary if not evolve: if is_coco: # COCO dataset for m in [last, best] if best.exists() else [last]: # speed, mAP tests - results, _, _ = test.test(data, - batch_size=batch_size // WORLD_SIZE * 2, - imgsz=imgsz_test, - conf_thres=0.001, - iou_thres=0.7, - model=attempt_load(m, device).half(), - single_cls=single_cls, - dataloader=testloader, - save_dir=save_dir, - save_json=True, - plots=False) + results, _, _ = test.run(data, + batch_size=batch_size // WORLD_SIZE * 2, + imgsz=imgsz_test, + conf_thres=0.001, + iou_thres=0.7, + model=attempt_load(m, device).half(), + single_cls=single_cls, + dataloader=testloader, + save_dir=save_dir, + save_json=True, + plots=False) # Strip optimizers for f in last, best: diff --git a/tutorial.ipynb b/tutorial.ipynb index 48780f94c856..b45b321b42e4 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -1125,7 +1125,7 @@ "\n", "![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg)\n", "\n", - "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([test.py](https://github.com/ultralytics/yolov5/blob/master/test.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/models/export.py)) on MacOS, Windows, and Ubuntu every 24 hours and on every commit.\n" + "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. 
CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([test.py](https://github.com/ultralytics/yolov5/blob/master/test.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on MacOS, Windows, and Ubuntu every 24 hours and on every commit.\n" ] }, { @@ -1212,7 +1212,7 @@ " done\n", " python hubconf.py # hub\n", " python models/yolo.py --cfg $m.yaml # inspect\n", - " python models/export.py --weights $m.pt --img 640 --batch 1 # export\n", + " python export.py --weights $m.pt --img 640 --batch 1 # export\n", "done" ], "execution_count": null, From b83e1a4adcf77ccafa72b22ade6cb3898ccb0e05 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 21 Jun 2021 22:50:56 +0200 Subject: [PATCH 0341/1976] Fix `img2label_paths()` order (#3720) * Fix `img2label_paths()` order * fix, 1 --- utils/datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/datasets.py b/utils/datasets.py index 93d6511ac658..25a7b2f67355 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -348,7 +348,7 @@ def __len__(self): def img2label_paths(img_paths): # Define label paths as a function of image paths sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings - return ['txt'.join(x.replace(sa, sb, 1).rsplit(x.split('.')[-1], 1)) for x in img_paths] + return [sb.join(x.rsplit(sa, 1)).rsplit('.', 1)[0] + '.txt' for x in img_paths] class LoadImagesAndLabels(Dataset): # for training/testing From 0e2d0d54d76698111a446c2499786a1f8df856af Mon Sep 17 00:00:00 2001 From: fcakyon <34196005+fcakyon@users.noreply.github.com> Date: Tue, 22 Jun 2021 14:33:38 +0300 Subject: [PATCH 0342/1976] Fix typo (#3729) --- export.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/export.py b/export.py index 8f4000cdad39..b7ff0748ba93 100644 --- a/export.py +++ b/export.py @@ -43,7 +43,7 @@ def run(weights='./yolov5s.pt', # weights path # Load PyTorch model device = select_device(device) - assert not (device.type == 'cpu' and opt.half), '--half only compatible with GPU export, i.e. use --device 0' + assert not (device.type == 'cpu' and half), '--half only compatible with GPU export, i.e. 
use --device 0' model = attempt_load(weights, map_location=device) # load FP32 model labels = model.names From 9ac7d388a99c2344c2e1ddeb495faccf586b7dc3 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 22 Jun 2021 13:50:47 +0200 Subject: [PATCH 0343/1976] Backwards compatible cache version checks (#3730) --- utils/datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/datasets.py b/utils/datasets.py index 25a7b2f67355..abb4a3650bfc 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -390,7 +390,7 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') # cached labels if cache_path.is_file(): cache, exists = torch.load(cache_path), True # load - if cache['version'] != 0.3 or cache['hash'] != get_hash(self.label_files + self.img_files): + if cache.get('version') != 0.3 or cache.get('hash') != get_hash(self.label_files + self.img_files): cache, exists = self.cache_labels(cache_path, prefix), False # re-cache else: cache, exists = self.cache_labels(cache_path, prefix), False # cache From 63060910a68bfde238872d629ab88e2e7bc736e8 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 22 Jun 2021 16:05:38 +0200 Subject: [PATCH 0344/1976] Update `check_datasets()` for dynamic unzip path (#3732) @KalenMike --- utils/general.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/utils/general.py b/utils/general.py index 3e3bd6997a7c..e39f2ac09ca3 100755 --- a/utils/general.py +++ b/utils/general.py @@ -223,16 +223,17 @@ def check_file(file): def check_dataset(data, autodownload=True): # Download dataset if not found locally val, s = data.get('val'), data.get('download') - if val and len(val): + if val: + root = Path(val).parts[0] + os.sep # unzip directory i.e. 
'../' val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path if not all(x.exists() for x in val): print('\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()]) - if s and len(s) and autodownload: # download script + if s and autodownload: # download script if s.startswith('http') and s.endswith('.zip'): # URL f = Path(s).name # filename print(f'Downloading {s} ...') torch.hub.download_url_to_file(s, f) - r = os.system(f'unzip -q {f} -d ../ && rm {f}') # unzip + r = os.system(f'unzip -q {f} -d {root} && rm {f}') # unzip elif s.startswith('bash '): # bash script print(f'Running {s} ...') r = os.system(s) From fdc22398fa06a96d6c3f0114ca4bc08a246ae67a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 23 Jun 2021 12:49:38 +0200 Subject: [PATCH 0345/1976] Create `data/hyps` directory (#3747) --- .gitignore | 10 +--------- data/{ => hyps}/hyp.finetune.yaml | 0 data/{ => hyps}/hyp.finetune_objects365.yaml | 0 data/{ => hyps}/hyp.scratch.yaml | 0 train.py | 2 +- tutorial.ipynb | 2 +- 6 files changed, 3 insertions(+), 11 deletions(-) rename data/{ => hyps}/hyp.finetune.yaml (100%) rename data/{ => hyps}/hyp.finetune_objects365.yaml (100%) rename data/{ => hyps}/hyp.scratch.yaml (100%) diff --git a/.gitignore b/.gitignore index 91ce33fb931e..91299e263b86 100755 --- a/.gitignore +++ b/.gitignore @@ -19,26 +19,18 @@ *.avi *.data *.json - *.cfg !cfg/yolov3*.cfg storage.googleapis.com runs/* data/* +!data/hyps/* !data/images/zidane.jpg !data/images/bus.jpg -!data/coco.names -!data/coco_paper.names -!data/coco.data -!data/coco_*.data -!data/coco_*.txt -!data/trainvalno5k.shapes !data/*.sh -pycocotools/* results*.txt -gcp_test*.sh # Datasets ------------------------------------------------------------------------------------------------------------- coco/ diff --git a/data/hyp.finetune.yaml b/data/hyps/hyp.finetune.yaml similarity index 100% rename from data/hyp.finetune.yaml rename to data/hyps/hyp.finetune.yaml diff --git a/data/hyp.finetune_objects365.yaml b/data/hyps/hyp.finetune_objects365.yaml similarity index 100% rename from data/hyp.finetune_objects365.yaml rename to data/hyps/hyp.finetune_objects365.yaml diff --git a/data/hyp.scratch.yaml b/data/hyps/hyp.scratch.yaml similarity index 100% rename from data/hyp.scratch.yaml rename to data/hyps/hyp.scratch.yaml diff --git a/train.py b/train.py index 05542b48bb59..e934441d1182 100644 --- a/train.py +++ b/train.py @@ -484,7 +484,7 @@ def parse_opt(known=False): parser.add_argument('--weights', type=str, default='yolov5s.pt', help='initial weights path') parser.add_argument('--cfg', type=str, default='', help='model.yaml path') parser.add_argument('--data', type=str, default='data/coco128.yaml', help='dataset.yaml path') - parser.add_argument('--hyp', type=str, default='data/hyp.scratch.yaml', help='hyperparameters path') + parser.add_argument('--hyp', type=str, default='data/hyps/hyp.scratch.yaml', help='hyperparameters path') parser.add_argument('--epochs', type=int, default=300) parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs') parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='[train, test] image sizes') diff --git a/tutorial.ipynb b/tutorial.ipynb index b45b321b42e4..bcdbc014dfb4 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -932,7 +932,7 @@ "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", "YOLOv5 🚀 v5.0-158-g78cf488 torch 1.8.1+cu101 CUDA:0 
(Tesla V100-SXM2-16GB, 16160.5MB)\n", "\n", - "Namespace(adam=False, artifact_alias='latest', batch_size=16, bbox_interval=-1, bucket='', cache_images=True, cfg='', data='./data/coco128.yaml', device='', entity=None, epochs=3, evolve=False, exist_ok=False, global_rank=-1, hyp='data/hyp.scratch.yaml', image_weights=False, img_size=[640, 640], label_smoothing=0.0, linear_lr=False, local_rank=-1, multi_scale=False, name='exp', noautoanchor=False, nosave=False, notest=False, project='runs/train', quad=False, rect=False, resume=False, save_dir='runs/train/exp', save_period=-1, single_cls=False, sync_bn=False, total_batch_size=16, upload_dataset=False, weights='yolov5s.pt', workers=8, world_size=1)\n", + "Namespace(adam=False, artifact_alias='latest', batch_size=16, bbox_interval=-1, bucket='', cache_images=True, cfg='', data='./data/coco128.yaml', device='', entity=None, epochs=3, evolve=False, exist_ok=False, global_rank=-1, hyp='hyp.scratch.yaml', image_weights=False, img_size=[640, 640], label_smoothing=0.0, linear_lr=False, local_rank=-1, multi_scale=False, name='exp', noautoanchor=False, nosave=False, notest=False, project='runs/train', quad=False, rect=False, resume=False, save_dir='runs/train/exp', save_period=-1, single_cls=False, sync_bn=False, total_batch_size=16, upload_dataset=False, weights='yolov5s.pt', workers=8, world_size=1)\n", "\u001b[34m\u001b[1mtensorboard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n", "2021-06-08 17:00:55.016221: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcudart.so.11.0\n", "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.2, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0\n", From ae4261c7749ff644f45c66b79ecb1fff06437052 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 23 Jun 2021 12:56:22 +0200 Subject: [PATCH 0346/1976] Force non-zero hyp evolution weights `w` (#3748) Fix for https://github.com/ultralytics/yolov5/issues/3741 --- train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/train.py b/train.py index e934441d1182..ba84b432f660 100644 --- a/train.py +++ b/train.py @@ -608,7 +608,7 @@ def main(opt): x = np.loadtxt('evolve.txt', ndmin=2) n = min(5, len(x)) # number of previous results to consider x = x[np.argsort(-fitness(x))][:n] # top n mutations - w = fitness(x) - fitness(x).min() # weights + w = fitness(x) - fitness(x).min() + 1E-6 # weights (sum > 0) if parent == 'single' or len(x) == 1: # x = x[random.randint(0, n - 1)] # random selection x = x[random.choices(range(n), weights=w)[0]] # weighted selection From 417a2f425cd553c1f2a41374a6cd2710aa91d5f0 Mon Sep 17 00:00:00 2001 From: Yonghye Kwon Date: Thu, 24 Jun 2021 22:57:27 +0900 Subject: [PATCH 0347/1976] Edit comment (#3759) edit comment --- utils/datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/datasets.py b/utils/datasets.py index abb4a3650bfc..d3714d745b88 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -574,7 +574,7 @@ def __getitem__(self, index): labels_out[:, 1:] = torch.from_numpy(labels) # Convert - img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 + img = img[:, :, ::-1].transpose(2, 0, 1) # BGR 
to RGB, to 3 x img_height x img_width img = np.ascontiguousarray(img) return torch.from_numpy(img), labels_out, self.img_files[index], shapes From f79d7479daa5ed2af55159ab621be82fbbb8ef1a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 25 Jun 2021 01:25:03 +0200 Subject: [PATCH 0348/1976] Add optional dataset.yaml `path` attribute (#3753) * Add optional dataset.yaml `path` attribute @KalenMike * pass locals to python scripts * handle lists * update coco128.yaml * Capitalize first letter * add test key * finalize GlobalWheat2020.yaml * finalize objects365.yaml * finalize SKU-110K.yaml * finalize SKU-110K.yaml * finalize VisDrone.yaml * NoneType fix * update download comment * voc to VOC * update * update VOC.yaml * update VOC.yaml * remove dashes * delete get_voc.sh * force coco and coco128 to ../datasets * Capitalize Argoverse_HD.yaml * Capitalize Objects365.yaml * update Argoverse_HD.yaml * coco segments fix * VOC single-thread * update Argoverse_HD.yaml * update data_dict in test handling * create root --- data/Argoverse_HD.yaml | 66 ++++++++++++ data/GlobalWheat2020.yaml | 55 +++++----- data/{objects365.yaml => Objects365.yaml} | 23 +++-- data/SKU-110K.yaml | 29 +++--- data/VOC.yaml | 79 +++++++++++++++ data/VisDrone.yaml | 23 ++--- data/argoverse_hd.yaml | 21 ---- data/coco.yaml | 46 +++++---- data/coco128.yaml | 27 ++--- data/hyps/hyp.finetune.yaml | 2 +- data/scripts/get_argoverse_hd.sh | 61 ------------ data/scripts/get_voc.sh | 116 ---------------------- data/voc.yaml | 21 ---- test.py | 9 +- train.py | 2 +- tutorial.ipynb | 2 +- utils/general.py | 15 ++- 17 files changed, 268 insertions(+), 329 deletions(-) create mode 100644 data/Argoverse_HD.yaml rename data/{objects365.yaml => Objects365.yaml} (92%) create mode 100644 data/VOC.yaml delete mode 100644 data/argoverse_hd.yaml delete mode 100644 data/scripts/get_argoverse_hd.sh delete mode 100644 data/scripts/get_voc.sh delete mode 100644 data/voc.yaml diff --git a/data/Argoverse_HD.yaml b/data/Argoverse_HD.yaml new file mode 100644 index 000000000000..ad1a52254d74 --- /dev/null +++ b/data/Argoverse_HD.yaml @@ -0,0 +1,66 @@ +# Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ +# Train command: python train.py --data Argoverse_HD.yaml +# Default dataset location is next to YOLOv5: +# /parent +# /datasets/Argoverse +# /yolov5 + + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
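The `path` key that follows is the heart of this commit: `check_dataset()` now prepends it to every `train`/`val`/`test` entry, so each dataset YAML can use short paths relative to one root. A minimal sketch of that resolution step, mirroring the `utils/general.py` hunk later in this same patch (the standalone function name here is illustrative):

    from pathlib import Path

    def resolve_dataset_paths(data):
        # Prepend the optional 'path' root dir to train/val/test entries,
        # handling both a single string and a list of image dirs/files
        path = Path(data.get('path', ''))
        for k in ('train', 'val', 'test'):
            if data.get(k):
                if isinstance(data[k], str):
                    data[k] = str(path / data[k])
                else:
                    data[k] = [str(path / x) for x in data[k]]
        return data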
+path: ../datasets/Argoverse # dataset root dir +train: Argoverse-1.1/images/train/ # train images (relative to 'path') 39384 images +val: Argoverse-1.1/images/val/ # val images (relative to 'path') 15062 images +test: Argoverse-1.1/images/test/ # test images (optional) https://eval.ai/web/challenges/challenge-page/800/overview + +# Classes +nc: 8 # number of classes +names: [ 'person', 'bicycle', 'car', 'motorcycle', 'bus', 'truck', 'traffic_light', 'stop_sign' ] # class names + + +# Download script/URL (optional) --------------------------------------------------------------------------------------- +download: | + import json + + from tqdm import tqdm + from utils.general import download, Path + + + def argoverse2yolo(set): + labels = {} + a = json.load(open(set, "rb")) + for annot in tqdm(a['annotations'], desc=f"Converting {set} to YOLOv5 format..."): + img_id = annot['image_id'] + img_name = a['images'][img_id]['name'] + img_label_name = img_name[:-3] + "txt" + + cls = annot['category_id'] # instance class id + x_center, y_center, width, height = annot['bbox'] + x_center = (x_center + width / 2) / 1920.0 # offset and scale + y_center = (y_center + height / 2) / 1200.0 # offset and scale + width /= 1920.0 # scale + height /= 1200.0 # scale + + img_dir = set.parents[2] / 'Argoverse-1.1' / 'labels' / a['seq_dirs'][a['images'][annot['image_id']]['sid']] + if not img_dir.exists(): + img_dir.mkdir(parents=True, exist_ok=True) + + k = str(img_dir / img_label_name) + if k not in labels: + labels[k] = [] + labels[k].append(f"{cls} {x_center} {y_center} {width} {height}\n") + + for k in labels: + with open(k, "w") as f: + f.writelines(labels[k]) + + + # Download + dir = Path('../datasets/Argoverse') # dataset root dir + urls = ['https://argoverse-hd.s3.us-east-2.amazonaws.com/Argoverse-HD-Full.zip'] + download(urls, dir=dir, delete=False) + + # Convert + annotations_dir = 'Argoverse-HD/annotations/' + (dir / 'Argoverse-1.1' / 'tracking').rename(dir / 'Argoverse-1.1' / 'images') # rename 'tracking' to 'images' + for d in "train.json", "val.json": + argoverse2yolo(dir / annotations_dir / d) # convert VisDrone annotations to YOLO labels diff --git a/data/GlobalWheat2020.yaml b/data/GlobalWheat2020.yaml index f45182b43e25..b77534944ed7 100644 --- a/data/GlobalWheat2020.yaml +++ b/data/GlobalWheat2020.yaml @@ -1,43 +1,40 @@ # Global Wheat 2020 dataset http://www.global-wheat.com/ # Train command: python train.py --data GlobalWheat2020.yaml # Default dataset location is next to YOLOv5: -# /parent_folder +# /parent # /datasets/GlobalWheat2020 # /yolov5 -# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/] -train: # 3422 images - - ../datasets/GlobalWheat2020/images/arvalis_1 - - ../datasets/GlobalWheat2020/images/arvalis_2 - - ../datasets/GlobalWheat2020/images/arvalis_3 - - ../datasets/GlobalWheat2020/images/ethz_1 - - ../datasets/GlobalWheat2020/images/rres_1 - - ../datasets/GlobalWheat2020/images/inrae_1 - - ../datasets/GlobalWheat2020/images/usask_1 - -val: # 748 images (WARNING: train set contains ethz_1) - - ../datasets/GlobalWheat2020/images/ethz_1 - -test: # 1276 images - - ../datasets/GlobalWheat2020/images/utokyo_1 - - ../datasets/GlobalWheat2020/images/utokyo_2 - - ../datasets/GlobalWheat2020/images/nau_1 - - ../datasets/GlobalWheat2020/images/uq_1 - -# number of classes -nc: 1 - -# class names -names: [ 'wheat_head' ] - - -# download command/URL (optional) 
-------------------------------------------------------------------------------------- +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] +path: ../datasets/GlobalWheat2020 # dataset root dir +train: # train images (relative to 'path') 3422 images + - images/arvalis_1 + - images/arvalis_2 + - images/arvalis_3 + - images/ethz_1 + - images/rres_1 + - images/inrae_1 + - images/usask_1 +val: # val images (relative to 'path') 748 images (WARNING: train set contains ethz_1) + - images/ethz_1 +test: # test images (optional) 1276 images + - images/utokyo_1 + - images/utokyo_2 + - images/nau_1 + - images/uq_1 + +# Classes +nc: 1 # number of classes +names: [ 'wheat_head' ] # class names + + +# Download script/URL (optional) --------------------------------------------------------------------------------------- download: | from utils.general import download, Path # Download - dir = Path('../datasets/GlobalWheat2020') # dataset directory + dir = Path(yaml['path']) # dataset root dir urls = ['https://zenodo.org/record/4298502/files/global-wheat-codalab-official.zip', 'https://github.com/ultralytics/yolov5/releases/download/v1.0/GlobalWheat2020_labels.zip'] download(urls, dir=dir) diff --git a/data/objects365.yaml b/data/Objects365.yaml similarity index 92% rename from data/objects365.yaml rename to data/Objects365.yaml index eb99995903cf..e365c82cab08 100644 --- a/data/objects365.yaml +++ b/data/Objects365.yaml @@ -1,18 +1,19 @@ # Objects365 dataset https://www.objects365.org/ -# Train command: python train.py --data objects365.yaml +# Train command: python train.py --data Objects365.yaml # Default dataset location is next to YOLOv5: -# /parent_folder -# /datasets/objects365 +# /parent +# /datasets/Objects365 # /yolov5 -# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/] -train: ../datasets/objects365/images/train # 1742289 images -val: ../datasets/objects365/images/val # 5570 images -# number of classes -nc: 365 +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
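Note the switch to `dir = Path(yaml['path'])` in the GlobalWheat2020 script above: these multi-line `download: |` fields are plain Python that `check_dataset()` executes with the parsed dataset dict injected under the name `yaml` (see the `exec(s, {'yaml': data})` change in the `utils/general.py` hunk of this same patch). A hedged sketch of that dispatch logic, with error handling trimmed:

    import os
    from pathlib import Path

    import torch

    def run_download(data, root='..'):
        # 'download' may be a zip URL, a bash command, or inline Python source
        s = data.get('download')
        if not s:
            return
        if s.startswith('http') and s.endswith('.zip'):  # plain URL
            f = Path(s).name  # local filename
            torch.hub.download_url_to_file(s, f)
            os.system(f'unzip -q {f} -d {root} && rm {f}')
        elif s.startswith('bash '):  # bash script
            os.system(s)
        else:  # inline Python script
            exec(s, {'yaml': data})  # scripts can read yaml['path'] etc.

One consequence of this design worth noting: `exec()` runs arbitrary code from the YAML, so dataset files should only come from trusted sources.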
+path: ../datasets/Objects365 # dataset root dir +train: images/train # train images (relative to 'path') 1742289 images +val: images/val # val images (relative to 'path') 5570 images +test: # test images (optional) -# class names +# Classes +nc: 365 # number of classes names: [ 'Person', 'Sneakers', 'Chair', 'Other Shoes', 'Hat', 'Car', 'Lamp', 'Glasses', 'Bottle', 'Desk', 'Cup', 'Street Lights', 'Cabinet/shelf', 'Handbag/Satchel', 'Bracelet', 'Plate', 'Picture/Frame', 'Helmet', 'Book', 'Gloves', 'Storage box', 'Boat', 'Leather Shoes', 'Flower', 'Bench', 'Potted Plant', 'Bowl/Basin', 'Flag', @@ -56,7 +57,7 @@ names: [ 'Person', 'Sneakers', 'Chair', 'Other Shoes', 'Hat', 'Car', 'Lamp', 'Gl 'Chainsaw', 'Eraser', 'Lobster', 'Durian', 'Okra', 'Lipstick', 'Cosmetics Mirror', 'Curling', 'Table Tennis' ] -# download command/URL (optional) -------------------------------------------------------------------------------------- +# Download script/URL (optional) --------------------------------------------------------------------------------------- download: | from pycocotools.coco import COCO from tqdm import tqdm @@ -64,7 +65,7 @@ download: | from utils.general import download, Path # Make Directories - dir = Path('../datasets/objects365') # dataset directory + dir = Path(yaml['path']) # dataset root dir for p in 'images', 'labels': (dir / p).mkdir(parents=True, exist_ok=True) for q in 'train', 'val': diff --git a/data/SKU-110K.yaml b/data/SKU-110K.yaml index a8c1f25b385a..7087bb9c2893 100644 --- a/data/SKU-110K.yaml +++ b/data/SKU-110K.yaml @@ -1,39 +1,38 @@ # SKU-110K retail items dataset https://github.com/eg4000/SKU110K_CVPR19 # Train command: python train.py --data SKU-110K.yaml # Default dataset location is next to YOLOv5: -# /parent_folder +# /parent # /datasets/SKU-110K # /yolov5 -# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/] -train: ../datasets/SKU-110K/train.txt # 8219 images -val: ../datasets/SKU-110K/val.txt # 588 images -test: ../datasets/SKU-110K/test.txt # 2936 images +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
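Every one of these dataset scripts leans on the `download()` helper from `utils.general`, called with a URL list plus `dir`, `delete` and (for the larger datasets) `threads` keywords. The real helper differs in details, but a rough sketch of its contract, assuming `unzip` and `tar` are on PATH:

    import os
    from multiprocessing.pool import ThreadPool
    from pathlib import Path

    import torch

    def download(urls, dir='.', unzip=True, delete=True, threads=1):
        # Download each URL into dir, optionally extracting archives after
        def download_one(url, dir):
            f = Path(dir) / Path(url).name  # local target file
            if not f.exists():
                torch.hub.download_url_to_file(url, str(f))
            if unzip and f.suffix in ('.zip', '.gz'):
                if f.suffix == '.zip':
                    s = f'unzip -qo {f} -d {dir}'  # quiet, overwrite
                else:
                    s = f'tar xfz {f} --directory {f.parent}'
                if delete:
                    s += f' && rm {f}'  # remove archive after extraction
                os.system(s)

        dir = Path(dir)
        dir.mkdir(parents=True, exist_ok=True)
        if threads > 1:
            with ThreadPool(threads) as pool:
                pool.starmap(download_one, [(u, dir) for u in urls])  # parallel
        else:
            for u in urls:
                download_one(u, dir)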
+path: ../datasets/SKU-110K # dataset root dir +train: train.txt # train images (relative to 'path') 8219 images +val: val.txt # val images (relative to 'path') 588 images +test: test.txt # test images (optional) 2936 images -# number of classes -nc: 1 +# Classes +nc: 1 # number of classes +names: [ 'object' ] # class names -# class names -names: [ 'object' ] - -# download command/URL (optional) -------------------------------------------------------------------------------------- +# Download script/URL (optional) --------------------------------------------------------------------------------------- download: | import shutil from tqdm import tqdm from utils.general import np, pd, Path, download, xyxy2xywh # Download - datasets = Path('../datasets') # download directory + dir = Path(yaml['path']) # dataset root dir + parent = Path(dir.parent) # download dir urls = ['http://trax-geometry.s3.amazonaws.com/cvpr_challenge/SKU110K_fixed.tar.gz'] - download(urls, dir=datasets, delete=False) + download(urls, dir=parent, delete=False) # Rename directories - dir = (datasets / 'SKU-110K') if dir.exists(): shutil.rmtree(dir) - (datasets / 'SKU110K_fixed').rename(dir) # rename dir + (parent / 'SKU110K_fixed').rename(dir) # rename dir (dir / 'labels').mkdir(parents=True, exist_ok=True) # create labels dir # Convert labels diff --git a/data/VOC.yaml b/data/VOC.yaml new file mode 100644 index 000000000000..3d878fa67a60 --- /dev/null +++ b/data/VOC.yaml @@ -0,0 +1,79 @@ +# PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC/ +# Train command: python train.py --data VOC.yaml +# Default dataset location is next to YOLOv5: +# /parent +# /datasets/VOC +# /yolov5 + + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] +path: ../datasets/VOC +train: # train images (relative to 'path') 16551 images + - images/train2012 + - images/train2007 + - images/val2012 + - images/val2007 +val: # val images (relative to 'path') 4952 images + - images/test2007 +test: # test images (optional) + - images/test2007 + +# Classes +nc: 20 # number of classes +names: [ 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', + 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor' ] # class names + + +# Download script/URL (optional) --------------------------------------------------------------------------------------- +download: | + import xml.etree.ElementTree as ET + + from tqdm import tqdm + from utils.general import download, Path + + + def convert_label(path, lb_path, year, image_id): + def convert_box(size, box): + dw, dh = 1. / size[0], 1. 
/ size[1] + x, y, w, h = (box[0] + box[1]) / 2.0 - 1, (box[2] + box[3]) / 2.0 - 1, box[1] - box[0], box[3] - box[2] + return x * dw, y * dh, w * dw, h * dh + + in_file = open(path / f'VOC{year}/Annotations/{image_id}.xml') + out_file = open(lb_path, 'w') + tree = ET.parse(in_file) + root = tree.getroot() + size = root.find('size') + w = int(size.find('width').text) + h = int(size.find('height').text) + + for obj in root.iter('object'): + cls = obj.find('name').text + if cls in yaml['names'] and not int(obj.find('difficult').text) == 1: + xmlbox = obj.find('bndbox') + bb = convert_box((w, h), [float(xmlbox.find(x).text) for x in ('xmin', 'xmax', 'ymin', 'ymax')]) + cls_id = yaml['names'].index(cls) # class id + out_file.write(" ".join([str(a) for a in (cls_id, *bb)]) + '\n') + + + # Download + dir = Path(yaml['path']) # dataset root dir + url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/' + urls = [url + 'VOCtrainval_06-Nov-2007.zip', # 446MB, 5012 images + url + 'VOCtest_06-Nov-2007.zip', # 438MB, 4953 images + url + 'VOCtrainval_11-May-2012.zip'] # 1.95GB, 17126 images + download(urls, dir=dir / 'images', delete=False) + + # Convert + path = dir / f'images/VOCdevkit' + for year, image_set in ('2012', 'train'), ('2012', 'val'), ('2007', 'train'), ('2007', 'val'), ('2007', 'test'): + imgs_path = dir / 'images' / f'{image_set}{year}' + lbs_path = dir / 'labels' / f'{image_set}{year}' + imgs_path.mkdir(exist_ok=True, parents=True) + lbs_path.mkdir(exist_ok=True, parents=True) + + image_ids = open(path / f'VOC{year}/ImageSets/Main/{image_set}.txt').read().strip().split() + for id in tqdm(image_ids, desc=f'{image_set}{year}'): + f = path / f'VOC{year}/JPEGImages/{id}.jpg' # old img path + lb_path = (lbs_path / f.name).with_suffix('.txt') # new label path + f.rename(imgs_path / f.name) # move image + convert_label(path, lb_path, year, id) # convert labels to YOLO format diff --git a/data/VisDrone.yaml b/data/VisDrone.yaml index c4603b200132..c1cd38d1e10f 100644 --- a/data/VisDrone.yaml +++ b/data/VisDrone.yaml @@ -1,24 +1,23 @@ # VisDrone2019-DET dataset https://github.com/VisDrone/VisDrone-Dataset # Train command: python train.py --data VisDrone.yaml # Default dataset location is next to YOLOv5: -# /parent_folder -# /VisDrone +# /parent +# /datasets/VisDrone # /yolov5 -# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/] -train: ../VisDrone/VisDrone2019-DET-train/images # 6471 images -val: ../VisDrone/VisDrone2019-DET-val/images # 548 images -test: ../VisDrone/VisDrone2019-DET-test-dev/images # 1610 images +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
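The VOC `convert_box()` defined earlier in this patch turns corner coordinates (note the argument order is xmin, xmax, ymin, ymax) into the normalized center-x, center-y, width, height format that YOLO labels use; the `- 1` compensates for VOC's 1-based pixel coordinates. A quick worked check with made-up numbers, a 640x480 image and a box spanning x 100-300, y 200-400:

    def convert_box(size, box):
        # size = (w, h); box = (xmin, xmax, ymin, ymax) in VOC pixels
        dw, dh = 1. / size[0], 1. / size[1]
        x, y = (box[0] + box[1]) / 2.0 - 1, (box[2] + box[3]) / 2.0 - 1
        w, h = box[1] - box[0], box[3] - box[2]
        return x * dw, y * dh, w * dw, h * dh

    print(convert_box((640, 480), (100, 300, 200, 400)))
    # -> (0.3109375, 0.6229166..., 0.3125, 0.4166666...) normalized xywh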
+path: ../datasets/VisDrone # dataset root dir +train: VisDrone2019-DET-train/images # train images (relative to 'path') 6471 images +val: VisDrone2019-DET-val/images # val images (relative to 'path') 548 images +test: VisDrone2019-DET-test-dev/images # test images (optional) 1610 images -# number of classes -nc: 10 - -# class names +# Classes +nc: 10 # number of classes names: [ 'pedestrian', 'people', 'bicycle', 'car', 'van', 'truck', 'tricycle', 'awning-tricycle', 'bus', 'motor' ] -# download command/URL (optional) -------------------------------------------------------------------------------------- +# Download script/URL (optional) --------------------------------------------------------------------------------------- download: | from utils.general import download, os, Path @@ -49,7 +48,7 @@ download: | # Download - dir = Path('../VisDrone') # dataset directory + dir = Path(yaml['path']) # dataset root dir urls = ['https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-train.zip', 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-val.zip', 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-dev.zip', diff --git a/data/argoverse_hd.yaml b/data/argoverse_hd.yaml deleted file mode 100644 index 0ba314d82ce1..000000000000 --- a/data/argoverse_hd.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ -# Train command: python train.py --data argoverse_hd.yaml -# Default dataset location is next to YOLOv5: -# /parent_folder -# /argoverse -# /yolov5 - - -# download command/URL (optional) -download: bash data/scripts/get_argoverse_hd.sh - -# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/] -train: ../argoverse/Argoverse-1.1/images/train/ # 39384 images -val: ../argoverse/Argoverse-1.1/images/val/ # 15062 iamges -test: ../argoverse/Argoverse-1.1/images/test/ # Submit to: https://eval.ai/web/challenges/challenge-page/800/overview - -# number of classes -nc: 8 - -# class names -names: [ 'person', 'bicycle', 'car', 'motorcycle', 'bus', 'truck', 'traffic_light', 'stop_sign' ] diff --git a/data/coco.yaml b/data/coco.yaml index f818a49ff0fa..c6053c984bc0 100644 --- a/data/coco.yaml +++ b/data/coco.yaml @@ -1,23 +1,19 @@ # COCO 2017 dataset http://cocodataset.org # Train command: python train.py --data coco.yaml # Default dataset location is next to YOLOv5: -# /parent_folder -# /coco +# /parent +# /datasets/coco # /yolov5 -# download command/URL (optional) -download: bash data/scripts/get_coco.sh +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
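coco.yaml here is the one dataset in this patch whose splits are *.txt list files rather than image directories, the header comment's form 2. All three accepted forms ultimately reduce to a list of image paths; a hedged sketch of how a loader can normalize them (jpg-only and minimal checks for brevity), with relative entries resolved against the dataset root:

    from pathlib import Path

    def list_images(spec, root):
        # spec: an image dir, a .txt file of image paths, or a list of either
        files = []
        for p in (spec if isinstance(spec, list) else [spec]):
            p = Path(root) / p
            if p.is_dir():
                files += sorted(str(x) for x in p.rglob('*.jpg'))
            elif p.is_file() and p.suffix == '.txt':
                files += p.read_text().strip().splitlines()  # one path per line
            else:
                raise FileNotFoundError(f'{p} does not exist')
        return files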
+path: ../datasets/coco # dataset root dir +train: train2017.txt # train images (relative to 'path') 118287 images +val: val2017.txt # train images (relative to 'path') 5000 images +test: test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794 -# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/] -train: ../coco/train2017.txt # 118287 images -val: ../coco/val2017.txt # 5000 images -test: ../coco/test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794 - -# number of classes -nc: 80 - -# class names +# Classes +nc: 80 # number of classes names: [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', @@ -26,10 +22,22 @@ names: [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', ' 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', - 'hair drier', 'toothbrush' ] + 'hair drier', 'toothbrush' ] # class names + + +# Download script/URL (optional) +download: | + from utils.general import download, Path + + # Download labels + segments = False # segment or box labels + dir = Path(yaml['path']) # dataset root dir + url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/' + urls = [url + ('coco2017labels-segments.zip' if segments else 'coco2017labels.zip')] # labels + download(urls, dir=dir.parent) -# Print classes -# with open('data/coco.yaml') as f: -# d = yaml.safe_load(f) # dict -# for i, x in enumerate(d['names']): -# print(i, x) + # Download data + urls = ['http://images.cocodataset.org/zips/train2017.zip', # 19G, 118k images + 'http://images.cocodataset.org/zips/val2017.zip', # 1G, 5k images + 'http://images.cocodataset.org/zips/test2017.zip'] # 7G, 41k images (optional) + download(urls, dir=dir / 'images', threads=3) diff --git a/data/coco128.yaml b/data/coco128.yaml index 83fbc29d3404..e70ad687dd88 100644 --- a/data/coco128.yaml +++ b/data/coco128.yaml @@ -1,22 +1,19 @@ # COCO 2017 dataset http://cocodataset.org - first 128 training images # Train command: python train.py --data coco128.yaml # Default dataset location is next to YOLOv5: -# /parent_folder -# /coco128 +# /parent +# /datasets/coco128 # /yolov5 -# download command/URL (optional) -download: https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
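Dropped from the tail of coco.yaml just above is the old commented-out class-printing snippet; if you still need the index-to-name mapping, the equivalent remains a few lines of Python:

    import yaml

    with open('data/coco.yaml') as f:
        d = yaml.safe_load(f)  # dataset dict
    for i, name in enumerate(d['names']):
        print(i, name)  # 0 person, 1 bicycle, ...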
+path: ../datasets/coco128 # dataset root dir +train: images/train2017 # train images (relative to 'path') 128 images +val: images/train2017 # val images (relative to 'path') 128 images +test: # test images (optional) -# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/] -train: ../coco128/images/train2017/ # 128 images -val: ../coco128/images/train2017/ # 128 images - -# number of classes -nc: 80 - -# class names +# Classes +nc: 80 # number of classes names: [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', @@ -25,4 +22,8 @@ names: [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', ' 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', - 'hair drier', 'toothbrush' ] + 'hair drier', 'toothbrush' ] # class names + + +# Download script/URL (optional) +download: https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip \ No newline at end of file diff --git a/data/hyps/hyp.finetune.yaml b/data/hyps/hyp.finetune.yaml index 1b84cff95c2c..a77597741356 100644 --- a/data/hyps/hyp.finetune.yaml +++ b/data/hyps/hyp.finetune.yaml @@ -1,5 +1,5 @@ # Hyperparameters for VOC finetuning -# python train.py --batch 64 --weights yolov5m.pt --data voc.yaml --img 512 --epochs 50 +# python train.py --batch 64 --weights yolov5m.pt --data VOC.yaml --img 512 --epochs 50 # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials diff --git a/data/scripts/get_argoverse_hd.sh b/data/scripts/get_argoverse_hd.sh deleted file mode 100644 index 331509914568..000000000000 --- a/data/scripts/get_argoverse_hd.sh +++ /dev/null @@ -1,61 +0,0 @@ -#!/bin/bash -# Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ -# Download command: bash data/scripts/get_argoverse_hd.sh -# Train command: python train.py --data argoverse_hd.yaml -# Default dataset location is next to YOLOv5: -# /parent_folder -# /argoverse -# /yolov5 - -# Download/unzip images -d='../argoverse/' # unzip directory -mkdir $d -url=https://argoverse-hd.s3.us-east-2.amazonaws.com/ -f=Argoverse-HD-Full.zip -curl -L $url$f -o $f && unzip -q $f -d $d && rm $f &# download, unzip, remove in background -wait # finish background tasks - -cd ../argoverse/Argoverse-1.1/ -ln -s tracking images - -cd ../Argoverse-HD/annotations/ - -python3 - "$@" <train.txt -cat 2007_train.txt 2007_val.txt 2007_test.txt 2012_train.txt 2012_val.txt >train.all.txt - -mkdir ../VOC ../VOC/images ../VOC/images/train ../VOC/images/val -mkdir ../VOC/labels ../VOC/labels/train ../VOC/labels/val - -python3 - "$@" < 1: # model = nn.DataParallel(model) + # Data + with open(data) as f: + data = yaml.safe_load(f) + check_dataset(data) # check + # Half half &= device.type != 'cpu' # half precision only supported on CUDA if half: @@ -83,10 +88,6 @@ def run(data, # Configure model.eval() - if isinstance(data, str): - with open(data) as f: - data = yaml.safe_load(f) - check_dataset(data) # check is_coco = 
type(data['val']) is str and data['val'].endswith('coco/val2017.txt') # COCO dataset nc = 1 if single_cls else int(data['nc']) # number of classes iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95 diff --git a/train.py b/train.py index ba84b432f660..6b04e8ff3a6a 100644 --- a/train.py +++ b/train.py @@ -453,7 +453,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary if not evolve: if is_coco: # COCO dataset for m in [last, best] if best.exists() else [last]: # speed, mAP tests - results, _, _ = test.run(data, + results, _, _ = test.run(data_dict, batch_size=batch_size // WORLD_SIZE * 2, imgsz=imgsz_test, conf_thres=0.001, diff --git a/tutorial.ipynb b/tutorial.ipynb index bcdbc014dfb4..d136803659fb 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -1255,7 +1255,7 @@ "source": [ "# VOC\n", "for b, m in zip([64, 48, 32, 16], ['yolov5s', 'yolov5m', 'yolov5l', 'yolov5x']): # zip(batch_size, model)\n", - " !python train.py --batch {b} --weights {m}.pt --data voc.yaml --epochs 50 --cache --img 512 --nosave --hyp hyp.finetune.yaml --project VOC --name {m}" + " !python train.py --batch {b} --weights {m}.pt --data VOC.yaml --epochs 50 --cache --img 512 --nosave --hyp hyp.finetune.yaml --project VOC --name {m}" ], "execution_count": null, "outputs": [] diff --git a/utils/general.py b/utils/general.py index e39f2ac09ca3..555975f07c5d 100755 --- a/utils/general.py +++ b/utils/general.py @@ -222,9 +222,14 @@ def check_file(file): def check_dataset(data, autodownload=True): # Download dataset if not found locally - val, s = data.get('val'), data.get('download') + path = Path(data.get('path', '')) # optional 'path' field + if path: + for k in 'train', 'val', 'test': + if data.get(k): # prepend path + data[k] = str(path / data[k]) if isinstance(data[k], str) else [str(path / x) for x in data[k]] + + train, val, test, s = [data.get(x) for x in ('train', 'val', 'test', 'download')] if val: - root = Path(val).parts[0] + os.sep # unzip directory i.e. '../' val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path if not all(x.exists() for x in val): print('\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()]) @@ -233,12 +238,14 @@ def check_dataset(data, autodownload=True): f = Path(s).name # filename print(f'Downloading {s} ...') torch.hub.download_url_to_file(s, f) + root = path.parent if 'path' in data else '..' # unzip directory i.e. 
'../' + Path(root).mkdir(parents=True, exist_ok=True) # create root r = os.system(f'unzip -q {f} -d {root} && rm {f}') # unzip elif s.startswith('bash '): # bash script print(f'Running {s} ...') r = os.system(s) else: # python script - r = exec(s) # return None + r = exec(s, {'yaml': data}) # return None print('Dataset autodownload %s\n' % ('success' if r in (0, None) else 'failure')) # print result else: raise Exception('Dataset not found.') @@ -258,7 +265,7 @@ def download_one(url, dir): if unzip and f.suffix in ('.zip', '.gz'): print(f'Unzipping {f}...') if f.suffix == '.zip': - s = f'unzip -qo {f} -d {dir} && rm {f}' # unzip -quiet -overwrite + s = f'unzip -qo {f} -d {dir}' # unzip -quiet -overwrite elif f.suffix == '.gz': s = f'tar xfz {f} --directory {f.parent}' # unzip if delete: # delete zip file after unzip From 03281f8c7613ad808d7d356f0195152b2d46ab99 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 25 Jun 2021 01:55:53 +0200 Subject: [PATCH 0349/1976] COCO annotations JSON fix (#3764) --- test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test.py b/test.py index 31d57221a3d5..643dc441e521 100644 --- a/test.py +++ b/test.py @@ -270,7 +270,7 @@ def run(data, # Save JSON if save_json and len(jdict): w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights - anno_json = '../coco/annotations/instances_val2017.json' # annotations json + anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json') # annotations json pred_json = str(save_dir / f"{w}_predictions.json") # predictions json print('\nEvaluating pycocotools mAP... saving %s...' % pred_json) with open(pred_json, 'w') as f: From 374957317a5469742b24291caa52dedfd9d31c99 Mon Sep 17 00:00:00 2001 From: Yonghye Kwon Date: Fri, 25 Jun 2021 18:47:46 +0900 Subject: [PATCH 0350/1976] Add `xyxy2xywhn()` (#3765) * Edit Comments for numpy2torch tensor process Edit Comments for numpy2torch tensor process * add xyxy2xywhn add xyxy2xywhn * add xyxy2xywhn * formatting * pass arguments pass arguments * edit comment for xyxy2xywhn() edit comment for xyxy2xywhn() * cleanup datasets.py Co-authored-by: Glenn Jocher --- utils/datasets.py | 14 ++++++-------- utils/general.py | 10 ++++++++++ 2 files changed, 16 insertions(+), 8 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index d3714d745b88..eac0c7834308 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -23,8 +23,8 @@ from torch.utils.data import Dataset from tqdm import tqdm -from utils.general import check_requirements, check_file, check_dataset, xyxy2xywh, xywh2xyxy, xywhn2xyxy, xyn2xy, \ - segment2box, segments2boxes, resample_segments, clean_str +from utils.general import check_requirements, check_file, check_dataset, xywh2xyxy, xywhn2xyxy, xyxy2xywhn, \ + xyn2xy, segment2box, segments2boxes, resample_segments, clean_str from utils.torch_utils import torch_distributed_zero_first # Parameters @@ -192,7 +192,7 @@ def __next__(self): img = letterbox(img0, self.img_size, stride=self.stride)[0] # Convert - img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 + img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB and HWC to CHW img = np.ascontiguousarray(img) return path, img, img0, self.cap @@ -255,7 +255,7 @@ def __next__(self): img = letterbox(img0, self.img_size, stride=self.stride)[0] # Convert - img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 + img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB and HWC to CHW img = 
np.ascontiguousarray(img) return img_path, img, img0, None @@ -336,7 +336,7 @@ def __next__(self): img = np.stack(img, 0) # Convert - img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416 + img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB and BHWC to BCHW img = np.ascontiguousarray(img) return self.sources, img, img0, None @@ -552,9 +552,7 @@ def __getitem__(self, index): nL = len(labels) # number of labels if nL: - labels[:, 1:5] = xyxy2xywh(labels[:, 1:5]) # convert xyxy to xywh - labels[:, [2, 4]] /= img.shape[0] # normalized height 0-1 - labels[:, [1, 3]] /= img.shape[1] # normalized width 0-1 + labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0]) # xyxy to xywh normalized if self.augment: # flip up-down diff --git a/utils/general.py b/utils/general.py index 555975f07c5d..6a5b42f374e6 100755 --- a/utils/general.py +++ b/utils/general.py @@ -393,6 +393,16 @@ def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0): return y +def xyxy2xywhn(x, w=640, h=640): + # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = ((x[:, 0] + x[:, 2]) / 2) / w # x center + y[:, 1] = ((x[:, 1] + x[:, 3]) / 2) / h # y center + y[:, 2] = (x[:, 2] - x[:, 0]) / w # width + y[:, 3] = (x[:, 3] - x[:, 1]) / h # height + return y + + def xyn2xy(x, w=640, h=640, padw=0, padh=0): # Convert normalized segments into pixel segments, shape (n,2) y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) From f2d97ebb251e689f55879709179248d4acf8e2a3 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 25 Jun 2021 12:52:05 +0200 Subject: [PATCH 0351/1976] Remove DDP MultiHeadAttention fix (#3768) --- train.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/train.py b/train.py index 6b04e8ff3a6a..d4a1b48b5aa4 100644 --- a/train.py +++ b/train.py @@ -252,9 +252,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # DDP mode if cuda and RANK != -1: - model = DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK, - # nn.MultiheadAttention incompatibility with DDP https://github.com/pytorch/pytorch/issues/26698 - find_unused_parameters=any(isinstance(layer, nn.MultiheadAttention) for layer in model.modules())) + model = DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK) # Model parameters hyp['box'] *= 3. 
/ nl # scale to layers From 09246a5a331c05df230ab41d41f972bfa3f97d4c Mon Sep 17 00:00:00 2001 From: Piotr Skalski Date: Fri, 25 Jun 2021 16:16:18 +0200 Subject: [PATCH 0352/1976] fix/incorrect_fitness_import (#3770) --- train.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/train.py b/train.py index d4a1b48b5aa4..3390e838803a 100644 --- a/train.py +++ b/train.py @@ -38,13 +38,14 @@ from utils.autoanchor import check_anchors from utils.datasets import create_dataloader from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \ - fitness, strip_optimizer, get_latest_run, check_dataset, check_file, check_git_status, check_img_size, \ + strip_optimizer, get_latest_run, check_dataset, check_file, check_git_status, check_img_size, \ check_requirements, print_mutation, set_logging, one_cycle, colorstr from utils.google_utils import attempt_download from utils.loss import ComputeLoss from utils.plots import plot_images, plot_labels, plot_results, plot_evolution from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, de_parallel from utils.wandb_logging.wandb_utils import WandbLogger, check_wandb_resume +from utils.metrics import fitness logger = logging.getLogger(__name__) LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html From ffb6e11050c1379de120af4e687d9623a0535b41 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Sat, 26 Jun 2021 00:15:33 +0530 Subject: [PATCH 0353/1976] W&B: Update Tables API and comply with new dataset_check (#3772) * Update tables API and windows path fix * update dataset check --- utils/wandb_logging/wandb_utils.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/utils/wandb_logging/wandb_utils.py b/utils/wandb_logging/wandb_utils.py index d82633c7e2f6..f031a819b977 100644 --- a/utils/wandb_logging/wandb_utils.py +++ b/utils/wandb_logging/wandb_utils.py @@ -136,7 +136,6 @@ def __init__(self, opt, name, run_id, data_dict, job_type='Training'): def check_and_upload_dataset(self, opt): assert wandb, 'Install wandb to upload dataset' - check_dataset(self.data_dict) config_path = self.log_dataset_artifact(check_file(opt.data), opt.single_cls, 'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem) @@ -171,9 +170,11 @@ def setup_training(self, opt, data_dict): data_dict['val'] = str(val_path) self.val_table = self.val_artifact.get("val") self.map_val_table_path() + wandb.log({"validation dataset": self.val_table}) + if self.val_artifact is not None: self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") - self.result_table = wandb.Table(["epoch", "id", "prediction", "avg_confidence"]) + self.result_table = wandb.Table(["epoch", "id", "ground truth", "prediction", "avg_confidence"]) if opt.bbox_interval == -1: self.bbox_interval = opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else 1 return data_dict @@ -181,7 +182,7 @@ def setup_training(self, opt, data_dict): def download_dataset_artifact(self, path, alias): if isinstance(path, str) and path.startswith(WANDB_ARTIFACT_PREFIX): artifact_path = Path(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ":" + alias) - dataset_artifact = wandb.use_artifact(artifact_path.as_posix()) + dataset_artifact = wandb.use_artifact(artifact_path.as_posix().replace("\\","/")) assert dataset_artifact is not None, "'Error: W&B dataset artifact doesn\'t exist'" datadir = dataset_artifact.download() 
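        # wandb Artifact.download() returns the local directory the artifact
        # files were pulled into; the artifact object is returned alongside it
        # so callers can also inspect the artifact's metadata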
return datadir, dataset_artifact @@ -216,6 +217,7 @@ def log_model(self, path, opt, epoch, fitness_score, best_model=False): def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False): with open(data_file) as f: data = yaml.safe_load(f) # data dict + check_dataset(data) nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names']) names = {k: v for k, v in enumerate(names)} # to index dictionary self.train_artifact = self.create_dataset_table(LoadImagesAndLabels( @@ -228,6 +230,7 @@ def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config= data['val'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'val') path = data_file if overwrite_config else '_wandb.'.join(data_file.rsplit('.', 1)) # updated data.yaml path data.pop('download', None) + data.pop('path', None) with open(path, 'w') as f: yaml.safe_dump(data, f) @@ -297,6 +300,7 @@ def log_training_progress(self, predn, path, names): id = self.val_table_map[Path(path).name] self.result_table.add_data(self.current_epoch, id, + self.val_table.data[id][1], wandb.Image(self.val_table.data[id][1], boxes=boxes, classes=class_set), total_conf / max(1, len(box_data)) ) @@ -312,11 +316,12 @@ def end_epoch(self, best_result=False): wandb.log(self.log_dict) self.log_dict = {} if self.result_artifact: - train_results = wandb.JoinedTable(self.val_table, self.result_table, "id") - self.result_artifact.add(train_results, 'result') + self.result_artifact.add(self.result_table, 'result') wandb.log_artifact(self.result_artifact, aliases=['latest', 'last', 'epoch ' + str(self.current_epoch), ('best' if best_result else '')]) - self.result_table = wandb.Table(["epoch", "id", "prediction", "avg_confidence"]) + + wandb.log({"evaluation": self.result_table}) + self.result_table = wandb.Table(["epoch", "id", "ground truth", "prediction", "avg_confidence"]) self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") def finish_run(self): From f89941711cc9b59f35f8991e6324a0ee80aad07e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 26 Jun 2021 00:49:05 +0200 Subject: [PATCH 0354/1976] NGA xView 2018 Dataset Auto-Download (#3775) * update clip_coords for numpy * uncomment * cleanup * Add autosplits * fix * cleanup --- data/xView.yaml | 101 ++++++++++++++++++++++++++++++++++++++++++++++ utils/datasets.py | 21 +++++----- utils/general.py | 18 ++++++--- 3 files changed, 125 insertions(+), 15 deletions(-) create mode 100644 data/xView.yaml diff --git a/data/xView.yaml b/data/xView.yaml new file mode 100644 index 000000000000..5212193a0bf0 --- /dev/null +++ b/data/xView.yaml @@ -0,0 +1,101 @@ +# xView 2018 dataset https://challenge.xviewdataset.org +# ----> NOTE: DOWNLOAD DATA MANUALLY from URL above and unzip to /datasets/xView before running train command below +# Train command: python train.py --data xView.yaml +# Default dataset location is next to YOLOv5: +# /parent +# /datasets/xView +# /yolov5 + + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
+path: ../datasets/xView # dataset root dir
+train: images/autosplit_train.txt # train images (relative to 'path') 90% of 847 train images
+val: images/autosplit_val.txt # val images (relative to 'path') 10% of 847 train images
+
+# Classes
+nc: 60 # number of classes
+names: [ 'Fixed-wing Aircraft', 'Small Aircraft', 'Cargo Plane', 'Helicopter', 'Passenger Vehicle', 'Small Car', 'Bus',
+ 'Pickup Truck', 'Utility Truck', 'Truck', 'Cargo Truck', 'Truck w/Box', 'Truck Tractor', 'Trailer',
+ 'Truck w/Flatbed', 'Truck w/Liquid', 'Crane Truck', 'Railway Vehicle', 'Passenger Car', 'Cargo Car',
+ 'Flat Car', 'Tank car', 'Locomotive', 'Maritime Vessel', 'Motorboat', 'Sailboat', 'Tugboat', 'Barge',
+ 'Fishing Vessel', 'Ferry', 'Yacht', 'Container Ship', 'Oil Tanker', 'Engineering Vehicle', 'Tower crane',
+ 'Container Crane', 'Reach Stacker', 'Straddle Carrier', 'Mobile Crane', 'Dump Truck', 'Haul Truck',
+ 'Scraper/Tractor', 'Front loader/Bulldozer', 'Excavator', 'Cement Mixer', 'Ground Grader', 'Hut/Tent', 'Shed',
+ 'Building', 'Aircraft Hangar', 'Damaged Building', 'Facility', 'Construction Site', 'Vehicle Lot', 'Helipad',
+ 'Storage Tank', 'Shipping container lot', 'Shipping Container', 'Pylon', 'Tower' ] # class names
+
+
+# Download script/URL (optional) ---------------------------------------------------------------------------------------
+download: |
+ import json
+ import os
+ from pathlib import Path
+
+ import numpy as np
+ from PIL import Image
+ from tqdm import tqdm
+
+ from utils.datasets import autosplit
+ from utils.general import download, xyxy2xywhn
+
+
+ def convert_labels(fname=Path('xView/xView_train.geojson')):
+ # Convert xView geoJSON labels to YOLO format
+ path = fname.parent
+ with open(fname) as f:
+ print(f'Loading {fname}...')
+ data = json.load(f)
+
+ # Make dirs
+ labels = Path(path / 'labels' / 'train')
+ os.system(f'rm -rf {labels}')
+ labels.mkdir(parents=True, exist_ok=True)
+
+ # xView classes 11-94 to 0-59
+ xview_class2index = [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, -1, 3, -1, 4, 5, 6, 7, 8, -1, 9, 10, 11,
+ 12, 13, 14, 15, -1, -1, 16, 17, 18, 19, 20, 21, 22, -1, 23, 24, 25, -1, 26, 27, -1, 28, -1,
+ 29, 30, 31, 32, 33, 34, 35, 36, 37, -1, 38, 39, 40, 41, 42, 43, 44, 45, -1, -1, -1, -1, 46,
+ 47, 48, 49, -1, 50, 51, -1, 52, -1, -1, -1, 53, 54, -1, 55, -1, -1, 56, -1, 57, -1, 58, 59]
+
+ shapes = {}
+ for feature in tqdm(data['features'], desc=f'Converting {fname}'):
+ p = feature['properties']
+ if p['bounds_imcoords']:
+ id = p['image_id']
+ file = path / 'train_images' / id
+ if file.exists(): # 1395.tif missing
+ try:
+ box = np.array([int(num) for num in p['bounds_imcoords'].split(",")])
+ assert box.shape[0] == 4, f'incorrect box shape {box.shape[0]}'
+ cls = p['type_id']
+ cls = xview_class2index[int(cls)] # xView class to 0-59
+ assert 59 >= cls >= 0, f'incorrect class index {cls}'
+
+ # Write YOLO label
+ if id not in shapes:
+ shapes[id] = Image.open(file).size
+ box = xyxy2xywhn(box[None].astype(np.float), w=shapes[id][0], h=shapes[id][1], clip=True)
+ with open((labels / id).with_suffix('.txt'), 'a') as f:
+ f.write(f"{cls} {' '.join(f'{x:.6f}' for x in box[0])}\n") # write label.txt
+ except Exception as e:
+ print(f'WARNING: skipping one label for {file}: {e}')
+
+
+ # Download manually from https://challenge.xviewdataset.org
+ dir = Path(yaml['path']) # dataset root dir
+ # urls = ['https://d307kc0mrhucc3.cloudfront.net/train_labels.zip', # train labels
+ # 'https://d307kc0mrhucc3.cloudfront.net/train_images.zip', # 15G, 847
train images + # 'https://d307kc0mrhucc3.cloudfront.net/val_images.zip'] # 5G, 282 val images (no labels) + # download(urls, dir=dir, delete=False) + + # Convert labels + convert_labels(dir / 'xView_train.geojson') + + # Move images + images = Path(dir / 'images') + images.mkdir(parents=True, exist_ok=True) + Path(dir / 'train_images').rename(dir / 'images' / 'train') + Path(dir / 'val_images').rename(dir / 'images' / 'val') + + # Split + autosplit(dir / 'images' / 'train') diff --git a/utils/datasets.py b/utils/datasets.py index eac0c7834308..4658dc524be0 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -985,7 +985,7 @@ def create_folder(path='./new'): os.makedirs(path) # make new output folder -def flatten_recursive(path='../coco128'): +def flatten_recursive(path='../datasets/coco128'): # Flatten a recursive directory by bringing all files to top level new_path = Path(path + '_flat') create_folder(new_path) @@ -993,7 +993,7 @@ def flatten_recursive(path='../coco128'): shutil.copyfile(file, new_path / Path(file).name) -def extract_boxes(path='../coco128/'): # from utils.datasets import *; extract_boxes('../coco128') +def extract_boxes(path='../datasets/coco128'): # from utils.datasets import *; extract_boxes() # Convert detection dataset into classification dataset, with one directory per class path = Path(path) # images dir @@ -1028,27 +1028,28 @@ def extract_boxes(path='../coco128/'): # from utils.datasets import *; extract_ assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}' -def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0), annotated_only=False): +def autosplit(path='../datasets/coco128/images', weights=(0.9, 0.1, 0.0), annotated_only=False): """ Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files - Usage: from utils.datasets import *; autosplit('../coco128') + Usage: from utils.datasets import *; autosplit() Arguments - path: Path to images directory - weights: Train, val, test weights (list) - annotated_only: Only use images with an annotated txt file + path: Path to images directory + weights: Train, val, test weights (list, tuple) + annotated_only: Only use images with an annotated txt file """ path = Path(path) # images dir files = sum([list(path.rglob(f"*.{img_ext}")) for img_ext in img_formats], []) # image files only n = len(files) # number of files + random.seed(0) # for reproducibility indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files - [(path / x).unlink() for x in txt if (path / x).exists()] # remove existing + [(path.parent / x).unlink(missing_ok=True) for x in txt] # remove existing print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only) for i, img in tqdm(zip(indices, files), total=n): if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label - with open(path / txt[i], 'a') as f: - f.write(str(img) + '\n') # add image to txt file + with open(path.parent / txt[i], 'a') as f: + f.write('./' + img.relative_to(path.parent).as_posix() + '\n') # add image to txt file def verify_image_label(args): diff --git a/utils/general.py b/utils/general.py index 6a5b42f374e6..83eb95744678 100755 --- a/utils/general.py +++ b/utils/general.py @@ -393,8 +393,10 @@ def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0): return y -def xyxy2xywhn(x, w=640, h=640): +def xyxy2xywhn(x, w=640, h=640, clip=False): # Convert nx4 boxes from 
[x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right + if clip: + clip_coords(x, (h, w)) # warning: inplace clip y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) y[:, 0] = ((x[:, 0] + x[:, 2]) / 2) / w # x center y[:, 1] = ((x[:, 1] + x[:, 3]) / 2) / h # y center @@ -455,10 +457,16 @@ def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None): def clip_coords(boxes, img_shape): # Clip bounding xyxy bounding boxes to image shape (height, width) - boxes[:, 0].clamp_(0, img_shape[1]) # x1 - boxes[:, 1].clamp_(0, img_shape[0]) # y1 - boxes[:, 2].clamp_(0, img_shape[1]) # x2 - boxes[:, 3].clamp_(0, img_shape[0]) # y2 + if isinstance(boxes, torch.Tensor): + boxes[:, 0].clamp_(0, img_shape[1]) # x1 + boxes[:, 1].clamp_(0, img_shape[0]) # y1 + boxes[:, 2].clamp_(0, img_shape[1]) # x2 + boxes[:, 3].clamp_(0, img_shape[0]) # y2 + else: # np.array + boxes[:, 0].clip(0, img_shape[1], out=boxes[:, 0]) # x1 + boxes[:, 1].clip(0, img_shape[0], out=boxes[:, 1]) # y1 + boxes[:, 2].clip(0, img_shape[1], out=boxes[:, 2]) # x2 + boxes[:, 3].clip(0, img_shape[0], out=boxes[:, 3]) # y2 def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7): From 9dc5d35fce4c768427e790f20d4b425ecad15d08 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 26 Jun 2021 13:05:46 +0200 Subject: [PATCH 0355/1976] Update README.md fix banner width (#3785) --- README.md | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 82d408ef5cde..c26b3c264771 100755 --- a/README.md +++ b/README.md @@ -178,11 +178,8 @@ Get started in seconds with our verified environments and integrations, includin We are super excited about our first-ever Ultralytics YOLOv5 🚀 EXPORT Competition with **$10,000** in cash prizes! -
-[competition banner markup: 5 lines removed]
+[competition banner markup: 2 lines added, width fixed]

## Why YOLOv5
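The `xyxy2xywhn(..., clip=True)` helper added a few patches above is the core of the xView geoJSON label conversion. Below is a minimal NumPy-only sketch of that conversion for illustration; `xyxy2xywhn_sketch` is a standalone rewrite with assumed example values, not the repo function itself:

```python
import numpy as np

def xyxy2xywhn_sketch(x, w=640, h=640, clip=False):
    # x: nx4 float array of [x1, y1, x2, y2] pixel boxes
    if clip:
        x[:, [0, 2]] = x[:, [0, 2]].clip(0, w)  # clip x1, x2 in place, as the diff's warning comment notes
        x[:, [1, 3]] = x[:, [1, 3]].clip(0, h)  # clip y1, y2 in place
    y = np.copy(x)
    y[:, 0] = ((x[:, 0] + x[:, 2]) / 2) / w  # normalized x center
    y[:, 1] = ((x[:, 1] + x[:, 3]) / 2) / h  # normalized y center
    y[:, 2] = (x[:, 2] - x[:, 0]) / w  # normalized width
    y[:, 3] = (x[:, 3] - x[:, 1]) / h  # normalized height
    return y

box = np.array([[-10.0, 20.0, 330.0, 310.0]])  # one slightly out-of-bounds box on a 320x320 image
print(xyxy2xywhn_sketch(box, w=320, h=320, clip=True))  # -> [[0.5, 0.515625, 1.0, 0.90625]]
```

Clipping before normalizing is what keeps the converted xView labels inside [0, 1] even when a `bounds_imcoords` box spills past the image edge.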
From 157aa2f88696061348d60a1d5019223a2126e258 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 26 Jun 2021 14:45:53 +0200 Subject: [PATCH 0356/1976] Objectness IoU Sort (#3610) Co-authored-by: U-LAPTOP-5N89P8V7\banhu --- utils/loss.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/utils/loss.py b/utils/loss.py index 9e78df17fdf3..576c1c79e6f8 100644 --- a/utils/loss.py +++ b/utils/loss.py @@ -133,7 +133,10 @@ def __call__(self, p, targets): # predictions, targets, model lbox += (1.0 - iou).mean() # iou loss # Objectness - tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * iou.detach().clamp(0).type(tobj.dtype) # iou ratio + score_iou = iou.detach().clamp(0).type(tobj.dtype) + sort_id = torch.argsort(score_iou) + b, a, gj, gi, score_iou = b[sort_id], a[sort_id], gj[sort_id], gi[sort_id], score_iou[sort_id] + tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * score_iou # iou ratio # Classification if self.nc > 1: # cls loss (only if multiple classes) From 8035b61682cba7b10be24b0ab35cc0295f14d6cd Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 26 Jun 2021 14:52:18 +0200 Subject: [PATCH 0357/1976] Update objectness IoU sort (#3786) --- utils/loss.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/utils/loss.py b/utils/loss.py index 576c1c79e6f8..d4c261a5cc97 100644 --- a/utils/loss.py +++ b/utils/loss.py @@ -89,6 +89,7 @@ class ComputeLoss: # Compute losses def __init__(self, model, autobalance=False): super(ComputeLoss, self).__init__() + self.sort_obj_iou = False device = next(model.parameters()).device # get model device h = model.hyp # hyperparameters @@ -134,8 +135,9 @@ def __call__(self, p, targets): # predictions, targets, model # Objectness score_iou = iou.detach().clamp(0).type(tobj.dtype) - sort_id = torch.argsort(score_iou) - b, a, gj, gi, score_iou = b[sort_id], a[sort_id], gj[sort_id], gi[sort_id], score_iou[sort_id] + if self.sort_obj_iou: + sort_id = torch.argsort(score_iou) + b, a, gj, gi, score_iou = b[sort_id], a[sort_id], gj[sort_id], gi[sort_id], score_iou[sort_id] tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * score_iou # iou ratio # Classification From bd581b330bb33136653613e8c97d3478f0beaf1c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 26 Jun 2021 15:13:06 +0200 Subject: [PATCH 0358/1976] Create hyp.scratch-p6.yaml (#3787) --- data/hyps/hyp.scratch-p6.yaml | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) create mode 100644 data/hyps/hyp.scratch-p6.yaml diff --git a/data/hyps/hyp.scratch-p6.yaml b/data/hyps/hyp.scratch-p6.yaml new file mode 100644 index 000000000000..faf565423968 --- /dev/null +++ b/data/hyps/hyp.scratch-p6.yaml @@ -0,0 +1,33 @@ +# Hyperparameters for COCO training from scratch +# python train.py --batch 32 --cfg yolov5m6.yaml --weights '' --data coco.yaml --img 1280 --epochs 300 +# See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials + + +lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3) +lrf: 0.2 # final OneCycleLR learning rate (lr0 * lrf) +momentum: 0.937 # SGD momentum/Adam beta1 +weight_decay: 0.0005 # optimizer weight decay 5e-4 +warmup_epochs: 3.0 # warmup epochs (fractions ok) +warmup_momentum: 0.8 # warmup initial momentum +warmup_bias_lr: 0.1 # warmup initial bias lr +box: 0.05 # box loss gain +cls: 0.3 # cls loss gain +cls_pw: 1.0 # cls BCELoss positive_weight +obj: 0.7 # obj loss gain (scale with pixels) +obj_pw: 1.0 # obj BCELoss positive_weight +iou_t: 0.20 # IoU training threshold +anchor_t: 4.0 # 
anchor-multiple threshold +# anchors: 3 # anchors per output layer (0 to ignore) +fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5) +hsv_h: 0.015 # image HSV-Hue augmentation (fraction) +hsv_s: 0.7 # image HSV-Saturation augmentation (fraction) +hsv_v: 0.4 # image HSV-Value augmentation (fraction) +degrees: 0.0 # image rotation (+/- deg) +translate: 0.1 # image translation (+/- fraction) +scale: 0.9 # image scale (+/- gain) +shear: 0.0 # image shear (+/- deg) +perspective: 0.0 # image perspective (+/- fraction), range 0-0.001 +flipud: 0.0 # image flip up-down (probability) +fliplr: 0.5 # image flip left-right (probability) +mosaic: 1.0 # image mosaic (probability) +mixup: 0.0 # image mixup (probability) From 8e7f285051ed394acaf561767306237a41f0642d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 26 Jun 2021 15:29:16 +0200 Subject: [PATCH 0359/1976] Fix datasets for aws and get_coco.sh (#3788) * merge master * Update get_coco.sh --- data/scripts/get_coco.sh | 4 ++-- utils/aws/userdata.sh | 1 - 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/data/scripts/get_coco.sh b/data/scripts/get_coco.sh index caae37504780..bce692c29ae2 100755 --- a/data/scripts/get_coco.sh +++ b/data/scripts/get_coco.sh @@ -8,14 +8,14 @@ # /yolov5 # Download/unzip labels -d='../' # unzip directory +d='../datasets' # unzip directory url=https://github.com/ultralytics/yolov5/releases/download/v1.0/ f='coco2017labels.zip' # or 'coco2017labels-segments.zip', 68 MB echo 'Downloading' $url$f ' ...' curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & # download, unzip, remove in background # Download/unzip images -d='../coco/images' # unzip directory +d='../datasets/coco/images' # unzip directory url=http://images.cocodataset.org/zips/ f1='train2017.zip' # 19G, 118k images f2='val2017.zip' # 1G, 5k images diff --git a/utils/aws/userdata.sh b/utils/aws/userdata.sh index 52c0fe33d90f..5fc1332ac1b0 100644 --- a/utils/aws/userdata.sh +++ b/utils/aws/userdata.sh @@ -10,7 +10,6 @@ if [ ! -d yolov5 ]; then git clone https://github.com/ultralytics/yolov5 -b master && sudo chmod -R 777 yolov5 cd yolov5 bash data/scripts/get_coco.sh && echo "COCO done." & - bash data/scripts/get_voc.sh && echo "VOC done." & sudo docker pull ultralytics/yolov5:latest && echo "Docker done." & python -m pip install --upgrade pip && pip install -r requirements.txt && python detect.py && echo "Requirements done." & wait && echo "All tasks done." # finish background tasks From 92d49fde354126405b866c69b055fb9156115d55 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 26 Jun 2021 15:42:40 +0200 Subject: [PATCH 0360/1976] Update seeds for single-GPU reproducibility (#3789) For seed=0 on single-GPU. --- train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/train.py b/train.py index 3390e838803a..9ac12b12aacf 100644 --- a/train.py +++ b/train.py @@ -84,7 +84,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Configure plots = not evolve # create plots cuda = device.type != 'cpu' - init_seeds(2 + RANK) + init_seeds(1 + RANK) with open(data) as f: data_dict = yaml.safe_load(f) # data dict From 07166ba38cd8b15a48bad33c8f4455236304eb18 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 26 Jun 2021 16:09:56 +0200 Subject: [PATCH 0361/1976] Update Usage examples (#3790) --- Dockerfile | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/Dockerfile b/Dockerfile index d32e3960046b..eca690003fbd 100644 --- a/Dockerfile +++ b/Dockerfile @@ -22,17 +22,16 @@ COPY . 
/usr/src/app ENV HOME=/usr/src/app -# --------------------------------------------------- Extras Below --------------------------------------------------- +# Usage Examples ------------------------------------------------------------------------------------------------------- # Build and Push # t=ultralytics/yolov5:latest && sudo docker build -t $t . && sudo docker push $t -# for v in {300..303}; do t=ultralytics/coco:v$v && sudo docker build -t $t . && sudo docker push $t; done # Pull and Run # t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all $t # Pull and Run with local directory access -# t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all -v "$(pwd)"/coco:/usr/src/coco $t +# t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all -v "$(pwd)"/datasets:/usr/src/datasets $t # Kill all # sudo docker kill $(sudo docker ps -q) From 96c87f1711e3baa396ea4ec9ca6d390fd76013bb Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 26 Jun 2021 16:28:06 +0200 Subject: [PATCH 0362/1976] nvcr.io/nvidia/pytorch:21.06-py3 (#3791) --- Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index eca690003fbd..4754801b5b56 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # Start FROM Nvidia PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch -FROM nvcr.io/nvidia/pytorch:21.05-py3 +FROM nvcr.io/nvidia/pytorch:21.06-py3 # Install linux packages RUN apt update && apt install -y zip htop screen libgl1-mesa-glx @@ -9,7 +9,7 @@ COPY requirements.txt . RUN python -m pip install --upgrade pip RUN pip uninstall -y nvidia-tensorboard nvidia-tensorboard-plugin-dlprof RUN pip install --no-cache -r requirements.txt coremltools onnx gsutil notebook -RUN pip install --no-cache -U torch torchvision +RUN pip install --no-cache torch==1.9.0+cu111 torchvision==0.10.0+cu111 -f https://download.pytorch.org/whl/torch_stable.html # Create working directory RUN mkdir -p /usr/src/app From 5e6886c8605cf9f9687866b6ef766aabbdacf580 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 26 Jun 2021 16:51:17 +0200 Subject: [PATCH 0363/1976] Update Dockerfile (#3792) --- Dockerfile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 4754801b5b56..a5b9da06a3d5 100644 --- a/Dockerfile +++ b/Dockerfile @@ -9,7 +9,8 @@ COPY requirements.txt . 
RUN python -m pip install --upgrade pip RUN pip uninstall -y nvidia-tensorboard nvidia-tensorboard-plugin-dlprof RUN pip install --no-cache -r requirements.txt coremltools onnx gsutil notebook -RUN pip install --no-cache torch==1.9.0+cu111 torchvision==0.10.0+cu111 -f https://download.pytorch.org/whl/torch_stable.html +RUN pip install --no-cache -U torch torchvision numpy +# RUN pip install --no-cache torch==1.9.0+cu111 torchvision==0.10.0+cu111 -f https://download.pytorch.org/whl/torch_stable.html # Create working directory RUN mkdir -p /usr/src/app From 47543f97b1e5602600529aef48dcdad878cbb73e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 26 Jun 2021 17:12:43 +0200 Subject: [PATCH 0364/1976] FROM nvcr.io/nvidia/pytorch:21.05-py3 (#3794) --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index a5b9da06a3d5..e22c1106f23d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # Start FROM Nvidia PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch -FROM nvcr.io/nvidia/pytorch:21.06-py3 +FROM nvcr.io/nvidia/pytorch:21.05-py3 # Install linux packages RUN apt update && apt install -y zip htop screen libgl1-mesa-glx From 5e976a274d2458869ae9743a93d5c2f67d6fa79a Mon Sep 17 00:00:00 2001 From: batrlatom Date: Sun, 27 Jun 2021 15:28:50 +0200 Subject: [PATCH 0365/1976] Fix competition link (#3799) * link to the competition repaired * Update README.md Co-authored-by: Glenn Jocher --- README.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index c26b3c264771..360afd2cd7e6 100755 --- a/README.md +++ b/README.md @@ -178,8 +178,10 @@ Get started in seconds with our verified environments and integrations, includin We are super excited about our first-ever Ultralytics YOLOv5 🚀 EXPORT Competition with **$10,000** in cash prizes! -

-[competition banner markup: 2 lines removed]
+[competition banner markup: 4 lines added, competition link corrected]

## Why YOLOv5
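The seed patch above changes `init_seeds(2 + RANK)` to `init_seeds(1 + RANK)` so that a single-GPU run (`RANK = -1`) lands exactly on seed 0, while each DDP rank still derives a distinct, deterministic seed. A hedged sketch of the idea; `init_seeds` is paraphrased here and the real helper in `utils.torch_utils` may differ in detail:

```python
import os
import random

import numpy as np
import torch

def init_seeds(seed=0):
    # Seed the Python, NumPy and PyTorch RNGs from one value (paraphrased helper)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)  # also seeds all CUDA devices

RANK = int(os.getenv('RANK', -1))  # -1 outside DDP, 0..world_size-1 inside
init_seeds(1 + RANK)  # single GPU -> seed 0; rank 0 -> seed 1; rank 1 -> seed 2; ...
```

Distinct per-rank seeds keep augmentation streams decorrelated across processes without giving up run-to-run reproducibility.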
From 3974d725b60308b46298b5f0a4cc4bb2c4b7807b Mon Sep 17 00:00:00 2001
From: yellowdolphin <42343818+yellowdolphin@users.noreply.github.com>
Date: Mon, 28 Jun 2021 12:25:13 +0200
Subject: [PATCH 0366/1976] Fix warmup `accumulate` (#3722)

* gradient accumulation during warmup in train.py

Context:
`accumulate` is the number of batches/gradients accumulated before calling the next optimizer.step().
During warmup, it is ramped up from 1 to the final value nbs / batch_size.
Although I have not seen this in other libraries, I like the idea.
During warmup, as grads are large, too large steps are more of an issue than gradient noise due to small steps.

The bug:
The condition to perform the opt step is wrong
> if ni % accumulate == 0:
This produces irregular step sizes if `accumulate` is not constant. It becomes relevant when batch_size is small and `accumulate` changes many times during warmup.

This demo also shows the proposed solution, to use a ">=" condition instead:
https://colab.research.google.com/drive/1MA2z2eCXYB_BC5UZqgXueqL_y1Tz_XVq?usp=sharing

Further, I propose not to restrict the number of warmup iterations to >= 1000. If the user changes hyp['warmup_epochs'], this causes unexpected behavior. Also, it makes evolution unstable if this parameter was to be optimized.

* replace last_opt_step tracking by do_step(ni)

* add docstrings

* move down nw

* Update train.py

* revert math import move

Co-authored-by: Glenn Jocher
---
 train.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/train.py b/train.py
index 9ac12b12aacf..257be065f641 100644
--- a/train.py
+++ b/train.py
@@ -270,6 +270,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary
 t0 = time.time()
 nw = max(round(hyp['warmup_epochs'] * nb), 1000) # number of warmup iterations, max(3 epochs, 1k iterations)
 # nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training
+ last_opt_step = -1
 maps = np.zeros(nc) # mAP per class
 results = (0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
 scheduler.last_epoch = start_epoch - 1 # do not move
@@ -344,12 +345,13 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary
 scaler.scale(loss).backward()

 # Optimize
- if ni % accumulate == 0:
+ if ni - last_opt_step >= accumulate:
 scaler.step(optimizer) # optimizer.step
 scaler.update()
 optimizer.zero_grad()
 if ema:
 ema.update(model)
+ last_opt_step = ni

 # Print
 if RANK in [-1, 0]:

From 20d45aa4f18d7df7148fc6dd69a9c0607003f004 Mon Sep 17 00:00:00 2001
From: Zigarss <32835472+Zigars@users.noreply.github.com>
Date: Mon, 28 Jun 2021 19:18:45 +0800
Subject: [PATCH 0367/1976] Add feature map visualization (#3804)

* Add feature map visualization

Add a feature_visualization function to visualize the intermediate feature maps of the model.
* Update yolo.py * remove boolean from forward and reorder if statement * remove print from forward * General cleanup * Indent * Update plots.py Co-authored-by: Glenn Jocher --- models/yolo.py | 6 +++++- utils/plots.py | 30 ++++++++++++++++++++++++++++-- 2 files changed, 33 insertions(+), 3 deletions(-) diff --git a/models/yolo.py b/models/yolo.py index 4a2514edd295..4c9456edd687 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -17,6 +17,7 @@ from models.experimental import * from utils.autoanchor import check_anchor_order from utils.general import make_divisible, check_file, set_logging +from utils.plots import feature_visualization from utils.torch_utils import time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \ select_device, copy_attr @@ -135,7 +136,7 @@ def forward_augment(self, x): y.append(yi) return torch.cat(y, 1), None # augmented inference, train - def forward_once(self, x, profile=False): + def forward_once(self, x, profile=False, feature_vis=False): y, dt = [], [] # outputs for m in self.model: if m.f != -1: # if not from previous layer @@ -153,6 +154,9 @@ def forward_once(self, x, profile=False): x = m(x) # run y.append(x if m.i in self.save else None) # save output + + if feature_vis and m.type == 'models.common.SPP': + feature_visualization(x, m.type, m.i) if profile: logger.info('%.1fms total' % sum(dt)) diff --git a/utils/plots.py b/utils/plots.py index 66a30918190e..36386371dbec 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -15,8 +15,9 @@ import torch import yaml from PIL import Image, ImageDraw, ImageFont +from torchvision import transforms -from utils.general import xywh2xyxy, xyxy2xywh +from utils.general import increment_path, xywh2xyxy, xyxy2xywh from utils.metrics import fitness # Settings @@ -299,7 +300,7 @@ def plot_labels(labels, names=(), save_dir=Path(''), loggers=None): matplotlib.use('svg') # faster ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel() y = ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8) - # [y[2].patches[i].set_color([x / 255 for x in colors(i)]) for i in range(nc)] # update colors bug #3195 + # [y[2].patches[i].set_color([x / 255 for x in colors(i)]) for i in range(nc)] # update colors bug #3195 ax[0].set_ylabel('instances') if 0 < len(names) < 30: ax[0].set_xticks(range(len(names))) @@ -445,3 +446,28 @@ def plot_results(start=0, stop=0, bucket='', id=(), labels=(), save_dir=''): ax[1].legend() fig.savefig(Path(save_dir) / 'results.png', dpi=200) + + +def feature_visualization(features, module_type, module_idx, n=64): + """ + features: Features to be visualized + module_type: Module type + module_idx: Module layer index within model + n: Maximum number of feature maps to plot + """ + project, name = 'runs/features', 'exp' + save_dir = increment_path(Path(project) / name) # increment run + save_dir.mkdir(parents=True, exist_ok=True) # make dir + + plt.figure(tight_layout=True) + blocks = torch.chunk(features, features.shape[1], dim=1) # block by channel dimension + n = min(n, len(blocks)) + for i in range(n): + feature = transforms.ToPILImage()(blocks[i].squeeze()) + ax = plt.subplot(int(math.sqrt(n)), int(math.sqrt(n)), i + 1) + ax.axis('off') + plt.imshow(feature) # cmap='gray' + + f = f"layer_{module_idx}_{module_type.split('.')[-1]}_features.png" + print(f'Saving {save_dir / f}...') + plt.savefig(save_dir / f, dpi=300) From 02719dde52a99f28603be383334028a7ab9f1e06 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 28 Jun 2021 13:48:14 +0200 Subject: [PATCH 
0368/1976] Update `feature_visualization()` (#3807) * Update `feature_visualization()` Only plot for data with height, width > 1 * cleanup * Cleanup --- utils/plots.py | 40 +++++++++++++++++++++------------------- 1 file changed, 21 insertions(+), 19 deletions(-) diff --git a/utils/plots.py b/utils/plots.py index 36386371dbec..4b6c63992ac7 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -448,26 +448,28 @@ def plot_results(start=0, stop=0, bucket='', id=(), labels=(), save_dir=''): fig.savefig(Path(save_dir) / 'results.png', dpi=200) -def feature_visualization(features, module_type, module_idx, n=64): +def feature_visualization(x, module_type, stage, n=64): """ - features: Features to be visualized + x: Features to be visualized module_type: Module type - module_idx: Module layer index within model + stage: Module stage within model n: Maximum number of feature maps to plot """ - project, name = 'runs/features', 'exp' - save_dir = increment_path(Path(project) / name) # increment run - save_dir.mkdir(parents=True, exist_ok=True) # make dir - - plt.figure(tight_layout=True) - blocks = torch.chunk(features, features.shape[1], dim=1) # block by channel dimension - n = min(n, len(blocks)) - for i in range(n): - feature = transforms.ToPILImage()(blocks[i].squeeze()) - ax = plt.subplot(int(math.sqrt(n)), int(math.sqrt(n)), i + 1) - ax.axis('off') - plt.imshow(feature) # cmap='gray' - - f = f"layer_{module_idx}_{module_type.split('.')[-1]}_features.png" - print(f'Saving {save_dir / f}...') - plt.savefig(save_dir / f, dpi=300) + batch, channels, height, width = x.shape # batch, channels, height, width + if height > 1 and width > 1: + project, name = 'runs/features', 'exp' + save_dir = increment_path(Path(project) / name) # increment run + save_dir.mkdir(parents=True, exist_ok=True) # make dir + + plt.figure(tight_layout=True) + blocks = torch.chunk(x, channels, dim=1) # block by channel dimension + n = min(n, len(blocks)) + for i in range(n): + feature = transforms.ToPILImage()(blocks[i].squeeze()) + ax = plt.subplot(int(math.sqrt(n)), int(math.sqrt(n)), i + 1) + ax.axis('off') + plt.imshow(feature) # cmap='gray' + + f = f"stage_{stage}_{module_type.split('.')[-1]}_features.png" + print(f'Saving {save_dir / f}...') + plt.savefig(save_dir / f, dpi=300) From 3213d8713f631072fd309bbe827d065a48160bb7 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 29 Jun 2021 12:44:59 +0200 Subject: [PATCH 0369/1976] Fix for `dataset_stats()` with updated data.yaml (#3819) @KalenMike --- utils/datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/datasets.py b/utils/datasets.py index 4658dc524be0..c2859a148106 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -1116,7 +1116,7 @@ def round_labels(labels): nc = data['nc'] # number of classes stats = {'nc': nc, 'names': data['names']} # statistics dictionary for split in 'train', 'val', 'test': - if split not in data: + if data.get(split) is None: stats[split] = None # i.e. 
no test set continue x = [] From 5ea771d93d3d75502959168a44de39de9f45af1b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 29 Jun 2021 13:18:13 +0200 Subject: [PATCH 0370/1976] Move IoU functions to metrics.py (#3820) --- utils/general.py | 80 +--------------------------------------------- utils/loss.py | 2 +- utils/metrics.py | 83 ++++++++++++++++++++++++++++++++++++++++++++++-- 3 files changed, 82 insertions(+), 83 deletions(-) diff --git a/utils/general.py b/utils/general.py index 83eb95744678..4606a8ec54f5 100755 --- a/utils/general.py +++ b/utils/general.py @@ -25,7 +25,7 @@ import yaml from utils.google_utils import gsutil_getsize -from utils.metrics import fitness +from utils.metrics import box_iou, fitness from utils.torch_utils import init_torch_seeds # Settings @@ -469,84 +469,6 @@ def clip_coords(boxes, img_shape): boxes[:, 3].clip(0, img_shape[0], out=boxes[:, 3]) # y2 -def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7): - # Returns the IoU of box1 to box2. box1 is 4, box2 is nx4 - box2 = box2.T - - # Get the coordinates of bounding boxes - if x1y1x2y2: # x1, y1, x2, y2 = box1 - b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] - b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] - else: # transform from xywh to xyxy - b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2 - b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2 - b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2 - b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2 - - # Intersection area - inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \ - (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0) - - # Union Area - w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps - w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps - union = w1 * h1 + w2 * h2 - inter + eps - - iou = inter / union - if GIoU or DIoU or CIoU: - cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width - ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height - if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1 - c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared - rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + - (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center distance squared - if DIoU: - return iou - rho2 / c2 # DIoU - elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 - v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2) - with torch.no_grad(): - alpha = v / (v - iou + (1 + eps)) - return iou - (rho2 / c2 + v * alpha) # CIoU - else: # GIoU https://arxiv.org/pdf/1902.09630.pdf - c_area = cw * ch + eps # convex area - return iou - (c_area - union) / c_area # GIoU - else: - return iou # IoU - - -def box_iou(box1, box2): - # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py - """ - Return intersection-over-union (Jaccard index) of boxes. - Both sets of boxes are expected to be in (x1, y1, x2, y2) format. 
- Arguments: - box1 (Tensor[N, 4]) - box2 (Tensor[M, 4]) - Returns: - iou (Tensor[N, M]): the NxM matrix containing the pairwise - IoU values for every element in boxes1 and boxes2 - """ - - def box_area(box): - # box = 4xn - return (box[2] - box[0]) * (box[3] - box[1]) - - area1 = box_area(box1.T) - area2 = box_area(box2.T) - - # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2) - inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2) - return inter / (area1[:, None] + area2 - inter) # iou = inter / (area1 + area2 - inter) - - -def wh_iou(wh1, wh2): - # Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2 - wh1 = wh1[:, None] # [N,1,2] - wh2 = wh2[None] # [1,M,2] - inter = torch.min(wh1, wh2).prod(2) # [N,M] - return inter / (wh1.prod(2) + wh2.prod(2) - inter) # iou = inter / (area1 + area2 - inter) - - def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False, labels=(), max_det=300): """Runs Non-Maximum Suppression (NMS) on inference results diff --git a/utils/loss.py b/utils/loss.py index d4c261a5cc97..88f57693307c 100644 --- a/utils/loss.py +++ b/utils/loss.py @@ -3,7 +3,7 @@ import torch import torch.nn as nn -from utils.general import bbox_iou +from utils.metrics import bbox_iou from utils.torch_utils import is_parallel diff --git a/utils/metrics.py b/utils/metrics.py index 8512197956e7..4f001c046285 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -1,5 +1,6 @@ # Model validation metrics +import math import warnings from pathlib import Path @@ -7,8 +8,6 @@ import numpy as np import torch -from . import general - def fitness(x): # Model fitness as a weighted combination of metrics @@ -128,7 +127,7 @@ def process_batch(self, detections, labels): detections = detections[detections[:, 4] > self.conf] gt_classes = labels[:, 0].int() detection_classes = detections[:, 5].int() - iou = general.box_iou(labels[:, 1:], detections[:, :4]) + iou = box_iou(labels[:, 1:], detections[:, :4]) x = torch.where(iou > self.iou_thres) if x[0].shape[0]: @@ -184,6 +183,84 @@ def print(self): print(' '.join(map(str, self.matrix[i]))) +def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7): + # Returns the IoU of box1 to box2. 
box1 is 4, box2 is nx4 + box2 = box2.T + + # Get the coordinates of bounding boxes + if x1y1x2y2: # x1, y1, x2, y2 = box1 + b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] + b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] + else: # transform from xywh to xyxy + b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2 + b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2 + b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2 + b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2 + + # Intersection area + inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \ + (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0) + + # Union Area + w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps + w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps + union = w1 * h1 + w2 * h2 - inter + eps + + iou = inter / union + if GIoU or DIoU or CIoU: + cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width + ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height + if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1 + c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared + rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center distance squared + if DIoU: + return iou - rho2 / c2 # DIoU + elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 + v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2) + with torch.no_grad(): + alpha = v / (v - iou + (1 + eps)) + return iou - (rho2 / c2 + v * alpha) # CIoU + else: # GIoU https://arxiv.org/pdf/1902.09630.pdf + c_area = cw * ch + eps # convex area + return iou - (c_area - union) / c_area # GIoU + else: + return iou # IoU + + +def box_iou(box1, box2): + # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py + """ + Return intersection-over-union (Jaccard index) of boxes. + Both sets of boxes are expected to be in (x1, y1, x2, y2) format. + Arguments: + box1 (Tensor[N, 4]) + box2 (Tensor[M, 4]) + Returns: + iou (Tensor[N, M]): the NxM matrix containing the pairwise + IoU values for every element in boxes1 and boxes2 + """ + + def box_area(box): + # box = 4xn + return (box[2] - box[0]) * (box[3] - box[1]) + + area1 = box_area(box1.T) + area2 = box_area(box2.T) + + # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2) + inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2) + return inter / (area1[:, None] + area2 - inter) # iou = inter / (area1 + area2 - inter) + + +def wh_iou(wh1, wh2): + # Returns the nxm IoU matrix. 
wh1 is nx2, wh2 is mx2 + wh1 = wh1[:, None] # [N,1,2] + wh2 = wh2[None] # [1,M,2] + inter = torch.min(wh1, wh2).prod(2) # [N,M] + return inter / (wh1.prod(2) + wh2.prod(2) - inter) # iou = inter / (area1 + area2 - inter) + + # Plots ---------------------------------------------------------------------------------------------------------------- def plot_pr_curve(px, py, ap, save_dir='pr_curve.png', names=()): From 57c5d02bbed8ad16b4ac3f8903d106e978448431 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 29 Jun 2021 16:03:10 +0200 Subject: [PATCH 0371/1976] Concise `TransformerBlock()` (#3821) --- models/common.py | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/models/common.py b/models/common.py index 4211db406c3d..96d63a07a1b1 100644 --- a/models/common.py +++ b/models/common.py @@ -77,18 +77,8 @@ def forward(self, x): if self.conv is not None: x = self.conv(x) b, _, w, h = x.shape - p = x.flatten(2) - p = p.unsqueeze(0) - p = p.transpose(0, 3) - p = p.squeeze(3) - e = self.linear(p) - x = p + e - - x = self.tr(x) - x = x.unsqueeze(3) - x = x.transpose(0, 3) - x = x.reshape(b, self.c2, w, h) - return x + p = x.flatten(2).unsqueeze(0).transpose(0, 3).squeeze(3) + return self.tr(p + self.linear(p)).unsqueeze(3).transpose(0, 3).reshape(b, self.c2, w, h) class Bottleneck(nn.Module): From 7d6af6963883a87f63f63b00c6ab56cc4a4a1db6 Mon Sep 17 00:00:00 2001 From: Feras Oughali <47706157+feras-oughali@users.noreply.github.com> Date: Wed, 30 Jun 2021 13:11:29 +0300 Subject: [PATCH 0372/1976] Fix `LoadStreams()` dataloader frame skip issue (#3833) * Update datasets.py to read every 4th frame of streams * Update datasets.py Co-authored-by: Glenn Jocher --- utils/datasets.py | 31 +++++++------------------------ 1 file changed, 7 insertions(+), 24 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index c2859a148106..5baf9c5b1906 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -4,7 +4,6 @@ import hashlib import json import logging -import math import os import random import shutil @@ -15,6 +14,7 @@ from threading import Thread import cv2 +import math import numpy as np import torch import torch.nn.functional as F @@ -210,15 +210,8 @@ class LoadWebcam: # for inference def __init__(self, pipe='0', img_size=640, stride=32): self.img_size = img_size self.stride = stride - - if pipe.isnumeric(): - pipe = eval(pipe) # local camera - # pipe = 'rtsp://192.168.1.64/1' # IP camera - # pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login - # pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera - - self.pipe = pipe - self.cap = cv2.VideoCapture(pipe) # video capture object + self.pipe = eval(pipe) if pipe.isnumeric() else pipe + self.cap = cv2.VideoCapture(self.pipe) # video capture object self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size def __iter__(self): @@ -233,18 +226,8 @@ def __next__(self): raise StopIteration # Read frame - if self.pipe == 0: # local camera - ret_val, img0 = self.cap.read() - img0 = cv2.flip(img0, 1) # flip left-right - else: # IP camera - n = 0 - while True: - n += 1 - self.cap.grab() - if n % 30 == 0: # skip frames - ret_val, img0 = self.cap.retrieve() - if ret_val: - break + ret_val, img0 = self.cap.read() + img0 = cv2.flip(img0, 1) # flip left-right # Print assert ret_val, f'Camera Error {self.pipe}' @@ -308,12 +291,12 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32): def update(self, i, cap): # Read stream `i` frames in daemon thread - n, f = 0, 
self.frames[i] + n, f, read = 0, self.frames[i], 1 # frame number, frame array, inference every 'read' frame while cap.isOpened() and n < f: n += 1 # _, self.imgs[index] = cap.read() cap.grab() - if n % 4: # read every 4th frame + if n % read == 0: success, im = cap.retrieve() self.imgs[i] = im if success else self.imgs[i] * 0 time.sleep(1 / self.fps[i]) # wait time From 25d1f2932c37a0b7cf7bf32e8cdcfb14dd5d3657 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 30 Jun 2021 15:10:40 +0200 Subject: [PATCH 0373/1976] Plot `AutoShape()` detections in ascending order (#3843) --- models/common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/common.py b/models/common.py index 96d63a07a1b1..5ffb8440b60f 100644 --- a/models/common.py +++ b/models/common.py @@ -311,7 +311,7 @@ def display(self, pprint=False, show=False, save=False, crop=False, render=False n = (pred[:, -1] == c).sum() # detections per class str += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string if show or save or render or crop: - for *box, conf, cls in pred: # xyxy, confidence, class + for *box, conf, cls in reversed(pred): # xyxy, confidence, class label = f'{self.names[int(cls)]} {conf:.2f}' if crop: save_one_box(box, im, file=save_dir / 'crops' / self.names[int(cls)] / self.files[i]) From c6c88dc601fbdbe4e3391ba14245ec2740b5d01a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 1 Jul 2021 00:35:04 +0200 Subject: [PATCH 0374/1976] Copy-Paste augmentation for YOLOv5 (#3845) * Copy-paste augmentation initial commit * if any segments * Add obscuration rejection * Add copy_paste hyperparameter * Update comments --- data/hyps/hyp.finetune.yaml | 1 + data/hyps/hyp.finetune_objects365.yaml | 1 + data/hyps/hyp.scratch-p6.yaml | 1 + data/hyps/hyp.scratch.yaml | 1 + train.py | 5 +-- utils/datasets.py | 44 +++++++++++++++----------- utils/metrics.py | 26 ++++++++++++++- 7 files changed, 58 insertions(+), 21 deletions(-) diff --git a/data/hyps/hyp.finetune.yaml b/data/hyps/hyp.finetune.yaml index a77597741356..237cd5bc19a1 100644 --- a/data/hyps/hyp.finetune.yaml +++ b/data/hyps/hyp.finetune.yaml @@ -36,3 +36,4 @@ flipud: 0.00856 fliplr: 0.5 mosaic: 1.0 mixup: 0.243 +copy_paste: 0.0 diff --git a/data/hyps/hyp.finetune_objects365.yaml b/data/hyps/hyp.finetune_objects365.yaml index 2b104ef2d9bf..435fa7a45119 100644 --- a/data/hyps/hyp.finetune_objects365.yaml +++ b/data/hyps/hyp.finetune_objects365.yaml @@ -26,3 +26,4 @@ flipud: 0.0 fliplr: 0.5 mosaic: 1.0 mixup: 0.0 +copy_paste: 0.0 diff --git a/data/hyps/hyp.scratch-p6.yaml b/data/hyps/hyp.scratch-p6.yaml index faf565423968..fc1d8ebe0876 100644 --- a/data/hyps/hyp.scratch-p6.yaml +++ b/data/hyps/hyp.scratch-p6.yaml @@ -31,3 +31,4 @@ flipud: 0.0 # image flip up-down (probability) fliplr: 0.5 # image flip left-right (probability) mosaic: 1.0 # image mosaic (probability) mixup: 0.0 # image mixup (probability) +copy_paste: 0.0 # segment copy-paste (probability) diff --git a/data/hyps/hyp.scratch.yaml b/data/hyps/hyp.scratch.yaml index 44f26b6658ae..b2cf2e32c638 100644 --- a/data/hyps/hyp.scratch.yaml +++ b/data/hyps/hyp.scratch.yaml @@ -31,3 +31,4 @@ flipud: 0.0 # image flip up-down (probability) fliplr: 0.5 # image flip left-right (probability) mosaic: 1.0 # image mosaic (probability) mixup: 0.0 # image mixup (probability) +copy_paste: 0.0 # segment copy-paste (probability) diff --git a/train.py b/train.py index 257be065f641..386f3d90dd73 100644 --- a/train.py +++ b/train.py @@ -6,7 +6,6 @@ import argparse import logging -import math import os 
import random import sys @@ -16,6 +15,7 @@ from pathlib import Path from threading import Thread +import math import numpy as np import torch.distributed as dist import torch.nn as nn @@ -591,7 +591,8 @@ def main(opt): 'flipud': (1, 0.0, 1.0), # image flip up-down (probability) 'fliplr': (0, 0.0, 1.0), # image flip left-right (probability) 'mosaic': (1, 0.0, 1.0), # image mixup (probability) - 'mixup': (1, 0.0, 1.0)} # image mixup (probability) + 'mixup': (1, 0.0, 1.0), # image mixup (probability) + 'copy_paste': (1, 0.0, 1.0)} # segment copy-paste (probability) with open(opt.hyp) as f: hyp = yaml.safe_load(f) # load hyps dict diff --git a/utils/datasets.py b/utils/datasets.py index 5baf9c5b1906..55f046cd56db 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -25,6 +25,7 @@ from utils.general import check_requirements, check_file, check_dataset, xywh2xyxy, xywhn2xyxy, xyxy2xywhn, \ xyn2xy, segment2box, segments2boxes, resample_segments, clean_str +from utils.metrics import bbox_ioa from utils.torch_utils import torch_distributed_zero_first # Parameters @@ -683,6 +684,7 @@ def load_mosaic(self, index): # img4, labels4 = replicate(img4, labels4) # replicate # Augment + img4, labels4, segments4 = copy_paste(img4, labels4, segments4, probability=self.hyp['copy_paste']) img4, labels4 = random_perspective(img4, labels4, segments4, degrees=self.hyp['degrees'], translate=self.hyp['translate'], @@ -907,6 +909,30 @@ def random_perspective(img, targets=(), segments=(), degrees=10, translate=.1, s return img, targets +def copy_paste(img, labels, segments, probability=0.5): + # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy) + n = len(segments) + if probability and n: + h, w, c = img.shape # height, width, channels + im_new = np.zeros(img.shape, np.uint8) + for j in random.sample(range(n), k=round(probability * n)): + l, s = labels[j], segments[j] + box = w - l[3], l[2], w - l[1], l[4] + ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area + if (ioa < 0.30).all(): # allow 30% obscuration of existing labels + labels = np.concatenate((labels, [[l[0], *box]]), 0) + segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1)) + cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED) + + result = cv2.bitwise_and(src1=img, src2=im_new) + result = cv2.flip(result, 1) # augment segments (flip left-right) + i = result > 0 # pixels to replace + # i[:, :] = result.max(2).reshape(h, w, 1) # act over ch + img[i] = result[i] # cv2.imwrite('debug.jpg', img) # debug + + return img, labels, segments + + def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n) # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio w1, h1 = box1[2] - box1[0], box1[3] - box1[1] @@ -919,24 +945,6 @@ def cutout(image, labels): # Applies image cutout augmentation https://arxiv.org/abs/1708.04552 h, w = image.shape[:2] - def bbox_ioa(box1, box2): - # Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. 
boxes are x1y1x2y2 - box2 = box2.transpose() - - # Get the coordinates of bounding boxes - b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] - b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] - - # Intersection area - inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \ - (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0) - - # box2 area - box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16 - - # Intersection over box2 area - return inter_area / box2_area - # create random masks scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction for s in scales: diff --git a/utils/metrics.py b/utils/metrics.py index 4f001c046285..c94c4a76a964 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -1,9 +1,9 @@ # Model validation metrics -import math import warnings from pathlib import Path +import math import matplotlib.pyplot as plt import numpy as np import torch @@ -253,6 +253,30 @@ def box_area(box): return inter / (area1[:, None] + area2 - inter) # iou = inter / (area1 + area2 - inter) +def bbox_ioa(box1, box2, eps=1E-7): + """ Returns the intersection over box2 area given box1, box2. Boxes are x1y1x2y2 + box1: np.array of shape(4) + box2: np.array of shape(nx4) + returns: np.array of shape(n) + """ + + box2 = box2.transpose() + + # Get the coordinates of bounding boxes + b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] + b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] + + # Intersection area + inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \ + (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0) + + # box2 area + box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + eps + + # Intersection over box2 area + return inter_area / box2_area + + def wh_iou(wh1, wh2): # Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2 wh1 = wh1[:, None] # [N,1,2] From b6863385b571998984cc782d4d50ed34b9f631d0 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 2 Jul 2021 12:23:09 +0200 Subject: [PATCH 0375/1976] Created using Colaboratory --- tutorial.ipynb | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index d136803659fb..f2e35b3db12e 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -1074,7 +1074,7 @@ "id": "7KN5ghjE6ZWh" }, "source": [ - "Training losses and performance metrics are also logged to [Tensorboard](https://www.tensorflow.org/tensorboard) and a custom `results.txt` logfile which is plotted as `results.png` (below) after training completes. Here we show YOLOv5s trained on COCO128 to 300 epochs, starting from scratch (blue), and from pretrained `--weights yolov5s.pt` (orange)." + "Training results are automatically logged to [Tensorboard](https://www.tensorflow.org/tensorboard) and `runs/train/exp/results.txt`, which is plotted as `results.png` (below) after training completes. You can also plot any `results.txt` file manually:" ] }, { @@ -1084,7 +1084,7 @@ }, "source": [ "from utils.plots import plot_results \n", - "plot_results(save_dir='runs/train/exp') # plot all results*.txt as results.png\n", + "plot_results(save_dir='runs/train/exp') # plot all results*.txt files in 'runs/train/exp'\n", "Image(filename='runs/train/exp/results.png', width=800)" ], "execution_count": null, @@ -1096,7 +1096,7 @@ "id": "lfrEegCSW3fK" }, "source": [ - "\n" + "

\"COCO128

" ] }, { From 4717a3b0380c540210a60c38a20442e8884d5459 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 2 Jul 2021 12:24:27 +0200 Subject: [PATCH 0376/1976] Created using Colaboratory --- tutorial.ipynb | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index f2e35b3db12e..76e02e95a29d 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -1139,20 +1139,6 @@ "Optional extras below. Unit tests validate repo functionality and should be run on any PRs submitted.\n" ] }, - { - "cell_type": "code", - "metadata": { - "id": "gI6NoBev8Ib1" - }, - "source": [ - "# Re-clone repo\n", - "%cd ..\n", - "%rm -rf yolov5 && git clone https://github.com/ultralytics/yolov5\n", - "%cd yolov5" - ], - "execution_count": null, - "outputs": [] - }, { "cell_type": "code", "metadata": { From 831773f5a23926658ee76459ce37550643432123 Mon Sep 17 00:00:00 2001 From: Valentin Aliferov Date: Fri, 2 Jul 2021 14:25:54 +0300 Subject: [PATCH 0377/1976] Add EXIF rotation to YOLOv5 Hub inference (#3852) * rotating an image according to its exif tag * Update common.py * Update datasets.py * Update datasets.py faster * delete extraneous gpg file * Update common.py Co-authored-by: Glenn Jocher --- models/common.py | 9 +++++---- utils/datasets.py | 26 ++++++++++++++++++++++++++ 2 files changed, 31 insertions(+), 4 deletions(-) diff --git a/models/common.py b/models/common.py index 5ffb8440b60f..9911b207d060 100644 --- a/models/common.py +++ b/models/common.py @@ -1,9 +1,9 @@ # YOLOv5 common modules -import math from copy import copy from pathlib import Path +import math import numpy as np import pandas as pd import requests @@ -12,7 +12,7 @@ from PIL import Image from torch.cuda import amp -from utils.datasets import letterbox +from utils.datasets import exif_transpose, letterbox from utils.general import non_max_suppression, make_divisible, scale_coords, increment_path, xyxy2xywh, save_one_box from utils.plots import colors, plot_one_box from utils.torch_utils import time_synchronized @@ -252,9 +252,10 @@ def forward(self, imgs, size=640, augment=False, profile=False): for i, im in enumerate(imgs): f = f'image{i}' # filename if isinstance(im, str): # filename or uri - im, f = np.asarray(Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im)), im + im, f = Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im), im + im = np.asarray(exif_transpose(im)) elif isinstance(im, Image.Image): # PIL Image - im, f = np.asarray(im), getattr(im, 'filename', f) or f + im, f = np.asarray(exif_transpose(im)), getattr(im, 'filename') or f files.append(Path(f).with_suffix('.jpg').name) if im.shape[0] < 5: # image in CHW im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1) diff --git a/utils/datasets.py b/utils/datasets.py index 55f046cd56db..f7315522e375 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -64,6 +64,32 @@ def exif_size(img): return s +def exif_transpose(image): + """ + Transpose a PIL image accordingly if it has an EXIF Orientation tag. + From https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py + + :param image: The image to transpose. + :return: An image. 
+ """ + exif = image.getexif() + orientation = exif.get(0x0112, 1) # default 1 + if orientation > 1: + method = {2: Image.FLIP_LEFT_RIGHT, + 3: Image.ROTATE_180, + 4: Image.FLIP_TOP_BOTTOM, + 5: Image.TRANSPOSE, + 6: Image.ROTATE_270, + 7: Image.TRANSVERSE, + 8: Image.ROTATE_90, + }.get(orientation) + if method is not None: + image = image.transpose(method) + del exif[0x0112] + image.info["exif"] = exif.tobytes() + return image + + def create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=None, augment=False, cache=False, pad=0.0, rect=False, rank=-1, workers=8, image_weights=False, quad=False, prefix=''): # Make sure only the first process in DDP process the dataset first, and the following others can use the cache From d3e9d69850b2b910e0ea5e4ffba372a241fede5a Mon Sep 17 00:00:00 2001 From: san-soucie <44901782+san-soucie@users.noreply.github.com> Date: Sun, 4 Jul 2021 06:14:35 -0400 Subject: [PATCH 0378/1976] `--evolve 300` generations CLI argument (#3863) * evolve command accepts argument for number of generations * evolve generations argument used in evolve for loop * evolve argument boolean fixes * default to 300 evolve generations * Update train.py Co-authored-by: John San Soucie Co-authored-by: Glenn Jocher --- train.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/train.py b/train.py index 386f3d90dd73..2e864a60cefc 100644 --- a/train.py +++ b/train.py @@ -494,7 +494,7 @@ def parse_opt(known=False): parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') parser.add_argument('--notest', action='store_true', help='only test final epoch') parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check') - parser.add_argument('--evolve', action='store_true', help='evolve hyperparameters') + parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations') parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') parser.add_argument('--cache-images', action='store_true', help='cache images for faster training') parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training') @@ -542,7 +542,7 @@ def main(opt): assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified' opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size))) # extend to 2 sizes (train, test) opt.name = 'evolve' if opt.evolve else opt.name - opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok | opt.evolve)) + opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve)) # DDP mode device = select_device(opt.device, batch_size=opt.batch_size) @@ -603,7 +603,7 @@ def main(opt): if opt.bucket: os.system('gsutil cp gs://%s/evolve.txt .' % opt.bucket) # download evolve.txt if exists - for _ in range(300): # generations to evolve + for _ in range(opt.evolve): # generations to evolve if Path('evolve.txt').exists(): # if evolve.txt exists: select best hyps and mutate # Select parent(s) parent = 'single' # parent selection method: 'single' or 'weighted' From 9d86b54eb37ea9d2b4ae3cda0dfe8ab7aa16f2c6 Mon Sep 17 00:00:00 2001 From: ketan-b <54092325+ketan-b@users.noreply.github.com> Date: Sun, 4 Jul 2021 16:25:57 +0530 Subject: [PATCH 0379/1976] Add multi-stream saving feature (#3864) * Added the recording feature for multiple streams Thanks for the very cool repo!! 
I was trying to record multiple feeds at the same time, but the current version of the detector only had one video writer and one vid_path! So the streams were not being saved and only were initialized with one frame and this process didn't record the whole thing. Fix: I made a list of `vid_writer` and `vid_path` and the `i` from the loop over the `pred` took care of the writer which need to work! I hope this helps, Thanks! * Cleanup list lengths * batch size variable * Update datasets.py Co-authored-by: Glenn Jocher --- detect.py | 18 ++++++++++-------- utils/datasets.py | 2 +- 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/detect.py b/detect.py index 808f3584c93d..a4542f7e8802 100644 --- a/detect.py +++ b/detect.py @@ -76,14 +76,16 @@ def run(weights='yolov5s.pt', # model.pt path(s) modelc = load_classifier(name='resnet50', n=2) # initialize modelc.load_state_dict(torch.load('resnet50.pt', map_location=device)['model']).to(device).eval() - # Set Dataloader - vid_path, vid_writer = None, None + # Dataloader if webcam: view_img = check_imshow() cudnn.benchmark = True # set True to speed up constant image size inference dataset = LoadStreams(source, img_size=imgsz, stride=stride) + bs = len(dataset) # batch_size else: dataset = LoadImages(source, img_size=imgsz, stride=stride) + bs = 1 # batch_size + vid_path, vid_writer = [None] * bs, [None] * bs # Run inference if device.type != 'cpu': @@ -158,10 +160,10 @@ def run(weights='yolov5s.pt', # model.pt path(s) if dataset.mode == 'image': cv2.imwrite(save_path, im0) else: # 'video' or 'stream' - if vid_path != save_path: # new video - vid_path = save_path - if isinstance(vid_writer, cv2.VideoWriter): - vid_writer.release() # release previous video writer + if vid_path[i] != save_path: # new video + vid_path[i] = save_path + if isinstance(vid_writer[i], cv2.VideoWriter): + vid_writer[i].release() # release previous video writer if vid_cap: # video fps = vid_cap.get(cv2.CAP_PROP_FPS) w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) @@ -169,8 +171,8 @@ def run(weights='yolov5s.pt', # model.pt path(s) else: # stream fps, w, h = 30, im0.shape[1], im0.shape[0] save_path += '.mp4' - vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) - vid_writer.write(im0) + vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) + vid_writer[i].write(im0) if save_txt or save_img: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' diff --git a/utils/datasets.py b/utils/datasets.py index f7315522e375..8560f7cfeb88 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -352,7 +352,7 @@ def __next__(self): return self.sources, img, img0, None def __len__(self): - return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years + return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years def img2label_paths(img_paths): From bd88e7f4f2cfd7cca6893262da6d748ca23e2807 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 4 Jul 2021 15:55:23 +0200 Subject: [PATCH 0380/1976] Created using Colaboratory --- tutorial.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 76e02e95a29d..a87f787cca8e 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -680,7 +680,7 @@ "source": [ "# Download COCO val2017\n", "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017val.zip', 'tmp.zip')\n", - "!unzip -q tmp.zip -d ../ && rm tmp.zip" + 
"!unzip -q tmp.zip -d ../datasets && rm tmp.zip" ], "execution_count": null, "outputs": [ From 81b31824f550dfd5ba9322a114864d9843de0c75 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 4 Jul 2021 16:55:08 +0200 Subject: [PATCH 0381/1976] Models `*.yaml` reformat (#3875) --- models/hub/yolov3-spp.yaml | 68 ++++++++++++++--------------- models/hub/yolov3-tiny.yaml | 50 ++++++++++----------- models/hub/yolov3.yaml | 68 ++++++++++++++--------------- models/hub/yolov5-fpn.yaml | 50 ++++++++++----------- models/hub/yolov5-p2.yaml | 4 +- models/hub/yolov5-p6.yaml | 4 +- models/hub/yolov5-p7.yaml | 4 +- models/hub/yolov5-panet.yaml | 60 ++++++++++++------------- models/hub/yolov5l6.yaml | 4 +- models/hub/yolov5m6.yaml | 4 +- models/hub/yolov5s-transformer.yaml | 60 ++++++++++++------------- models/hub/yolov5s6.yaml | 4 +- models/hub/yolov5x6.yaml | 4 +- models/yolo.py | 2 +- models/yolov5l.yaml | 4 +- models/yolov5m.yaml | 4 +- models/yolov5s.yaml | 4 +- models/yolov5x.yaml | 4 +- 18 files changed, 184 insertions(+), 218 deletions(-) diff --git a/models/hub/yolov3-spp.yaml b/models/hub/yolov3-spp.yaml index 38dcc449f0d0..0ca7b7f6577b 100644 --- a/models/hub/yolov3-spp.yaml +++ b/models/hub/yolov3-spp.yaml @@ -1,51 +1,49 @@ -# parameters +# Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple - -# anchors anchors: - - [10,13, 16,30, 33,23] # P3/8 - - [30,61, 62,45, 59,119] # P4/16 - - [116,90, 156,198, 373,326] # P5/32 + - [ 10,13, 16,30, 33,23 ] # P3/8 + - [ 30,61, 62,45, 59,119 ] # P4/16 + - [ 116,90, 156,198, 373,326 ] # P5/32 # darknet53 backbone backbone: # [from, number, module, args] - [[-1, 1, Conv, [32, 3, 1]], # 0 - [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 - [-1, 1, Bottleneck, [64]], - [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 - [-1, 2, Bottleneck, [128]], - [-1, 1, Conv, [256, 3, 2]], # 5-P3/8 - [-1, 8, Bottleneck, [256]], - [-1, 1, Conv, [512, 3, 2]], # 7-P4/16 - [-1, 8, Bottleneck, [512]], - [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32 - [-1, 4, Bottleneck, [1024]], # 10 + [ [ -1, 1, Conv, [ 32, 3, 1 ] ], # 0 + [ -1, 1, Conv, [ 64, 3, 2 ] ], # 1-P1/2 + [ -1, 1, Bottleneck, [ 64 ] ], + [ -1, 1, Conv, [ 128, 3, 2 ] ], # 3-P2/4 + [ -1, 2, Bottleneck, [ 128 ] ], + [ -1, 1, Conv, [ 256, 3, 2 ] ], # 5-P3/8 + [ -1, 8, Bottleneck, [ 256 ] ], + [ -1, 1, Conv, [ 512, 3, 2 ] ], # 7-P4/16 + [ -1, 8, Bottleneck, [ 512 ] ], + [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P5/32 + [ -1, 4, Bottleneck, [ 1024 ] ], # 10 ] # YOLOv3-SPP head head: - [[-1, 1, Bottleneck, [1024, False]], - [-1, 1, SPP, [512, [5, 9, 13]]], - [-1, 1, Conv, [1024, 3, 1]], - [-1, 1, Conv, [512, 1, 1]], - [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large) + [ [ -1, 1, Bottleneck, [ 1024, False ] ], + [ -1, 1, SPP, [ 512, [ 5, 9, 13 ] ] ], + [ -1, 1, Conv, [ 1024, 3, 1 ] ], + [ -1, 1, Conv, [ 512, 1, 1 ] ], + [ -1, 1, Conv, [ 1024, 3, 1 ] ], # 15 (P5/32-large) - [-2, 1, Conv, [256, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 8], 1, Concat, [1]], # cat backbone P4 - [-1, 1, Bottleneck, [512, False]], - [-1, 1, Bottleneck, [512, False]], - [-1, 1, Conv, [256, 1, 1]], - [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium) + [ -2, 1, Conv, [ 256, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P4 + [ -1, 1, Bottleneck, [ 512, False ] ], + [ -1, 1, Bottleneck, [ 512, False ] ], + [ -1, 1, Conv, [ 256, 1, 1 ] ], + [ -1, 1, Conv, [ 512, 3, 1 ] ], # 22 (P4/16-medium) - [-2, 1, Conv, [128, 1, 1]], - [-1, 1, 
nn.Upsample, [None, 2, 'nearest']], - [[-1, 6], 1, Concat, [1]], # cat backbone P3 - [-1, 1, Bottleneck, [256, False]], - [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small) + [ -2, 1, Conv, [ 128, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P3 + [ -1, 1, Bottleneck, [ 256, False ] ], + [ -1, 2, Bottleneck, [ 256, False ] ], # 27 (P3/8-small) - [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + [ [ 27, 22, 15 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5) ] diff --git a/models/hub/yolov3-tiny.yaml b/models/hub/yolov3-tiny.yaml index ff7638cad3be..d39a6b1f581c 100644 --- a/models/hub/yolov3-tiny.yaml +++ b/models/hub/yolov3-tiny.yaml @@ -1,41 +1,39 @@ -# parameters +# Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple - -# anchors anchors: - - [10,14, 23,27, 37,58] # P4/16 - - [81,82, 135,169, 344,319] # P5/32 + - [ 10,14, 23,27, 37,58 ] # P4/16 + - [ 81,82, 135,169, 344,319 ] # P5/32 # YOLOv3-tiny backbone backbone: # [from, number, module, args] - [[-1, 1, Conv, [16, 3, 1]], # 0 - [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 1-P1/2 - [-1, 1, Conv, [32, 3, 1]], - [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 3-P2/4 - [-1, 1, Conv, [64, 3, 1]], - [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 5-P3/8 - [-1, 1, Conv, [128, 3, 1]], - [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 7-P4/16 - [-1, 1, Conv, [256, 3, 1]], - [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 9-P5/32 - [-1, 1, Conv, [512, 3, 1]], - [-1, 1, nn.ZeroPad2d, [[0, 1, 0, 1]]], # 11 - [-1, 1, nn.MaxPool2d, [2, 1, 0]], # 12 + [ [ -1, 1, Conv, [ 16, 3, 1 ] ], # 0 + [ -1, 1, nn.MaxPool2d, [ 2, 2, 0 ] ], # 1-P1/2 + [ -1, 1, Conv, [ 32, 3, 1 ] ], + [ -1, 1, nn.MaxPool2d, [ 2, 2, 0 ] ], # 3-P2/4 + [ -1, 1, Conv, [ 64, 3, 1 ] ], + [ -1, 1, nn.MaxPool2d, [ 2, 2, 0 ] ], # 5-P3/8 + [ -1, 1, Conv, [ 128, 3, 1 ] ], + [ -1, 1, nn.MaxPool2d, [ 2, 2, 0 ] ], # 7-P4/16 + [ -1, 1, Conv, [ 256, 3, 1 ] ], + [ -1, 1, nn.MaxPool2d, [ 2, 2, 0 ] ], # 9-P5/32 + [ -1, 1, Conv, [ 512, 3, 1 ] ], + [ -1, 1, nn.ZeroPad2d, [ [ 0, 1, 0, 1 ] ] ], # 11 + [ -1, 1, nn.MaxPool2d, [ 2, 1, 0 ] ], # 12 ] # YOLOv3-tiny head head: - [[-1, 1, Conv, [1024, 3, 1]], - [-1, 1, Conv, [256, 1, 1]], - [-1, 1, Conv, [512, 3, 1]], # 15 (P5/32-large) + [ [ -1, 1, Conv, [ 1024, 3, 1 ] ], + [ -1, 1, Conv, [ 256, 1, 1 ] ], + [ -1, 1, Conv, [ 512, 3, 1 ] ], # 15 (P5/32-large) - [-2, 1, Conv, [128, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 8], 1, Concat, [1]], # cat backbone P4 - [-1, 1, Conv, [256, 3, 1]], # 19 (P4/16-medium) + [ -2, 1, Conv, [ 128, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P4 + [ -1, 1, Conv, [ 256, 3, 1 ] ], # 19 (P4/16-medium) - [[19, 15], 1, Detect, [nc, anchors]], # Detect(P4, P5) + [ [ 19, 15 ], 1, Detect, [ nc, anchors ] ], # Detect(P4, P5) ] diff --git a/models/hub/yolov3.yaml b/models/hub/yolov3.yaml index f2e761355469..09df0d9ef362 100644 --- a/models/hub/yolov3.yaml +++ b/models/hub/yolov3.yaml @@ -1,51 +1,49 @@ -# parameters +# Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple - -# anchors anchors: - - [10,13, 16,30, 33,23] # P3/8 - - [30,61, 62,45, 59,119] # P4/16 - - [116,90, 156,198, 373,326] # P5/32 + - [ 10,13, 16,30, 33,23 ] # P3/8 + - [ 30,61, 62,45, 59,119 ] # P4/16 + - [ 116,90, 156,198, 373,326 ] # P5/32 # darknet53 backbone backbone: # [from, number, module, args] - [[-1, 1, 
Conv, [32, 3, 1]], # 0 - [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 - [-1, 1, Bottleneck, [64]], - [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 - [-1, 2, Bottleneck, [128]], - [-1, 1, Conv, [256, 3, 2]], # 5-P3/8 - [-1, 8, Bottleneck, [256]], - [-1, 1, Conv, [512, 3, 2]], # 7-P4/16 - [-1, 8, Bottleneck, [512]], - [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32 - [-1, 4, Bottleneck, [1024]], # 10 + [ [ -1, 1, Conv, [ 32, 3, 1 ] ], # 0 + [ -1, 1, Conv, [ 64, 3, 2 ] ], # 1-P1/2 + [ -1, 1, Bottleneck, [ 64 ] ], + [ -1, 1, Conv, [ 128, 3, 2 ] ], # 3-P2/4 + [ -1, 2, Bottleneck, [ 128 ] ], + [ -1, 1, Conv, [ 256, 3, 2 ] ], # 5-P3/8 + [ -1, 8, Bottleneck, [ 256 ] ], + [ -1, 1, Conv, [ 512, 3, 2 ] ], # 7-P4/16 + [ -1, 8, Bottleneck, [ 512 ] ], + [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P5/32 + [ -1, 4, Bottleneck, [ 1024 ] ], # 10 ] # YOLOv3 head head: - [[-1, 1, Bottleneck, [1024, False]], - [-1, 1, Conv, [512, [1, 1]]], - [-1, 1, Conv, [1024, 3, 1]], - [-1, 1, Conv, [512, 1, 1]], - [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large) + [ [ -1, 1, Bottleneck, [ 1024, False ] ], + [ -1, 1, Conv, [ 512, [ 1, 1 ] ] ], + [ -1, 1, Conv, [ 1024, 3, 1 ] ], + [ -1, 1, Conv, [ 512, 1, 1 ] ], + [ -1, 1, Conv, [ 1024, 3, 1 ] ], # 15 (P5/32-large) - [-2, 1, Conv, [256, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 8], 1, Concat, [1]], # cat backbone P4 - [-1, 1, Bottleneck, [512, False]], - [-1, 1, Bottleneck, [512, False]], - [-1, 1, Conv, [256, 1, 1]], - [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium) + [ -2, 1, Conv, [ 256, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P4 + [ -1, 1, Bottleneck, [ 512, False ] ], + [ -1, 1, Bottleneck, [ 512, False ] ], + [ -1, 1, Conv, [ 256, 1, 1 ] ], + [ -1, 1, Conv, [ 512, 3, 1 ] ], # 22 (P4/16-medium) - [-2, 1, Conv, [128, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 6], 1, Concat, [1]], # cat backbone P3 - [-1, 1, Bottleneck, [256, False]], - [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small) + [ -2, 1, Conv, [ 128, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P3 + [ -1, 1, Bottleneck, [ 256, False ] ], + [ -1, 2, Bottleneck, [ 256, False ] ], # 27 (P3/8-small) - [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + [ [ 27, 22, 15 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5) ] diff --git a/models/hub/yolov5-fpn.yaml b/models/hub/yolov5-fpn.yaml index e772bffecbbc..b8b7fc1a23d4 100644 --- a/models/hub/yolov5-fpn.yaml +++ b/models/hub/yolov5-fpn.yaml @@ -1,42 +1,40 @@ -# parameters +# Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple - -# anchors anchors: - - [10,13, 16,30, 33,23] # P3/8 - - [30,61, 62,45, 59,119] # P4/16 - - [116,90, 156,198, 373,326] # P5/32 + - [ 10,13, 16,30, 33,23 ] # P3/8 + - [ 30,61, 62,45, 59,119 ] # P4/16 + - [ 116,90, 156,198, 373,326 ] # P5/32 # YOLOv5 backbone backbone: # [from, number, module, args] - [[-1, 1, Focus, [64, 3]], # 0-P1/2 - [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 - [-1, 3, Bottleneck, [128]], - [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 - [-1, 9, BottleneckCSP, [256]], - [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 - [-1, 9, BottleneckCSP, [512]], - [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 - [-1, 1, SPP, [1024, [5, 9, 13]]], - [-1, 6, BottleneckCSP, [1024]], # 9 + [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 + [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 + [ -1, 3, Bottleneck, [ 128 ] ], + [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 
+ [ -1, 9, BottleneckCSP, [ 256 ] ], + [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 + [ -1, 9, BottleneckCSP, [ 512 ] ], + [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 7-P5/32 + [ -1, 1, SPP, [ 1024, [ 5, 9, 13 ] ] ], + [ -1, 6, BottleneckCSP, [ 1024 ] ], # 9 ] # YOLOv5 FPN head head: - [[-1, 3, BottleneckCSP, [1024, False]], # 10 (P5/32-large) + [ [ -1, 3, BottleneckCSP, [ 1024, False ] ], # 10 (P5/32-large) - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 6], 1, Concat, [1]], # cat backbone P4 - [-1, 1, Conv, [512, 1, 1]], - [-1, 3, BottleneckCSP, [512, False]], # 14 (P4/16-medium) + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 + [ -1, 1, Conv, [ 512, 1, 1 ] ], + [ -1, 3, BottleneckCSP, [ 512, False ] ], # 14 (P4/16-medium) - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 4], 1, Concat, [1]], # cat backbone P3 - [-1, 1, Conv, [256, 1, 1]], - [-1, 3, BottleneckCSP, [256, False]], # 18 (P3/8-small) + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 + [ -1, 1, Conv, [ 256, 1, 1 ] ], + [ -1, 3, BottleneckCSP, [ 256, False ] ], # 18 (P3/8-small) - [[18, 14, 10], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + [ [ 18, 14, 10 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5) ] diff --git a/models/hub/yolov5-p2.yaml b/models/hub/yolov5-p2.yaml index 0633a90fd065..62122363df2d 100644 --- a/models/hub/yolov5-p2.yaml +++ b/models/hub/yolov5-p2.yaml @@ -1,9 +1,7 @@ -# parameters +# Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple - -# anchors anchors: 3 # YOLOv5 backbone diff --git a/models/hub/yolov5-p6.yaml b/models/hub/yolov5-p6.yaml index 3728a118f090..c5ef5177f0c8 100644 --- a/models/hub/yolov5-p6.yaml +++ b/models/hub/yolov5-p6.yaml @@ -1,9 +1,7 @@ -# parameters +# Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple - -# anchors anchors: 3 # YOLOv5 backbone diff --git a/models/hub/yolov5-p7.yaml b/models/hub/yolov5-p7.yaml index ca8f8492ce0e..505c590ca168 100644 --- a/models/hub/yolov5-p7.yaml +++ b/models/hub/yolov5-p7.yaml @@ -1,9 +1,7 @@ -# parameters +# Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple - -# anchors anchors: 3 # YOLOv5 backbone diff --git a/models/hub/yolov5-panet.yaml b/models/hub/yolov5-panet.yaml index 340f95a4dbc9..aee5dab01fa1 100644 --- a/models/hub/yolov5-panet.yaml +++ b/models/hub/yolov5-panet.yaml @@ -1,48 +1,46 @@ -# parameters +# Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple - -# anchors anchors: - - [10,13, 16,30, 33,23] # P3/8 - - [30,61, 62,45, 59,119] # P4/16 - - [116,90, 156,198, 373,326] # P5/32 + - [ 10,13, 16,30, 33,23 ] # P3/8 + - [ 30,61, 62,45, 59,119 ] # P4/16 + - [ 116,90, 156,198, 373,326 ] # P5/32 # YOLOv5 backbone backbone: # [from, number, module, args] - [[-1, 1, Focus, [64, 3]], # 0-P1/2 - [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 - [-1, 3, BottleneckCSP, [128]], - [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 - [-1, 9, BottleneckCSP, [256]], - [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 - [-1, 9, BottleneckCSP, [512]], - [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 - [-1, 1, SPP, [1024, [5, 9, 13]]], - [-1, 3, BottleneckCSP, [1024, False]], # 9 + [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 + [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 + [ -1, 3, BottleneckCSP, [ 
128 ] ], + [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 + [ -1, 9, BottleneckCSP, [ 256 ] ], + [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 + [ -1, 9, BottleneckCSP, [ 512 ] ], + [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 7-P5/32 + [ -1, 1, SPP, [ 1024, [ 5, 9, 13 ] ] ], + [ -1, 3, BottleneckCSP, [ 1024, False ] ], # 9 ] # YOLOv5 PANet head head: - [[-1, 1, Conv, [512, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 6], 1, Concat, [1]], # cat backbone P4 - [-1, 3, BottleneckCSP, [512, False]], # 13 + [ [ -1, 1, Conv, [ 512, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 + [ -1, 3, BottleneckCSP, [ 512, False ] ], # 13 - [-1, 1, Conv, [256, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 4], 1, Concat, [1]], # cat backbone P3 - [-1, 3, BottleneckCSP, [256, False]], # 17 (P3/8-small) + [ -1, 1, Conv, [ 256, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 + [ -1, 3, BottleneckCSP, [ 256, False ] ], # 17 (P3/8-small) - [-1, 1, Conv, [256, 3, 2]], - [[-1, 14], 1, Concat, [1]], # cat head P4 - [-1, 3, BottleneckCSP, [512, False]], # 20 (P4/16-medium) + [ -1, 1, Conv, [ 256, 3, 2 ] ], + [ [ -1, 14 ], 1, Concat, [ 1 ] ], # cat head P4 + [ -1, 3, BottleneckCSP, [ 512, False ] ], # 20 (P4/16-medium) - [-1, 1, Conv, [512, 3, 2]], - [[-1, 10], 1, Concat, [1]], # cat head P5 - [-1, 3, BottleneckCSP, [1024, False]], # 23 (P5/32-large) + [ -1, 1, Conv, [ 512, 3, 2 ] ], + [ [ -1, 10 ], 1, Concat, [ 1 ] ], # cat head P5 + [ -1, 3, BottleneckCSP, [ 1024, False ] ], # 23 (P5/32-large) - [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + [ [ 17, 20, 23 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5) ] diff --git a/models/hub/yolov5l6.yaml b/models/hub/yolov5l6.yaml index 11298b01f479..91c57da1939e 100644 --- a/models/hub/yolov5l6.yaml +++ b/models/hub/yolov5l6.yaml @@ -1,9 +1,7 @@ -# parameters +# Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple - -# anchors anchors: - [ 19,27, 44,40, 38,94 ] # P3/8 - [ 96,68, 86,152, 180,137 ] # P4/16 diff --git a/models/hub/yolov5m6.yaml b/models/hub/yolov5m6.yaml index 48afc865593a..4bef2e074a96 100644 --- a/models/hub/yolov5m6.yaml +++ b/models/hub/yolov5m6.yaml @@ -1,9 +1,7 @@ -# parameters +# Parameters nc: 80 # number of classes depth_multiple: 0.67 # model depth multiple width_multiple: 0.75 # layer channel multiple - -# anchors anchors: - [ 19,27, 44,40, 38,94 ] # P3/8 - [ 96,68, 86,152, 180,137 ] # P4/16 diff --git a/models/hub/yolov5s-transformer.yaml b/models/hub/yolov5s-transformer.yaml index f2d666722b30..8023ba480d24 100644 --- a/models/hub/yolov5s-transformer.yaml +++ b/models/hub/yolov5s-transformer.yaml @@ -1,48 +1,46 @@ -# parameters +# Parameters nc: 80 # number of classes depth_multiple: 0.33 # model depth multiple width_multiple: 0.50 # layer channel multiple - -# anchors anchors: - - [10,13, 16,30, 33,23] # P3/8 - - [30,61, 62,45, 59,119] # P4/16 - - [116,90, 156,198, 373,326] # P5/32 + - [ 10,13, 16,30, 33,23 ] # P3/8 + - [ 30,61, 62,45, 59,119 ] # P4/16 + - [ 116,90, 156,198, 373,326 ] # P5/32 # YOLOv5 backbone backbone: # [from, number, module, args] - [[-1, 1, Focus, [64, 3]], # 0-P1/2 - [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 - [-1, 3, C3, [128]], - [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 - [-1, 9, C3, [256]], - [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 - [-1, 9, C3, [512]], - [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 - [-1, 
1, SPP, [1024, [5, 9, 13]]], - [-1, 3, C3TR, [1024, False]], # 9 <-------- C3TR() Transformer module + [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 + [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 + [ -1, 3, C3, [ 128 ] ], + [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 + [ -1, 9, C3, [ 256 ] ], + [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 + [ -1, 9, C3, [ 512 ] ], + [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 7-P5/32 + [ -1, 1, SPP, [ 1024, [ 5, 9, 13 ] ] ], + [ -1, 3, C3TR, [ 1024, False ] ], # 9 <-------- C3TR() Transformer module ] # YOLOv5 head head: - [[-1, 1, Conv, [512, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 6], 1, Concat, [1]], # cat backbone P4 - [-1, 3, C3, [512, False]], # 13 + [ [ -1, 1, Conv, [ 512, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 + [ -1, 3, C3, [ 512, False ] ], # 13 - [-1, 1, Conv, [256, 1, 1]], - [-1, 1, nn.Upsample, [None, 2, 'nearest']], - [[-1, 4], 1, Concat, [1]], # cat backbone P3 - [-1, 3, C3, [256, False]], # 17 (P3/8-small) + [ -1, 1, Conv, [ 256, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 + [ -1, 3, C3, [ 256, False ] ], # 17 (P3/8-small) - [-1, 1, Conv, [256, 3, 2]], - [[-1, 14], 1, Concat, [1]], # cat head P4 - [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + [ -1, 1, Conv, [ 256, 3, 2 ] ], + [ [ -1, 14 ], 1, Concat, [ 1 ] ], # cat head P4 + [ -1, 3, C3, [ 512, False ] ], # 20 (P4/16-medium) - [-1, 1, Conv, [512, 3, 2]], - [[-1, 10], 1, Concat, [1]], # cat head P5 - [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + [ -1, 1, Conv, [ 512, 3, 2 ] ], + [ [ -1, 10 ], 1, Concat, [ 1 ] ], # cat head P5 + [ -1, 3, C3, [ 1024, False ] ], # 23 (P5/32-large) - [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + [ [ 17, 20, 23 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5) ] diff --git a/models/hub/yolov5s6.yaml b/models/hub/yolov5s6.yaml index 1df577a2cc97..ba1025ec87ad 100644 --- a/models/hub/yolov5s6.yaml +++ b/models/hub/yolov5s6.yaml @@ -1,9 +1,7 @@ -# parameters +# Parameters nc: 80 # number of classes depth_multiple: 0.33 # model depth multiple width_multiple: 0.50 # layer channel multiple - -# anchors anchors: - [ 19,27, 44,40, 38,94 ] # P3/8 - [ 96,68, 86,152, 180,137 ] # P4/16 diff --git a/models/hub/yolov5x6.yaml b/models/hub/yolov5x6.yaml index 5ebc02124fe7..4fc9c9a119b8 100644 --- a/models/hub/yolov5x6.yaml +++ b/models/hub/yolov5x6.yaml @@ -1,9 +1,7 @@ -# parameters +# Parameters nc: 80 # number of classes depth_multiple: 1.33 # model depth multiple width_multiple: 1.25 # layer channel multiple - -# anchors anchors: - [ 19,27, 44,40, 38,94 ] # P3/8 - [ 96,68, 86,152, 180,137 ] # P4/16 diff --git a/models/yolo.py b/models/yolo.py index 4c9456edd687..826590bd9783 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -154,7 +154,7 @@ def forward_once(self, x, profile=False, feature_vis=False): x = m(x) # run y.append(x if m.i in self.save else None) # save output - + if feature_vis and m.type == 'models.common.SPP': feature_visualization(x, m.type, m.i) diff --git a/models/yolov5l.yaml b/models/yolov5l.yaml index 71ebf86e5791..0c130c1514af 100644 --- a/models/yolov5l.yaml +++ b/models/yolov5l.yaml @@ -1,9 +1,7 @@ -# parameters +# Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple - -# anchors anchors: - [10,13, 16,30, 33,23] # P3/8 - [30,61, 62,45, 59,119] # P4/16 diff --git a/models/yolov5m.yaml b/models/yolov5m.yaml index 
3c749c916246..e477b3433d39 100644 --- a/models/yolov5m.yaml +++ b/models/yolov5m.yaml @@ -1,9 +1,7 @@ -# parameters +# Parameters nc: 80 # number of classes depth_multiple: 0.67 # model depth multiple width_multiple: 0.75 # layer channel multiple - -# anchors anchors: - [10,13, 16,30, 33,23] # P3/8 - [30,61, 62,45, 59,119] # P4/16 diff --git a/models/yolov5s.yaml b/models/yolov5s.yaml index aca669d60d8b..e85442dc9188 100644 --- a/models/yolov5s.yaml +++ b/models/yolov5s.yaml @@ -1,9 +1,7 @@ -# parameters +# Parameters nc: 80 # number of classes depth_multiple: 0.33 # model depth multiple width_multiple: 0.50 # layer channel multiple - -# anchors anchors: - [10,13, 16,30, 33,23] # P3/8 - [30,61, 62,45, 59,119] # P4/16 diff --git a/models/yolov5x.yaml b/models/yolov5x.yaml index d3babdf7baf0..c7ca03589ab8 100644 --- a/models/yolov5x.yaml +++ b/models/yolov5x.yaml @@ -1,9 +1,7 @@ -# parameters +# Parameters nc: 80 # number of classes depth_multiple: 1.33 # model depth multiple width_multiple: 1.25 # layer channel multiple - -# anchors anchors: - [10,13, 16,30, 33,23] # P3/8 - [30,61, 62,45, 59,119] # P4/16 From 9e8fb9fd0b4e6ad840991823f7342ca6227ddb62 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 4 Jul 2021 18:14:04 +0200 Subject: [PATCH 0382/1976] Create `utils/augmentations.py` (#3877) * Create `utils/augmentations.py` * cleanup --- utils/augmentations.py | 244 +++++++++++++++++++++++++++++++++++++++++ utils/datasets.py | 241 +--------------------------------------- 2 files changed, 250 insertions(+), 235 deletions(-) create mode 100644 utils/augmentations.py diff --git a/utils/augmentations.py b/utils/augmentations.py new file mode 100644 index 000000000000..f7b13165daf0 --- /dev/null +++ b/utils/augmentations.py @@ -0,0 +1,244 @@ +# YOLOv5 image augmentation functions + +import random + +import cv2 +import math +import numpy as np + +from utils.general import segment2box, resample_segments +from utils.metrics import bbox_ioa + + +def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5): + # HSV color-space augmentation + if hgain or sgain or vgain: + r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains + hue, sat, val = cv2.split(cv2.cvtColor(im, cv2.COLOR_BGR2HSV)) + dtype = im.dtype # uint8 + + x = np.arange(0, 256, dtype=r.dtype) + lut_hue = ((x * r[0]) % 180).astype(dtype) + lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) + lut_val = np.clip(x * r[2], 0, 255).astype(dtype) + + img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))) + cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=im) # no return needed + + +def hist_equalize(im, clahe=True, bgr=False): + # Equalize histogram on BGR image 'img' with img.shape(n,m,3) and range 0-255 + yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV) + if clahe: + c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8)) + yuv[:, :, 0] = c.apply(yuv[:, :, 0]) + else: + yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram + return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB) # convert YUV image to RGB + + +def replicate(im, labels): + # Replicate labels + h, w = im.shape[:2] + boxes = labels[:, 1:].astype(int) + x1, y1, x2, y2 = boxes.T + s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels) + for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices + x1b, y1b, x2b, y2b = boxes[i] + bh, bw = y2b - y1b, x2b - x1b + yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y + x1a, y1a, x2a, y2a = 
[xc, yc, xc + bw, yc + bh] + im[y1a:y2a, x1a:x2a] = im[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] + labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0) + + return im, labels + + +def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32): + # Resize and pad image while meeting stride-multiple constraints + shape = im.shape[:2] # current shape [height, width] + if isinstance(new_shape, int): + new_shape = (new_shape, new_shape) + + # Scale ratio (new / old) + r = min(new_shape[0] / shape[0], new_shape[1] / shape[1]) + if not scaleup: # only scale down, do not scale up (for better test mAP) + r = min(r, 1.0) + + # Compute padding + ratio = r, r # width, height ratios + new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) + dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding + if auto: # minimum rectangle + dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding + elif scaleFill: # stretch + dw, dh = 0.0, 0.0 + new_unpad = (new_shape[1], new_shape[0]) + ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios + + dw /= 2 # divide padding into 2 sides + dh /= 2 + + if shape[::-1] != new_unpad: # resize + im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR) + top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) + left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) + im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border + return im, ratio, (dw, dh) + + +def random_perspective(im, targets=(), segments=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, + border=(0, 0)): + # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10)) + # targets = [cls, xyxy] + + height = im.shape[0] + border[0] * 2 # shape(h,w,c) + width = im.shape[1] + border[1] * 2 + + # Center + C = np.eye(3) + C[0, 2] = -im.shape[1] / 2 # x translation (pixels) + C[1, 2] = -im.shape[0] / 2 # y translation (pixels) + + # Perspective + P = np.eye(3) + P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y) + P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x) + + # Rotation and Scale + R = np.eye(3) + a = random.uniform(-degrees, degrees) + # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations + s = random.uniform(1 - scale, 1 + scale) + # s = 2 ** random.uniform(-scale, scale) + R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) + + # Shear + S = np.eye(3) + S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg) + S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg) + + # Translation + T = np.eye(3) + T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels) + T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels) + + # Combined rotation matrix + M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT + if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed + if perspective: + im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114)) + else: # affine + im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114)) + + # Visualize + # import matplotlib.pyplot as plt + # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel() + # ax[0].imshow(img[:, :, 
::-1]) # base + # ax[1].imshow(img2[:, :, ::-1]) # warped + + # Transform label coordinates + n = len(targets) + if n: + use_segments = any(x.any() for x in segments) + new = np.zeros((n, 4)) + if use_segments: # warp segments + segments = resample_segments(segments) # upsample + for i, segment in enumerate(segments): + xy = np.ones((len(segment), 3)) + xy[:, :2] = segment + xy = xy @ M.T # transform + xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or affine + + # clip + new[i] = segment2box(xy, width, height) + + else: # warp boxes + xy = np.ones((n * 4, 3)) + xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1 + xy = xy @ M.T # transform + xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine + + # create new boxes + x = xy[:, [0, 2, 4, 6]] + y = xy[:, [1, 3, 5, 7]] + new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T + + # clip + new[:, [0, 2]] = new[:, [0, 2]].clip(0, width) + new[:, [1, 3]] = new[:, [1, 3]].clip(0, height) + + # filter candidates + i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10) + targets = targets[i] + targets[:, 1:5] = new[i] + + return im, targets + + +def copy_paste(im, labels, segments, probability=0.5): + # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy) + n = len(segments) + if probability and n: + h, w, c = im.shape # height, width, channels + im_new = np.zeros(im.shape, np.uint8) + for j in random.sample(range(n), k=round(probability * n)): + l, s = labels[j], segments[j] + box = w - l[3], l[2], w - l[1], l[4] + ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area + if (ioa < 0.30).all(): # allow 30% obscuration of existing labels + labels = np.concatenate((labels, [[l[0], *box]]), 0) + segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1)) + cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED) + + result = cv2.bitwise_and(src1=im, src2=im_new) + result = cv2.flip(result, 1) # augment segments (flip left-right) + i = result > 0 # pixels to replace + # i[:, :] = result.max(2).reshape(h, w, 1) # act over ch + im[i] = result[i] # cv2.imwrite('debug.jpg', img) # debug + + return im, labels, segments + + +def cutout(im, labels): + # Applies image cutout augmentation https://arxiv.org/abs/1708.04552 + h, w = im.shape[:2] + + # create random masks + scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction + for s in scales: + mask_h = random.randint(1, int(h * s)) + mask_w = random.randint(1, int(w * s)) + + # box + xmin = max(0, random.randint(0, w) - mask_w // 2) + ymin = max(0, random.randint(0, h) - mask_h // 2) + xmax = min(w, xmin + mask_w) + ymax = min(h, ymin + mask_h) + + # apply random color mask + im[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)] + + # return unobscured labels + if len(labels) and s > 0.03: + box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32) + ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area + labels = labels[ioa < 0.60] # remove >60% obscured labels + + return labels + + +def mixup(im, labels, im2, labels2): + # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf + r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0 + im = (im * r + im2 * (1 - r)).astype(np.uint8) + labels = np.concatenate((labels, labels2), 0) + return im, 
labels + + +def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n) + # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio + w1, h1 = box1[2] - box1[0], box1[3] - box1[1] + w2, h2 = box2[2] - box2[0], box2[3] - box2[1] + ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio + return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates diff --git a/utils/datasets.py b/utils/datasets.py index 8560f7cfeb88..5c76a908c559 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -1,4 +1,4 @@ -# Dataset utils and dataloaders +# YOLOv5 dataset utils and dataloaders import glob import hashlib @@ -14,7 +14,6 @@ from threading import Thread import cv2 -import math import numpy as np import torch import torch.nn.functional as F @@ -23,9 +22,9 @@ from torch.utils.data import Dataset from tqdm import tqdm +from utils.augmentations import augment_hsv, copy_paste, letterbox, mixup, random_perspective from utils.general import check_requirements, check_file, check_dataset, xywh2xyxy, xywhn2xyxy, xyxy2xywhn, \ - xyn2xy, segment2box, segments2boxes, resample_segments, clean_str -from utils.metrics import bbox_ioa + xyn2xy, segments2boxes, clean_str from utils.torch_utils import torch_distributed_zero_first # Parameters @@ -523,12 +522,10 @@ def __getitem__(self, index): img, labels = load_mosaic(self, index) shapes = None - # MixUp https://arxiv.org/pdf/1710.09412.pdf + # MixUp augmentation if random.random() < hyp['mixup']: - img2, labels2 = load_mosaic(self, random.randint(0, self.n - 1)) - r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0 - img = (img * r + img2 * (1 - r)).astype(np.uint8) - labels = np.concatenate((labels, labels2), 0) + img, labels = mixup(img, labels, *load_mosaic(self, random.randint(0, self.n - 1))) + else: # Load image @@ -639,32 +636,6 @@ def load_image(self, index): return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized -def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5): - if hgain or sgain or vgain: - r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains - hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV)) - dtype = img.dtype # uint8 - - x = np.arange(0, 256, dtype=r.dtype) - lut_hue = ((x * r[0]) % 180).astype(dtype) - lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) - lut_val = np.clip(x * r[2], 0, 255).astype(dtype) - - img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))) - cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed - - -def hist_equalize(img, clahe=True, bgr=False): - # Equalize histogram on BGR image 'img' with img.shape(n,m,3) and range 0-255 - yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV) - if clahe: - c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8)) - yuv[:, :, 0] = c.apply(yuv[:, :, 0]) - else: - yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram - return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB) # convert YUV image to RGB - - def load_mosaic(self, index): # loads images in a 4-mosaic @@ -796,205 +767,6 @@ def load_mosaic9(self, index): return img9, labels9 -def replicate(img, labels): - # Replicate labels - h, w = img.shape[:2] - boxes = labels[:, 1:].astype(int) - x1, y1, x2, y2 = boxes.T - s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels) - for i in 
s.argsort()[:round(s.size * 0.5)]: # smallest indices - x1b, y1b, x2b, y2b = boxes[i] - bh, bw = y2b - y1b, x2b - x1b - yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y - x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh] - img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] - labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0) - - return img, labels - - -def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32): - # Resize and pad image while meeting stride-multiple constraints - shape = img.shape[:2] # current shape [height, width] - if isinstance(new_shape, int): - new_shape = (new_shape, new_shape) - - # Scale ratio (new / old) - r = min(new_shape[0] / shape[0], new_shape[1] / shape[1]) - if not scaleup: # only scale down, do not scale up (for better test mAP) - r = min(r, 1.0) - - # Compute padding - ratio = r, r # width, height ratios - new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) - dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding - if auto: # minimum rectangle - dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding - elif scaleFill: # stretch - dw, dh = 0.0, 0.0 - new_unpad = (new_shape[1], new_shape[0]) - ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios - - dw /= 2 # divide padding into 2 sides - dh /= 2 - - if shape[::-1] != new_unpad: # resize - img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR) - top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) - left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) - img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border - return img, ratio, (dw, dh) - - -def random_perspective(img, targets=(), segments=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, - border=(0, 0)): - # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10)) - # targets = [cls, xyxy] - - height = img.shape[0] + border[0] * 2 # shape(h,w,c) - width = img.shape[1] + border[1] * 2 - - # Center - C = np.eye(3) - C[0, 2] = -img.shape[1] / 2 # x translation (pixels) - C[1, 2] = -img.shape[0] / 2 # y translation (pixels) - - # Perspective - P = np.eye(3) - P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y) - P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x) - - # Rotation and Scale - R = np.eye(3) - a = random.uniform(-degrees, degrees) - # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations - s = random.uniform(1 - scale, 1 + scale) - # s = 2 ** random.uniform(-scale, scale) - R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) - - # Shear - S = np.eye(3) - S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg) - S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg) - - # Translation - T = np.eye(3) - T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels) - T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels) - - # Combined rotation matrix - M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT - if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed - if perspective: - img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 
114)) - else: # affine - img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114)) - - # Visualize - # import matplotlib.pyplot as plt - # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel() - # ax[0].imshow(img[:, :, ::-1]) # base - # ax[1].imshow(img2[:, :, ::-1]) # warped - - # Transform label coordinates - n = len(targets) - if n: - use_segments = any(x.any() for x in segments) - new = np.zeros((n, 4)) - if use_segments: # warp segments - segments = resample_segments(segments) # upsample - for i, segment in enumerate(segments): - xy = np.ones((len(segment), 3)) - xy[:, :2] = segment - xy = xy @ M.T # transform - xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or affine - - # clip - new[i] = segment2box(xy, width, height) - - else: # warp boxes - xy = np.ones((n * 4, 3)) - xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1 - xy = xy @ M.T # transform - xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine - - # create new boxes - x = xy[:, [0, 2, 4, 6]] - y = xy[:, [1, 3, 5, 7]] - new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T - - # clip - new[:, [0, 2]] = new[:, [0, 2]].clip(0, width) - new[:, [1, 3]] = new[:, [1, 3]].clip(0, height) - - # filter candidates - i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10) - targets = targets[i] - targets[:, 1:5] = new[i] - - return img, targets - - -def copy_paste(img, labels, segments, probability=0.5): - # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy) - n = len(segments) - if probability and n: - h, w, c = img.shape # height, width, channels - im_new = np.zeros(img.shape, np.uint8) - for j in random.sample(range(n), k=round(probability * n)): - l, s = labels[j], segments[j] - box = w - l[3], l[2], w - l[1], l[4] - ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area - if (ioa < 0.30).all(): # allow 30% obscuration of existing labels - labels = np.concatenate((labels, [[l[0], *box]]), 0) - segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1)) - cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED) - - result = cv2.bitwise_and(src1=img, src2=im_new) - result = cv2.flip(result, 1) # augment segments (flip left-right) - i = result > 0 # pixels to replace - # i[:, :] = result.max(2).reshape(h, w, 1) # act over ch - img[i] = result[i] # cv2.imwrite('debug.jpg', img) # debug - - return img, labels, segments - - -def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n) - # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio - w1, h1 = box1[2] - box1[0], box1[3] - box1[1] - w2, h2 = box2[2] - box2[0], box2[3] - box2[1] - ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio - return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates - - -def cutout(image, labels): - # Applies image cutout augmentation https://arxiv.org/abs/1708.04552 - h, w = image.shape[:2] - - # create random masks - scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction - for s in scales: - mask_h = random.randint(1, int(h * s)) - mask_w = random.randint(1, int(w * s)) - - # box - xmin = max(0, random.randint(0, w) - mask_w // 2) - ymin = max(0, 
random.randint(0, h) - mask_h // 2) - xmax = min(w, xmin + mask_w) - ymax = min(h, ymin + mask_h) - - # apply random color mask - image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)] - - # return unobscured labels - if len(labels) and s > 0.03: - box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32) - ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area - labels = labels[ioa < 0.60] # remove >60% obscured labels - - return labels - - def create_folder(path='./new'): # Create folder if os.path.exists(path): @@ -1012,7 +784,6 @@ def flatten_recursive(path='../datasets/coco128'): def extract_boxes(path='../datasets/coco128'): # from utils.datasets import *; extract_boxes() # Convert detection dataset into classification dataset, with one directory per class - path = Path(path) # images dir shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None # remove existing files = list(path.rglob('*.*')) From 3c3f8fbd5d2e5bf2cbeaf824dc3a74c8a7bf6300 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 4 Jul 2021 20:12:32 +0200 Subject: [PATCH 0383/1976] Improved BGR2RGB speeds (#3880) * Update BGR2RGB ops * speed improvements * cleanup --- models/common.py | 2 +- utils/datasets.py | 9 ++++----- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/models/common.py b/models/common.py index 9911b207d060..6b8b4e4cb42f 100644 --- a/models/common.py +++ b/models/common.py @@ -259,7 +259,7 @@ def forward(self, imgs, size=640, augment=False, profile=False): files.append(Path(f).with_suffix('.jpg').name) if im.shape[0] < 5: # image in CHW im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1) - im = im[:, :, :3] if im.ndim == 3 else np.tile(im[:, :, None], 3) # enforce 3ch input + im = im[..., :3] if im.ndim == 3 else np.tile(im[..., None], 3) # enforce 3ch input s = im.shape[:2] # HWC shape0.append(s) # image shape g = (size / max(s)) # gain diff --git a/utils/datasets.py b/utils/datasets.py index 5c76a908c559..5a3fbefa28b7 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -218,7 +218,7 @@ def __next__(self): img = letterbox(img0, self.img_size, stride=self.stride)[0] # Convert - img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB and HWC to CHW + img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB img = np.ascontiguousarray(img) return path, img, img0, self.cap @@ -264,7 +264,7 @@ def __next__(self): img = letterbox(img0, self.img_size, stride=self.stride)[0] # Convert - img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB and HWC to CHW + img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB img = np.ascontiguousarray(img) return img_path, img, img0, None @@ -345,7 +345,7 @@ def __next__(self): img = np.stack(img, 0) # Convert - img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB and BHWC to BCHW + img = img[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW img = np.ascontiguousarray(img) return self.sources, img, img0, None @@ -526,7 +526,6 @@ def __getitem__(self, index): if random.random() < hyp['mixup']: img, labels = mixup(img, labels, *load_mosaic(self, random.randint(0, self.n - 1))) - else: # Load image img, (h0, w0), (h, w) = load_image(self, index) @@ -579,7 +578,7 @@ def __getitem__(self, index): labels_out[:, 1:] = torch.from_numpy(labels) # Convert - img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3 x img_height x img_width + img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB img = np.ascontiguousarray(img) return torch.from_numpy(img), 
labels_out, self.img_files[index], shapes From 8930e22cceca4f07e8adb26baa5afa2745e77053 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 5 Jul 2021 12:48:27 +0200 Subject: [PATCH 0384/1976] Evolution commented `hyp['anchors']` fix (#3887) Fix for `KeyError: 'anchors'` error when start hyperparameter evolution: ```bash python train.py --evolve ``` ```bash Traceback (most recent call last): File "E:\yolov5\train.py", line 623, in hyp[k] = max(hyp[k], v[1]) # lower limit KeyError: 'anchors' ``` --- train.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/train.py b/train.py index 2e864a60cefc..5a434773eff7 100644 --- a/train.py +++ b/train.py @@ -596,6 +596,8 @@ def main(opt): with open(opt.hyp) as f: hyp = yaml.safe_load(f) # load hyps dict + if 'anchors' not in hyp: # anchors commented in hyp.yaml + hyp['anchors'] = 3 assert LOCAL_RANK == -1, 'DDP mode not implemented for --evolve' opt.notest, opt.nosave = True, True # only test/save final epoch # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices From 6a3ee7cf03efb17fbffde0e68b1a854e80fe3213 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 5 Jul 2021 16:20:46 +0200 Subject: [PATCH 0385/1976] Hub models `map_location=device` (#3894) * Hub models `map_location=device` * cleanup --- hubconf.py | 7 ++++--- utils/torch_utils.py | 5 +++-- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/hubconf.py b/hubconf.py index 429e61bbab1b..2de71d617f1e 100644 --- a/hubconf.py +++ b/hubconf.py @@ -36,13 +36,15 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo fname = Path(name).with_suffix('.pt') # checkpoint filename try: + device = select_device(('0' if torch.cuda.is_available() else 'cpu') if device is None else device) + if pretrained and channels == 3 and classes == 80: - model = attempt_load(fname, map_location=torch.device('cpu')) # download/load FP32 model + model = attempt_load(fname, map_location=device) # download/load FP32 model else: cfg = list((Path(__file__).parent / 'models').rglob(f'{name}.yaml'))[0] # model.yaml path model = Model(cfg, channels, classes) # create model if pretrained: - ckpt = torch.load(attempt_download(fname), map_location=torch.device('cpu')) # load + ckpt = torch.load(attempt_download(fname), map_location=device) # load msd = model.state_dict() # model state_dict csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 csd = {k: v for k, v in csd.items() if msd[k].shape == v.shape} # filter @@ -51,7 +53,6 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo model.names = ckpt['model'].names # set class names attribute if autoshape: model = model.autoshape() # for file/URI/PIL/cv2/np inputs and NMS - device = select_device('0' if torch.cuda.is_available() else 'cpu') if device is None else torch.device(device) return model.to(device) except Exception as e: diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 2d5382471e3c..36b6845a8c48 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -2,7 +2,6 @@ import datetime import logging -import math import os import platform import subprocess @@ -11,6 +10,7 @@ from copy import deepcopy from pathlib import Path +import math import torch import torch.backends.cudnn as cudnn import torch.distributed as dist @@ -64,7 +64,8 @@ def git_describe(path=Path(__file__).parent): # path must be a directory def select_device(device='', batch_size=None): # device = 'cpu' or '0' or '0,1,2,3' s = f'YOLOv5 🚀 {git_describe() or 
date_modified()} torch {torch.__version__} ' # string - cpu = device.lower() == 'cpu' + device = str(device).strip().lower().replace('cuda:', '') # to string, 'cuda:0' to '0' + cpu = device == 'cpu' if cpu: os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False elif device: # non-cpu device requested From 33202b7f0bbfcc55860e8c0cafbf13f227e77a84 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 5 Jul 2021 18:01:54 +0200 Subject: [PATCH 0386/1976] YOLOv5 + Albumentations integration (#3882) * Albumentations integration * ToGray p=0.01 * print confirmation * create instance in dataloader init method * improved version handling * transform not defined fix * assert string update * create check_version() * add spaces * update class comment --- requirements.txt | 1 + utils/augmentations.py | 30 +++++++++++++++++++++++++++++- utils/datasets.py | 40 +++++++++++++++++++++------------------- utils/general.py | 17 ++++++++++------- 4 files changed, 61 insertions(+), 27 deletions(-) diff --git a/requirements.txt b/requirements.txt index b413ec01b31c..ef1736a12d5f 100755 --- a/requirements.txt +++ b/requirements.txt @@ -27,4 +27,5 @@ pandas # extras -------------------------------------- # Cython # for pycocotools https://github.com/cocodataset/cocoapi/issues/172 # pycocotools>=2.0 # COCO mAP +# albumentations>=1.0.0 thop # FLOPs computation diff --git a/utils/augmentations.py b/utils/augmentations.py index f7b13165daf0..74ee4de2131e 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -1,15 +1,43 @@ # YOLOv5 image augmentation functions +import logging import random import cv2 import math import numpy as np -from utils.general import segment2box, resample_segments +from utils.general import colorstr, segment2box, resample_segments, check_version from utils.metrics import bbox_ioa +class Albumentations: + # YOLOv5 Albumentations class (optional, only used if package is installed) + def __init__(self): + self.transform = None + try: + import albumentations as A + check_version(A.__version__, '1.0.0') # version requirement + + self.transform = A.Compose([ + A.Blur(p=0.1), + A.MedianBlur(p=0.1), + A.ToGray(p=0.01)], + bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels'])) + + logging.info(colorstr('albumentations: ') + ', '.join(f'{x}' for x in self.transform.transforms)) + except ImportError: # package not installed, skip + pass + except Exception as e: + logging.info(colorstr('albumentations: ') + f'{e}') + + def __call__(self, im, labels, p=1.0): + if self.transform and random.random() < p: + new = self.transform(image=im, bboxes=labels[:, 1:], class_labels=labels[:, 0]) # transformed + im, labels = new['image'], np.array([[c, *b] for c, b in zip(new['class_labels'], new['bboxes'])]) + return im, labels + + def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5): # HSV color-space augmentation if hgain or sgain or vgain: diff --git a/utils/datasets.py b/utils/datasets.py index 5a3fbefa28b7..0bcfdcc1cda6 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -22,7 +22,7 @@ from torch.utils.data import Dataset from tqdm import tqdm -from utils.augmentations import augment_hsv, copy_paste, letterbox, mixup, random_perspective +from utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective from utils.general import check_requirements, check_file, check_dataset, xywh2xyxy, xywhn2xyxy, xyxy2xywhn, \ xyn2xy, segments2boxes, clean_str from utils.torch_utils import torch_distributed_zero_first @@ 
-372,6 +372,7 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r self.mosaic_border = [-img_size // 2, -img_size // 2] self.stride = stride self.path = path + self.albumentations = Albumentations() if augment else None try: f = [] # image files @@ -539,9 +540,7 @@ def __getitem__(self, index): if labels.size: # normalized xywh to pixel xyxy format labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1]) - if self.augment: - # Augment imagespace - if not mosaic: + if self.augment: img, labels = random_perspective(img, labels, degrees=hyp['degrees'], translate=hyp['translate'], @@ -549,32 +548,35 @@ def __getitem__(self, index): shear=hyp['shear'], perspective=hyp['perspective']) - # Augment colorspace - augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v']) - - # Apply cutouts - # if random.random() < 0.9: - # labels = cutout(img, labels) - - nL = len(labels) # number of labels - if nL: + nl = len(labels) # number of labels + if nl: labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0]) # xyxy to xywh normalized if self.augment: - # flip up-down + # Albumentations + img, labels = self.albumentations(img, labels) + + # HSV color-space + augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v']) + + # Flip up-down if random.random() < hyp['flipud']: img = np.flipud(img) - if nL: + if nl: labels[:, 2] = 1 - labels[:, 2] - # flip left-right + # Flip left-right if random.random() < hyp['fliplr']: img = np.fliplr(img) - if nL: + if nl: labels[:, 1] = 1 - labels[:, 1] - labels_out = torch.zeros((nL, 6)) - if nL: + # Cutouts + # if random.random() < 0.9: + # labels = cutout(img, labels) + + labels_out = torch.zeros((nl, 6)) + if nl: labels_out[:, 1:] = torch.from_numpy(labels) # Convert diff --git a/utils/general.py b/utils/general.py index 4606a8ec54f5..b4c8994d233a 100755 --- a/utils/general.py +++ b/utils/general.py @@ -3,7 +3,6 @@ import contextlib import glob import logging -import math import os import platform import random @@ -17,6 +16,7 @@ from subprocess import check_output import cv2 +import math import numpy as np import pandas as pd import pkg_resources as pkg @@ -136,13 +136,16 @@ def check_git_status(err_msg=', for updates see https://github.com/ultralytics/y print(f'{e}{err_msg}') -def check_python(minimum='3.6.2', required=True): +def check_python(minimum='3.6.2'): # Check current python version vs. required python version - current = platform.python_version() - result = pkg.parse_version(current) >= pkg.parse_version(minimum) - if required: - assert result, f'Python {minimum} required by YOLOv5, but Python {current} is currently installed' - return result + check_version(platform.python_version(), minimum, name='Python ') + + +def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False): + # Check version vs. 
required version + current, minimum = (pkg.parse_version(x) for x in (current, minimum)) + result = (current == minimum) if pinned else (current >= minimum) + assert result, f'{name}{minimum} required by YOLOv5, but {name}{current} is currently installed' def check_requirements(requirements='requirements.txt', exclude=()): From 61047a2b4fb318a2cf86475c0099ead7832e45cf Mon Sep 17 00:00:00 2001 From: johnohagan <86861886+johnohagan@users.noreply.github.com> Date: Wed, 7 Jul 2021 21:41:46 +1000 Subject: [PATCH 0387/1976] Save PyTorch Hub models to `/root/hub/cache/dir` (#3904) * Create hubconf.py * Add save_dir variable Co-authored-by: Glenn Jocher --- hubconf.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/hubconf.py b/hubconf.py index 2de71d617f1e..df268b18d177 100644 --- a/hubconf.py +++ b/hubconf.py @@ -4,9 +4,12 @@ import torch model = torch.hub.load('ultralytics/yolov5', 'yolov5s') """ +from pathlib import Path import torch +FILE = Path(__file__).absolute() + def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None): """Creates a specified YOLOv5 model @@ -23,28 +26,26 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo Returns: YOLOv5 pytorch model """ - from pathlib import Path - from models.yolo import Model, attempt_load from utils.general import check_requirements, set_logging from utils.google_utils import attempt_download from utils.torch_utils import select_device - check_requirements(requirements=Path(__file__).parent / 'requirements.txt', - exclude=('tensorboard', 'thop', 'opencv-python')) + check_requirements(requirements=FILE.parent / 'requirements.txt', exclude=('tensorboard', 'thop', 'opencv-python')) set_logging(verbose=verbose) - fname = Path(name).with_suffix('.pt') # checkpoint filename + save_dir = Path('') if str(name).endswith('.pt') else FILE.parent + path = (save_dir / name).with_suffix('.pt') # checkpoint path try: device = select_device(('0' if torch.cuda.is_available() else 'cpu') if device is None else device) if pretrained and channels == 3 and classes == 80: - model = attempt_load(fname, map_location=device) # download/load FP32 model + model = attempt_load(path, map_location=device) # download/load FP32 model else: cfg = list((Path(__file__).parent / 'models').rglob(f'{name}.yaml'))[0] # model.yaml path model = Model(cfg, channels, classes) # create model if pretrained: - ckpt = torch.load(attempt_download(fname), map_location=device) # load + ckpt = torch.load(attempt_download(path), map_location=device) # load msd = model.state_dict() # model state_dict csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 csd = {k: v for k, v in csd.items() if msd[k].shape == v.shape} # filter From 87b094bcbcf209c89febcc9a3bb0ae119fee882d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 7 Jul 2021 15:41:58 +0200 Subject: [PATCH 0388/1976] Feature visualization update (#3920) * Feature visualization update * Save to jpg (faster) * Save to png --- detect.py | 6 +++++- models/yolo.py | 11 +++++------ utils/plots.py | 39 ++++++++++++++++++--------------------- 3 files changed, 28 insertions(+), 28 deletions(-) diff --git a/detect.py b/detect.py index a4542f7e8802..44b33eb42289 100644 --- a/detect.py +++ b/detect.py @@ -40,6 +40,7 @@ def run(weights='yolov5s.pt', # model.pt path(s) classes=None, # filter by class: --class 0, or --class 0 2 3 agnostic_nms=False, # class-agnostic NMS augment=False, # augmented inference + visualize=False, # 
visualize features update=False, # update all models project='runs/detect', # save results to project/name name='exp', # save results to project/name @@ -100,7 +101,9 @@ def run(weights='yolov5s.pt', # model.pt path(s) # Inference t1 = time_synchronized() - pred = model(img, augment=augment)[0] + pred = model(img, + augment=augment, + visualize=increment_path(save_dir / 'features', mkdir=True) if visualize else False)[0] # Apply NMS pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det) @@ -201,6 +204,7 @@ def parse_opt(): parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3') parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') parser.add_argument('--augment', action='store_true', help='augmented inference') + parser.add_argument('--visualize', action='store_true', help='visualize features') parser.add_argument('--update', action='store_true', help='update all models') parser.add_argument('--project', default='runs/detect', help='save results to project/name') parser.add_argument('--name', default='exp', help='save results to project/name') diff --git a/models/yolo.py b/models/yolo.py index 826590bd9783..b11443377080 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -117,11 +117,10 @@ def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, i self.info() logger.info('') - def forward(self, x, augment=False, profile=False): + def forward(self, x, augment=False, profile=False, visualize=False): if augment: return self.forward_augment(x) # augmented inference, None - else: - return self.forward_once(x, profile) # single-scale inference, train + return self.forward_once(x, profile, visualize) # single-scale inference, train def forward_augment(self, x): img_size = x.shape[-2:] # height, width @@ -136,7 +135,7 @@ def forward_augment(self, x): y.append(yi) return torch.cat(y, 1), None # augmented inference, train - def forward_once(self, x, profile=False, feature_vis=False): + def forward_once(self, x, profile=False, visualize=False): y, dt = [], [] # outputs for m in self.model: if m.f != -1: # if not from previous layer @@ -155,8 +154,8 @@ def forward_once(self, x, profile=False, feature_vis=False): x = m(x) # run y.append(x if m.i in self.save else None) # save output - if feature_vis and m.type == 'models.common.SPP': - feature_visualization(x, m.type, m.i) + if visualize: + feature_visualization(x, m.type, m.i, save_dir=visualize) if profile: logger.info('%.1fms total' % sum(dt)) diff --git a/utils/plots.py b/utils/plots.py index 4b6c63992ac7..1ab3bb6f21fe 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -1,12 +1,12 @@ # Plotting utils import glob -import math import os from copy import copy from pathlib import Path import cv2 +import math import matplotlib import matplotlib.pyplot as plt import numpy as np @@ -15,7 +15,6 @@ import torch import yaml from PIL import Image, ImageDraw, ImageFont -from torchvision import transforms from utils.general import increment_path, xywh2xyxy, xyxy2xywh from utils.metrics import fitness @@ -448,28 +447,26 @@ def plot_results(start=0, stop=0, bucket='', id=(), labels=(), save_dir=''): fig.savefig(Path(save_dir) / 'results.png', dpi=200) -def feature_visualization(x, module_type, stage, n=64): +def feature_visualization(x, module_type, stage, n=64, save_dir=Path('runs/detect/exp')): """ x: Features to be visualized module_type: Module type stage: Module stage within model n: Maximum number of feature maps 
to plot + save_dir: Directory to save results """ - batch, channels, height, width = x.shape # batch, channels, height, width - if height > 1 and width > 1: - project, name = 'runs/features', 'exp' - save_dir = increment_path(Path(project) / name) # increment run - save_dir.mkdir(parents=True, exist_ok=True) # make dir - - plt.figure(tight_layout=True) - blocks = torch.chunk(x, channels, dim=1) # block by channel dimension - n = min(n, len(blocks)) - for i in range(n): - feature = transforms.ToPILImage()(blocks[i].squeeze()) - ax = plt.subplot(int(math.sqrt(n)), int(math.sqrt(n)), i + 1) - ax.axis('off') - plt.imshow(feature) # cmap='gray' - - f = f"stage_{stage}_{module_type.split('.')[-1]}_features.png" - print(f'Saving {save_dir / f}...') - plt.savefig(save_dir / f, dpi=300) + if 'Detect' not in module_type: + batch, channels, height, width = x.shape # batch, channels, height, width + if height > 1 and width > 1: + f = f"stage{stage}_{module_type.split('.')[-1]}_features.png" # filename + + plt.figure(tight_layout=True) + blocks = torch.chunk(x[0], channels, dim=0) # select batch index 0, block by channels + n = min(n, channels) # number of plots + ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True)[1].ravel() # 8 rows x n/8 cols + for i in range(n): + ax[i].imshow(blocks[i].squeeze()) # cmap='gray' + ax[i].axis('off') + + print(f'Saving {save_dir / f}... ({n}/{channels})') + plt.savefig(save_dir / f, dpi=300) From 411842e0583ea77970a35a367faf3cf5017845eb Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 7 Jul 2021 16:08:42 +0200 Subject: [PATCH 0389/1976] Fix `torch.hub.list('ultralytics/yolov5')` pathlib bug (#3921) --- hubconf.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/hubconf.py b/hubconf.py index df268b18d177..55536c3a42f3 100644 --- a/hubconf.py +++ b/hubconf.py @@ -4,12 +4,9 @@ import torch model = torch.hub.load('ultralytics/yolov5', 'yolov5s') """ -from pathlib import Path import torch -FILE = Path(__file__).absolute() - def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None): """Creates a specified YOLOv5 model @@ -26,15 +23,18 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo Returns: YOLOv5 pytorch model """ + from pathlib import Path + from models.yolo import Model, attempt_load from utils.general import check_requirements, set_logging from utils.google_utils import attempt_download from utils.torch_utils import select_device - check_requirements(requirements=FILE.parent / 'requirements.txt', exclude=('tensorboard', 'thop', 'opencv-python')) + file = Path(__file__).absolute() + check_requirements(requirements=file.parent / 'requirements.txt', exclude=('tensorboard', 'thop', 'opencv-python')) set_logging(verbose=verbose) - save_dir = Path('') if str(name).endswith('.pt') else FILE.parent + save_dir = Path('') if str(name).endswith('.pt') else file.parent path = (save_dir / name).with_suffix('.pt') # checkpoint path try: device = select_device(('0' if torch.cuda.is_available() else 'cpu') if device is None else device) From 588094eb7adbbdc8d89d87ce0cf689d1d9f31bfc Mon Sep 17 00:00:00 2001 From: jmiranda-laplateforme <67475949+jmiranda-laplateforme@users.noreply.github.com> Date: Wed, 7 Jul 2021 16:13:12 +0200 Subject: [PATCH 0390/1976] Update `setattr()` default for Hub PIL images (#3923) Fix inference from PIL source. 
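Despite the `setattr()` in the subject line, the one-line change below updates a `getattr()` call. A minimal standalone sketch of why the two-argument form is needed (the fallback name `f` is assumed here, mirroring the loop variable in `AutoShape.forward`):

```python
from PIL import Image

im = Image.new('RGB', (64, 64))  # in-memory PIL image: no .filename attribute is set
f = 'image0'                     # assumed fallback name supplied by the caller

# getattr(im, 'filename') raises AttributeError on in-memory images;
# the two-argument form returns the default instead, and the trailing
# `or f` also covers opened images whose filename is an empty string.
name = getattr(im, 'filename', f) or f
print(name)  # -> 'image0'
```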
--- models/common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/common.py b/models/common.py index 6b8b4e4cb42f..f4b91da62250 100644 --- a/models/common.py +++ b/models/common.py @@ -255,7 +255,7 @@ def forward(self, imgs, size=640, augment=False, profile=False): im, f = Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im), im im = np.asarray(exif_transpose(im)) elif isinstance(im, Image.Image): # PIL Image - im, f = np.asarray(exif_transpose(im)), getattr(im, 'filename') or f + im, f = np.asarray(exif_transpose(im)), getattr(im, 'filename', f) or f files.append(Path(f).with_suffix('.jpg').name) if im.shape[0] < 5: # image in CHW im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1) From 850970e081687df6427898948a27df37ab4de5d3 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 7 Jul 2021 16:23:31 +0200 Subject: [PATCH 0391/1976] `feature_visualization()` CUDA fix (#3925) --- utils/plots.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/plots.py b/utils/plots.py index 1ab3bb6f21fe..23a48620e6b5 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -461,7 +461,7 @@ def feature_visualization(x, module_type, stage, n=64, save_dir=Path('runs/detec f = f"stage{stage}_{module_type.split('.')[-1]}_features.png" # filename plt.figure(tight_layout=True) - blocks = torch.chunk(x[0], channels, dim=0) # select batch index 0, block by channels + blocks = torch.chunk(x[0].cpu(), channels, dim=0) # select batch index 0, block by channels n = min(n, channels) # number of plots ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True)[1].ravel() # 8 rows x n/8 cols for i in range(n): From 8c6f9e15bfc0000d18b976a95b9d7c17d407ec91 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 8 Jul 2021 11:42:30 +0200 Subject: [PATCH 0392/1976] Update `dataset_stats()` for zipped datasets (#3926) * Update `dataset_stats()` for zipped datasets @KalenMike * cleanup --- utils/datasets.py | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index 0bcfdcc1cda6..a527230b868a 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -888,9 +888,11 @@ def verify_image_label(args): def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False): """ Return dataset statistics dictionary with images and instances counts per split per class - Usage: from utils.datasets import *; dataset_stats('coco128.yaml', verbose=True) + Usage1: from utils.datasets import *; dataset_stats('coco128.yaml', verbose=True) + Usage2: from utils.datasets import *; dataset_stats('../datasets/coco128.zip', verbose=True) + Arguments - path: Path to data.yaml + path: Path to data.yaml or data.zip (with data.yaml inside data.zip) autodownload: Attempt to download dataset if not found locally verbose: Print stats dictionary """ @@ -899,8 +901,20 @@ def round_labels(labels): # Update labels to integer class and 6 decimal place floats return [[int(c), *[round(x, 6) for x in points]] for c, *points in labels] - with open(check_file(path)) as f: + def unzip(path): + # Unzip data.zip TODO: CONSTRAINT: path/to/abc.zip MUST unzip to 'path/to/abc/' + if str(path).endswith('.zip'): # path is data.zip + assert os.system(f'unzip -q {path} -d {path.parent}') == 0, f'Error unzipping {path}' + data_dir = path.with_suffix('') # dataset directory + return True, data_dir, list(data_dir.rglob('*.yaml'))[0] # zipped, data_dir, yaml_path + else: # path is data.yaml + return False, None, path + + 
zipped, data_dir, yaml_path = unzip(Path(path)) + with open(check_file(yaml_path)) as f: data = yaml.safe_load(f) # data dict + if zipped: + data['path'] = data_dir # TODO: should this be dir.resolve()? check_dataset(data, autodownload) # download dataset if missing nc = data['nc'] # number of classes stats = {'nc': nc, 'names': data['names']} # statistics dictionary From e7888af94c0ee232f6d47e768c090b05e3baebb8 Mon Sep 17 00:00:00 2001 From: Eldar Kurtic Date: Thu, 8 Jul 2021 15:29:02 +0200 Subject: [PATCH 0393/1976] Fix inconsistent NMS IoU value for COCO (#3934) Evaluation of 'best' and 'last' models will use the same params as the evaluation during the training phase. This PR fixes https://github.com/ultralytics/yolov5/issues/3907 --- train.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/train.py b/train.py index 5a434773eff7..e58d7c4f0348 100644 --- a/train.py +++ b/train.py @@ -457,8 +457,6 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary results, _, _ = test.run(data_dict, batch_size=batch_size // WORLD_SIZE * 2, imgsz=imgsz_test, - conf_thres=0.001, - iou_thres=0.7, model=attempt_load(m, device).half(), single_cls=single_cls, dataloader=testloader, From dabad5793a638cba1e5a2bbb878c9b87fe1a14a0 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 8 Jul 2021 15:45:53 +0200 Subject: [PATCH 0394/1976] Created using Colaboratory --- tutorial.ipynb | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index a87f787cca8e..2641743b8c36 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -1058,14 +1058,14 @@ "id": "OYG4WFEnTVrI" }, "source": [ - "> \n", + "> \n", "`train_batch0.jpg` shows train batch 0 mosaics and labels\n", "\n", - "> \n", + "> \n", "`test_batch0_labels.jpg` shows test batch 0 labels\n", "\n", - "> \n", - "`test_batch0_pred.jpg` shows test batch 0 _predictions_\n" + "> \n", + "`test_batch0_pred.jpg` shows test batch 0 _predictions_" ] }, { From 248504cf13c2cba9e211e6110089a3e6f916109c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 9 Jul 2021 15:23:02 +0200 Subject: [PATCH 0395/1976] Feature visualization improvements 32 (#3947) --- detect.py | 2 +- utils/plots.py | 11 ++++++----- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/detect.py b/detect.py index 44b33eb42289..be2c5969c6d7 100644 --- a/detect.py +++ b/detect.py @@ -103,7 +103,7 @@ def run(weights='yolov5s.pt', # model.pt path(s) t1 = time_synchronized() pred = model(img, augment=augment, - visualize=increment_path(save_dir / 'features', mkdir=True) if visualize else False)[0] + visualize=increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False)[0] # Apply NMS pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det) diff --git a/utils/plots.py b/utils/plots.py index 23a48620e6b5..4e6b001dcc2f 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -16,7 +16,7 @@ import yaml from PIL import Image, ImageDraw, ImageFont -from utils.general import increment_path, xywh2xyxy, xyxy2xywh +from utils.general import xywh2xyxy, xyxy2xywh from utils.metrics import fitness # Settings @@ -447,7 +447,7 @@ def plot_results(start=0, stop=0, bucket='', id=(), labels=(), save_dir=''): fig.savefig(Path(save_dir) / 'results.png', dpi=200) -def feature_visualization(x, module_type, stage, n=64, save_dir=Path('runs/detect/exp')): +def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')): """ x: Features to be visualized module_type: Module type @@ -460,13 +460,14 
@@ def feature_visualization(x, module_type, stage, n=64, save_dir=Path('runs/detec if height > 1 and width > 1: f = f"stage{stage}_{module_type.split('.')[-1]}_features.png" # filename - plt.figure(tight_layout=True) blocks = torch.chunk(x[0].cpu(), channels, dim=0) # select batch index 0, block by channels n = min(n, channels) # number of plots - ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True)[1].ravel() # 8 rows x n/8 cols + fig, ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True) # 8 rows x n/8 cols + ax = ax.ravel() + plt.subplots_adjust(wspace=0.05, hspace=0.05) for i in range(n): ax[i].imshow(blocks[i].squeeze()) # cmap='gray' ax[i].axis('off') print(f'Saving {save_dir / f}... ({n}/{channels})') - plt.savefig(save_dir / f, dpi=300) + plt.savefig(save_dir / f, dpi=300, bbox_inches='tight') From a26e7de2bffaf5a87e7ed83aeabd0f0b2e8ad861 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 9 Jul 2021 16:45:04 +0200 Subject: [PATCH 0396/1976] Update augmentations.py (#3948) --- utils/augmentations.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/augmentations.py b/utils/augmentations.py index 74ee4de2131e..81652c191bc1 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -25,7 +25,7 @@ def __init__(self): A.ToGray(p=0.01)], bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels'])) - logging.info(colorstr('albumentations: ') + ', '.join(f'{x}' for x in self.transform.transforms)) + logging.info(colorstr('albumentations: ') + ', '.join(f'{x}' for x in self.transform.transforms if x.p)) except ImportError: # package not installed, skip pass except Exception as e: From 443af8b25ae5121e920623511e38465bacde75b8 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 10 Jul 2021 14:18:46 +0200 Subject: [PATCH 0397/1976] Cache v0.4 update (#3954) --- utils/datasets.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index a527230b868a..c0b51ee39711 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -397,12 +397,11 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r # Check cache self.label_files = img2label_paths(self.img_files) # labels - cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') # cached labels - if cache_path.is_file(): - cache, exists = torch.load(cache_path), True # load - if cache.get('version') != 0.3 or cache.get('hash') != get_hash(self.label_files + self.img_files): - cache, exists = self.cache_labels(cache_path, prefix), False # re-cache - else: + cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') + try: + cache, exists = np.load(cache_path, allow_pickle=True).item(), True # load dict + assert cache['version'] == 0.4 and cache['hash'] == get_hash(self.label_files + self.img_files) + except: cache, exists = self.cache_labels(cache_path, prefix), False # cache # Display cache @@ -496,9 +495,10 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): x['hash'] = get_hash(self.label_files + self.img_files) x['results'] = nf, nm, ne, nc, len(self.img_files) x['msgs'] = msgs # warnings - x['version'] = 0.3 # cache version + x['version'] = 0.4 # cache version try: - torch.save(x, path) # save cache for next time + np.save(path, x) # save cache for next time + path.with_suffix('.cache.npy').rename(path) # remove .npy suffix logging.info(f'{prefix}New cache created: {path}') except Exception as e: 
logging.info(f'{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}') # path not writeable From 80299a57e26accf196558da01c071e13caec14ae Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 10 Jul 2021 19:50:53 +0200 Subject: [PATCH 0398/1976] Numerical stability fix for Albumentations (#3958) --- utils/datasets.py | 2 +- utils/general.py | 24 +++++++++++------------- 2 files changed, 12 insertions(+), 14 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index c0b51ee39711..d95677a133e1 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -550,7 +550,7 @@ def __getitem__(self, index): nl = len(labels) # number of labels if nl: - labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0]) # xyxy to xywh normalized + labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1E-3) if self.augment: # Albumentations diff --git a/utils/general.py b/utils/general.py index b4c8994d233a..23a827d03d80 100755 --- a/utils/general.py +++ b/utils/general.py @@ -396,10 +396,10 @@ def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0): return y -def xyxy2xywhn(x, w=640, h=640, clip=False): +def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0): # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right if clip: - clip_coords(x, (h, w)) # warning: inplace clip + clip_coords(x, (h - eps, w - eps)) # warning: inplace clip y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) y[:, 0] = ((x[:, 0] + x[:, 2]) / 2) / w # x center y[:, 1] = ((x[:, 1] + x[:, 3]) / 2) / h # y center @@ -458,18 +458,16 @@ def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None): return coords -def clip_coords(boxes, img_shape): +def clip_coords(boxes, shape): # Clip bounding xyxy bounding boxes to image shape (height, width) - if isinstance(boxes, torch.Tensor): - boxes[:, 0].clamp_(0, img_shape[1]) # x1 - boxes[:, 1].clamp_(0, img_shape[0]) # y1 - boxes[:, 2].clamp_(0, img_shape[1]) # x2 - boxes[:, 3].clamp_(0, img_shape[0]) # y2 - else: # np.array - boxes[:, 0].clip(0, img_shape[1], out=boxes[:, 0]) # x1 - boxes[:, 1].clip(0, img_shape[0], out=boxes[:, 1]) # y1 - boxes[:, 2].clip(0, img_shape[1], out=boxes[:, 2]) # x2 - boxes[:, 3].clip(0, img_shape[0], out=boxes[:, 3]) # y2 + if isinstance(boxes, torch.Tensor): # faster individually + boxes[:, 0].clamp_(0, shape[1]) # x1 + boxes[:, 1].clamp_(0, shape[0]) # y1 + boxes[:, 2].clamp_(0, shape[1]) # x2 + boxes[:, 3].clamp_(0, shape[0]) # y2 + else: # np.array (faster grouped) + boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(0, shape[1]) # x1, x2 + boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, shape[0]) # y1, y2 def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False, From 8298ce5e885a129891db598a43a490ed1a78cb92 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 11 Jul 2021 15:49:30 +0200 Subject: [PATCH 0399/1976] Update `albumentations>=1.0.2` (#3966) --- utils/augmentations.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/augmentations.py b/utils/augmentations.py index 81652c191bc1..5eaeabdb665d 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -17,7 +17,7 @@ def __init__(self): self.transform = None try: import albumentations as A - check_version(A.__version__, '1.0.0') # version requirement + check_version(A.__version__, '1.0.2') # version requirement self.transform = A.Compose([ A.Blur(p=0.1), From 90e60b403d0e349cecdbe98a9763e32dd733da2b Mon 
Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 11 Jul 2021 16:56:36 +0200 Subject: [PATCH 0400/1976] Update `np.random.random()` to `random.random()` (#3967) --- utils/autoanchor.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/utils/autoanchor.py b/utils/autoanchor.py index 87dc394c832e..6abdd2d38832 100644 --- a/utils/autoanchor.py +++ b/utils/autoanchor.py @@ -1,5 +1,7 @@ # Auto-anchor utils +import random + import numpy as np import torch import yaml @@ -149,7 +151,7 @@ def print_results(k): for _ in pbar: v = np.ones(sh) while (v == 1).all(): # mutate until a change occurs (prevent duplicates) - v = ((npr.random(sh) < mp) * npr.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0) + v = ((npr.random(sh) < mp) * random.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0) kg = (k.copy() * v).clip(min=2.0) fg = anchor_fitness(kg) if fg > f: From a544d59f52d167c7ef2d86d514a5737bd52dc818 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 11 Jul 2021 19:07:42 +0200 Subject: [PATCH 0401/1976] Update requirements.txt `albumentations>=1.0.2` (#3972) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index ef1736a12d5f..886d21ce8047 100755 --- a/requirements.txt +++ b/requirements.txt @@ -27,5 +27,5 @@ pandas # extras -------------------------------------- # Cython # for pycocotools https://github.com/cocodataset/cocoapi/issues/172 # pycocotools>=2.0 # COCO mAP -# albumentations>=1.0.0 +# albumentations>=1.0.2 thop # FLOPs computation From 647223a7a8e5d8fca69481d477fe9a2930a8004c Mon Sep 17 00:00:00 2001 From: KEN <33506506+seven320@users.noreply.github.com> Date: Mon, 12 Jul 2021 02:47:08 +0900 Subject: [PATCH 0402/1976] `Ensemble()` visualize fix (#3973) * fix visualize error * Revert "fix visualize error" * add visualise profile --- models/experimental.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/models/experimental.py b/models/experimental.py index d316b18373c3..30dc36192bc0 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -100,10 +100,10 @@ class Ensemble(nn.ModuleList): def __init__(self): super(Ensemble, self).__init__() - def forward(self, x, augment=False): + def forward(self, x, augment=False, profile=False, visualize=False): y = [] for module in self: - y.append(module(x, augment)[0]) + y.append(module(x, augment, profile, visualize)[0]) # y = torch.stack(y).max(0)[0] # max ensemble # y = torch.stack(y).mean(0) # mean ensemble y = torch.cat(y, 1) # nms ensemble From 41fdf9fa53bdc178f3df76764df5b655c94b6f7b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 12 Jul 2021 12:43:26 +0200 Subject: [PATCH 0403/1976] Created using Colaboratory --- tutorial.ipynb | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 2641743b8c36..15d003c19606 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -527,7 +527,7 @@ }, "source": [ "\n", - "\n", + "\n", "\n", "This is the **official YOLOv5 🚀 notebook** authored by **Ultralytics**, and is freely available for redistribution under the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/). \n", "For more information please visit https://github.com/ultralytics/yolov5 and https://ultralytics.com. Thank you!" 
@@ -1025,7 +1025,7 @@ "\n", "During training you will see live updates at [https://wandb.ai/home](https://wandb.ai/home?utm_campaign=repo_yolo_notebook), and you can create and share detailed [Reports](https://wandb.ai/glenn-jocher/yolov5_tutorial/reports/YOLOv5-COCO128-Tutorial-Results--VmlldzozMDI5OTY) of your results. For more information see the [YOLOv5 Weights & Biases Tutorial](https://github.com/ultralytics/yolov5/issues/1289). \n", "\n", - "" + "" ] }, { @@ -1096,7 +1096,7 @@ "id": "lfrEegCSW3fK" }, "source": [ - "
[notebook markdown cell: old/new "COCO128" <img> links; HTML markup lost in extraction]
" ] }, { From b3dabdcc380b45bbc802a3808457d8d0091e9148 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 12 Jul 2021 15:54:43 +0200 Subject: [PATCH 0404/1976] Update `probability` to `p` (#3980) --- models/common.py | 4 +-- utils/augmentations.py | 65 +++++++++++++++++++++--------------------- utils/datasets.py | 7 ++--- 3 files changed, 37 insertions(+), 39 deletions(-) diff --git a/models/common.py b/models/common.py index f4b91da62250..418034ddeaac 100644 --- a/models/common.py +++ b/models/common.py @@ -215,7 +215,7 @@ def forward(self, x): class AutoShape(nn.Module): - # input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS + # YOLOv5 input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS conf = 0.25 # NMS confidence threshold iou = 0.45 # NMS IoU threshold classes = None # (optional list) filter by class @@ -287,7 +287,7 @@ def forward(self, imgs, size=640, augment=False, profile=False): class Detections: - # detections class for YOLOv5 inference results + # YOLOv5 detections class for inference results def __init__(self, imgs, pred, files, times=None, names=None, shape=None): super(Detections, self).__init__() d = pred[0].device # device diff --git a/utils/augmentations.py b/utils/augmentations.py index 5eaeabdb665d..c953fcbcc90b 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -50,12 +50,12 @@ def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5): lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) lut_val = np.clip(x * r[2], 0, 255).astype(dtype) - img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))) - cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=im) # no return needed + im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))) + cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR, dst=im) # no return needed def hist_equalize(im, clahe=True, bgr=False): - # Equalize histogram on BGR image 'img' with img.shape(n,m,3) and range 0-255 + # Equalize histogram on BGR image 'im' with im.shape(n,m,3) and range 0-255 yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV) if clahe: c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8)) @@ -76,7 +76,7 @@ def replicate(im, labels): bh, bw = y2b - y1b, x2b - x1b yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh] - im[y1a:y2a, x1a:x2a] = im[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] + im[y1a:y2a, x1a:x2a] = im[y1b:y2b, x1b:x2b] # im4[ymin:ymax, xmin:xmax] labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0) return im, labels @@ -162,8 +162,8 @@ def random_perspective(im, targets=(), segments=(), degrees=10, translate=.1, sc # Visualize # import matplotlib.pyplot as plt # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel() - # ax[0].imshow(img[:, :, ::-1]) # base - # ax[1].imshow(img2[:, :, ::-1]) # warped + # ax[0].imshow(im[:, :, ::-1]) # base + # ax[1].imshow(im2[:, :, ::-1]) # warped # Transform label coordinates n = len(targets) @@ -204,13 +204,13 @@ def random_perspective(im, targets=(), segments=(), degrees=10, translate=.1, sc return im, targets -def copy_paste(im, labels, segments, probability=0.5): +def copy_paste(im, labels, segments, p=0.5): # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy) n = len(segments) - if probability and n: + if p and n: h, w, c = im.shape # height, width, 
channels im_new = np.zeros(im.shape, np.uint8) - for j in random.sample(range(n), k=round(probability * n)): + for j in random.sample(range(n), k=round(p * n)): l, s = labels[j], segments[j] box = w - l[3], l[2], w - l[1], l[4] ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area @@ -223,35 +223,34 @@ def copy_paste(im, labels, segments, probability=0.5): result = cv2.flip(result, 1) # augment segments (flip left-right) i = result > 0 # pixels to replace # i[:, :] = result.max(2).reshape(h, w, 1) # act over ch - im[i] = result[i] # cv2.imwrite('debug.jpg', img) # debug + im[i] = result[i] # cv2.imwrite('debug.jpg', im) # debug return im, labels, segments -def cutout(im, labels): +def cutout(im, labels, p=0.5): # Applies image cutout augmentation https://arxiv.org/abs/1708.04552 - h, w = im.shape[:2] - - # create random masks - scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction - for s in scales: - mask_h = random.randint(1, int(h * s)) - mask_w = random.randint(1, int(w * s)) - - # box - xmin = max(0, random.randint(0, w) - mask_w // 2) - ymin = max(0, random.randint(0, h) - mask_h // 2) - xmax = min(w, xmin + mask_w) - ymax = min(h, ymin + mask_h) - - # apply random color mask - im[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)] - - # return unobscured labels - if len(labels) and s > 0.03: - box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32) - ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area - labels = labels[ioa < 0.60] # remove >60% obscured labels + if random.random() < p: + h, w = im.shape[:2] + scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction + for s in scales: + mask_h = random.randint(1, int(h * s)) # create random masks + mask_w = random.randint(1, int(w * s)) + + # box + xmin = max(0, random.randint(0, w) - mask_w // 2) + ymin = max(0, random.randint(0, h) - mask_h // 2) + xmax = min(w, xmin + mask_w) + ymax = min(h, ymin + mask_h) + + # apply random color mask + im[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)] + + # return unobscured labels + if len(labels) and s > 0.03: + box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32) + ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area + labels = labels[ioa < 0.60] # remove >60% obscured labels return labels diff --git a/utils/datasets.py b/utils/datasets.py index d95677a133e1..0763b56d31e3 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -22,7 +22,7 @@ from torch.utils.data import Dataset from tqdm import tqdm -from utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective +from utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective, cutout from utils.general import check_requirements, check_file, check_dataset, xywh2xyxy, xywhn2xyxy, xyxy2xywhn, \ xyn2xy, segments2boxes, clean_str from utils.torch_utils import torch_distributed_zero_first @@ -572,8 +572,7 @@ def __getitem__(self, index): labels[:, 1] = 1 - labels[:, 1] # Cutouts - # if random.random() < 0.9: - # labels = cutout(img, labels) + # labels = cutout(img, labels, p=0.5) labels_out = torch.zeros((nl, 6)) if nl: @@ -682,7 +681,7 @@ def load_mosaic(self, index): # img4, labels4 = replicate(img4, labels4) # replicate # Augment - img4, labels4, segments4 = copy_paste(img4, labels4, segments4, probability=self.hyp['copy_paste']) + img4, labels4, segments4 = copy_paste(img4, labels4, segments4, 
p=self.hyp['copy_paste']) img4, labels4 = random_perspective(img4, labels4, segments4, degrees=self.hyp['degrees'], translate=self.hyp['translate'], From d204a61834d0f6b2e73c1f43facf32fbadb6b284 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 12 Jul 2021 19:48:47 +0200 Subject: [PATCH 0405/1976] Alert (no detections) (#3984) * `Detections()` class `print()` overload * Update common.py --- models/common.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/models/common.py b/models/common.py index 418034ddeaac..05372ae149f5 100644 --- a/models/common.py +++ b/models/common.py @@ -307,7 +307,7 @@ def __init__(self, imgs, pred, files, times=None, names=None, shape=None): def display(self, pprint=False, show=False, save=False, crop=False, render=False, save_dir=Path('')): for i, (im, pred) in enumerate(zip(self.imgs, self.pred)): str = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} ' - if pred is not None: + if pred.shape[0]: for c in pred[:, -1].unique(): n = (pred[:, -1] == c).sum() # detections per class str += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string @@ -318,6 +318,8 @@ def display(self, pprint=False, show=False, save=False, crop=False, render=False save_one_box(box, im, file=save_dir / 'crops' / self.names[int(cls)] / self.files[i]) else: # all others plot_one_box(box, im, label=label, color=colors(cls)) + else: + str += '(no detections)' im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np if pprint: From 8ee9fd15059e807374f52527951399e61d57b1b0 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 13 Jul 2021 23:07:09 +0200 Subject: [PATCH 0406/1976] Update README.md (#3996) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 360afd2cd7e6..64086643373c 100755 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@
CI CPU testing -Open In Kaggle +YOLOv5 Citation
Open In Colab Open In Kaggle From 720aaa65c8873c0d87df09e3c1c14f3581d4ea61 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 14 Jul 2021 15:43:54 +0200 Subject: [PATCH 0407/1976] Rename `test.py` to `val.py` (#4000) --- .github/ISSUE_TEMPLATE/bug-report.md | 2 +- .github/workflows/ci-testing.yml | 6 +-- .github/workflows/greetings.yml | 2 +- README.md | 8 ++-- models/yolo.py | 1 - train.py | 68 ++++++++++++++-------------- tutorial.ipynb | 36 +++++++-------- utils/augmentations.py | 2 +- utils/general.py | 2 +- utils/plots.py | 8 ++-- test.py => val.py | 20 ++++---- 11 files changed, 77 insertions(+), 78 deletions(-) rename test.py => val.py (95%) diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md index 362059b288d5..b7fc7c5a8838 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.md +++ b/.github/ISSUE_TEMPLATE/bug-report.md @@ -12,7 +12,7 @@ Before submitting a bug report, please be aware that your issue **must be reprod - **Common dataset**: coco.yaml or coco128.yaml - **Common environment**: Colab, Google Cloud, or Docker image. See https://github.com/ultralytics/yolov5#environments -If this is a custom dataset/training question you **must include** your `train*.jpg`, `test*.jpg` and `results.png` figures, or we can not help you. You can generate these with `utils.plot_results()`. +If this is a custom dataset/training question you **must include** your `train*.jpg`, `val*.jpg` and `results.png` figures, or we can not help you. You can generate these with `utils.plot_results()`. ## 🐛 Bug diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 20c1d5b026b0..a7964ea01d5d 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -68,9 +68,9 @@ jobs: # detect python detect.py --weights ${{ matrix.model }}.pt --device $di python detect.py --weights runs/train/exp/weights/last.pt --device $di - # test - python test.py --img 128 --batch 16 --weights ${{ matrix.model }}.pt --device $di - python test.py --img 128 --batch 16 --weights runs/train/exp/weights/last.pt --device $di + # val + python val.py --img 128 --batch 16 --weights ${{ matrix.model }}.pt --device $di + python val.py --img 128 --batch 16 --weights runs/train/exp/weights/last.pt --device $di python hubconf.py # hub python models/yolo.py --cfg ${{ matrix.model }}.yaml # inspect diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index fdf1cfae8df5..787fbd71721b 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -52,5 +52,5 @@ jobs: ![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg) - If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([test.py](https://github.com/ultralytics/yolov5/blob/master/test.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on MacOS, Windows, and Ubuntu every 24 hours and on every commit. + If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. 
CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on MacOS, Windows, and Ubuntu every 24 hours and on every commit. diff --git a/README.md b/README.md index 64086643373c..035b7002774a 100755 --- a/README.md +++ b/README.md @@ -197,7 +197,7 @@ We are super excited about our first-ever Ultralytics YOLOv5 🚀 EXPORT Competi * GPU Speed measures end-to-end time per image averaged over 5000 COCO val2017 images using a V100 GPU with batch size 32, and includes image preprocessing, PyTorch FP16 inference, postprocessing and NMS. * EfficientDet data from [google/automl](https://github.com/google/automl) at batch size 8. - * **Reproduce** by `python test.py --task study --data coco.yaml --iou 0.7 --weights yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt` + * **Reproduce** by `python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt`
@@ -223,10 +223,10 @@ We are super excited about our first-ever Ultralytics YOLOv5 🚀 EXPORT Competi Table Notes (click to expand) * APtest denotes COCO [test-dev2017](http://cocodataset.org/#upload) server results, all other AP results denote val2017 accuracy. - * AP values are for single-model single-scale unless otherwise noted. **Reproduce mAP** by `python test.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` - * SpeedGPU averaged over 5000 COCO val2017 images using a GCP [n1-standard-16](https://cloud.google.com/compute/docs/machine-types#n1_standard_machine_types) V100 instance, and includes FP16 inference, postprocessing and NMS. **Reproduce speed** by `python test.py --data coco.yaml --img 640 --conf 0.25 --iou 0.45` + * AP values are for single-model single-scale unless otherwise noted. **Reproduce mAP** by `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` + * SpeedGPU averaged over 5000 COCO val2017 images using a GCP [n1-standard-16](https://cloud.google.com/compute/docs/machine-types#n1_standard_machine_types) V100 instance, and includes FP16 inference, postprocessing and NMS. **Reproduce speed** by `python val.py --data coco.yaml --img 640 --conf 0.25 --iou 0.45` * All checkpoints are trained to 300 epochs with default settings and hyperparameters (no autoaugmentation). - * Test Time Augmentation ([TTA](https://github.com/ultralytics/yolov5/issues/303)) includes reflection and scale augmentation. **Reproduce TTA** by `python test.py --data coco.yaml --img 1536 --iou 0.7 --augment` + * Test Time Augmentation ([TTA](https://github.com/ultralytics/yolov5/issues/303)) includes reflection and scale augmentation. **Reproduce TTA** by `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`
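Beyond renaming the file, the train.py hunks below also rename the user-facing pieces (`--notest` becomes `--noval`, `imgsz_test` becomes `imgsz_val`, `testloader` becomes `valloader`), so external scripts built on the old names need the same update. A minimal argparse sketch of the renamed flag and the image-size handling, abbreviated from those hunks (the parsed argv is illustrative only):

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--noval', action='store_true', help='only validate final epoch')  # was --notest
parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='[train, val] image sizes')
opt = parser.parse_args(['--noval', '--img-size', '640'])  # illustrative command line

opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size)))  # extend to 2 sizes (train, val)
print(opt.noval, opt.img_size)  # -> True [640, 640]
```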
diff --git a/models/yolo.py b/models/yolo.py index b11443377080..7b49dfcf48a3 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -310,4 +310,3 @@ def parse_model(d, ch): # model_dict, input_channels(3) # tb_writer = SummaryWriter('.') # logger.info("Run 'tensorboard --logdir=models' to view tensorboard at http://localhost:6006/") # tb_writer.add_graph(torch.jit.trace(model, img, strict=False), []) # add model graph - # tb_writer.add_image('test', img[0], dataformats='CWH') # add model to tensorboard diff --git a/train.py b/train.py index e58d7c4f0348..205c73d85e20 100644 --- a/train.py +++ b/train.py @@ -32,7 +32,7 @@ FILE = Path(__file__).absolute() sys.path.append(FILE.parents[0].as_posix()) # add yolov5/ to path -import test # for end-of-epoch mAP +import val # for end-of-epoch mAP from models.experimental import attempt_load from models.yolo import Model from utils.autoanchor import check_anchors @@ -57,9 +57,9 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary opt, device, ): - save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, notest, nosave, workers, = \ + save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, = \ opt.save_dir, opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \ - opt.resume, opt.notest, opt.nosave, opt.workers + opt.resume, opt.noval, opt.nosave, opt.workers # Directories save_dir = Path(save_dir) @@ -129,7 +129,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary with torch_distributed_zero_first(RANK): check_dataset(data_dict) # check train_path = data_dict['train'] - test_path = data_dict['val'] + val_path = data_dict['val'] # Freeze freeze = [] # parameter names to freeze (full or partial) @@ -207,7 +207,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Image sizes gs = max(int(model.stride.max()), 32) # grid size (max stride) nl = model.model[-1].nl # number of detection layers (used for scaling hyp['obj']) - imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size] # verify imgsz are gs-multiples + imgsz, imgsz_val = [check_img_size(x, gs) for x in opt.img_size] # verify imgsz are gs-multiples # DP mode if cuda and RANK == -1 and torch.cuda.device_count() > 1: @@ -231,8 +231,8 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Process 0 if RANK in [-1, 0]: - testloader = create_dataloader(test_path, imgsz_test, batch_size // WORLD_SIZE * 2, gs, single_cls, - hyp=hyp, cache=opt.cache_images and not notest, rect=True, rank=-1, + valloader = create_dataloader(val_path, imgsz_val, batch_size // WORLD_SIZE * 2, gs, single_cls, + hyp=hyp, cache=opt.cache_images and not noval, rect=True, rank=-1, workers=workers, pad=0.5, prefix=colorstr('val: '))[0] @@ -276,7 +276,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary scheduler.last_epoch = start_epoch - 1 # do not move scaler = amp.GradScaler(enabled=cuda) compute_loss = ComputeLoss(model) # init loss class - logger.info(f'Image sizes {imgsz} train, {imgsz_test} test\n' + logger.info(f'Image sizes {imgsz} train, {imgsz_val} val\n' f'Using {dataloader.num_workers} dataloader workers\n' f'Logging results to {save_dir}\n' f'Starting training for {epochs} epochs...') @@ -384,20 +384,20 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # mAP ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride', 'class_weights']) final_epoch = epoch + 1 == epochs - if not notest or final_epoch: # Calculate mAP + if not noval or final_epoch: # Calculate mAP 
             wandb_logger.current_epoch = epoch + 1
-            results, maps, _ = test.run(data_dict,
-                                        batch_size=batch_size // WORLD_SIZE * 2,
-                                        imgsz=imgsz_test,
-                                        model=ema.ema,
-                                        single_cls=single_cls,
-                                        dataloader=testloader,
-                                        save_dir=save_dir,
-                                        save_json=is_coco and final_epoch,
-                                        verbose=nc < 50 and final_epoch,
-                                        plots=plots and final_epoch,
-                                        wandb_logger=wandb_logger,
-                                        compute_loss=compute_loss)
+            results, maps, _ = val.run(data_dict,
+                                       batch_size=batch_size // WORLD_SIZE * 2,
+                                       imgsz=imgsz_val,
+                                       model=ema.ema,
+                                       single_cls=single_cls,
+                                       dataloader=valloader,
+                                       save_dir=save_dir,
+                                       save_json=is_coco and final_epoch,
+                                       verbose=nc < 50 and final_epoch,
+                                       plots=plots and final_epoch,
+                                       wandb_logger=wandb_logger,
+                                       compute_loss=compute_loss)

             # Write
             with open(results_file, 'a') as f:
@@ -454,15 +454,15 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
         if not evolve:
             if is_coco:  # COCO dataset
                 for m in [last, best] if best.exists() else [last]:  # speed, mAP tests
-                    results, _, _ = test.run(data_dict,
-                                             batch_size=batch_size // WORLD_SIZE * 2,
-                                             imgsz=imgsz_test,
-                                             model=attempt_load(m, device).half(),
-                                             single_cls=single_cls,
-                                             dataloader=testloader,
-                                             save_dir=save_dir,
-                                             save_json=True,
-                                             plots=False)
+                    results, _, _ = val.run(data_dict,
+                                            batch_size=batch_size // WORLD_SIZE * 2,
+                                            imgsz=imgsz_val,
+                                            model=attempt_load(m, device).half(),
+                                            single_cls=single_cls,
+                                            dataloader=valloader,
+                                            save_dir=save_dir,
+                                            save_json=True,
+                                            plots=False)

         # Strip optimizers
         for f in last, best:
@@ -486,11 +486,11 @@ def parse_opt(known=False):
     parser.add_argument('--hyp', type=str, default='data/hyps/hyp.scratch.yaml', help='hyperparameters path')
     parser.add_argument('--epochs', type=int, default=300)
     parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs')
-    parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='[train, test] image sizes')
+    parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='[train, val] image sizes')
     parser.add_argument('--rect', action='store_true', help='rectangular training')
     parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training')
     parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
-    parser.add_argument('--notest', action='store_true', help='only test final epoch')
+    parser.add_argument('--noval', action='store_true', help='only validate final epoch')
     parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check')
     parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations')
     parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
@@ -538,7 +538,7 @@ def main(opt):
         # opt.hyp = opt.hyp or ('hyp.finetune.yaml' if opt.weights else 'hyp.scratch.yaml')
         opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp)  # check files
         assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
-        opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size)))  # extend to 2 sizes (train, test)
+        opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size)))  # extend to 2 sizes (train, val)
         opt.name = 'evolve' if opt.evolve else opt.name
         opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve))

@@ -597,7 +597,7 @@ def main(opt):
         if 'anchors' not in hyp:  # anchors commented in hyp.yaml
             hyp['anchors'] = 3
         assert LOCAL_RANK == -1, 'DDP mode not implemented for --evolve'
-        opt.notest, opt.nosave = True, True  # only test/save final epoch
+        opt.noval, opt.nosave = True, True  # only val/save final epoch
         # ei = [isinstance(x, (int, float)) for x in hyp.values()]  # evolvable indices
         yaml_file = Path(opt.save_dir) / 'hyp_evolved.yaml'  # save best result here
         if opt.bucket:

diff --git a/tutorial.ipynb b/tutorial.ipynb
index 15d003c19606..957c0e140f88 100644
--- a/tutorial.ipynb
+++ b/tutorial.ipynb
@@ -643,8 +643,8 @@
         "id": "0eq1SMWl6Sfn"
       },
       "source": [
-        "# 2. Test\n",
-        "Test a model's accuracy on [COCO](https://cocodataset.org/#home) val or test-dev datasets. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag. Note that `pycocotools` metrics may be ~1% better than the equivalent repo metrics, as is visible below, due to slight differences in mAP computation."
+        "# 2. Validate\n",
+        "Validate a model's accuracy on [COCO](https://cocodataset.org/#home) val or test-dev datasets. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag. Note that `pycocotools` metrics may be ~1% better than the equivalent repo metrics, as is visible below, due to slight differences in mAP computation."
       ]
     },
     {
@@ -720,14 +720,14 @@
       },
       "source": [
         "# Run YOLOv5x on COCO val2017\n",
-        "!python test.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65 --half"
+        "!python val.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65 --half"
       ],
       "execution_count": null,
       "outputs": [
         {
           "output_type": "stream",
           "text": [
-            "Namespace(augment=False, batch_size=32, conf_thres=0.001, data='./data/coco.yaml', device='', exist_ok=False, half=True, img_size=640, iou_thres=0.65, name='exp', project='runs/test', save_conf=False, save_hybrid=False, save_json=True, save_txt=False, single_cls=False, task='val', verbose=False, weights=['yolov5x.pt'])\n",
+            "Namespace(augment=False, batch_size=32, conf_thres=0.001, data='./data/coco.yaml', device='', exist_ok=False, half=True, img_size=640, iou_thres=0.65, name='exp', project='runs/val', save_conf=False, save_hybrid=False, save_json=True, save_txt=False, single_cls=False, task='val', verbose=False, weights=['yolov5x.pt'])\n",
             "YOLOv5 🚀 v5.0-157-gc6b51f4 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n",
             "\n",
             "Downloading https://github.com/ultralytics/yolov5/releases/download/v5.0/yolov5x.pt to yolov5x.pt...\n",
@@ -741,7 +741,7 @@
            "                 all        5000       36335       0.746       0.626        0.68        0.49\n",
            "Speed: 5.3/1.5/6.8 ms inference/NMS/total per 640x640 image at batch-size 32\n",
            "\n",
-            "Evaluating pycocotools mAP... saving runs/test/exp/yolov5x_predictions.json...\n",
+            "Evaluating pycocotools mAP... saving runs/val/exp/yolov5x_predictions.json...\n",
            "loading annotations into memory...\n",
            "Done (t=0.44s)\n",
            "creating index...\n",
@@ -767,7 +767,7 @@
            " Average Recall     (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.524\n",
            " Average Recall     (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.735\n",
            " Average Recall     (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.827\n",
-            "Results saved to runs/test/exp\n"
+            "Results saved to runs/val/exp\n"
           ],
           "name": "stdout"
         }
@@ -805,7 +805,7 @@
       },
       "source": [
         "# Run YOLOv5s on COCO test-dev2017 using --task test\n",
-        "!python test.py --weights yolov5s.pt --data coco.yaml --task test"
+        "!python val.py --weights yolov5s.pt --data coco.yaml --task test"
       ],
       "execution_count": null,
       "outputs": []
@@ -976,7 +976,7 @@
        "Plotting labels... \n",
        "\n",
        "\u001b[34m\u001b[1mautoanchor: \u001b[0mAnalyzing anchors... anchors/target = 4.26, Best Possible Recall (BPR) = 0.9946\n",
-        "Image sizes 640 train, 640 test\n",
+        "Image sizes 640 train, 640 val\n",
        "Using 2 dataloader workers\n",
        "Logging results to runs/train/exp\n",
        "Starting training for 3 epochs...\n",
@@ -1036,7 +1036,7 @@
       "source": [
        "## Local Logging\n",
        "\n",
-        "All results are logged by default to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc. View train and test jpgs to see mosaics, labels, predictions and augmentation effects. Note a **Mosaic Dataloader** is used for training (shown below), a new concept developed by Ultralytics and first featured in [YOLOv4](https://arxiv.org/abs/2004.10934)."
+        "All results are logged by default to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc. View train and val jpgs to see mosaics, labels, predictions and augmentation effects. Note a **Mosaic Dataloader** is used for training (shown below), a new concept developed by Ultralytics and first featured in [YOLOv4](https://arxiv.org/abs/2004.10934)."
       ]
     },
     {
@@ -1046,8 +1046,8 @@
       },
       "source": [
        "Image(filename='runs/train/exp/train_batch0.jpg', width=800)  # train batch 0 mosaics and labels\n",
-        "Image(filename='runs/train/exp/test_batch0_labels.jpg', width=800)  # test batch 0 labels\n",
-        "Image(filename='runs/train/exp/test_batch0_pred.jpg', width=800)  # test batch 0 predictions"
+        "Image(filename='runs/train/exp/test_batch0_labels.jpg', width=800)  # val batch 0 labels\n",
+        "Image(filename='runs/train/exp/test_batch0_pred.jpg', width=800)  # val batch 0 predictions"
       ],
       "execution_count": null,
       "outputs": []
@@ -1062,10 +1062,10 @@
        "`train_batch0.jpg` shows train batch 0 mosaics and labels\n",
        "\n",
        "> \n",
-        "`test_batch0_labels.jpg` shows test batch 0 labels\n",
+        "`test_batch0_labels.jpg` shows val batch 0 labels\n",
        "\n",
        "> \n",
-        "`test_batch0_pred.jpg` shows test batch 0 _predictions_"
+        "`test_batch0_pred.jpg` shows val batch 0 _predictions_"
       ]
     },
     {
@@ -1125,7 +1125,7 @@
        "\n",
        "![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg)\n",
        "\n",
-        "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([test.py](https://github.com/ultralytics/yolov5/blob/master/test.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on MacOS, Windows, and Ubuntu every 24 hours and on every commit.\n"
+        "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on MacOS, Windows, and Ubuntu every 24 hours and on every commit.\n"
       ]
     },
     {
@@ -1147,8 +1147,8 @@
       "source": [
        "# Reproduce\n",
        "for x in 'yolov5s', 'yolov5m', 'yolov5l', 'yolov5x':\n",
-        "  !python test.py --weights {x}.pt --data coco.yaml --img 640 --conf 0.25 --iou 0.45  # speed\n",
-        "  !python test.py --weights {x}.pt --data coco.yaml --img 640 --conf 0.001 --iou 0.65  # mAP"
+        "  !python val.py --weights {x}.pt --data coco.yaml --img 640 --conf 0.25 --iou 0.45  # speed\n",
+        "  !python val.py --weights {x}.pt --data coco.yaml --img 640 --conf 0.001 --iou 0.65  # mAP"
       ],
       "execution_count": null,
       "outputs": []
     },
@@ -1193,8 +1193,8 @@
        "  for d in 0 cpu; do  # devices\n",
        "    python detect.py --weights $m.pt --device $d  # detect official\n",
        "    python detect.py --weights runs/train/exp/weights/best.pt --device $d  # detect custom\n",
-        "    python test.py --weights $m.pt --device $d # test official\n",
-        "    python test.py --weights runs/train/exp/weights/best.pt --device $d # test custom\n",
+        "    python val.py --weights $m.pt --device $d # val official\n",
+        "    python val.py --weights runs/train/exp/weights/best.pt --device $d # val custom\n",
        "  done\n",
        "  python hubconf.py  # hub\n",
        "  python models/yolo.py --cfg $m.yaml  # inspect\n",

diff --git a/utils/augmentations.py b/utils/augmentations.py
index c953fcbcc90b..69b835db0db9 100644
--- a/utils/augmentations.py
+++ b/utils/augmentations.py
@@ -90,7 +90,7 @@ def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleF

     # Scale ratio (new / old)
     r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
-    if not scaleup:  # only scale down, do not scale up (for better test mAP)
+    if not scaleup:  # only scale down, do not scale up (for better val mAP)
         r = min(r, 1.0)

     # Compute padding
diff --git a/utils/general.py b/utils/general.py
index 23a827d03d80..846c1464c28c 100755
--- a/utils/general.py
+++ b/utils/general.py
@@ -633,7 +633,7 @@ def apply_classifier(x, model, img, im0):
             for j, a in enumerate(d):  # per item
                 cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]
                 im = cv2.resize(cutout, (224, 224))  # BGR
-                # cv2.imwrite('test%i.jpg' % j, cutout)
+                # cv2.imwrite('example%i.jpg' % j, cutout)

                 im = im[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
                 im = np.ascontiguousarray(im, dtype=np.float32)  # uint8 to float32
diff --git a/utils/plots.py b/utils/plots.py
index 4e6b001dcc2f..cd9a45e8c761 100644
--- a/utils/plots.py
+++ b/utils/plots.py
@@ -219,9 +219,9 @@ def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''):
     plt.close()


-def plot_test_txt():  # from utils.plots import *; plot_test()
-    # Plot test.txt histograms
-    x = np.loadtxt('test.txt', dtype=np.float32)
+def plot_val_txt():  # from utils.plots import *; plot_val()
+    # Plot val.txt histograms
+    x = np.loadtxt('val.txt', dtype=np.float32)
     box = xyxy2xywh(x[:, :4])
     cx, cy = box[:, 0], box[:, 1]

@@ -250,7 +250,7 @@ def plot_targets_txt():  # from utils.plots import *; plot_targets_txt()


 def plot_study_txt(path='', x=None):  # from utils.plots import *; plot_study_txt()
-    # Plot study.txt generated by test.py
+    # Plot study.txt generated by val.py
     plot2 = False  # plot additional results
     if plot2:
         ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)[1].ravel()
diff --git a/test.py b/val.py
similarity index 95%
rename from test.py
rename to val.py
index 643dc441e521..fa5cb8f113e0 100644
--- a/test.py
+++ b/val.py
@@ -1,7 +1,7 @@
-"""Test a trained YOLOv5 model accuracy on a custom dataset
+"""Validate a trained YOLOv5 model accuracy on a custom dataset

 Usage:
-    $ python path/to/test.py --data coco128.yaml --weights yolov5s.pt --img 640
+    $ python path/to/val.py --data coco128.yaml --weights yolov5s.pt --img 640
 """

 import argparse
@@ -44,7 +44,7 @@ def run(data,
         save_hybrid=False,  # save label+prediction hybrid results to *.txt
         save_conf=False,  # save confidences in --save-txt labels
         save_json=False,  # save a cocoapi-compatible JSON results file
-        project='runs/test',  # save to project/name
+        project='runs/val',  # save to project/name
         name='exp',  # save to project/name
         exist_ok=False,  # existing project/name ok, do not increment
         half=True,  # use FP16 half-precision inference
@@ -228,9 +228,9 @@ def run(data,

         # Plot images
         if plots and batch_i < 3:
-            f = save_dir / f'test_batch{batch_i}_labels.jpg'  # labels
+            f = save_dir / f'val_batch{batch_i}_labels.jpg'  # labels
             Thread(target=plot_images, args=(img, targets, paths, f, names), daemon=True).start()
-            f = save_dir / f'test_batch{batch_i}_pred.jpg'  # predictions
+            f = save_dir / f'val_batch{batch_i}_pred.jpg'  # predictions
             Thread(target=plot_images, args=(img, output_to_target(out), paths, f, names), daemon=True).start()

     # Compute statistics
@@ -262,7 +262,7 @@ def run(data,
     if plots:
         confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
         if wandb_logger and wandb_logger.wandb:
-            val_batches = [wandb_logger.wandb.Image(str(f), caption=f.name) for f in sorted(save_dir.glob('test*.jpg'))]
+            val_batches = [wandb_logger.wandb.Image(str(f), caption=f.name) for f in sorted(save_dir.glob('val*.jpg'))]
             wandb_logger.log({"Validation": val_batches})
     if wandb_images:
         wandb_logger.log({"Bounding Box Debugger/Images": wandb_images})
@@ -305,7 +305,7 @@ def run(data,


 def parse_opt():
-    parser = argparse.ArgumentParser(prog='test.py')
+    parser = argparse.ArgumentParser(prog='val.py')
     parser.add_argument('--data', type=str, default='data/coco128.yaml', help='dataset.yaml path')
     parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)')
     parser.add_argument('--batch-size', type=int, default=32, help='batch size')
@@ -321,7 +321,7 @@ def parse_opt():
     parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt')
     parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
     parser.add_argument('--save-json', action='store_true', help='save a cocoapi-compatible JSON results file')
-    parser.add_argument('--project', default='runs/test', help='save to project/name')
+    parser.add_argument('--project', default='runs/val', help='save to project/name')
     parser.add_argument('--name', default='exp', help='save to project/name')
     parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
     parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
@@ -334,7 +334,7 @@ def parse_opt():

 def main(opt):
     set_logging()
-    print(colorstr('test: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items()))
+    print(colorstr('val: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items()))
     check_requirements(exclude=('tensorboard', 'thop'))

     if opt.task in ('train', 'val', 'test'):  # run normally
@@ -346,7 +346,7 @@ def main(opt):
                 save_json=False, plots=False)

     elif opt.task == 'study':  # run over a range of settings and save/plot
-        # python test.py --task study --data coco.yaml --iou 0.7 --weights yolov5s.pt yolov5m.pt yolov5l.pt yolov5x.pt
+        # python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5s.pt yolov5m.pt yolov5l.pt yolov5x.pt
         x = list(range(256, 1536 + 128, 128))  # x axis (image sizes)
         for w in opt.weights if isinstance(opt.weights, list) else [opt.weights]:
             f = f'study_{Path(opt.data).stem}_{Path(w).stem}.txt'  # filename to save to

From 62409eea0807830669f21a84733e73052ee85c07 Mon Sep 17 00:00:00 2001
From: Ayush Chaurasia
Date: Wed, 14 Jul 2021 22:43:02 +0530
Subject: [PATCH 0408/1976] W&B sweeps support (#3938)

* Add support for W&B Sweeps

* Update and reformat

* Update search space

* reformat

* reformat sweep.py

* Update sweep.py

* Move sweeps files to wandb dir

* Remove print

Co-authored-by: Glenn Jocher
---
 utils/wandb_logging/sweep.py       |  33 +++++++
 utils/wandb_logging/sweep.yaml     | 143 +++++++++++++++++++++++++++++
 utils/wandb_logging/wandb_utils.py |   2 +-
 3 files changed, 177 insertions(+), 1 deletion(-)
 create mode 100644 utils/wandb_logging/sweep.py
 create mode 100644 utils/wandb_logging/sweep.yaml

diff --git a/utils/wandb_logging/sweep.py b/utils/wandb_logging/sweep.py
new file mode 100644
index 000000000000..6c8719b32006
--- /dev/null
+++ b/utils/wandb_logging/sweep.py
@@ -0,0 +1,33 @@
+import sys
+from pathlib import Path
+import wandb
+
+FILE = Path(__file__).absolute()
+sys.path.append(FILE.parents[2].as_posix())  # add utils/ to path
+
+from train import train, parse_opt
+import test
+from utils.general import increment_path
+from utils.torch_utils import select_device
+
+
+def sweep():
+    wandb.init()
+    # Get hyp dict from sweep agent
+    hyp_dict = vars(wandb.config).get("_items")
+
+    # Workaround: get necessary opt args
+    opt = parse_opt(known=True)
+    opt.batch_size = hyp_dict.get("batch_size")
+    opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve))
+    opt.epochs = hyp_dict.get("epochs")
+    opt.nosave = True
+    opt.data = hyp_dict.get("data")
+    device = select_device(opt.device, batch_size=opt.batch_size)
+
+    # train
+    train(hyp_dict, opt, device)
+
+
+if __name__ == "__main__":
+    sweep()
diff --git a/utils/wandb_logging/sweep.yaml b/utils/wandb_logging/sweep.yaml
new file mode 100644
index 000000000000..64e395533c1c
--- /dev/null
+++ b/utils/wandb_logging/sweep.yaml
@@ -0,0 +1,143 @@
+# Hyperparameters for training
+# To set a range, provide min and max values as:
+#   parameter:
+#
+#     min: scalar
+#     max: scalar
+# OR
+#
+# set a specific list of search-space values as:
+#   parameter:
+#     values: [scalar1, scalar2, scalar3...]
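+#
+# Example launch workflow (a sketch assuming the standard W&B CLI; the sweep ID is printed by the first command):
+#   $ wandb sweep utils/wandb_logging/sweep.yaml   # register this sweep and print its SWEEP_ID
+#   $ wandb agent SWEEP_ID                         # agent samples hyperparameters and runs sweep.py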
+#
+# You can use the grid, random and bayesian search strategies
+# For more info on configuring sweeps visit - https://docs.wandb.ai/guides/sweeps/configuration
+
+program: utils/wandb_logging/sweep.py
+method: random
+metric:
+  name: metrics/mAP_0.5
+  goal: maximize
+
+parameters:
+  # hyperparameters: set either min, max range or values list
+  data:
+    value: "data/coco128.yaml"
+  batch_size:
+    values: [ 64 ]
+  epochs:
+    values: [ 10 ]
+
+  lr0:
+    distribution: uniform
+    min: 1e-5
+    max: 1e-1
+  lrf:
+    distribution: uniform
+    min: 0.01
+    max: 1.0
+  momentum:
+    distribution: uniform
+    min: 0.6
+    max: 0.98
+  weight_decay:
+    distribution: uniform
+    min: 0.0
+    max: 0.001
+  warmup_epochs:
+    distribution: uniform
+    min: 0.0
+    max: 5.0
+  warmup_momentum:
+    distribution: uniform
+    min: 0.0
+    max: 0.95
+  warmup_bias_lr:
+    distribution: uniform
+    min: 0.0
+    max: 0.2
+  box:
+    distribution: uniform
+    min: 0.02
+    max: 0.2
+  cls:
+    distribution: uniform
+    min: 0.2
+    max: 4.0
+  cls_pw:
+    distribution: uniform
+    min: 0.5
+    max: 2.0
+  obj:
+    distribution: uniform
+    min: 0.2
+    max: 4.0
+  obj_pw:
+    distribution: uniform
+    min: 0.5
+    max: 2.0
+  iou_t:
+    distribution: uniform
+    min: 0.1
+    max: 0.7
+  anchor_t:
+    distribution: uniform
+    min: 2.0
+    max: 8.0
+  fl_gamma:
+    distribution: uniform
+    min: 0.0
+    max: 0.1
+  hsv_h:
+    distribution: uniform
+    min: 0.0
+    max: 0.1
+  hsv_s:
+    distribution: uniform
+    min: 0.0
+    max: 0.9
+  hsv_v:
+    distribution: uniform
+    min: 0.0
+    max: 0.9
+  degrees:
+    distribution: uniform
+    min: 0.0
+    max: 45.0
+  translate:
+    distribution: uniform
+    min: 0.0
+    max: 0.9
+  scale:
+    distribution: uniform
+    min: 0.0
+    max: 0.9
+  shear:
+    distribution: uniform
+    min: 0.0
+    max: 10.0
+  perspective:
+    distribution: uniform
+    min: 0.0
+    max: 0.001
+  flipud:
+    distribution: uniform
+    min: 0.0
+    max: 1.0
+  fliplr:
+    distribution: uniform
+    min: 0.0
+    max: 1.0
+  mosaic:
+    distribution: uniform
+    min: 0.0
+    max: 1.0
+  mixup:
+    distribution: uniform
+    min: 0.0
+    max: 1.0
+  copy_paste:
+    distribution: uniform
+    min: 0.0
+    max: 1.0
diff --git a/utils/wandb_logging/wandb_utils.py b/utils/wandb_logging/wandb_utils.py
index f031a819b977..2adea9235f6c 100644
--- a/utils/wandb_logging/wandb_utils.py
+++ b/utils/wandb_logging/wandb_utils.py
@@ -153,7 +153,7 @@ def setup_training(self, opt, data_dict):
             self.weights = Path(modeldir) / "last.pt"
         config = self.wandb_run.config
         opt.weights, opt.save_period, opt.batch_size, opt.bbox_interval, opt.epochs, opt.hyp = str(
-            self.weights), config.save_period, config.total_batch_size, config.bbox_interval, config.epochs, \
+            self.weights), config.save_period, config.batch_size, config.bbox_interval, config.epochs, \
             config.opt['hyp']
         data_dict = dict(self.wandb_run.config.data_dict)  # eliminates the need for config file to resume
         if 'val_artifact' not in self.__dict__:  # If --upload_dataset is set, use the existing artifact, don't download

From b7e985e397e12ea2efd19bf3b6329028fb2a4c75 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Fri, 16 Jul 2021 14:38:59 +0200
Subject: [PATCH 0409/1976] Update greetings.yml (#4024)

* Update greetings.yml

* Update greetings.yml
---
 .github/workflows/greetings.yml | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml
index 787fbd71721b..a8990e7222d3 100644
--- a/.github/workflows/greetings.yml
+++ b/.github/workflows/greetings.yml
@@ -33,8 +33,10 @@ jobs:

           ## Requirements

-          Python 3.8 or later with all [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) dependencies installed, including `torch>=1.7`. To install run:
+          **Python>=3.6.0** with all [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) installed including **PyTorch>=1.7**. To get started:
           ```bash
+          $ git clone https://github.com/ultralytics/yolov5
+          $ cd yolov5
           $ pip install -r requirements.txt
           ```

@@ -52,5 +54,5 @@ jobs:

           ![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg)

-          If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on MacOS, Windows, and Ubuntu every 24 hours and on every commit.
+          If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), validation ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on MacOS, Windows, and Ubuntu every 24 hours and on every commit.

From 951922c735c1e98b596fd9845de25a62fcdc7c73 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Sat, 17 Jul 2021 13:07:19 +0200
Subject: [PATCH 0410/1976] Add `--sync-bn` known issue (#4032)

* Add `--sync-bn` known issue

* Update train.py
---
 train.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/train.py b/train.py
index 205c73d85e20..15c2c356f60e 100644
--- a/train.py
+++ b/train.py
@@ -217,6 +217,7 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary

     # SyncBatchNorm
     if opt.sync_bn and cuda and RANK != -1:
+        raise Exception('can not train with --sync-bn, known issue https://github.com/ultralytics/yolov5/issues/3998')
         model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
         logger.info('Using SyncBatchNorm()')

@@ -232,9 +233,9 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
     # Process 0
     if RANK in [-1, 0]:
         valloader = create_dataloader(val_path, imgsz_val, batch_size // WORLD_SIZE * 2, gs, single_cls,
-                                      hyp=hyp, cache=opt.cache_images and not noval, rect=True, rank=-1,
-                                      workers=workers,
-                                      pad=0.5, prefix=colorstr('val: '))[0]
+                                      hyp=hyp, cache=opt.cache_images and not noval, rect=True, rank=-1,
+                                      workers=workers,
+                                      pad=0.5, prefix=colorstr('val: '))[0]

         if not resume:
             labels = np.concatenate(dataset.labels, 0)

From 0067d9578ab5e4da238e56d5fbe181c389f03a9d Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Sat, 17 Jul 2021 15:47:05 +0200
Subject: [PATCH 0411/1976] Update greetings.yml (#4037)
---
 .github/workflows/greetings.yml | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml
index a8990e7222d3..ddd739ea5769 100644
--- a/.github/workflows/greetings.yml
+++ b/.github/workflows/greetings.yml
@@ -23,17 +23,17 @@ jobs:
           - ✅ Reduce changes to the absolute **minimum** required for your bug fix or feature addition. _"It is not daily increase but daily decrease, hack away the unessential. The closer to the source, the less wastage there is."_  -Bruce Lee

         issue-message: |
-          👋 Hello @${{ github.actor }}, thank you for your interest in 🚀 YOLOv5! Please visit our ⭐️ [Tutorials](https://github.com/ultralytics/yolov5/wiki#tutorials) to get started, where you can find quickstart guides for simple tasks like [Custom Data Training](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data) all the way to advanced concepts like [Hyperparameter Evolution](https://github.com/ultralytics/yolov5/issues/607).
+          👋 Hello @${{ github.actor }}, thank you for your interest in YOLOv5 🚀! Please visit our ⭐️ [Tutorials](https://github.com/ultralytics/yolov5/wiki#tutorials) to get started, where you can find quickstart guides for simple tasks like [Custom Data Training](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data) all the way to advanced concepts like [Hyperparameter Evolution](https://github.com/ultralytics/yolov5/issues/607).

           If this is a 🐛 Bug Report, please provide screenshots and **minimum viable code to reproduce your issue**, otherwise we can not help you.

           If this is a custom training ❓ Question, please provide as much information as possible, including dataset images, training logs, screenshots, and a public link to online [W&B logging](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data#visualize) if available.

-          For business inquiries or professional support requests please visit https://www.ultralytics.com or email Glenn Jocher at glenn.jocher@ultralytics.com.
+          For business inquiries or professional support requests please visit https://ultralytics.com or email Glenn Jocher at glenn.jocher@ultralytics.com.

           ## Requirements

-          **Python>=3.6.0** with all [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) installed including **PyTorch>=1.7**. To get started:
+          [**Python>=3.6.0**](https://www.python.org/) with all [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) installed including [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/). To get started:
           ```bash
           $ git clone https://github.com/ultralytics/yolov5
           $ cd yolov5

From dd62e2d05cdc0312732202c952e2513acdb8dc3e Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Sat, 17 Jul 2021 23:12:27 +0200
Subject: [PATCH 0412/1976] Update README.md (#4041)

* Update README.md

* Update README.md

* Update README.md
---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 035b7002774a..7dff1a0efd33 100755
--- a/README.md
+++ b/README.md
@@ -64,7 +64,7 @@ See the [YOLOv5 Docs](https://docs.ultralytics.com) for full documentation on tr
 <details open>
 <summary>Install</summary>

-Python >= 3.6.0 required with all [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) dependencies installed:
+[**Python>=3.6.0**](https://www.python.org/) is required with all [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) installed including [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/):

 ```bash
 $ git clone https://github.com/ultralytics/yolov5

From 9dd33fd20f0f1a07762df129d2c2da2b1e9d09d7 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Sun, 18 Jul 2021 15:25:37 +0200
Subject: [PATCH 0413/1976] AutoShape PosixPath support (#4047)

* AutoShape PosixPath support

Usage example:

```python
from pathlib import Path

model = ...
file = Path('data/images/zidane.jpg')
results = model(file)
```

* Update common.py
---
 models/common.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/models/common.py b/models/common.py
index 05372ae149f5..41fc128c07b9 100644
--- a/models/common.py
+++ b/models/common.py
@@ -1,7 +1,7 @@
 # YOLOv5 common modules

 from copy import copy
-from pathlib import Path
+from pathlib import Path, PosixPath

 import math
 import numpy as np
@@ -232,8 +232,8 @@ def autoshape(self):
     @torch.no_grad()
     def forward(self, imgs, size=640, augment=False, profile=False):
         # Inference from various sources. For height=640, width=1280, RGB images example inputs are:
-        #   filename:   imgs = 'data/images/zidane.jpg'
-        #   URI:             = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/zidane.jpg'
+        #   filename:   imgs = 'data/images/zidane.jpg'  # str or PosixPath
+        #   URI:             = 'https://ultralytics.com/images/zidane.jpg'
         #   OpenCV:          = cv2.imread('image.jpg')[:,:,::-1]  # HWC BGR to RGB x(640,1280,3)
         #   PIL:             = Image.open('image.jpg')  # HWC x(640,1280,3)
         #   numpy:           = np.zeros((640,1280,3))  # HWC
@@ -251,8 +251,8 @@ def forward(self, imgs, size=640, augment=False, profile=False):
         shape0, shape1, files = [], [], []  # image and inference shapes, filenames
         for i, im in enumerate(imgs):
             f = f'image{i}'  # filename
-            if isinstance(im, str):  # filename or uri
-                im, f = Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im), im
+            if isinstance(im, (str, PosixPath)):  # filename or uri
+                im, f = Image.open(requests.get(im, stream=True).raw if str(im).startswith('http') else im), im
                 im = np.asarray(exif_transpose(im))
             elif isinstance(im, Image.Image):  # PIL Image
                 im, f = np.asarray(exif_transpose(im)), getattr(im, 'filename', f) or f

From f7d85620601f4c2513bcd2b7911c20fbc49e9097 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Mon, 19 Jul 2021 10:43:01 +0200
Subject: [PATCH 0414/1976] `val.py` refactor (#4053)

* val.py refactor

* cleanup

* cleanup

* cleanup

* cleanup

* save after eval

* opt.imgsz bug fix

* wandb refactor

* dataloader to train_loader

* capitalize global variables

* runs/hub/exp to runs/detect/exp

* refactor wandb logging

* Refactor wandb operations (#4061)

Co-authored-by: Ayush Chaurasia
---
 detect.py                          |   6 +-
 models/common.py                   |  33 +++---
 models/yolo.py                     |  43 ++++----
 train.py                           |  67 ++++++------
 utils/datasets.py                  |  35 +++----
 utils/torch_utils.py               |  14 +--
 utils/wandb_logging/wandb_utils.py |  65 ++++++----
 val.py                             | 160 ++++++++++++++---------------
 8 files changed, 220 insertions(+), 203 deletions(-)

diff --git a/detect.py b/detect.py
index be2c5969c6d7..73f962398442 100644
--- a/detect.py
+++ b/detect.py
@@ -21,7 +21,7 @@
 from utils.general import check_img_size, check_requirements, check_imshow, colorstr, non_max_suppression, \
     apply_classifier, scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path, save_one_box
 from utils.plots import colors, plot_one_box
-from utils.torch_utils import select_device, load_classifier, time_synchronized
+from utils.torch_utils import select_device, load_classifier, time_sync


 @torch.no_grad()
@@ -100,14 +100,14 @@ def run(weights='yolov5s.pt',  # model.pt path(s)
             img = img.unsqueeze(0)

         # Inference
-        t1 = time_synchronized()
+        t1 = time_sync()
         pred = model(img,
                      augment=augment,
                      visualize=increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False)[0]

         # Apply NMS
         pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det)
-        t2 = time_synchronized()
+        t2 = time_sync()

         # Apply Classifier
         if classify:
diff --git a/models/common.py b/models/common.py
index 41fc128c07b9..4db90b54663e 100644
--- a/models/common.py
+++ b/models/common.py
@@ -1,5 +1,6 @@
 # YOLOv5 common modules

+import logging
 from copy import copy
 from pathlib import Path, PosixPath

@@ -15,7 +16,9 @@
 from utils.datasets import exif_transpose, letterbox
 from utils.general import non_max_suppression, make_divisible, scale_coords, increment_path, xyxy2xywh, save_one_box
 from utils.plots import colors, plot_one_box
-from utils.torch_utils import time_synchronized
+from utils.torch_utils import time_sync
+
+LOGGER = logging.getLogger(__name__)


 def autopad(k, p=None):  # kernel, padding
@@ -226,7 +229,7 @@ def __init__(self, model):
         self.model = model.eval()

     def autoshape(self):
-        print('AutoShape already enabled, skipping... ')  # model already converted to model.autoshape()
+        LOGGER.info('AutoShape already enabled, skipping... ')  # model already converted to model.autoshape()
         return self

     @torch.no_grad()
@@ -240,7 +243,7 @@ def forward(self, imgs, size=640, augment=False, profile=False):
         #   torch:           = torch.zeros(16,3,320,640)  # BCHW (scaled to size=640, 0-1 values)
         #   multiple:        = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...]  # list of images

-        t = [time_synchronized()]
+        t = [time_sync()]
         p = next(self.model.parameters())  # for device and type
         if isinstance(imgs, torch.Tensor):  # torch
             with amp.autocast(enabled=p.device.type != 'cpu'):
@@ -270,19 +273,19 @@ def forward(self, imgs, size=640, augment=False, profile=False):
         x = np.stack(x, 0) if n > 1 else x[0][None]  # stack
         x = np.ascontiguousarray(x.transpose((0, 3, 1, 2)))  # BHWC to BCHW
         x = torch.from_numpy(x).to(p.device).type_as(p) / 255.  # uint8 to fp16/32
-        t.append(time_synchronized())
+        t.append(time_sync())

         with amp.autocast(enabled=p.device.type != 'cpu'):
             # Inference
             y = self.model(x, augment, profile)[0]  # forward
-            t.append(time_synchronized())
+            t.append(time_sync())

             # Post-process
             y = non_max_suppression(y, self.conf, iou_thres=self.iou, classes=self.classes, max_det=self.max_det)  # NMS
             for i in range(n):
                 scale_coords(shape1, y[i][:, :4], shape0[i])
-            t.append(time_synchronized())
+            t.append(time_sync())
             return Detections(imgs, y, files, t, self.names, x.shape)

@@ -323,31 +326,33 @@ def display(self, pprint=False, show=False, save=False, crop=False, render=False
             im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im  # from np
             if pprint:
-                print(str.rstrip(', '))
+                LOGGER.info(str.rstrip(', '))
             if show:
                 im.show(self.files[i])  # show
             if save:
                 f = self.files[i]
                 im.save(save_dir / f)  # save
-                print(f"{'Saved' * (i == 0)} {f}", end=',' if i < self.n - 1 else f' to {save_dir}\n')
+                if i == self.n - 1:
+                    LOGGER.info(f"Saved {self.n} image{'s' * (self.n > 1)} to '{save_dir}'")
             if render:
                 self.imgs[i] = np.asarray(im)

     def print(self):
         self.display(pprint=True)  # print results
-        print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' % self.t)
+        LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' %
+                    self.t)

     def show(self):
         self.display(show=True)  # show results

-    def save(self, save_dir='runs/hub/exp'):
-        save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/hub/exp', mkdir=True)  # increment save_dir
+    def save(self, save_dir='runs/detect/exp'):
+        save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True)  # increment save_dir
         self.display(save=True, save_dir=save_dir)  # save results

-    def crop(self, save_dir='runs/hub/exp'):
-        save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/hub/exp', mkdir=True)  # increment save_dir
+    def crop(self, save_dir='runs/detect/exp'):
+        save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True)  # increment save_dir
         self.display(crop=True, save_dir=save_dir)  # crop results
-        print(f'Saved results to {save_dir}\n')
+        LOGGER.info(f'Saved results to {save_dir}\n')

     def render(self):
         self.display(render=True)  # render results
diff --git a/models/yolo.py b/models/yolo.py
index 7b49dfcf48a3..3a3af9b5fbde 100644
--- a/models/yolo.py
+++ b/models/yolo.py
@@ -5,7 +5,6 @@
 """

 import argparse
-import logging
 import sys
 from copy import deepcopy
 from pathlib import Path
@@ -18,7 +17,7 @@
 from utils.autoanchor import check_anchor_order
 from utils.general import make_divisible, check_file, set_logging
 from utils.plots import feature_visualization
-from utils.torch_utils import time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \
+from utils.torch_utils import time_sync, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \
     select_device, copy_attr

 try:
@@ -26,7 +25,7 @@
 except ImportError:
     thop = None

-logger = logging.getLogger(__name__)
+LOGGER = logging.getLogger(__name__)


 class Detect(nn.Module):
@@ -90,15 +89,15 @@ def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None):  # model, i
         # Define model
         ch = self.yaml['ch'] = self.yaml.get('ch', ch)  # input channels
         if nc and nc != self.yaml['nc']:
-            logger.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}")
+            LOGGER.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}")
             self.yaml['nc'] = nc  # override yaml value
         if anchors:
-            logger.info(f'Overriding model.yaml anchors with anchors={anchors}')
+            LOGGER.info(f'Overriding model.yaml anchors with anchors={anchors}')
             self.yaml['anchors'] = round(anchors)  # override yaml value
         self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch])  # model, savelist
         self.names = [str(i) for i in range(self.yaml['nc'])]  # default names
         self.inplace = self.yaml.get('inplace', True)
-        # logger.info([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))])
+        # LOGGER.info([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))])

         # Build strides, anchors
         m = self.model[-1]  # Detect()
@@ -110,12 +109,12 @@ def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None):  # model, i
             check_anchor_order(m)
             self.stride = m.stride
             self._initialize_biases()  # only run once
-            # logger.info('Strides: %s' % m.stride.tolist())
+            # LOGGER.info('Strides: %s' % m.stride.tolist())

         # Init weights, biases
         initialize_weights(self)
         self.info()
-        logger.info('')
+        LOGGER.info('')

     def forward(self, x, augment=False, profile=False, visualize=False):
         if augment:
@@ -143,13 +142,13 @@ def forward_once(self, x, profile=False, visualize=False):

             if profile:
                 o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 if thop else 0  # FLOPs
-                t = time_synchronized()
+                t = time_sync()
                 for _ in range(10):
                     _ = m(x)
-                dt.append((time_synchronized() - t) * 100)
+                dt.append((time_sync() - t) * 100)
                 if m == self.model[0]:
-                    logger.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s}  {'module'}")
-                logger.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f}  {m.type}')
+                    LOGGER.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s}  {'module'}")
+                LOGGER.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f}  {m.type}')

             x = m(x)  # run
             y.append(x if m.i in self.save else None)  # save output
@@ -158,7 +157,7 @@ def forward_once(self, x, profile=False, visualize=False):
                 feature_visualization(x, m.type, m.i, save_dir=visualize)

         if profile:
-            logger.info('%.1fms total' % sum(dt))
+            LOGGER.info('%.1fms total' % sum(dt))
         return x

     def _descale_pred(self, p, flips, scale, img_size):
@@ -192,16 +191,16 @@ def _print_biases(self):
         m = self.model[-1]  # Detect() module
         for mi in m.m:  # from
             b = mi.bias.detach().view(m.na, -1).T  # conv.bias(255) to (3,85)
-            logger.info(
+            LOGGER.info(
                 ('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean()))

     # def _print_weights(self):
     #     for m in self.model.modules():
     #         if type(m) is Bottleneck:
-    #             logger.info('%10.3g' % (m.w.detach().sigmoid() * 2))  # shortcut weights
+    #             LOGGER.info('%10.3g' % (m.w.detach().sigmoid() * 2))  # shortcut weights

     def fuse(self):  # fuse model Conv2d() + BatchNorm2d() layers
-        logger.info('Fusing layers... ')
+        LOGGER.info('Fusing layers... ')
         for m in self.model.modules():
             if type(m) is Conv and hasattr(m, 'bn'):
                 m.conv = fuse_conv_and_bn(m.conv, m.bn)  # update conv
@@ -213,19 +212,19 @@ def fuse(self):  # fuse model Conv2d() + BatchNorm2d() layers
     def nms(self, mode=True):  # add or remove NMS module
         present = type(self.model[-1]) is NMS  # last layer is NMS
         if mode and not present:
-            logger.info('Adding NMS... ')
+            LOGGER.info('Adding NMS... ')
             m = NMS()  # module
             m.f = -1  # from
             m.i = self.model[-1].i + 1  # index
             self.model.add_module(name='%s' % m.i, module=m)  # add
             self.eval()
         elif not mode and present:
-            logger.info('Removing NMS... ')
+            LOGGER.info('Removing NMS... ')
             self.model = self.model[:-1]  # remove
         return self

     def autoshape(self):  # add AutoShape module
-        logger.info('Adding AutoShape... ')
+        LOGGER.info('Adding AutoShape... ')
         m = AutoShape(self)  # wrap model
         copy_attr(m, self, include=('yaml', 'nc', 'hyp', 'names', 'stride'), exclude=())  # copy attributes
         return m
@@ -235,7 +234,7 @@ def info(self, verbose=False, img_size=640):  # print model information


 def parse_model(d, ch):  # model_dict, input_channels(3)
-    logger.info('\n%3s%18s%3s%10s  %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments'))
+    LOGGER.info('\n%3s%18s%3s%10s  %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments'))
     anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple']
     na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors  # number of anchors
     no = na * (nc + 5)  # number of outputs = anchors * (classes + 5)
@@ -279,7 +278,7 @@ def parse_model(d, ch):  # model_dict, input_channels(3)
         t = str(m)[8:-2].replace('__main__.', '')  # module type
         np = sum([x.numel() for x in m_.parameters()])  # number params
         m_.i, m_.f, m_.type, m_.np = i, f, t, np  # attach index, 'from' index, type, number params
-        logger.info('%3s%18s%3s%10.0f  %-40s%-30s' % (i, f, n, np, t, args))  # print
+        LOGGER.info('%3s%18s%3s%10.0f  %-40s%-30s' % (i, f, n, np, t, args))  # print
         save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1)  # append to savelist
         layers.append(m_)
         if i == 0:
@@ -308,5 +307,5 @@ def parse_model(d, ch):  # model_dict, input_channels(3)
     # Tensorboard (not working https://github.com/ultralytics/yolov5/issues/2898)
     # from torch.utils.tensorboard import SummaryWriter
     # tb_writer = SummaryWriter('.')
-    # logger.info("Run 'tensorboard --logdir=models' to view tensorboard at http://localhost:6006/")
+    # LOGGER.info("Run 'tensorboard --logdir=models' to view tensorboard at http://localhost:6006/")
     # tb_writer.add_graph(torch.jit.trace(model, img, strict=False), [])  # add model graph
diff --git a/train.py b/train.py
index 15c2c356f60e..b1afaf8ada75 100644
--- a/train.py
+++ b/train.py
@@ -47,7 +47,7 @@
 from utils.wandb_logging.wandb_utils import WandbLogger, check_wandb_resume
 from utils.metrics import fitness

-logger = logging.getLogger(__name__)
+LOGGER = logging.getLogger(__name__)
 LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1))  # https://pytorch.org/docs/stable/elastic/run.html
 RANK = int(os.getenv('RANK', -1))
 WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1))
@@ -73,7 +73,7 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
     if isinstance(hyp, str):
         with open(hyp) as f:
             hyp = yaml.safe_load(f)  # load hyps dict
-    logger.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items()))
+    LOGGER.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items()))

     # Save run settings
     with open(save_dir / 'hyp.yaml', 'w') as f:
@@ -94,7 +94,7 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
         # TensorBoard
         if not evolve:
             prefix = colorstr('tensorboard: ')
-            logger.info(f"{prefix}Start with 'tensorboard --logdir {opt.project}', view at http://localhost:6006/")
+            LOGGER.info(f"{prefix}Start with 'tensorboard --logdir {opt.project}', view at http://localhost:6006/")
             loggers['tb'] = SummaryWriter(str(save_dir))

         # W&B
@@ -123,7 +123,7 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
         state_dict = ckpt['model'].float().state_dict()  # to FP32
         state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude)  # intersect
         model.load_state_dict(state_dict, strict=False)  # load
-        logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights))  # report
+        LOGGER.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights))  # report
     else:
         model = Model(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)  # create
     with torch_distributed_zero_first(RANK):
@@ -143,7 +143,7 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
     nbs = 64  # nominal batch size
     accumulate = max(round(nbs / batch_size), 1)  # accumulate loss before optimizing
     hyp['weight_decay'] *= batch_size * accumulate / nbs  # scale weight_decay
-    logger.info(f"Scaled weight_decay = {hyp['weight_decay']}")
+    LOGGER.info(f"Scaled weight_decay = {hyp['weight_decay']}")

     pg0, pg1, pg2 = [], [], []  # optimizer parameter groups
     for k, v in model.named_modules():
@@ -161,7 +161,7 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
     optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']})  # add pg1 with weight_decay
     optimizer.add_param_group({'params': pg2})  # add pg2 (biases)
-    logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0)))
+    LOGGER.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0)))
     del pg0, pg1, pg2

     # Scheduler https://arxiv.org/pdf/1812.01187.pdf
@@ -198,7 +198,7 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
         if resume:
             assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (weights, epochs)
         if epochs < start_epoch:
-            logger.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' %
+            LOGGER.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' %
                         (weights, ckpt['epoch'], epochs))
             epochs += ckpt['epoch']  # finetune additional epochs

@@ -207,7 +207,7 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
     # Image sizes
     gs = max(int(model.stride.max()), 32)  # grid size (max stride)
     nl = model.model[-1].nl  # number of detection layers (used for scaling hyp['obj'])
-    imgsz, imgsz_val = [check_img_size(x, gs) for x in opt.img_size]  # verify imgsz are gs-multiples
+    imgsz = check_img_size(opt.imgsz, gs)  # verify imgsz is gs-multiple

     # DP mode
     if cuda and RANK == -1 and torch.cuda.device_count() > 1:
@@ -219,33 +219,31 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
     if opt.sync_bn and cuda and RANK != -1:
         raise Exception('can not train with --sync-bn, known issue https://github.com/ultralytics/yolov5/issues/3998')
         model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
-        logger.info('Using SyncBatchNorm()')
+        LOGGER.info('Using SyncBatchNorm()')

     # Trainloader
-    dataloader, dataset = create_dataloader(train_path, imgsz, batch_size // WORLD_SIZE, gs, single_cls,
-                                            hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=RANK,
-                                            workers=workers,
-                                            image_weights=opt.image_weights, quad=opt.quad, prefix=colorstr('train: '))
+    train_loader, dataset = create_dataloader(train_path, imgsz, batch_size // WORLD_SIZE, gs, single_cls,
+                                              hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=RANK,
+                                              workers=workers, image_weights=opt.image_weights, quad=opt.quad,
+                                              prefix=colorstr('train: '))
     mlc = np.concatenate(dataset.labels, 0)[:, 0].max()  # max label class
-    nb = len(dataloader)  # number of batches
+    nb = len(train_loader)  # number of batches
     assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % (mlc, nc, data, nc - 1)

     # Process 0
     if RANK in [-1, 0]:
-        valloader = create_dataloader(val_path, imgsz_val, batch_size // WORLD_SIZE * 2, gs, single_cls,
-                                      hyp=hyp, cache=opt.cache_images and not noval, rect=True, rank=-1,
-                                      workers=workers,
-                                      pad=0.5, prefix=colorstr('val: '))[0]
+        val_loader = create_dataloader(val_path, imgsz, batch_size // WORLD_SIZE * 2, gs, single_cls,
+                                       hyp=hyp, cache=opt.cache_images and not noval, rect=True, rank=-1,
+                                       workers=workers, pad=0.5,
+                                       prefix=colorstr('val: '))[0]

         if not resume:
             labels = np.concatenate(dataset.labels, 0)
-            c = torch.tensor(labels[:, 0])  # classes
+            # c = torch.tensor(labels[:, 0])  # classes
             # cf = torch.bincount(c.long(), minlength=nc) + 1.  # frequency
             # model._initialize_biases(cf.to(device))
             if plots:
                 plot_labels(labels, names, save_dir, loggers)
-                if loggers['tb']:
-                    loggers['tb'].add_histogram('classes', c, 0)  # TensorBoard

             # Anchors
             if not opt.noautoanchor:
@@ -277,8 +275,8 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
     scheduler.last_epoch = start_epoch - 1  # do not move
     scaler = amp.GradScaler(enabled=cuda)
     compute_loss = ComputeLoss(model)  # init loss class
-    logger.info(f'Image sizes {imgsz} train, {imgsz_val} val\n'
-                f'Using {dataloader.num_workers} dataloader workers\n'
+    LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n'
+                f'Using {train_loader.num_workers} dataloader workers\n'
                 f'Logging results to {save_dir}\n'
                 f'Starting training for {epochs} epochs...')
     for epoch in range(start_epoch, epochs):  # epoch ------------------------------------------------------------------
@@ -304,9 +302,9 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary

         mloss = torch.zeros(4, device=device)  # mean losses
         if RANK != -1:
-            dataloader.sampler.set_epoch(epoch)
-        pbar = enumerate(dataloader)
-        logger.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'labels', 'img_size'))
+            train_loader.sampler.set_epoch(epoch)
+        pbar = enumerate(train_loader)
+        LOGGER.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'labels', 'img_size'))
         if RANK in [-1, 0]:
             pbar = tqdm(pbar, total=nb)  # progress bar
         optimizer.zero_grad()
@@ -389,10 +387,10 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
                 wandb_logger.current_epoch = epoch + 1
                 results, maps, _ = val.run(data_dict,
                                            batch_size=batch_size // WORLD_SIZE * 2,
-                                           imgsz=imgsz_val,
+                                           imgsz=imgsz,
                                            model=ema.ema,
                                            single_cls=single_cls,
-                                           dataloader=valloader,
+ dataloader=val_loader, save_dir=save_dir, save_json=True, plots=False) @@ -487,7 +485,7 @@ def parse_opt(known=False): parser.add_argument('--hyp', type=str, default='data/hyps/hyp.scratch.yaml', help='hyperparameters path') parser.add_argument('--epochs', type=int, default=300) parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs') - parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='[train, val] image sizes') + parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)') parser.add_argument('--rect', action='store_true', help='rectangular training') parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training') parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') @@ -534,12 +532,11 @@ def main(opt): with open(Path(ckpt).parent.parent / 'opt.yaml') as f: opt = argparse.Namespace(**yaml.safe_load(f)) # replace opt.cfg, opt.weights, opt.resume = '', ckpt, True # reinstate - logger.info('Resuming training from %s' % ckpt) + LOGGER.info(f'Resuming training from {ckpt}') else: # opt.hyp = opt.hyp or ('hyp.finetune.yaml' if opt.weights else 'hyp.scratch.yaml') opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp) # check files assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified' - opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size))) # extend to 2 sizes (train, val) opt.name = 'evolve' if opt.evolve else opt.name opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve)) @@ -602,7 +599,7 @@ def main(opt): # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices yaml_file = Path(opt.save_dir) / 'hyp_evolved.yaml' # save best result here if opt.bucket: - os.system('gsutil cp gs://%s/evolve.txt .' 
% opt.bucket) # download evolve.txt if exists + os.system(f'gsutil cp gs://{opt.bucket}/evolve.txt .') # download evolve.txt if exists for _ in range(opt.evolve): # generations to evolve if Path('evolve.txt').exists(): # if evolve.txt exists: select best hyps and mutate diff --git a/utils/datasets.py b/utils/datasets.py index 0763b56d31e3..d3edafa99bd0 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -22,17 +22,16 @@ from torch.utils.data import Dataset from tqdm import tqdm -from utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective, cutout +from utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective from utils.general import check_requirements, check_file, check_dataset, xywh2xyxy, xywhn2xyxy, xyxy2xywhn, \ xyn2xy, segments2boxes, clean_str from utils.torch_utils import torch_distributed_zero_first # Parameters -help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' -img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo'] # acceptable image suffixes -vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes -num_threads = min(8, os.cpu_count()) # number of multiprocessing threads -logger = logging.getLogger(__name__) +HELP_URL = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' +IMG_FORMATS = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo'] # acceptable image suffixes +VID_FORMATS = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes +NUM_THREADS = min(8, os.cpu_count()) # number of multiprocessing threads # Get orientation exif tag for orientation in ExifTags.TAGS.keys(): @@ -164,8 +163,8 @@ def __init__(self, path, img_size=640, stride=32): else: raise Exception(f'ERROR: {p} does not exist') - images = [x for x in files if x.split('.')[-1].lower() in img_formats] - videos = [x for x in files if x.split('.')[-1].lower() in vid_formats] + images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS] + videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS] ni, nv = len(images), len(videos) self.img_size = img_size @@ -179,7 +178,7 @@ def __init__(self, path, img_size=640, stride=32): else: self.cap = None assert self.nf > 0, f'No images or videos found in {p}. 
' \ - f'Supported formats are:\nimages: {img_formats}\nvideos: {vid_formats}' + f'Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}' def __iter__(self): self.count = 0 @@ -389,11 +388,11 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r # f += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib) else: raise Exception(f'{prefix}{p} does not exist') - self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in img_formats]) + self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS]) # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in img_formats]) # pathlib assert self.img_files, f'{prefix}No images found' except Exception as e: - raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {help_url}') + raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {HELP_URL}') # Check cache self.label_files = img2label_paths(self.img_files) # labels @@ -411,7 +410,7 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r tqdm(None, desc=prefix + d, total=n, initial=n) # display cache results if cache['msgs']: logging.info('\n'.join(cache['msgs'])) # display warnings - assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {help_url}' + assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {HELP_URL}' # Read cache [cache.pop(k) for k in ('hash', 'version', 'msgs')] # remove items @@ -460,7 +459,7 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r if cache_images: gb = 0 # Gigabytes of cached images self.img_hw0, self.img_hw = [None] * n, [None] * n - results = ThreadPool(num_threads).imap(lambda x: load_image(*x), zip(repeat(self), range(n))) + results = ThreadPool(NUM_THREADS).imap(lambda x: load_image(*x), zip(repeat(self), range(n))) pbar = tqdm(enumerate(results), total=n) for i, x in pbar: self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # img, hw_original, hw_resized = load_image(self, i) @@ -473,7 +472,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): x = {} # dict nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels..." - with Pool(num_threads) as pool: + with Pool(NUM_THREADS) as pool: pbar = tqdm(pool.imap_unordered(verify_image_label, zip(self.img_files, self.label_files, repeat(prefix))), desc=desc, total=len(self.img_files)) for im_file, l, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar: @@ -491,7 +490,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): if msgs: logging.info('\n'.join(msgs)) if nf == 0: - logging.info(f'{prefix}WARNING: No labels found in {path}. See {help_url}') + logging.info(f'{prefix}WARNING: No labels found in {path}. 
See {HELP_URL}') x['hash'] = get_hash(self.label_files + self.img_files) x['results'] = nf, nm, ne, nc, len(self.img_files) x['msgs'] = msgs # warnings @@ -789,7 +788,7 @@ def extract_boxes(path='../datasets/coco128'): # from utils.datasets import *; files = list(path.rglob('*.*')) n = len(files) # number of files for im_file in tqdm(files, total=n): - if im_file.suffix[1:] in img_formats: + if im_file.suffix[1:] in IMG_FORMATS: # image im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB h, w = im.shape[:2] @@ -825,7 +824,7 @@ def autosplit(path='../datasets/coco128/images', weights=(0.9, 0.1, 0.0), annota annotated_only: Only use images with an annotated txt file """ path = Path(path) # images dir - files = sum([list(path.rglob(f"*.{img_ext}")) for img_ext in img_formats], []) # image files only + files = sum([list(path.rglob(f"*.{img_ext}")) for img_ext in IMG_FORMATS], []) # image files only n = len(files) # number of files random.seed(0) # for reproducibility indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split @@ -850,7 +849,7 @@ def verify_image_label(args): im.verify() # PIL verify shape = exif_size(im) # image size assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels' - assert im.format.lower() in img_formats, f'invalid image format {im.format}' + assert im.format.lower() in IMG_FORMATS, f'invalid image format {im.format}' if im.format.lower() in ('jpg', 'jpeg'): with open(im_file, 'rb') as f: f.seek(-2, 2) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 36b6845a8c48..d86267b26356 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -22,7 +22,7 @@ import thop # for FLOPs computation except ImportError: thop = None -logger = logging.getLogger(__name__) +LOGGER = logging.getLogger(__name__) @contextmanager @@ -85,11 +85,11 @@ def select_device(device='', batch_size=None): else: s += 'CPU\n' - logger.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe + LOGGER.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe return torch.device('cuda:0' if cuda else 'cpu') -def time_synchronized(): +def time_sync(): # pytorch-accurate time if torch.cuda.is_available(): torch.cuda.synchronize() @@ -118,12 +118,12 @@ def profile(x, ops, n=100, device=None): flops = 0 for _ in range(n): - t[0] = time_synchronized() + t[0] = time_sync() y = m(x) - t[1] = time_synchronized() + t[1] = time_sync() try: _ = y.sum().backward() - t[2] = time_synchronized() + t[2] = time_sync() except: # no backward method t[2] = float('nan') dtf += (t[1] - t[0]) * 1000 / n # ms per op forward @@ -231,7 +231,7 @@ def model_info(model, verbose=False, img_size=640): except (ImportError, Exception): fs = '' - logger.info(f"Model Summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}") + LOGGER.info(f"Model Summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}") def load_classifier(name='resnet101', n=2): diff --git a/utils/wandb_logging/wandb_utils.py b/utils/wandb_logging/wandb_utils.py index 2adea9235f6c..a7e84ca100e4 100644 --- a/utils/wandb_logging/wandb_utils.py +++ b/utils/wandb_logging/wandb_utils.py @@ -98,7 +98,14 @@ class WandbLogger(): def __init__(self, opt, name, run_id, data_dict, job_type='Training'): # Pre-training routine -- self.job_type = job_type - self.wandb, self.wandb_run, self.data_dict = wandb, None if not wandb else wandb.run, data_dict + self.wandb, self.wandb_run = wandb, 
None if not wandb else wandb.run + self.val_artifact, self.train_artifact = None, None + self.train_artifact_path, self.val_artifact_path = None, None + self.result_artifact = None + self.val_table, self.result_table = None, None + self.data_dict = data_dict + self.bbox_media_panel_images = [] + self.val_table_path_map = None # It's more elegant to stick to 1 wandb.init call, but useful config data is overwritten in the WandbLogger's wandb.init call if isinstance(opt.resume, str): # checks resume from artifact if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): @@ -156,25 +163,27 @@ def setup_training(self, opt, data_dict): self.weights), config.save_period, config.batch_size, config.bbox_interval, config.epochs, \ config.opt['hyp'] data_dict = dict(self.wandb_run.config.data_dict) # eliminates the need for config file to resume - if 'val_artifact' not in self.__dict__: # If --upload_dataset is set, use the existing artifact, don't download + if self.val_artifact is None: # If --upload_dataset is set, use the existing artifact, don't download self.train_artifact_path, self.train_artifact = self.download_dataset_artifact(data_dict.get('train'), opt.artifact_alias) self.val_artifact_path, self.val_artifact = self.download_dataset_artifact(data_dict.get('val'), opt.artifact_alias) - self.result_artifact, self.result_table, self.val_table, self.weights = None, None, None, None - if self.train_artifact_path is not None: - train_path = Path(self.train_artifact_path) / 'data/images/' - data_dict['train'] = str(train_path) - if self.val_artifact_path is not None: - val_path = Path(self.val_artifact_path) / 'data/images/' - data_dict['val'] = str(val_path) - self.val_table = self.val_artifact.get("val") - self.map_val_table_path() - wandb.log({"validation dataset": self.val_table}) - + + if self.train_artifact_path is not None: + train_path = Path(self.train_artifact_path) / 'data/images/' + data_dict['train'] = str(train_path) + if self.val_artifact_path is not None: + val_path = Path(self.val_artifact_path) / 'data/images/' + data_dict['val'] = str(val_path) + + if self.val_artifact is not None: self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") self.result_table = wandb.Table(["epoch", "id", "ground truth", "prediction", "avg_confidence"]) + self.val_table = self.val_artifact.get("val") + if self.val_table_path_map is None: + self.map_val_table_path() + wandb.log({"validation dataset": self.val_table}) if opt.bbox_interval == -1: self.bbox_interval = opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else 1 return data_dict @@ -182,7 +191,7 @@ def setup_training(self, opt, data_dict): def download_dataset_artifact(self, path, alias): if isinstance(path, str) and path.startswith(WANDB_ARTIFACT_PREFIX): artifact_path = Path(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ":" + alias) - dataset_artifact = wandb.use_artifact(artifact_path.as_posix().replace("\\","/")) + dataset_artifact = wandb.use_artifact(artifact_path.as_posix().replace("\\", "/")) assert dataset_artifact is not None, "'Error: W&B dataset artifact doesn\'t exist'" datadir = dataset_artifact.download() return datadir, dataset_artifact @@ -246,10 +255,10 @@ def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config= return path def map_val_table_path(self): - self.val_table_map = {} + self.val_table_path_map = {} print("Mapping dataset") for i, data in enumerate(tqdm(self.val_table.data)): - self.val_table_map[data[3]] = data[0] + self.val_table_path_map[data[3]] = data[0] 
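The rename above also pins down this structure's one job: val_table_path_map is a filename-to-row-id index over the validation table, built once so each per-image prediction can later be written back to the matching table row. A minimal sketch of the pattern in plain Python, assuming rows shaped like the val_table (row id at index 0, image filename at index 3); illustrative only, not the W&B API:

def build_path_map(rows):
    # row[0] is the table row id, row[3] the image filename, mirroring val_table rows
    return {row[3]: row[0] for row in rows}

rows = [(0, 'img0', 'gt0', 'zidane.jpg'), (1, 'img1', 'gt1', 'bus.jpg')]  # toy rows
path_map = build_path_map(rows)
assert path_map['bus.jpg'] == 1  # later: result_table.add_data(epoch, path_map[name], ...)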
def create_dataset_table(self, dataset, class_to_id, name='dataset'): # TODO: Explore multiprocessing to split this loop in parallel. This is essential for speeding up the logging @@ -283,7 +292,6 @@ def create_dataset_table(self, dataset, class_to_id, name='dataset'): return artifact def log_training_progress(self, predn, path, names): - if self.val_table and self.result_table: class_set = wandb.Classes([{'id': id, 'name': name} for id, name in names.items()]) box_data = [] total_conf = 0 @@ -297,7 +305,7 @@ def log_training_progress(self, predn, path, names): "domain": "pixel"}) total_conf = total_conf + conf boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space - id = self.val_table_map[Path(path).name] + id = self.val_table_path_map[Path(path).name] self.result_table.add_data(self.current_epoch, id, self.val_table.data[id][1], @@ -305,6 +313,22 @@ def log_training_progress(self, predn, path, names): total_conf / max(1, len(box_data)) ) + def val_one_image(self, pred, predn, path, names, im): + if self.val_table and self.result_table: # Log Table if Val dataset is uploaded as artifact + self.log_training_progress(predn, path, names) + else: # Default to bbox media panel if Val artifact not found + log_imgs = min(self.log_imgs, 100) + if len(self.bbox_media_panel_images) < log_imgs and self.current_epoch > 0: + if self.current_epoch % self.bbox_interval == 0: + box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, + "class_id": int(cls), + "box_caption": "%s %.3f" % (names[cls], conf), + "scores": {"class_score": conf}, + "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()] + boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space + self.bbox_media_panel_images.append(wandb.Image(im, boxes=boxes, caption=path.name)) + + def log(self, log_dict): if self.wandb_run: for key, value in log_dict.items(): @@ -313,13 +337,16 @@ def log(self, log_dict): def end_epoch(self, best_result=False): if self.wandb_run: with all_logging_disabled(): + if self.bbox_media_panel_images: + self.log_dict["Bounding Box Debugger/Images"] = self.bbox_media_panel_images wandb.log(self.log_dict) self.log_dict = {} + self.bbox_media_panel_images = [] if self.result_artifact: self.result_artifact.add(self.result_table, 'result') wandb.log_artifact(self.result_artifact, aliases=['latest', 'last', 'epoch ' + str(self.current_epoch), ('best' if best_result else '')]) - + wandb.log({"evaluation": self.result_table}) self.result_table = wandb.Table(["epoch", "id", "ground truth", "prediction", "avg_confidence"]) self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") diff --git a/val.py b/val.py index fa5cb8f113e0..5a8486720577 100644 --- a/val.py +++ b/val.py @@ -25,7 +25,52 @@ box_iou, non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, set_logging, increment_path, colorstr from utils.metrics import ap_per_class, ConfusionMatrix from utils.plots import plot_images, output_to_target, plot_study_txt -from utils.torch_utils import select_device, time_synchronized +from utils.torch_utils import select_device, time_sync + + +def save_one_txt(predn, save_conf, shape, file): + # Save one txt result + gn = torch.tensor(shape)[[1, 0, 1, 0]] # normalization gain whwh + for *xyxy, conf, cls in predn.tolist(): + xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh + line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format + with
open(file, 'a') as f: + f.write(('%g ' * len(line)).rstrip() % line + '\n') + + +def save_one_json(predn, jdict, path, class_map): + # Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236} + image_id = int(path.stem) if path.stem.isnumeric() else path.stem + box = xyxy2xywh(predn[:, :4]) # xywh + box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner + for p, b in zip(predn.tolist(), box.tolist()): + jdict.append({'image_id': image_id, + 'category_id': class_map[int(p[5])], + 'bbox': [round(x, 3) for x in b], + 'score': round(p[4], 5)}) + + +def process_batch(predictions, labels, iouv): + # Evaluate 1 batch of predictions + correct = torch.zeros(predictions.shape[0], len(iouv), dtype=torch.bool, device=iouv.device) + detected = [] # label indices + tcls, pcls = labels[:, 0], predictions[:, 5] + nl = labels.shape[0] # number of labels + for cls in torch.unique(tcls): + ti = (cls == tcls).nonzero().view(-1) # label indices + pi = (cls == pcls).nonzero().view(-1) # prediction indices + if pi.shape[0]: # find detections + ious, i = box_iou(predictions[pi, 0:4], labels[ti, 1:5]).max(1) # best ious, indices + detected_set = set() + for j in (ious > iouv[0]).nonzero(): + d = ti[i[j]] # detected label + if d.item() not in detected_set: + detected_set.add(d.item()) + detected.append(d) # append detections + correct[pi[j]] = ious[j] > iouv # iou_thres is 1xn + if len(detected) == nl: # all labels already located in image + break + return correct @torch.no_grad() @@ -43,7 +88,7 @@ def run(data, save_txt=False, # save results to *.txt save_hybrid=False, # save label+prediction hybrid results to *.txt save_conf=False, # save confidences in --save-txt labels - save_json=False, # save a cocoapi-compatible JSON results file + save_json=False, # save a COCO-JSON results file project='runs/val', # save to project/name name='exp', # save to project/name exist_ok=False, # existing project/name ok, do not increment @@ -93,10 +138,6 @@ def run(data, iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95 niou = iouv.numel() - # Logging - log_imgs = 0 - if wandb_logger and wandb_logger.wandb: - log_imgs = min(wandb_logger.log_imgs, 100) # Dataloader if not training: if device.type != 'cpu': @@ -108,24 +149,24 @@ def run(data, seen = 0 confusion_matrix = ConfusionMatrix(nc=nc) names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)} - coco91class = coco80_to_coco91_class() + class_map = coco80_to_coco91_class() if is_coco else list(range(1000)) s = ('%20s' + '%11s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95') p, r, f1, mp, mr, map50, map, t0, t1, t2 = 0., 0., 0., 0., 0., 0., 0., 0., 0., 0. 
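# A standalone toy run of the process_batch() matching idea above: each prediction gets a
# boolean row over the ten mAP@0.5:0.95 IoU thresholds, True wherever its best same-class
# label match clears the threshold. toy_box_iou is a self-contained stand-in for the
# repo's utils.metrics box_iou; boxes and shapes here are illustrative.
import torch

def toy_box_iou(a, b):  # a: (N, 4), b: (M, 4) in xyxy -> (N, M) pairwise IoU
    area = lambda x: (x[:, 2] - x[:, 0]) * (x[:, 3] - x[:, 1])
    lt = torch.max(a[:, None, :2], b[None, :, :2])  # intersection top-left corners
    rb = torch.min(a[:, None, 2:], b[None, :, 2:])  # intersection bottom-right corners
    inter = (rb - lt).clamp(0).prod(2)              # clamp(0) handles non-overlapping pairs
    return inter / (area(a)[:, None] + area(b)[None, :] - inter)

iouv = torch.linspace(0.5, 0.95, 10)            # the ten mAP IoU thresholds
pred_box = torch.tensor([[0., 0., 10., 10.]])   # one prediction
label_box = torch.tensor([[0., 0., 10., 12.]])  # one same-class label; IoU = 100/120 ~= 0.83
best_iou = toy_box_iou(pred_box, label_box).max(1).values  # best label IoU per prediction
correct = best_iou[:, None] > iouv[None]        # (num_pred, 10) bool, as in process_batch()
print(correct.int())                            # 1 at thresholds 0.50-0.80, 0 at 0.85-0.95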
loss = torch.zeros(3, device=device) - jdict, stats, ap, ap_class, wandb_images = [], [], [], [], [] + jdict, stats, ap, ap_class = [], [], [], [] for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)): - t_ = time_synchronized() + t_ = time_sync() img = img.to(device, non_blocking=True) img = img.half() if half else img.float() # uint8 to fp16/32 img /= 255.0 # 0 - 255 to 0.0 - 1.0 targets = targets.to(device) nb, _, height, width = img.shape # batch size, channels, height, width - t = time_synchronized() + t = time_sync() t0 += t - t_ # Run model out, train_out = model(img, augment=augment) # inference and training outputs - t1 += time_synchronized() - t + t1 += time_sync() - t # Compute loss if compute_loss: @@ -134,16 +175,16 @@ def run(data, # Run NMS targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling - t = time_synchronized() + t = time_sync() out = non_max_suppression(out, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls) - t2 += time_synchronized() - t + t2 += time_sync() - t # Statistics per image for si, pred in enumerate(out): labels = targets[targets[:, 0] == si, 1:] nl = len(labels) tcls = labels[:, 0].tolist() if nl else [] # target class - path = Path(paths[si]) + path, shape = Path(paths[si]), shapes[si][0] seen += 1 if len(pred) == 0: @@ -155,76 +196,27 @@ def run(data, if single_cls: pred[:, 5] = 0 predn = pred.clone() - scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1]) # native-space pred + scale_coords(img[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred - # Append to text file - if save_txt: - gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]] # normalization gain whwh - for *xyxy, conf, cls in predn.tolist(): - xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh - line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format - with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f: - f.write(('%g ' * len(line)).rstrip() % line + '\n') - - # W&B logging - Media Panel plots - if len(wandb_images) < log_imgs and wandb_logger.current_epoch > 0: # Check for test operation - if wandb_logger.current_epoch % wandb_logger.bbox_interval == 0: - box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, - "class_id": int(cls), - "box_caption": "%s %.3f" % (names[cls], conf), - "scores": {"class_score": conf}, - "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()] - boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space - wandb_images.append(wandb_logger.wandb.Image(img[si], boxes=boxes, caption=path.name)) - wandb_logger.log_training_progress(predn, path, names) if wandb_logger and wandb_logger.wandb_run else None - - # Append to pycocotools JSON dictionary - if save_json: - # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ... 
- image_id = int(path.stem) if path.stem.isnumeric() else path.stem - box = xyxy2xywh(predn[:, :4]) # xywh - box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner - for p, b in zip(pred.tolist(), box.tolist()): - jdict.append({'image_id': image_id, - 'category_id': coco91class[int(p[5])] if is_coco else int(p[5]), - 'bbox': [round(x, 3) for x in b], - 'score': round(p[4], 5)}) - - # Assign all predictions as incorrect - correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device) + # Evaluate if nl: - detected = [] # target indices - tcls_tensor = labels[:, 0] - - # target boxes - tbox = xywh2xyxy(labels[:, 1:5]) - scale_coords(img[si].shape[1:], tbox, shapes[si][0], shapes[si][1]) # native-space labels + tbox = xywh2xyxy(labels[:, 1:5]) # target boxes + scale_coords(img[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels + labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels + correct = process_batch(predn, labelsn, iouv) if plots: - confusion_matrix.process_batch(predn, torch.cat((labels[:, 0:1], tbox), 1)) - - # Per target class - for cls in torch.unique(tcls_tensor): - ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1) # target indices - pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1) # prediction indices - - # Search for detections - if pi.shape[0]: - # Prediction to target ious - ious, i = box_iou(predn[pi, :4], tbox[ti]).max(1) # best ious, indices - - # Append detections - detected_set = set() - for j in (ious > iouv[0]).nonzero(as_tuple=False): - d = ti[i[j]] # detected target - if d.item() not in detected_set: - detected_set.add(d.item()) - detected.append(d) - correct[pi[j]] = ious[j] > iouv # iou_thres is 1xn - if len(detected) == nl: # all targets already located in image - break - - # Append statistics (correct, conf, pcls, tcls) - stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls)) + confusion_matrix.process_batch(predn, labelsn) + else: + correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool) + stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls)) # (correct, conf, pcls, tcls) + + # Save/log + if save_txt: + save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / (path.stem + '.txt')) + if save_json: + save_one_json(predn, jdict, path, class_map) # append to COCO-JSON dictionary + if wandb_logger: + wandb_logger.val_one_image(pred, predn, path, names, img[si]) # Plot images if plots and batch_i < 3: @@ -264,15 +256,13 @@ def run(data, if wandb_logger and wandb_logger.wandb: val_batches = [wandb_logger.wandb.Image(str(f), caption=f.name) for f in sorted(save_dir.glob('val*.jpg'))] wandb_logger.log({"Validation": val_batches}) - if wandb_images: - wandb_logger.log({"Bounding Box Debugger/Images": wandb_images}) # Save JSON if save_json and len(jdict): w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json') # annotations json pred_json = str(save_dir / f"{w}_predictions.json") # predictions json - print('\nEvaluating pycocotools mAP... saving %s...' % pred_json) + print(f'\nEvaluating pycocotools mAP... 
saving {pred_json}...') with open(pred_json, 'w') as f: json.dump(jdict, f) @@ -320,7 +310,7 @@ def parse_opt(): parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt') parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') - parser.add_argument('--save-json', action='store_true', help='save a cocoapi-compatible JSON results file') + parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file') parser.add_argument('--project', default='runs/val', help='save to project/name') parser.add_argument('--name', default='exp', help='save to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') From b1be6850050959a09c3e26813646c52b2a73b1a0 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 19 Jul 2021 12:41:15 +0200 Subject: [PATCH 0415/1976] Module `super().__init__()` (#4065) * Module `super().__init__()` * remove NMS --- models/common.py | 42 ++++++++++++++++++------------------------ models/experimental.py | 12 ++++++------ models/yolo.py | 20 +++----------------- 3 files changed, 27 insertions(+), 47 deletions(-) diff --git a/models/common.py b/models/common.py index 4db90b54663e..901648b693a3 100644 --- a/models/common.py +++ b/models/common.py @@ -36,7 +36,7 @@ def DWConv(c1, c2, k=1, s=1, act=True): class Conv(nn.Module): # Standard convolution def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups - super(Conv, self).__init__() + super().__init__() self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False) self.bn = nn.BatchNorm2d(c2) self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity()) @@ -87,7 +87,7 @@ def forward(self, x): class Bottleneck(nn.Module): # Standard bottleneck def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion - super(Bottleneck, self).__init__() + super().__init__() c_ = int(c2 * e) # hidden channels self.cv1 = Conv(c1, c_, 1, 1) self.cv2 = Conv(c_, c2, 3, 1, g=g) @@ -100,7 +100,7 @@ def forward(self, x): class BottleneckCSP(nn.Module): # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super(BottleneckCSP, self).__init__() + super().__init__() c_ = int(c2 * e) # hidden channels self.cv1 = Conv(c1, c_, 1, 1) self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False) @@ -119,7 +119,7 @@ def forward(self, x): class C3(nn.Module): # CSP Bottleneck with 3 convolutions def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super(C3, self).__init__() + super().__init__() c_ = int(c2 * e) # hidden channels self.cv1 = Conv(c1, c_, 1, 1) self.cv2 = Conv(c1, c_, 1, 1) @@ -139,10 +139,18 @@ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): self.m = TransformerBlock(c_, c_, 4, n) +class C3SPP(C3): + # C3 module with SPP() + def __init__(self, c1, c2, k=(5, 9, 13), n=1, shortcut=True, g=1, e=0.5): + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2 * e) + self.m = SPP(c_, c_, k) + + class SPP(nn.Module): # Spatial pyramid pooling layer used in YOLOv3-SPP def __init__(self, c1, c2, k=(5, 9, 13)): - super(SPP, self).__init__() + 
super().__init__() c_ = c1 // 2 # hidden channels self.cv1 = Conv(c1, c_, 1, 1) self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1) @@ -156,7 +164,7 @@ def forward(self, x): class Focus(nn.Module): # Focus wh information into c-space def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups - super(Focus, self).__init__() + super().__init__() self.conv = Conv(c1 * 4, c2, k, s, p, g, act) # self.contract = Contract(gain=2) @@ -196,27 +204,13 @@ def forward(self, x): class Concat(nn.Module): # Concatenate a list of tensors along dimension def __init__(self, dimension=1): - super(Concat, self).__init__() + super().__init__() self.d = dimension def forward(self, x): return torch.cat(x, self.d) -class NMS(nn.Module): - # Non-Maximum Suppression (NMS) module - conf = 0.25 # confidence threshold - iou = 0.45 # IoU threshold - classes = None # (optional list) filter by class - max_det = 1000 # maximum number of detections per image - - def __init__(self): - super(NMS, self).__init__() - - def forward(self, x): - return non_max_suppression(x[0], self.conf, iou_thres=self.iou, classes=self.classes, max_det=self.max_det) - - class AutoShape(nn.Module): # YOLOv5 input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS conf = 0.25 # NMS confidence threshold @@ -225,7 +219,7 @@ class AutoShape(nn.Module): max_det = 1000 # maximum number of detections per image def __init__(self, model): - super(AutoShape, self).__init__() + super().__init__() self.model = model.eval() def autoshape(self): @@ -292,7 +286,7 @@ def forward(self, imgs, size=640, augment=False, profile=False): class Detections: # YOLOv5 detections class for inference results def __init__(self, imgs, pred, files, times=None, names=None, shape=None): - super(Detections, self).__init__() + super().__init__() d = pred[0].device # device gn = [torch.tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.], device=d) for im in imgs] # normalizations self.imgs = imgs # list of images as numpy arrays @@ -383,7 +377,7 @@ def __len__(self): class Classify(nn.Module): # Classification head, i.e. 
x(b,c1,20,20) to x(b,c2) def __init__(self, c1, c2, k=1, s=1, p=None, g=1): # ch_in, ch_out, kernel, stride, padding, groups - super(Classify, self).__init__() + super().__init__() self.aap = nn.AdaptiveAvgPool2d(1) # to x(b,c1,1,1) self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g) # to x(b,c2,1,1) self.flat = nn.Flatten() diff --git a/models/experimental.py b/models/experimental.py index 30dc36192bc0..0d996d913b0c 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -12,7 +12,7 @@ class CrossConv(nn.Module): # Cross Convolution Downsample def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False): # ch_in, ch_out, kernel, stride, groups, expansion, shortcut - super(CrossConv, self).__init__() + super().__init__() c_ = int(c2 * e) # hidden channels self.cv1 = Conv(c1, c_, (1, k), (1, s)) self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g) @@ -25,7 +25,7 @@ def forward(self, x): class Sum(nn.Module): # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070 def __init__(self, n, weight=False): # n: number of inputs - super(Sum, self).__init__() + super().__init__() self.weight = weight # apply weights boolean self.iter = range(n - 1) # iter object if weight: @@ -46,7 +46,7 @@ def forward(self, x): class GhostConv(nn.Module): # Ghost Convolution https://github.com/huawei-noah/ghostnet def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups - super(GhostConv, self).__init__() + super().__init__() c_ = c2 // 2 # hidden channels self.cv1 = Conv(c1, c_, k, s, None, g, act) self.cv2 = Conv(c_, c_, 5, 1, None, c_, act) @@ -59,7 +59,7 @@ def forward(self, x): class GhostBottleneck(nn.Module): # Ghost Bottleneck https://github.com/huawei-noah/ghostnet def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride - super(GhostBottleneck, self).__init__() + super().__init__() c_ = c2 // 2 self.conv = nn.Sequential(GhostConv(c1, c_, 1, 1), # pw DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw @@ -74,7 +74,7 @@ def forward(self, x): class MixConv2d(nn.Module): # Mixed Depthwise Conv https://arxiv.org/abs/1907.09595 def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True): - super(MixConv2d, self).__init__() + super().__init__() groups = len(k) if equal_ch: # equal c_ per group i = torch.linspace(0, groups - 1E-6, c2).floor() # c2 indices @@ -98,7 +98,7 @@ def forward(self, x): class Ensemble(nn.ModuleList): # Ensemble of models def __init__(self): - super(Ensemble, self).__init__() + super().__init__() def forward(self, x, augment=False, profile=False, visualize=False): y = [] diff --git a/models/yolo.py b/models/yolo.py index 3a3af9b5fbde..2e7a20f813e2 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -33,7 +33,7 @@ class Detect(nn.Module): onnx_dynamic = False # ONNX export parameter def __init__(self, nc=80, anchors=(), ch=(), inplace=True): # detection layer - super(Detect, self).__init__() + super().__init__() self.nc = nc # number of classes self.no = nc + 5 # number of outputs per anchor self.nl = len(anchors) # number of detection layers @@ -77,7 +77,7 @@ def _make_grid(nx=20, ny=20): class Model(nn.Module): def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, input channels, number of classes - super(Model, self).__init__() + super().__init__() if isinstance(cfg, dict): self.yaml = cfg # model dict else: # is *.yaml @@ -209,20 +209,6 @@ def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers self.info() return self - def nms(self, mode=True): # add or remove NMS 
module - present = type(self.model[-1]) is NMS # last layer is NMS - if mode and not present: - LOGGER.info('Adding NMS... ') - m = NMS() # module - m.f = -1 # from - m.i = self.model[-1].i + 1 # index - self.model.add_module(name='%s' % m.i, module=m) # add - self.eval() - elif not mode and present: - LOGGER.info('Removing NMS... ') - self.model = self.model[:-1] # remove - return self - def autoshape(self): # add AutoShape module LOGGER.info('Adding AutoShape... ') m = AutoShape(self) # wrap model @@ -250,7 +236,7 @@ def parse_model(d, ch): # model_dict, input_channels(3) n = max(round(n * gd), 1) if n > 1 else n # depth gain if m in [Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP, - C3, C3TR]: + C3, C3TR, C3SPP]: c1, c2 = ch[f], args[0] if c2 != no: # if not output c2 = make_divisible(c2 * gw, 8) From c8a98cb7cbbf5d05abb9b134ada0c75d0dc62a6c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 19 Jul 2021 13:10:21 +0200 Subject: [PATCH 0416/1976] Missing `nc` and `names` handling in check_dataset() (#4066) --- utils/general.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/utils/general.py b/utils/general.py index 846c1464c28c..08a3ff6539b2 100755 --- a/utils/general.py +++ b/utils/general.py @@ -231,6 +231,9 @@ def check_dataset(data, autodownload=True): if data.get(k): # prepend path data[k] = str(path / data[k]) if isinstance(data[k], str) else [str(path / x) for x in data[k]] + assert 'nc' in data, "Dataset 'nc' key missing." + if 'names' not in data: + data['names'] = [str(i) for i in range(data['nc'])] # assign class names if missing train, val, test, s = [data.get(x) for x in ('train', 'val', 'test', 'download')] if val: val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path From ee76a68f1d9a4d2d4ff995bda99ee2748fa49fe6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 19 Jul 2021 13:14:09 +0200 Subject: [PATCH 0417/1976] Created using Colaboratory --- tutorial.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 957c0e140f88..f316dc5f550a 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -1166,7 +1166,7 @@ "model = torch.hub.load('ultralytics/yolov5', 'yolov5s')\n", "\n", "# Images\n", - "dir = 'https://github.com/ultralytics/yolov5/raw/master/data/images/'\n", + "dir = 'https://ultralytics.com/images/'\n", "imgs = [dir + f for f in ('zidane.jpg', 'bus.jpg')] # batch of images\n", "\n", "# Inference\n", From 7fdcc77bf408a11357be4d6e9be65e4bb85e6a1c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 19 Jul 2021 13:23:19 +0200 Subject: [PATCH 0418/1976] Albumentations >= 1.0.3 (#4068) --- requirements.txt | 2 +- utils/augmentations.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index 886d21ce8047..f1629eafc65a 100755 --- a/requirements.txt +++ b/requirements.txt @@ -27,5 +27,5 @@ pandas # extras -------------------------------------- # Cython # for pycocotools https://github.com/cocodataset/cocoapi/issues/172 # pycocotools>=2.0 # COCO mAP -# albumentations>=1.0.2 +# albumentations>=1.0.3 thop # FLOPs computation diff --git a/utils/augmentations.py b/utils/augmentations.py index 69b835db0db9..cf64f2f9db1f 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -17,7 +17,7 @@ def __init__(self): self.transform = None try: import albumentations as A - check_version(A.__version__, '1.0.2') # version requirement + check_version(A.__version__, '1.0.3') # 
version requirement self.transform = A.Compose([ A.Blur(p=0.1), From 0cc7c587870f31f0fc175a74048ceca616870aea Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Mon, 19 Jul 2021 17:27:13 +0530 Subject: [PATCH 0419/1976] W&B: fix refactor bugs (#4069) --- utils/wandb_logging/wandb_utils.py | 8 ++++---- val.py | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/utils/wandb_logging/wandb_utils.py b/utils/wandb_logging/wandb_utils.py index a7e84ca100e4..03f2d151bdc3 100644 --- a/utils/wandb_logging/wandb_utils.py +++ b/utils/wandb_logging/wandb_utils.py @@ -106,6 +106,7 @@ def __init__(self, opt, name, run_id, data_dict, job_type='Training'): self.data_dict = data_dict self.bbox_media_panel_images = [] self.val_table_path_map = None + self.max_imgs_to_log = 16 # It's more elegant to stick to 1 wandb.init call, but useful config data is overwritten in the WandbLogger's wandb.init call if isinstance(opt.resume, str): # checks resume from artifact if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): @@ -133,7 +134,7 @@ def __init__(self, opt, name, run_id, data_dict, job_type='Training'): if not opt.resume: wandb_data_dict = self.check_and_upload_dataset(opt) if opt.upload_dataset else data_dict # Info useful for resuming from artifacts - self.wandb_run.config.update({'opt': vars(opt), 'data_dict': data_dict}, allow_val_change=True) + self.wandb_run.config.update({'opt': vars(opt), 'data_dict': wandb_data_dict}, allow_val_change=True) self.data_dict = self.setup_training(opt, data_dict) if self.job_type == 'Dataset Creation': self.data_dict = self.check_and_upload_dataset(opt) @@ -152,7 +153,7 @@ def check_and_upload_dataset(self, opt): return wandb_data_dict def setup_training(self, opt, data_dict): - self.log_dict, self.current_epoch, self.log_imgs = {}, 0, 16 # Logging Constants + self.log_dict, self.current_epoch = {}, 0 self.bbox_interval = opt.bbox_interval if isinstance(opt.resume, str): modeldir, _ = self.download_model_artifact(opt) @@ -317,8 +318,7 @@ def val_one_image(self, pred, predn, path, names, im): if self.val_table and self.result_table: # Log Table if Val dataset is uploaded as artifact self.log_training_progress(predn, path, names) else: # Default to bbox media panel if Val artifact not found - log_imgs = min(self.log_imgs, 100) - if len(self.bbox_media_panel_images) < log_imgs and self.current_epoch > 0: + if len(self.bbox_media_panel_images) < self.max_imgs_to_log and self.current_epoch > 0: if self.current_epoch % self.bbox_interval == 0: box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, "class_id": int(cls), diff --git a/val.py b/val.py index 5a8486720577..e493dfe66ae8 100644 --- a/val.py +++ b/val.py @@ -215,7 +215,7 @@ def run(data, save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / (path.stem + '.txt')) if save_json: save_one_json(predn, jdict, path, class_map) # append to COCO-JSON dictionary - if wandb_logger: + if wandb_logger and wandb_logger.wandb_run: wandb_logger.val_one_image(pred, predn, path, names, img[si]) # Plot images From 442a7abdf263bb24c51e494b4fd41d81cb097943 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 20 Jul 2021 13:21:52 +0200 Subject: [PATCH 0420/1976] Refactor `export.py` (#4080) * Refactor `export.py` * cleanup * Update check_requirements() * Update export.py --- export.py | 148 +++++++++++++++++++++++++++++------------------------- 1 file changed, 80 insertions(+), 68 deletions(-) diff --git a/export.py b/export.py index b7ff0748ba93..34cd21449bc0 100644 ---
a/export.py +++ b/export.py @@ -24,6 +24,78 @@ from utils.torch_utils import select_device +def export_torchscript(model, img, file, optimize): + # TorchScript model export + prefix = colorstr('TorchScript:') + try: + print(f'\n{prefix} starting export with torch {torch.__version__}...') + f = file.with_suffix('.torchscript.pt') + ts = torch.jit.trace(model, img, strict=False) + (optimize_for_mobile(ts) if optimize else ts).save(f) + print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + return ts + except Exception as e: + print(f'{prefix} export failure: {e}') + + +def export_onnx(model, img, file, opset_version, train, dynamic, simplify): + # ONNX model export + prefix = colorstr('ONNX:') + try: + check_requirements(('onnx', 'onnx-simplifier')) + import onnx + + print(f'{prefix} starting export with onnx {onnx.__version__}...') + f = file.with_suffix('.onnx') + torch.onnx.export(model, img, f, verbose=False, opset_version=opset_version, + training=torch.onnx.TrainingMode.TRAINING if train else torch.onnx.TrainingMode.EVAL, + do_constant_folding=not train, + input_names=['images'], + output_names=['output'], + dynamic_axes={'images': {0: 'batch', 2: 'height', 3: 'width'}, # shape(1,3,640,640) + 'output': {0: 'batch', 1: 'anchors'} # shape(1,25200,85) + } if dynamic else None) + + # Checks + model_onnx = onnx.load(f) # load onnx model + onnx.checker.check_model(model_onnx) # check onnx model + # print(onnx.helper.printable_graph(model_onnx.graph)) # print + + # Simplify + if simplify: + try: + import onnxsim + + print(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...') + model_onnx, check = onnxsim.simplify( + model_onnx, + dynamic_input_shape=dynamic, + input_shapes={'images': list(img.shape)} if dynamic else None) + assert check, 'assert check failed' + onnx.save(model_onnx, f) + except Exception as e: + print(f'{prefix} simplifier failure: {e}') + print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + except Exception as e: + print(f'{prefix} export failure: {e}') + + +def export_coreml(ts_model, img, file, train): + # CoreML model export + prefix = colorstr('CoreML:') + try: + import coremltools as ct + + print(f'{prefix} starting export with coremltools {ct.__version__}...') + f = file.with_suffix('.mlmodel') + assert train, 'CoreML exports should be placed in model.train() mode with `python export.py --train`' + model = ct.convert(ts_model, inputs=[ct.ImageType('image', shape=img.shape, scale=1 / 255.0, bias=[0, 0, 0])]) + model.save(f) + print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + except Exception as e: + print(f'{prefix} export failure: {e}') + + def run(weights='./yolov5s.pt', # weights path img_size=(640, 640), # image (height, width) batch_size=1, # batch size @@ -40,12 +112,13 @@ def run(weights='./yolov5s.pt', # weights path t = time.time() include = [x.lower() for x in include] img_size *= 2 if len(img_size) == 1 else 1 # expand + file = Path(weights) # Load PyTorch model device = select_device(device) assert not (device.type == 'cpu' and half), '--half only compatible with GPU export, i.e. 
use --device 0' model = attempt_load(weights, map_location=device) # load FP32 model - labels = model.names + names = model.names # Input gs = int(max(model.stride)) # grid size (max stride) @@ -57,7 +130,6 @@ def run(weights='./yolov5s.pt', # weights path img, model = img.half(), model.half() # to FP16 model.train() if train else model.eval() # training mode = no Detect() layer grid construction for k, m in model.named_modules(): - m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility if isinstance(m, Conv): # assign export-friendly activations if isinstance(m.act, nn.Hardswish): m.act = Hardswish() @@ -72,73 +144,13 @@ def run(weights='./yolov5s.pt', # weights path y = model(img) # dry runs print(f"\n{colorstr('PyTorch:')} starting from {weights} ({file_size(weights):.1f} MB)") - # TorchScript export ----------------------------------------------------------------------------------------------- - if 'torchscript' in include or 'coreml' in include: - prefix = colorstr('TorchScript:') - try: - print(f'\n{prefix} starting export with torch {torch.__version__}...') - f = weights.replace('.pt', '.torchscript.pt') # filename - ts = torch.jit.trace(model, img, strict=False) - (optimize_for_mobile(ts) if optimize else ts).save(f) - print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - except Exception as e: - print(f'{prefix} export failure: {e}') - - # ONNX export ------------------------------------------------------------------------------------------------------ + # Exports if 'onnx' in include: - prefix = colorstr('ONNX:') - try: - import onnx - - print(f'{prefix} starting export with onnx {onnx.__version__}...') - f = weights.replace('.pt', '.onnx') # filename - torch.onnx.export(model, img, f, verbose=False, opset_version=opset_version, - training=torch.onnx.TrainingMode.TRAINING if train else torch.onnx.TrainingMode.EVAL, - do_constant_folding=not train, - input_names=['images'], - output_names=['output'], - dynamic_axes={'images': {0: 'batch', 2: 'height', 3: 'width'}, # shape(1,3,640,640) - 'output': {0: 'batch', 1: 'anchors'} # shape(1,25200,85) - } if dynamic else None) - - # Checks - model_onnx = onnx.load(f) # load onnx model - onnx.checker.check_model(model_onnx) # check onnx model - # print(onnx.helper.printable_graph(model_onnx.graph)) # print - - # Simplify - if simplify: - try: - check_requirements(['onnx-simplifier']) - import onnxsim - - print(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...') - model_onnx, check = onnxsim.simplify( - model_onnx, - dynamic_input_shape=dynamic, - input_shapes={'images': list(img.shape)} if dynamic else None) - assert check, 'assert check failed' - onnx.save(model_onnx, f) - except Exception as e: - print(f'{prefix} simplifier failure: {e}') - print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - except Exception as e: - print(f'{prefix} export failure: {e}') - - # CoreML export ---------------------------------------------------------------------------------------------------- - if 'coreml' in include: - prefix = colorstr('CoreML:') - try: - import coremltools as ct - - print(f'{prefix} starting export with coremltools {ct.__version__}...') - assert train, 'CoreML exports should be placed in model.train() mode with `python export.py --train`' - model = ct.convert(ts, inputs=[ct.ImageType('image', shape=img.shape, scale=1 / 255.0, bias=[0, 0, 0])]) - f = weights.replace('.pt', '.mlmodel') # filename - model.save(f) - print(f'{prefix} export success, saved as {f} 
({file_size(f):.1f} MB)') - except Exception as e: - print(f'{prefix} export failure: {e}') + export_onnx(model, img, file, opset_version, train, dynamic, simplify) + if 'torchscript' in include or 'coreml' in include: + ts = export_torchscript(model, img, file, optimize) + if 'coreml' in include: + export_coreml(ts, img, file, train) # Finish print(f'\nExport complete ({time.time() - t:.2f}s). Visualize with https://github.com/lutzroeder/netron.') From 3bef77f5cb7eda3fa3cae53f2579cd3363c99744 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 20 Jul 2021 18:42:27 +0200 Subject: [PATCH 0421/1976] Addition refactor `export.py` (#4089) * Addition refactor `export.py` * Update export.py --- export.py | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/export.py b/export.py index 34cd21449bc0..1e3a5ed6a4af 100644 --- a/export.py +++ b/export.py @@ -45,7 +45,7 @@ def export_onnx(model, img, file, opset_version, train, dynamic, simplify): check_requirements(('onnx', 'onnx-simplifier')) import onnx - print(f'{prefix} starting export with onnx {onnx.__version__}...') + print(f'\n{prefix} starting export with onnx {onnx.__version__}...') f = file.with_suffix('.onnx') torch.onnx.export(model, img, f, verbose=False, opset_version=opset_version, training=torch.onnx.TrainingMode.TRAINING if train else torch.onnx.TrainingMode.EVAL, @@ -80,16 +80,17 @@ def export_onnx(model, img, file, opset_version, train, dynamic, simplify): print(f'{prefix} export failure: {e}') -def export_coreml(ts_model, img, file, train): +def export_coreml(model, img, file): # CoreML model export prefix = colorstr('CoreML:') try: import coremltools as ct - print(f'{prefix} starting export with coremltools {ct.__version__}...') + print(f'\n{prefix} starting export with coremltools {ct.__version__}...') f = file.with_suffix('.mlmodel') - assert train, 'CoreML exports should be placed in model.train() mode with `python export.py --train`' - model = ct.convert(ts_model, inputs=[ct.ImageType('image', shape=img.shape, scale=1 / 255.0, bias=[0, 0, 0])]) + model.train() # CoreML exports should be placed in model.train() mode + ts = torch.jit.trace(model, img, strict=False) # TorchScript model + model = ct.convert(ts, inputs=[ct.ImageType('image', shape=img.shape, scale=1 / 255.0, bias=[0, 0, 0])]) model.save(f) print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') except Exception as e: @@ -145,12 +146,12 @@ def run(weights='./yolov5s.pt', # weights path print(f"\n{colorstr('PyTorch:')} starting from {weights} ({file_size(weights):.1f} MB)") # Exports + if 'torchscript' in include: + export_torchscript(model, img, file, optimize) if 'onnx' in include: export_onnx(model, img, file, opset_version, train, dynamic, simplify) - if 'torchscript' in include or 'coreml' in include: - ts = export_torchscript(model, img, file, optimize) - if 'coreml' in include: - export_coreml(ts, img, file, train) + if 'coreml' in include: + export_coreml(model, img, file) # Finish print(f'\nExport complete ({time.time() - t:.2f}s). 
Visualize with https://github.com/lutzroeder/netron.') From 2c073cd207bae1163b472c561d3fd31b1d2ba870 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 21 Jul 2021 16:50:47 +0200 Subject: [PATCH 0422/1976] Add train.py `--img-size` floor (#4099) --- train.py | 2 +- utils/general.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/train.py b/train.py index b1afaf8ada75..9a844ebac0de 100644 --- a/train.py +++ b/train.py @@ -207,7 +207,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Image sizes gs = max(int(model.stride.max()), 32) # grid size (max stride) nl = model.model[-1].nl # number of detection layers (used for scaling hyp['obj']) - imgsz = check_img_size(opt.imgsz, gs) # verify imgsz is gs-multiple + imgsz = check_img_size(opt.imgsz, gs, floor=gs * 2) # verify imgsz is gs-multiple # DP mode if cuda and RANK == -1 and torch.cuda.device_count() > 1: diff --git a/utils/general.py b/utils/general.py index 08a3ff6539b2..fabd0f35fe9e 100755 --- a/utils/general.py +++ b/utils/general.py @@ -181,11 +181,11 @@ def check_requirements(requirements='requirements.txt', exclude=()): print(emojis(s)) # emoji-safe -def check_img_size(img_size, s=32): +def check_img_size(img_size, s=32, floor=0): # Verify img_size is a multiple of stride s - new_size = make_divisible(img_size, int(s)) # ceil gs-multiple + new_size = max(make_divisible(img_size, int(s)), floor) # ceil gs-multiple if new_size != img_size: - print('WARNING: --img-size %g must be multiple of max stride %g, updating to %g' % (img_size, s, new_size)) + print(f'WARNING: --img-size {img_size} must be multiple of max stride {s}, updating to {new_size}') return new_size From 4bad9147611238f31a66ba5414b35e8ca604ea37 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 22 Jul 2021 17:22:11 +0200 Subject: [PATCH 0423/1976] Update resume.py (#4115) --- utils/aws/resume.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/aws/resume.py b/utils/aws/resume.py index 4b0d4246b594..e869834e96e7 100644 --- a/utils/aws/resume.py +++ b/utils/aws/resume.py @@ -28,7 +28,7 @@ if ddp: # multi-GPU port += 1 - cmd = f'python -m torch.distributed.launch --nproc_per_node {nd} --master_port {port} train.py --resume {last}' + cmd = f'python -m torch.distributed.run --nproc_per_node {nd} --master_port {port} train.py --resume {last}' else: # single-GPU cmd = f'python train.py --resume {last}' From 4495e00016cb18b35011bf99da1beb4eb639186b Mon Sep 17 00:00:00 2001 From: imyhxy Date: Fri, 23 Jul 2021 20:55:00 +0800 Subject: [PATCH 0424/1976] Fix indentation in `log_training_progress()` (#4126) --- utils/wandb_logging/wandb_utils.py | 40 +++++++++++++++--------------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/utils/wandb_logging/wandb_utils.py b/utils/wandb_logging/wandb_utils.py index 03f2d151bdc3..4986e01afe36 100644 --- a/utils/wandb_logging/wandb_utils.py +++ b/utils/wandb_logging/wandb_utils.py @@ -293,26 +293,26 @@ def create_dataset_table(self, dataset, class_to_id, name='dataset'): return artifact def log_training_progress(self, predn, path, names): - class_set = wandb.Classes([{'id': id, 'name': name} for id, name in names.items()]) - box_data = [] - total_conf = 0 - for *xyxy, conf, cls in predn.tolist(): - if conf >= 0.25: - box_data.append( - {"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, - "class_id": int(cls), - "box_caption": "%s %.3f" % (names[cls], conf), - "scores": {"class_score": conf}, - "domain": "pixel"}) - total_conf = total_conf
+ conf - boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space - id = self.val_table_path_map[Path(path).name] - self.result_table.add_data(self.current_epoch, - id, - self.val_table.data[id][1], - wandb.Image(self.val_table.data[id][1], boxes=boxes, classes=class_set), - total_conf / max(1, len(box_data)) - ) + class_set = wandb.Classes([{'id': id, 'name': name} for id, name in names.items()]) + box_data = [] + total_conf = 0 + for *xyxy, conf, cls in predn.tolist(): + if conf >= 0.25: + box_data.append( + {"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, + "class_id": int(cls), + "box_caption": "%s %.3f" % (names[cls], conf), + "scores": {"class_score": conf}, + "domain": "pixel"}) + total_conf = total_conf + conf + boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space + id = self.val_table_path_map[Path(path).name] + self.result_table.add_data(self.current_epoch, + id, + self.val_table.data[id][1], + wandb.Image(self.val_table.data[id][1], boxes=boxes, classes=class_set), + total_conf / max(1, len(box_data)) + ) def val_one_image(self, pred, predn, path, names, im): if self.val_table and self.result_table: # Log Table if Val dataset is uploaded as artifact From 39ef6c7a801eb666dbea5b36c8223517b84d9b81 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 24 Jul 2021 12:36:07 +0200 Subject: [PATCH 0425/1976] Update README.md (#4134) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 7dff1a0efd33..c27fbc6fa639 100755 --- a/README.md +++ b/README.md @@ -224,7 +224,7 @@ We are super excited about our first-ever Ultralytics YOLOv5 🚀 EXPORT Competi * APtest denotes COCO [test-dev2017](http://cocodataset.org/#upload) server results, all other AP results denote val2017 accuracy. * AP values are for single-model single-scale unless otherwise noted. **Reproduce mAP** by `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` - * SpeedGPU averaged over 5000 COCO val2017 images using a GCP [n1-standard-16](https://cloud.google.com/compute/docs/machine-types#n1_standard_machine_types) V100 instance, and includes FP16 inference, postprocessing and NMS. **Reproduce speed** by `python val.py --data coco.yaml --img 640 --conf 0.25 --iou 0.45` + * SpeedGPU averaged over 5000 COCO val2017 images using a GCP [n1-standard-16](https://cloud.google.com/compute/docs/machine-types#n1_standard_machine_types) V100 instance, and includes FP16 inference, postprocessing and NMS. **Reproduce speed** by `python val.py --data coco.yaml --img 640 --conf 0.25 --iou 0.45 --half` * All checkpoints are trained to 300 epochs with default settings and hyperparameters (no autoaugmentation). * Test Time Augmentation ([TTA](https://github.com/ultralytics/yolov5/issues/303)) includes reflection and scale augmentation. **Reproduce TTA** by `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`
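Those reproduce commands map one-to-one onto val.py's run() keywords, so the same measurements can be driven from Python instead of the CLI. A hedged sketch (it assumes run() mirrors the parse_opt() flag names, including a half keyword for FP16; treat the exact keywords as illustrative rather than a pinned API):

import val  # repo-root val.py

# mAP settings from the table notes above
val.run(data='coco.yaml', weights='yolov5s.pt', imgsz=640, conf_thres=0.001, iou_thres=0.65, half=True)
# speed settings
val.run(data='coco.yaml', weights='yolov5s.pt', imgsz=640, conf_thres=0.25, iou_thres=0.45, half=True)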
From 2e538443b721a8fa1bca2c51b59f5400fdd38bec Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 24 Jul 2021 13:08:51 +0200 Subject: [PATCH 0426/1976] ONNX inference update (#4073) --- detect.py | 54 ++++++++++++++++++++++++++++++++---------------------- 1 file changed, 32 insertions(+), 22 deletions(-) diff --git a/detect.py b/detect.py index 73f962398442..80517f342a41 100644 --- a/detect.py +++ b/detect.py @@ -64,18 +64,23 @@ def run(weights='yolov5s.pt', # model.pt path(s) half &= device.type != 'cpu' # half precision only supported on CUDA # Load model - model = attempt_load(weights, map_location=device) # load FP32 model - stride = int(model.stride.max()) # model stride + w = weights[0] if isinstance(weights, list) else weights + classify, pt, onnx = False, w.endswith('.pt'), w.endswith('.onnx') # inference type + stride, names = 64, [f'class{i}' for i in range(1000)] # assign defaults + if pt: + model = attempt_load(weights, map_location=device) # load FP32 model + stride = int(model.stride.max()) # model stride + names = model.module.names if hasattr(model, 'module') else model.names # get class names + if half: + model.half() # to FP16 + if classify: # second-stage classifier + modelc = load_classifier(name='resnet50', n=2) # initialize + modelc.load_state_dict(torch.load('resnet50.pt', map_location=device)['model']).to(device).eval() + elif onnx: + check_requirements(('onnx', 'onnxruntime')) + import onnxruntime + session = onnxruntime.InferenceSession(w, None) imgsz = check_img_size(imgsz, s=stride) # check image size - names = model.module.names if hasattr(model, 'module') else model.names # get class names - if half: - model.half() # to FP16 - - # Second-stage classifier - classify = False - if classify: - modelc = load_classifier(name='resnet50', n=2) # initialize - modelc.load_state_dict(torch.load('resnet50.pt', map_location=device)['model']).to(device).eval() # Dataloader if webcam: @@ -89,31 +94,36 @@ def run(weights='yolov5s.pt', # model.pt path(s) vid_path, vid_writer = [None] * bs, [None] * bs # Run inference - if device.type != 'cpu': + if pt and device.type != 'cpu': model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once t0 = time.time() for path, img, im0s, vid_cap in dataset: - img = torch.from_numpy(img).to(device) - img = img.half() if half else img.float() # uint8 to fp16/32 + if pt: + img = torch.from_numpy(img).to(device) + img = img.half() if half else img.float() # uint8 to fp16/32 + elif onnx: + img = img.astype('float32') img /= 255.0 # 0 - 255 to 0.0 - 1.0 - if img.ndimension() == 3: - img = img.unsqueeze(0) + if len(img.shape) == 3: + img = img[None] # expand for batch dim # Inference t1 = time_sync() - pred = model(img, - augment=augment, - visualize=increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False)[0] + if pt: + visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False + pred = model(img, augment=augment, visualize=visualize)[0] + elif onnx: + pred = torch.tensor(session.run([session.get_outputs()[0].name], {session.get_inputs()[0].name: img})) - # Apply NMS + # NMS pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det) t2 = time_sync() - # Apply Classifier + # Second-stage classifier (optional) if classify: pred = apply_classifier(pred, modelc, img, im0s) - # Process detections + # Process predictions for i, det in enumerate(pred): # detections per image if webcam: # batch_size >= 1 p, s, im0, frame = 
path[i], f'{i}: ', im0s[i].copy(), dataset.count From 264be1a6162780d46feb3e9eec9b43e3ff157ea0 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 24 Jul 2021 13:19:12 +0200 Subject: [PATCH 0427/1976] Rename `opset_version` to `opset` (#4135) --- export.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/export.py b/export.py index 1e3a5ed6a4af..ceee15e1644f 100644 --- a/export.py +++ b/export.py @@ -38,7 +38,7 @@ def export_torchscript(model, img, file, optimize): print(f'{prefix} export failure: {e}') -def export_onnx(model, img, file, opset_version, train, dynamic, simplify): +def export_onnx(model, img, file, opset, train, dynamic, simplify): # ONNX model export prefix = colorstr('ONNX:') try: @@ -47,7 +47,7 @@ def export_onnx(model, img, file, opset_version, train, dynamic, simplify): print(f'\n{prefix} starting export with onnx {onnx.__version__}...') f = file.with_suffix('.onnx') - torch.onnx.export(model, img, f, verbose=False, opset_version=opset_version, + torch.onnx.export(model, img, f, verbose=False, opset_version=opset, training=torch.onnx.TrainingMode.TRAINING if train else torch.onnx.TrainingMode.EVAL, do_constant_folding=not train, input_names=['images'], @@ -108,7 +108,7 @@ def run(weights='./yolov5s.pt', # weights path optimize=False, # TorchScript: optimize for mobile dynamic=False, # ONNX: dynamic axes simplify=False, # ONNX: simplify model - opset_version=12, # ONNX: opset version + opset=12, # ONNX: opset version ): t = time.time() include = [x.lower() for x in include] @@ -149,7 +149,7 @@ def run(weights='./yolov5s.pt', # weights path if 'torchscript' in include: export_torchscript(model, img, file, optimize) if 'onnx' in include: - export_onnx(model, img, file, opset_version, train, dynamic, simplify) + export_onnx(model, img, file, opset, train, dynamic, simplify) if 'coreml' in include: export_coreml(model, img, file) @@ -170,7 +170,7 @@ def parse_opt(): parser.add_argument('--optimize', action='store_true', help='TorchScript: optimize for mobile') parser.add_argument('--dynamic', action='store_true', help='ONNX: dynamic axes') parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model') - parser.add_argument('--opset-version', type=int, default=12, help='ONNX: opset version') + parser.add_argument('--opset', type=int, default=12, help='ONNX: opset version') opt = parser.parse_args() return opt From 63dd65e7edd96debbefa81e22f3d5cfb07dd2ba4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 24 Jul 2021 16:11:39 +0200 Subject: [PATCH 0428/1976] Update train.py (#4136) * Refactor train.py * Update imports * Update imports * Update optimizer * cleanup --- train.py | 104 +++++++++++++++++++++-------------------------- utils/general.py | 2 +- utils/loss.py | 2 +- 3 files changed, 49 insertions(+), 59 deletions(-) diff --git a/train.py b/train.py index 9a844ebac0de..ad13ed6a52e4 100644 --- a/train.py +++ b/train.py @@ -17,15 +17,13 @@ import math import numpy as np +import torch import torch.distributed as dist import torch.nn as nn -import torch.nn.functional as F -import torch.optim as optim -import torch.optim.lr_scheduler as lr_scheduler -import torch.utils.data import yaml from torch.cuda import amp from torch.nn.parallel import DistributedDataParallel as DDP +from torch.optim import Adam, SGD, lr_scheduler from torch.utils.tensorboard import SummaryWriter from tqdm import tqdm @@ -58,16 +56,13 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary device, ): save_dir, epochs, batch_size, weights, 
single_cls, evolve, data, cfg, resume, noval, nosave, workers, = \ - opt.save_dir, opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \ + Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \ opt.resume, opt.noval, opt.nosave, opt.workers # Directories - save_dir = Path(save_dir) - wdir = save_dir / 'weights' - wdir.mkdir(parents=True, exist_ok=True) # make dir - last = wdir / 'last.pt' - best = wdir / 'best.pt' - results_file = save_dir / 'results.txt' + w = save_dir / 'weights' # weights dir + w.mkdir(parents=True, exist_ok=True) # make dir + last, best, results_file = w / 'last.pt', w / 'best.pt', save_dir / 'results.txt' # Hyperparameters if isinstance(hyp, str): @@ -92,7 +87,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary loggers = {'wandb': None, 'tb': None} # loggers dict if RANK in [-1, 0]: # TensorBoard - if not evolve: + if plots: prefix = colorstr('tensorboard: ') LOGGER.info(f"{prefix}Start with 'tensorboard --logdir {opt.project}', view at http://localhost:6006/") loggers['tb'] = SummaryWriter(str(save_dir)) @@ -105,11 +100,11 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary loggers['wandb'] = wandb_logger.wandb if loggers['wandb']: data_dict = wandb_logger.data_dict - weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp # may update weights, epochs if resuming + weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp # may update values if resuming nc = 1 if single_cls else int(data_dict['nc']) # number of classes names = ['item'] if single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names - assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, data) # check + assert len(names) == nc, f'{len(names)} names found for nc={nc} dataset in {data}' # check is_coco = data.endswith('coco.yaml') and nc == 80 # COCO dataset # Model @@ -120,23 +115,22 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary ckpt = torch.load(weights, map_location=device) # load checkpoint model = Model(cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create exclude = ['anchor'] if (cfg or hyp.get('anchors')) and not resume else [] # exclude keys - state_dict = ckpt['model'].float().state_dict() # to FP32 - state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude) # intersect - model.load_state_dict(state_dict, strict=False) # load - LOGGER.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights)) # report + csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 + csd = intersect_dicts(csd, model.state_dict(), exclude=exclude) # intersect + model.load_state_dict(csd, strict=False) # load + LOGGER.info(f'Transferred {len(csd)}/{len(model.state_dict())} items from {weights}') # report else: model = Model(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create with torch_distributed_zero_first(RANK): check_dataset(data_dict) # check - train_path = data_dict['train'] - val_path = data_dict['val'] + train_path, val_path = data_dict['train'], data_dict['val'] # Freeze freeze = [] # parameter names to freeze (full or partial) for k, v in model.named_parameters(): v.requires_grad = True # train all layers if any(x in k for x in freeze): - print('freezing %s' % k) + print(f'freezing {k}') v.requires_grad = False # Optimizer @@ -145,33 +139,32 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary hyp['weight_decay'] *= batch_size * 
accumulate / nbs  # scale weight_decay
     LOGGER.info(f"Scaled weight_decay = {hyp['weight_decay']}")

-    pg0, pg1, pg2 = [], [], []  # optimizer parameter groups
-    for k, v in model.named_modules():
-        if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter):
-            pg2.append(v.bias)  # biases
-        if isinstance(v, nn.BatchNorm2d):
-            pg0.append(v.weight)  # no decay
-        elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):
-            pg1.append(v.weight)  # apply decay
+    g0, g1, g2 = [], [], []  # optimizer parameter groups
+    for v in model.modules():
+        if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter):  # bias
+            g2.append(v.bias)
+        if isinstance(v, nn.BatchNorm2d):  # weight (no decay)
+            g0.append(v.weight)
+        elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):  # weight (with decay)
+            g1.append(v.weight)

     if opt.adam:
-        optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999))  # adjust beta1 to momentum
+        optimizer = Adam(g0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999))  # adjust beta1 to momentum
     else:
-        optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)
+        optimizer = SGD(g0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)

-    optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']})  # add pg1 with weight_decay
-    optimizer.add_param_group({'params': pg2})  # add pg2 (biases)
-    LOGGER.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0)))
-    del pg0, pg1, pg2
+    optimizer.add_param_group({'params': g1, 'weight_decay': hyp['weight_decay']})  # add g1 with weight_decay
+    optimizer.add_param_group({'params': g2})  # add g2 (biases)
+    LOGGER.info(f"{colorstr('optimizer:')} {type(optimizer).__name__} with parameter groups "
+                f"{len(g0)} weight (no decay), {len(g1)} weight, {len(g2)} bias")
+    del g0, g1, g2

-    # Scheduler https://arxiv.org/pdf/1812.01187.pdf
-    # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
+    # Scheduler
     if opt.linear_lr:
         lf = lambda x: (1 - x / (epochs - 1)) * (1.0 - hyp['lrf']) + hyp['lrf']  # linear
     else:
         lf = one_cycle(1, hyp['lrf'], epochs)  # cosine 1->hyp['lrf']
-    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
-    # plot_lr_scheduler(optimizer, scheduler, epochs)
+    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)  # plot_lr_scheduler(optimizer, scheduler, epochs)

     # EMA
     ema = ModelEMA(model) if RANK in [-1, 0] else None
@@ -196,13 +189,12 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary
         # Epochs
         start_epoch = ckpt['epoch'] + 1
         if resume:
-            assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (weights, epochs)
+            assert start_epoch > 0, f'{weights} training to {epochs} epochs is finished, nothing to resume.'
         if epochs < start_epoch:
-            LOGGER.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' %
-                        (weights, ckpt['epoch'], epochs))
+            LOGGER.info(f"{weights} has been trained for {ckpt['epoch']} epochs.
Fine-tuning for {epochs} more epochs.") epochs += ckpt['epoch'] # finetune additional epochs - del ckpt, state_dict + del ckpt, csd # Image sizes gs = max(int(model.stride.max()), 32) # grid size (max stride) @@ -217,7 +209,6 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # SyncBatchNorm if opt.sync_bn and cuda and RANK != -1: - raise Exception('can not train with --sync-bn, known issue https://github.com/ultralytics/yolov5/issues/3998') model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device) LOGGER.info('Using SyncBatchNorm()') @@ -228,7 +219,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary prefix=colorstr('train: ')) mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class nb = len(train_loader) # number of batches - assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % (mlc, nc, data, nc - 1) + assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. Possible class labels are 0-{nc - 1}' # Process 0 if RANK in [-1, 0]: @@ -261,7 +252,6 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary hyp['label_smoothing'] = opt.label_smoothing model.nc = nc # attach number of classes to model model.hyp = hyp # attach hyperparameters to model - model.gr = 1.0 # iou loss ratio (obj_loss = 1.0 or iou) model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc # attach class weights model.names = names @@ -315,7 +305,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Warmup if ni <= nw: xi = [0, nw] # x interp - # model.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou) + # compute_loss.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou) accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round()) for j, x in enumerate(optimizer.param_groups): # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0 @@ -329,7 +319,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary sf = sz / max(imgs.shape[2:]) # scale factor if sf != 1: ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple) - imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False) + imgs = nn.functional.interpolate(imgs, size=ns, mode='bilinear', align_corners=False) # Forward with amp.autocast(enabled=cuda): @@ -355,7 +345,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Print if RANK in [-1, 0]: mloss = (mloss * i + loss_items) / (i + 1) # update mean losses - mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0) # (GB) + mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G' # (GB) s = ('%10s' * 2 + '%10.4g' * 6) % ( f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1]) pbar.set_description(s) @@ -381,7 +371,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # DDP process 0 or single-GPU if RANK in [-1, 0]: # mAP - ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride', 'class_weights']) + ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride', 'class_weights']) final_epoch = epoch + 1 == epochs if not noval or final_epoch: # Calculate mAP wandb_logger.current_epoch = epoch + 1 @@ -457,6 +447,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary batch_size=batch_size // WORLD_SIZE * 2, imgsz=imgsz, model=attempt_load(m, device).half(), + iou_thres=0.7, # NMS IoU threshold for best pycocotools results single_cls=single_cls, dataloader=val_loader, 
save_dir=save_dir, @@ -525,8 +516,7 @@ def main(opt): check_requirements(exclude=['thop']) # Resume - wandb_run = check_wandb_resume(opt) - if opt.resume and not wandb_run: # resume an interrupted run + if opt.resume and not check_wandb_resume(opt): # resume an interrupted run ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run() # specified or most recent path assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist' with open(Path(ckpt).parent.parent / 'opt.yaml') as f: @@ -534,7 +524,6 @@ def main(opt): opt.cfg, opt.weights, opt.resume = '', ckpt, True # reinstate LOGGER.info(f'Resuming training from {ckpt}') else: - # opt.hyp = opt.hyp or ('hyp.finetune.yaml' if opt.weights else 'hyp.scratch.yaml') opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp) # check files assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified' opt.name = 'evolve' if opt.evolve else opt.name @@ -545,11 +534,13 @@ def main(opt): if LOCAL_RANK != -1: from datetime import timedelta assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' + assert opt.batch_size % WORLD_SIZE == 0, '--batch-size must be multiple of CUDA device count' + assert not opt.image_weights, '--image-weights argument is not compatible with DDP training' + assert not opt.evolve, '--evolve argument is not compatible with DDP training' + assert not opt.sync_bn, '--sync-bn known training issue, see https://github.com/ultralytics/yolov5/issues/3998' torch.cuda.set_device(LOCAL_RANK) device = torch.device('cuda', LOCAL_RANK) dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo", timeout=timedelta(seconds=60)) - assert opt.batch_size % WORLD_SIZE == 0, '--batch-size must be multiple of CUDA device count' - assert not opt.image_weights, '--image-weights argument is not compatible with DDP training' # Train if not opt.evolve: @@ -594,7 +585,6 @@ def main(opt): hyp = yaml.safe_load(f) # load hyps dict if 'anchors' not in hyp: # anchors commented in hyp.yaml hyp['anchors'] = 3 - assert LOCAL_RANK == -1, 'DDP mode not implemented for --evolve' opt.noval, opt.nosave = True, True # only val/save final epoch # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices yaml_file = Path(opt.save_dir) / 'hyp_evolved.yaml' # save best result here @@ -646,7 +636,7 @@ def main(opt): def run(**kwargs): - # Usage: import train; train.run(imgsz=320, weights='yolov5m.pt') + # Usage: import train; train.run(data='coco128.yaml', imgsz=320, weights='yolov5m.pt') opt = parse_opt(True) for k, v in kwargs.items(): setattr(opt, k, v) diff --git a/utils/general.py b/utils/general.py index fabd0f35fe9e..db81f7679cd7 100755 --- a/utils/general.py +++ b/utils/general.py @@ -301,7 +301,7 @@ def clean_str(s): def one_cycle(y1=0.0, y2=1.0, steps=100): - # lambda function for sinusoidal ramp from y1 to y2 + # lambda function for sinusoidal ramp from y1 to y2 https://arxiv.org/pdf/1812.01187.pdf return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1 diff --git a/utils/loss.py b/utils/loss.py index 88f57693307c..22061a11ff27 100644 --- a/utils/loss.py +++ b/utils/loss.py @@ -108,7 +108,7 @@ def __init__(self, model, autobalance=False): det = model.module.model[-1] if is_parallel(model) else model.model[-1] # Detect() module self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, .02]) # P3-P7 self.ssi = list(det.stride).index(16) if autobalance else 0 # stride 16 index - 
self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, model.gr, h, autobalance + self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance for k in 'na', 'nc', 'nl', 'anchors': setattr(self, k, getattr(det, k)) From efe60b568130612ef9558db14b84462c297ceb3a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 25 Jul 2021 01:18:39 +0200 Subject: [PATCH 0429/1976] Refactor train.py and val.py `loggers` (#4137) * Update loggers * Config * Update val.py * cleanup * fix1 * fix2 * fix3 and reformat * format sweep.py * Logger() class * cleanup * cleanup2 * wandb package import fix * wandb package import fix2 * txt fix * fix4 * fix5 * fix6 * drop wandb into utils/loggers * fix 7 * rename loggers/wandb_logging to loggers/wandb * Update message * Update message * Update message * cleanup * Fix x axis bug * fix rank 0 issue * cleanup --- train.py | 87 +++--------- utils/loggers/__init__.py | 129 ++++++++++++++++++ .../wandb}/__init__.py | 0 .../wandb}/log_dataset.py | 0 .../{wandb_logging => loggers/wandb}/sweep.py | 2 +- .../wandb}/sweep.yaml | 2 +- .../wandb}/wandb_utils.py | 28 ++-- utils/plots.py | 5 +- val.py | 10 +- 9 files changed, 172 insertions(+), 91 deletions(-) create mode 100644 utils/loggers/__init__.py rename utils/{wandb_logging => loggers/wandb}/__init__.py (100%) rename utils/{wandb_logging => loggers/wandb}/log_dataset.py (100%) rename utils/{wandb_logging => loggers/wandb}/sweep.py (98%) rename utils/{wandb_logging => loggers/wandb}/sweep.yaml (98%) rename utils/{wandb_logging => loggers/wandb}/wandb_utils.py (96%) diff --git a/train.py b/train.py index ad13ed6a52e4..1c48fa49f0f7 100644 --- a/train.py +++ b/train.py @@ -10,7 +10,6 @@ import random import sys import time -import warnings from copy import deepcopy from pathlib import Path from threading import Thread @@ -24,7 +23,6 @@ from torch.cuda import amp from torch.nn.parallel import DistributedDataParallel as DDP from torch.optim import Adam, SGD, lr_scheduler -from torch.utils.tensorboard import SummaryWriter from tqdm import tqdm FILE = Path(__file__).absolute() @@ -42,8 +40,9 @@ from utils.loss import ComputeLoss from utils.plots import plot_images, plot_labels, plot_results, plot_evolution from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, de_parallel -from utils.wandb_logging.wandb_utils import WandbLogger, check_wandb_resume +from utils.loggers.wandb.wandb_utils import check_wandb_resume from utils.metrics import fitness +from utils.loggers import Loggers LOGGER = logging.getLogger(__name__) LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html @@ -76,37 +75,23 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary with open(save_dir / 'opt.yaml', 'w') as f: yaml.safe_dump(vars(opt), f, sort_keys=False) - # Configure + # Config plots = not evolve # create plots cuda = device.type != 'cpu' init_seeds(1 + RANK) with open(data) as f: data_dict = yaml.safe_load(f) # data dict - - # Loggers - loggers = {'wandb': None, 'tb': None} # loggers dict - if RANK in [-1, 0]: - # TensorBoard - if plots: - prefix = colorstr('tensorboard: ') - LOGGER.info(f"{prefix}Start with 'tensorboard --logdir {opt.project}', view at http://localhost:6006/") - loggers['tb'] = SummaryWriter(str(save_dir)) - - # W&B - opt.hyp = hyp # add hyperparameters - run_id = torch.load(weights).get('wandb_id') if weights.endswith('.pt') and os.path.isfile(weights) else None - run_id = run_id if 
opt.resume else None # start fresh run if transfer learning - wandb_logger = WandbLogger(opt, save_dir.stem, run_id, data_dict) - loggers['wandb'] = wandb_logger.wandb - if loggers['wandb']: - data_dict = wandb_logger.data_dict - weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp # may update values if resuming - nc = 1 if single_cls else int(data_dict['nc']) # number of classes names = ['item'] if single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names assert len(names) == nc, f'{len(names)} names found for nc={nc} dataset in {data}' # check is_coco = data.endswith('coco.yaml') and nc == 80 # COCO dataset + # Loggers + if RANK in [-1, 0]: + loggers = Loggers(save_dir, results_file, weights, opt, hyp, data_dict, LOGGER).start() # loggers dict + if loggers.wandb and resume: + weights, epochs, hyp, data_dict = opt.weights, opt.epochs, opt.hyp, loggers.wandb.data_dict + # Model pretrained = weights.endswith('.pt') if pretrained: @@ -351,16 +336,11 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary pbar.set_description(s) # Plot - if plots and ni < 3: - f = save_dir / f'train_batch{ni}.jpg' # filename - Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start() - if loggers['tb'] and ni == 0: # TensorBoard - with warnings.catch_warnings(): - warnings.simplefilter('ignore') # suppress jit trace warning - loggers['tb'].add_graph(torch.jit.trace(de_parallel(model), imgs[0:1], strict=False), []) - elif plots and ni == 10 and loggers['wandb']: - wandb_logger.log({'Mosaics': [loggers['wandb'].Image(str(x), caption=x.name) for x in - save_dir.glob('train*.jpg') if x.exists()]}) + if plots: + if ni < 3: + f = save_dir / f'train_batch{ni}.jpg' # filename + Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start() + loggers.on_train_batch_end(ni, model, imgs) # end batch ------------------------------------------------------------------------------------------------ @@ -368,13 +348,12 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary lr = [x['lr'] for x in optimizer.param_groups] # for loggers scheduler.step() - # DDP process 0 or single-GPU if RANK in [-1, 0]: # mAP + loggers.on_train_epoch_end(epoch) ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride', 'class_weights']) final_epoch = epoch + 1 == epochs if not noval or final_epoch: # Calculate mAP - wandb_logger.current_epoch = epoch + 1 results, maps, _ = val.run(data_dict, batch_size=batch_size // WORLD_SIZE * 2, imgsz=imgsz, @@ -385,29 +364,14 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary save_json=is_coco and final_epoch, verbose=nc < 50 and final_epoch, plots=plots and final_epoch, - wandb_logger=wandb_logger, + loggers=loggers, compute_loss=compute_loss) - # Write - with open(results_file, 'a') as f: - f.write(s + '%10.4g' * 7 % results + '\n') # append metrics, val_loss - - # Log - tags = ['train/box_loss', 'train/obj_loss', 'train/cls_loss', # train loss - 'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', - 'val/box_loss', 'val/obj_loss', 'val/cls_loss', # val loss - 'x/lr0', 'x/lr1', 'x/lr2'] # params - for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags): - if loggers['tb']: - loggers['tb'].add_scalar(tag, x, epoch) # TensorBoard - if loggers['wandb']: - wandb_logger.log({tag: x}) # W&B - # Update best mAP fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95] if fi > best_fitness: best_fitness = fi - 
wandb_logger.end_epoch(best_result=best_fitness == fi) + loggers.on_train_val_end(mloss, results, lr, epoch, s, best_fitness, fi) # Save model if (not nosave) or (final_epoch and not evolve): # if save @@ -418,16 +382,14 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary 'ema': deepcopy(ema.ema).half(), 'updates': ema.updates, 'optimizer': optimizer.state_dict(), - 'wandb_id': wandb_logger.wandb_run.id if loggers['wandb'] else None} + 'wandb_id': loggers.wandb.wandb_run.id if loggers.wandb else None} # Save last, best and delete torch.save(ckpt, last) if best_fitness == fi: torch.save(ckpt, best) - if loggers['wandb']: - if ((epoch + 1) % opt.save_period == 0 and not final_epoch) and opt.save_period != -1: - wandb_logger.log_model(last.parent, opt, epoch, fi, best_model=best_fitness == fi) del ckpt + loggers.on_model_save(last, epoch, final_epoch, best_fitness, fi) # end epoch ---------------------------------------------------------------------------------------------------- # end training ----------------------------------------------------------------------------------------------------- @@ -435,10 +397,6 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary LOGGER.info(f'{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.\n') if plots: plot_results(save_dir=save_dir) # save as results.png - if loggers['wandb']: - files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]] - wandb_logger.log({"Results": [loggers['wandb'].Image(str(save_dir / f), caption=f) for f in files - if (save_dir / f).exists()]}) if not evolve: if is_coco: # COCO dataset @@ -458,11 +416,8 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary for f in last, best: if f.exists(): strip_optimizer(f) # strip optimizers - if loggers['wandb']: # Log the stripped model - loggers['wandb'].log_artifact(str(best if best.exists() else last), type='model', - name='run_' + wandb_logger.wandb_run.id + '_model', - aliases=['latest', 'best', 'stripped']) - wandb_logger.finish_run() + + loggers.on_train_end(last, best) torch.cuda.empty_cache() return results diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py new file mode 100644 index 000000000000..ceca84c95252 --- /dev/null +++ b/utils/loggers/__init__.py @@ -0,0 +1,129 @@ +# YOLOv5 experiment logging utils + +import warnings + +import torch +from torch.utils.tensorboard import SummaryWriter + +from utils.general import colorstr, emojis +from utils.loggers.wandb.wandb_utils import WandbLogger +from utils.torch_utils import de_parallel + +LOGGERS = ('txt', 'tb', 'wandb') # text-file, TensorBoard, Weights & Biases + +try: + import wandb + + assert hasattr(wandb, '__version__') # verify package import not local dir +except (ImportError, AssertionError): + wandb = None + + +class Loggers(): + # YOLOv5 Loggers class + def __init__(self, save_dir=None, results_file=None, weights=None, opt=None, hyp=None, + data_dict=None, logger=None, include=LOGGERS): + self.save_dir = save_dir + self.results_file = results_file + self.weights = weights + self.opt = opt + self.hyp = hyp + self.data_dict = data_dict + self.logger = logger # for printing results to console + self.include = include + for k in LOGGERS: + setattr(self, k, None) # init empty logger dictionary + + def start(self): + self.txt = True # always log to txt + + # Message + try: + import wandb + except ImportError: + prefix = colorstr('Weights & Biases: ') + s = f"{prefix}run 'pip install wandb' to automatically track and visualize YOLOv5 
🚀 runs (RECOMMENDED)"
+            print(emojis(s))
+
+        # TensorBoard
+        s = self.save_dir
+        if 'tb' in self.include and not self.opt.evolve:
+            prefix = colorstr('TensorBoard: ')
+            self.logger.info(f"{prefix}Start with 'tensorboard --logdir {s.parent}', view at http://localhost:6006/")
+            self.tb = SummaryWriter(str(s))
+
+        # W&B
+        try:
+            assert 'wandb' in self.include and wandb
+            run_id = torch.load(self.weights).get('wandb_id') if self.opt.resume else None
+            self.opt.hyp = self.hyp  # add hyperparameters
+            self.wandb = WandbLogger(self.opt, s.stem, run_id, self.data_dict)
+        except:
+            self.wandb = None
+
+        return self
+
+    def on_train_batch_end(self, ni, model, imgs):
+        # Callback runs on train batch end
+        if ni == 0:
+            with warnings.catch_warnings():
+                warnings.simplefilter('ignore')  # suppress jit trace warning
+                self.tb.add_graph(torch.jit.trace(de_parallel(model), imgs[0:1], strict=False), [])
+        if self.wandb and ni == 10:
+            files = sorted(self.save_dir.glob('train*.jpg'))
+            self.wandb.log({'Mosaics': [wandb.Image(str(f), caption=f.name) for f in files if f.exists()]})
+
+    def on_train_epoch_end(self, epoch):
+        # Callback runs on train epoch end
+        if self.wandb:
+            self.wandb.current_epoch = epoch + 1
+
+    def on_val_batch_end(self, pred, predn, path, names, im):
+        # Callback runs on val batch end
+        if self.wandb:
+            self.wandb.val_one_image(pred, predn, path, names, im)
+
+    def on_val_end(self):
+        # Callback runs on val end
+        if self.wandb:
+            files = sorted(self.save_dir.glob('val*.jpg'))
+            self.wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in files]})
+
+    def on_train_val_end(self, mloss, results, lr, epoch, s, best_fitness, fi):
+        # Callback runs on validation end during training
+        vals = list(mloss[:-1]) + list(results) + lr
+        tags = ['train/box_loss', 'train/obj_loss', 'train/cls_loss',  # train loss
+                'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95',
+                'val/box_loss', 'val/obj_loss', 'val/cls_loss',  # val loss
+                'x/lr0', 'x/lr1', 'x/lr2']  # params
+        if self.txt:
+            with open(self.results_file, 'a') as f:
+                f.write(s + '%10.4g' * 7 % results + '\n')  # append metrics, val_loss
+        if self.tb:
+            for x, tag in zip(vals, tags):
+                self.tb.add_scalar(tag, x, epoch)  # TensorBoard
+        if self.wandb:
+            self.wandb.log({k: v for k, v in zip(tags, vals)})
+            self.wandb.end_epoch(best_result=best_fitness == fi)
+
+    def on_model_save(self, last, epoch, final_epoch, best_fitness, fi):
+        # Callback runs on model save event
+        if self.wandb:
+            if ((epoch + 1) % self.opt.save_period == 0 and not final_epoch) and self.opt.save_period != -1:
+                self.wandb.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi)
+
+    def on_train_end(self, last, best):
+        # Callback runs on training end
+        files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]]
+        files = [(self.save_dir / f) for f in files if (self.save_dir / f).exists()]  # filter
+        if self.wandb:
+            wandb.log({"Results": [wandb.Image(str(f), caption=f.name) for f in files]})
+            wandb.log_artifact(str(best if best.exists() else last), type='model',
+                               name='run_' + self.wandb.wandb_run.id + '_model',
+                               aliases=['latest', 'best', 'stripped'])
+            self.wandb.finish_run()
+
+    def log_images(self, paths):
+        # Log images
+        if self.wandb:
+            self.wandb.log({"Labels": [wandb.Image(str(x), caption=x.name) for x in paths]})
diff --git a/utils/wandb_logging/__init__.py b/utils/loggers/wandb/__init__.py
similarity index 100%
rename from utils/wandb_logging/__init__.py
rename to
utils/loggers/wandb/__init__.py diff --git a/utils/wandb_logging/log_dataset.py b/utils/loggers/wandb/log_dataset.py similarity index 100% rename from utils/wandb_logging/log_dataset.py rename to utils/loggers/wandb/log_dataset.py diff --git a/utils/wandb_logging/sweep.py b/utils/loggers/wandb/sweep.py similarity index 98% rename from utils/wandb_logging/sweep.py rename to utils/loggers/wandb/sweep.py index 6c8719b32006..8e952d03c085 100644 --- a/utils/wandb_logging/sweep.py +++ b/utils/loggers/wandb/sweep.py @@ -1,12 +1,12 @@ import sys from pathlib import Path + import wandb FILE = Path(__file__).absolute() sys.path.append(FILE.parents[2].as_posix()) # add utils/ to path from train import train, parse_opt -import test from utils.general import increment_path from utils.torch_utils import select_device diff --git a/utils/wandb_logging/sweep.yaml b/utils/loggers/wandb/sweep.yaml similarity index 98% rename from utils/wandb_logging/sweep.yaml rename to utils/loggers/wandb/sweep.yaml index 64e395533c1c..dcc95264f8cd 100644 --- a/utils/wandb_logging/sweep.yaml +++ b/utils/loggers/wandb/sweep.yaml @@ -14,7 +14,7 @@ # You can use grid, bayesian and hyperopt search strategy # For more info on configuring sweeps visit - https://docs.wandb.ai/guides/sweeps/configuration -program: utils/wandb_logging/sweep.py +program: utils/loggers/wandb/sweep.py method: random metric: name: metrics/mAP_0.5 diff --git a/utils/wandb_logging/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py similarity index 96% rename from utils/wandb_logging/wandb_utils.py rename to utils/loggers/wandb/wandb_utils.py index 4986e01afe36..db2693a9e11c 100644 --- a/utils/wandb_logging/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -1,4 +1,5 @@ """Utilities and tools for tracking runs with Weights & Biases.""" + import logging import os import sys @@ -8,15 +9,18 @@ import yaml from tqdm import tqdm -sys.path.append(str(Path(__file__).parent.parent.parent)) # add utils/ to path +FILE = Path(__file__).absolute() +sys.path.append(FILE.parents[3].as_posix()) # add yolov5/ to path + from utils.datasets import LoadImagesAndLabels from utils.datasets import img2label_paths -from utils.general import colorstr, check_dataset, check_file +from utils.general import check_dataset, check_file try: import wandb - from wandb import init, finish -except ImportError: + + assert hasattr(wandb, '__version__') # verify package import not local dir +except (ImportError, AssertionError): wandb = None RANK = int(os.getenv('RANK', -1)) @@ -106,7 +110,7 @@ def __init__(self, opt, name, run_id, data_dict, job_type='Training'): self.data_dict = data_dict self.bbox_media_panel_images = [] self.val_table_path_map = None - self.max_imgs_to_log = 16 + self.max_imgs_to_log = 16 # It's more elegant to stick to 1 wandb.init call, but useful config data is overwritten in the WandbLogger's wandb.init call if isinstance(opt.resume, str): # checks resume from artifact if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): @@ -134,13 +138,11 @@ def __init__(self, opt, name, run_id, data_dict, job_type='Training'): if not opt.resume: wandb_data_dict = self.check_and_upload_dataset(opt) if opt.upload_dataset else data_dict # Info useful for resuming from artifacts - self.wandb_run.config.update({'opt': vars(opt), 'data_dict': wandb_data_dict}, allow_val_change=True) + self.wandb_run.config.update({'opt': vars(opt), 'data_dict': wandb_data_dict}, + allow_val_change=True) self.data_dict = self.setup_training(opt, data_dict) if self.job_type == 'Dataset Creation': 
self.data_dict = self.check_and_upload_dataset(opt)
-        else:
-            prefix = colorstr('wandb: ')
-            print(f"{prefix}Install Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)")

     def check_and_upload_dataset(self, opt):
         assert wandb, 'Install wandb to upload dataset'
@@ -169,7 +171,7 @@ def setup_training(self, opt, data_dict):
                                                                            opt.artifact_alias)
             self.val_artifact_path, self.val_artifact = self.download_dataset_artifact(data_dict.get('val'),
                                                                                        opt.artifact_alias)
-
+
         if self.train_artifact_path is not None:
             train_path = Path(self.train_artifact_path) / 'data/images/'
             data_dict['train'] = str(train_path)
@@ -177,7 +179,6 @@ def setup_training(self, opt, data_dict):
             val_path = Path(self.val_artifact_path) / 'data/images/'
             data_dict['val'] = str(val_path)

-
         if self.val_artifact is not None:
             self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation")
             self.result_table = wandb.Table(["epoch", "id", "ground truth", "prediction", "avg_confidence"])
@@ -315,9 +316,9 @@ def log_training_progress(self, predn, path, names):
             )

     def val_one_image(self, pred, predn, path, names, im):
-        if self.val_table and self.result_table: # Log Table if Val dataset is uploaded as artifact
+        if self.val_table and self.result_table:  # Log Table if Val dataset is uploaded as artifact
             self.log_training_progress(predn, path, names)
-        else: # Default to bbox media panel if Val artifact not found
+        else:  # Default to bbox media panel if Val artifact not found
             if len(self.bbox_media_panel_images) < self.max_imgs_to_log and self.current_epoch > 0:
                 if self.current_epoch % self.bbox_interval == 0:
                     box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
@@ -328,7 +329,6 @@ def val_one_image(self, pred, predn, path, names, im):
                     boxes = {"predictions": {"box_data": box_data, "class_labels": names}}  # inference-space
                     self.bbox_media_panel_images.append(wandb.Image(im, boxes=boxes, caption=path.name))

-
     def log(self, log_dict):
         if self.wandb_run:
             for key, value in log_dict.items():
diff --git a/utils/plots.py b/utils/plots.py
index cd9a45e8c761..f9fd35fce751 100644
--- a/utils/plots.py
+++ b/utils/plots.py
@@ -327,9 +327,8 @@ def plot_labels(labels, names=(), save_dir=Path(''), loggers=None):
         plt.close()

     # loggers
-    for k, v in loggers.items() or {}:
-        if k == 'wandb' and v:
-            v.log({"Labels": [v.Image(str(x), caption=x.name) for x in save_dir.glob('*labels*.jpg')]}, commit=False)
+    if loggers:
+        loggers.log_images(save_dir.glob('*labels*.jpg'))


 def plot_evolution(yaml_file='data/hyp.finetune.yaml'):  # from utils.plots import *; plot_evolution()
diff --git a/val.py b/val.py
index e493dfe66ae8..2b088dcdf210 100644
--- a/val.py
+++ b/val.py
@@ -26,6 +26,7 @@
 from utils.metrics import ap_per_class, ConfusionMatrix
 from utils.plots import plot_images, output_to_target, plot_study_txt
 from utils.torch_utils import select_device, time_sync
+from utils.loggers import Loggers


 def save_one_txt(predn, save_conf, shape, file):
@@ -97,7 +98,7 @@ def run(data,
         dataloader=None,
         save_dir=Path(''),
         plots=True,
-        wandb_logger=None,
+        loggers=Loggers(),
         compute_loss=None,
         ):
     # Initialize/load model and set device
@@ -215,8 +216,7 @@ def run(data,
                 save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / (path.stem + '.txt'))
             if save_json:
                 save_one_json(predn, jdict, path, class_map)  # append to COCO-JSON dictionary
-            if wandb_logger and wandb_logger.wandb_run:
-                wandb_logger.val_one_image(pred, predn, path, names, img[si])
+            loggers.on_val_batch_end(pred, predn,
path, names, img[si]) # Plot images if plots and batch_i < 3: @@ -253,9 +253,7 @@ def run(data, # Plots if plots: confusion_matrix.plot(save_dir=save_dir, names=list(names.values())) - if wandb_logger and wandb_logger.wandb: - val_batches = [wandb_logger.wandb.Image(str(f), caption=f.name) for f in sorted(save_dir.glob('val*.jpg'))] - wandb_logger.log({"Validation": val_batches}) + loggers.on_val_end() # Save JSON if save_json and len(jdict): From d17b45eaad041b2fbb219232363bc865d4134d5e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 25 Jul 2021 01:22:00 +0200 Subject: [PATCH 0430/1976] Update README.md (#4143) --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index c27fbc6fa639..b4aacd78b0ca 100755 --- a/README.md +++ b/README.md @@ -82,10 +82,10 @@ Inference with YOLOv5 and [PyTorch Hub](https://github.com/ultralytics/yolov5/is import torch # Model -model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # or yolov5m, yolov5x, custom +model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # or yolov5m, yolov5l, yolov5x, custom # Images -img = 'https://ultralytics.com/images/zidane.jpg' # or file, PIL, OpenCV, numpy, multiple +img = 'https://ultralytics.com/images/zidane.jpg' # or PosixPath, PIL, OpenCV, numpy, list # Inference results = model(img) From 6e4358f3f3f770a3b4ececfc3da73e25b3d8a004 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 25 Jul 2021 13:46:16 +0200 Subject: [PATCH 0431/1976] Add `export.py` ONNX inference suggestion (#4146) --- export.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/export.py b/export.py index ceee15e1644f..c98e92d972c6 100644 --- a/export.py +++ b/export.py @@ -76,6 +76,7 @@ def export_onnx(model, img, file, opset, train, dynamic, simplify): except Exception as e: print(f'{prefix} simplifier failure: {e}') print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + print(f"{prefix} run --dynamic ONNX model inference with detect.py: 'python detect.py --weights {f}'") except Exception as e: print(f'{prefix} export failure: {e}') @@ -94,7 +95,7 @@ def export_coreml(model, img, file): model.save(f) print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') except Exception as e: - print(f'{prefix} export failure: {e}') + print(f'\n{prefix} export failure: {e}') def run(weights='./yolov5s.pt', # weights path From 3764277f95a1419fde96fdf68bc88d520e7dd0ed Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 25 Jul 2021 18:59:18 +0200 Subject: [PATCH 0432/1976] Created using Colaboratory --- tutorial.ipynb | 64 +++++++++----------------------------------------- 1 file changed, 11 insertions(+), 53 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index f316dc5f550a..88adc08c0ef1 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -1036,28 +1036,8 @@ "source": [ "## Local Logging\n", "\n", - "All results are logged by default to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc. View train and val jpgs to see mosaics, labels, predictions and augmentation effects. Note a **Mosaic Dataloader** is used for training (shown below), a new concept developed by Ultralytics and first featured in [YOLOv4](https://arxiv.org/abs/2004.10934)." 
- ] - }, - { - "cell_type": "code", - "metadata": { - "id": "riPdhraOTCO0" - }, - "source": [ - "Image(filename='runs/train/exp/train_batch0.jpg', width=800) # train batch 0 mosaics and labels\n", - "Image(filename='runs/train/exp/test_batch0_labels.jpg', width=800) # val batch 0 labels\n", - "Image(filename='runs/train/exp/test_batch0_pred.jpg', width=800) # val batch 0 predictions" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "OYG4WFEnTVrI" - }, - "source": [ + "All results are logged by default to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc. View train and val jpgs to see mosaics, labels, predictions and augmentation effects. Note an Ultralytics **Mosaic Dataloader** is used for training (shown below), which combined each original image with 3 additional random training images.\n", + "\n", "> \n", "`train_batch0.jpg` shows train batch 0 mosaics and labels\n", "\n", @@ -1065,38 +1045,16 @@ "`test_batch0_labels.jpg` shows val batch 0 labels\n", "\n", "> \n", - "`test_batch0_pred.jpg` shows val batch 0 _predictions_" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "7KN5ghjE6ZWh" - }, - "source": [ - "Training results are automatically logged to [Tensorboard](https://www.tensorflow.org/tensorboard) and `runs/train/exp/results.txt`, which is plotted as `results.png` (below) after training completes. You can also plot any `results.txt` file manually:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "MDznIqPF7nk3" - }, - "source": [ + "`test_batch0_pred.jpg` shows val batch 0 _predictions_\n", + "\n", + "Training results are automatically logged to [Tensorboard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) as `results.csv`, which is plotted as `results.png` (below) after training completes. You can also plot any `results.csv` file manually:\n", + "\n", + "```python\n", "from utils.plots import plot_results \n", - "plot_results(save_dir='runs/train/exp') # plot all results*.txt files in 'runs/train/exp'\n", - "Image(filename='runs/train/exp/results.png', width=800)" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "lfrEegCSW3fK" - }, - "source": [ - "

\"COCO128

" + "plot_results('path/to/results.csv') # plot 'results.csv' as 'results.png'\n", + "```\n", + "\n", + "

\"COCO128

" ] }, { From 96e36a7c913e2433446ff410a4cf60041010a524 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 25 Jul 2021 19:06:37 +0200 Subject: [PATCH 0433/1976] New CSV Logger (#4148) * New CSV Logger * cleanup * move batch plots into Logger * rename comment * Remove total loss from progress bar * mloss :-1 bug fix * Update plot_results() * Update plot_results() * plot_results bug fix --- .gitignore | 1 + train.py | 40 +++++++---------------- utils/loggers/__init__.py | 63 ++++++++++++++++++++++-------------- utils/loss.py | 3 +- utils/plots.py | 68 +++++++++------------------------------ val.py | 2 +- 6 files changed, 68 insertions(+), 109 deletions(-) diff --git a/.gitignore b/.gitignore index 91299e263b86..b07134d097dd 100755 --- a/.gitignore +++ b/.gitignore @@ -31,6 +31,7 @@ data/* !data/*.sh results*.txt +results*.csv # Datasets ------------------------------------------------------------------------------------------------------------- coco/ diff --git a/train.py b/train.py index 1c48fa49f0f7..db045c766716 100644 --- a/train.py +++ b/train.py @@ -12,7 +12,6 @@ import time from copy import deepcopy from pathlib import Path -from threading import Thread import math import numpy as np @@ -38,7 +37,7 @@ check_requirements, print_mutation, set_logging, one_cycle, colorstr from utils.google_utils import attempt_download from utils.loss import ComputeLoss -from utils.plots import plot_images, plot_labels, plot_results, plot_evolution +from utils.plots import plot_labels, plot_evolution from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, de_parallel from utils.loggers.wandb.wandb_utils import check_wandb_resume from utils.metrics import fitness @@ -61,7 +60,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Directories w = save_dir / 'weights' # weights dir w.mkdir(parents=True, exist_ok=True) # make dir - last, best, results_file = w / 'last.pt', w / 'best.pt', save_dir / 'results.txt' + last, best = w / 'last.pt', w / 'best.pt' # Hyperparameters if isinstance(hyp, str): @@ -88,7 +87,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Loggers if RANK in [-1, 0]: - loggers = Loggers(save_dir, results_file, weights, opt, hyp, data_dict, LOGGER).start() # loggers dict + loggers = Loggers(save_dir, weights, opt, hyp, data_dict, LOGGER).start() # loggers dict if loggers.wandb and resume: weights, epochs, hyp, data_dict = opt.weights, opt.epochs, opt.hyp, loggers.wandb.data_dict @@ -167,10 +166,6 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary ema.ema.load_state_dict(ckpt['ema'].float().state_dict()) ema.updates = ckpt['updates'] - # Results - if ckpt.get('training_results') is not None: - results_file.write_text(ckpt['training_results']) # write results.txt - # Epochs start_epoch = ckpt['epoch'] + 1 if resume: @@ -275,11 +270,11 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs) # dataset.mosaic_border = [b - imgsz, -b] # height, width borders - mloss = torch.zeros(4, device=device) # mean losses + mloss = torch.zeros(3, device=device) # mean losses if RANK != -1: train_loader.sampler.set_epoch(epoch) pbar = enumerate(train_loader) - LOGGER.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'labels', 'img_size')) + LOGGER.info(('\n' + '%10s' * 7) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'labels', 'img_size')) if RANK in [-1, 0]: pbar = tqdm(pbar, total=nb) # progress bar optimizer.zero_grad() @@ -327,20 +322,13 @@ def 
train(hyp, # path/to/hyp.yaml or hyp dictionary ema.update(model) last_opt_step = ni - # Print + # Log if RANK in [-1, 0]: mloss = (mloss * i + loss_items) / (i + 1) # update mean losses mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G' # (GB) - s = ('%10s' * 2 + '%10.4g' * 6) % ( - f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1]) - pbar.set_description(s) - - # Plot - if plots: - if ni < 3: - f = save_dir / f'train_batch{ni}.jpg' # filename - Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start() - loggers.on_train_batch_end(ni, model, imgs) + pbar.set_description(('%10s' * 2 + '%10.4g' * 5) % ( + f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1])) + loggers.on_train_batch_end(ni, model, imgs, targets, paths, plots) # end batch ------------------------------------------------------------------------------------------------ @@ -371,13 +359,12 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95] if fi > best_fitness: best_fitness = fi - loggers.on_train_val_end(mloss, results, lr, epoch, s, best_fitness, fi) + loggers.on_train_val_end(mloss, results, lr, epoch, best_fitness, fi) # Save model if (not nosave) or (final_epoch and not evolve): # if save ckpt = {'epoch': epoch, 'best_fitness': best_fitness, - 'training_results': results_file.read_text(), 'model': deepcopy(de_parallel(model)).half(), 'ema': deepcopy(ema.ema).half(), 'updates': ema.updates, @@ -395,9 +382,6 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # end training ----------------------------------------------------------------------------------------------------- if RANK in [-1, 0]: LOGGER.info(f'{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.\n') - if plots: - plot_results(save_dir=save_dir) # save as results.png - if not evolve: if is_coco: # COCO dataset for m in [last, best] if best.exists() else [last]: # speed, mAP tests @@ -411,13 +395,11 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary save_dir=save_dir, save_json=True, plots=False) - # Strip optimizers for f in last, best: if f.exists(): strip_optimizer(f) # strip optimizers - - loggers.on_train_end(last, best) + loggers.on_train_end(last, best, plots) torch.cuda.empty_cache() return results diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index ceca84c95252..29dd4605341b 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -1,15 +1,17 @@ # YOLOv5 experiment logging utils import warnings +from threading import Thread import torch from torch.utils.tensorboard import SummaryWriter from utils.general import colorstr, emojis from utils.loggers.wandb.wandb_utils import WandbLogger +from utils.plots import plot_images, plot_results from utils.torch_utils import de_parallel -LOGGERS = ('txt', 'tb', 'wandb') # text-file, TensorBoard, Weights & Biases +LOGGERS = ('csv', 'tb', 'wandb') # text-file, TensorBoard, Weights & Biases try: import wandb @@ -21,10 +23,8 @@ class Loggers(): # YOLOv5 Loggers class - def __init__(self, save_dir=None, results_file=None, weights=None, opt=None, hyp=None, - data_dict=None, logger=None, include=LOGGERS): + def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, data_dict=None, logger=None, include=LOGGERS): self.save_dir = save_dir - self.results_file = results_file self.weights = weights self.opt = opt self.hyp = hyp @@ -35,7 +35,7 @@ 
def __init__(self, save_dir=None, results_file=None, weights=None, opt=None, hyp setattr(self, k, None) # init empty logger dictionary def start(self): - self.txt = True # always log to txt + self.csv = True # always log to csv # Message try: @@ -63,15 +63,19 @@ def start(self): return self - def on_train_batch_end(self, ni, model, imgs): + def on_train_batch_end(self, ni, model, imgs, targets, paths, plots): # Callback runs on train batch end - if ni == 0: - with warnings.catch_warnings(): - warnings.simplefilter('ignore') # suppress jit trace warning - self.tb.add_graph(torch.jit.trace(de_parallel(model), imgs[0:1], strict=False), []) - if self.wandb and ni == 10: - files = sorted(self.save_dir.glob('train*.jpg')) - self.wandb.log({'Mosaics': [wandb.Image(str(f), caption=f.name) for f in files if f.exists()]}) + if plots: + if ni == 0: + with warnings.catch_warnings(): + warnings.simplefilter('ignore') # suppress jit trace warning + self.tb.add_graph(torch.jit.trace(de_parallel(model), imgs[0:1], strict=False), []) + if ni < 3: + f = self.save_dir / f'train_batch{ni}.jpg' # filename + Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start() + if self.wandb and ni == 10: + files = sorted(self.save_dir.glob('train*.jpg')) + self.wandb.log({'Mosaics': [wandb.Image(str(f), caption=f.name) for f in files if f.exists()]}) def on_train_epoch_end(self, epoch): # Callback runs on train epoch end @@ -89,21 +93,28 @@ def on_val_end(self): files = sorted(self.save_dir.glob('val*.jpg')) self.wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in files]}) - def on_train_val_end(self, mloss, results, lr, epoch, s, best_fitness, fi): - # Callback runs on validation end during training - vals = list(mloss[:-1]) + list(results) + lr - tags = ['train/box_loss', 'train/obj_loss', 'train/cls_loss', # train loss - 'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', + def on_train_val_end(self, mloss, results, lr, epoch, best_fitness, fi): + # Callback runs on val end during training + vals = list(mloss) + list(results) + lr + keys = ['train/box_loss', 'train/obj_loss', 'train/cls_loss', # train loss + 'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', # metrics 'val/box_loss', 'val/obj_loss', 'val/cls_loss', # val loss 'x/lr0', 'x/lr1', 'x/lr2'] # params - if self.txt: - with open(self.results_file, 'a') as f: - f.write(s + '%10.4g' * 7 % results + '\n') # append metrics, val_loss + x = {k: v for k, v in zip(keys, vals)} # dict + + if self.csv: + file = self.save_dir / 'results.csv' + n = len(x) + 1 # number of cols + s = '' if file.exists() else (('%20s,' * n % tuple(['epoch'] + keys)).rstrip(',') + '\n') # add header + with open(file, 'a') as f: + f.write(s + ('%20.5g,' * n % tuple([epoch] + vals)).rstrip(',') + '\n') + if self.tb: - for x, tag in zip(vals, tags): - self.tb.add_scalar(tag, x, epoch) # TensorBoard + for k, v in x.items(): + self.tb.add_scalar(k, v, epoch) # TensorBoard + if self.wandb: - self.wandb.log({k: v for k, v in zip(tags, vals)}) + self.wandb.log(x) self.wandb.end_epoch(best_result=best_fitness == fi) def on_model_save(self, last, epoch, final_epoch, best_fitness, fi): @@ -112,8 +123,10 @@ def on_model_save(self, last, epoch, final_epoch, best_fitness, fi): if ((epoch + 1) % self.opt.save_period == 0 and not final_epoch) and self.opt.save_period != -1: self.wandb.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi) - def on_train_end(self, last, best): + def 
on_train_end(self, last, best, plots): # Callback runs on training end + if plots: + plot_results(dir=self.save_dir) # save results.png files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]] files = [(self.save_dir / f) for f in files if (self.save_dir / f).exists()] # filter if self.wandb: diff --git a/utils/loss.py b/utils/loss.py index 22061a11ff27..79e8f24359c1 100644 --- a/utils/loss.py +++ b/utils/loss.py @@ -162,8 +162,7 @@ def __call__(self, p, targets): # predictions, targets, model lcls *= self.hyp['cls'] bs = tobj.shape[0] # batch size - loss = lbox + lobj + lcls - return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach() + return (lbox + lobj + lcls) * bs, torch.cat((lbox, lobj, lcls)).detach() def build_targets(self, p, targets): # Build targets for compute_loss(), input targets(image,class,x,y,w,h) diff --git a/utils/plots.py b/utils/plots.py index f9fd35fce751..e13e316314dd 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -1,7 +1,5 @@ # Plotting utils -import glob -import os from copy import copy from pathlib import Path @@ -387,63 +385,29 @@ def profile_idetection(start=0, stop=0, labels=(), save_dir=''): plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200) -def plot_results_overlay(start=0, stop=0): # from utils.plots import *; plot_results_overlay() - # Plot training 'results*.txt', overlaying train and val losses - s = ['train', 'train', 'train', 'Precision', 'mAP@0.5', 'val', 'val', 'val', 'Recall', 'mAP@0.5:0.95'] # legends - t = ['Box', 'Objectness', 'Classification', 'P-R', 'mAP-F1'] # titles - for f in sorted(glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')): - results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T - n = results.shape[1] # number of rows - x = range(start, min(stop, n) if stop else n) - fig, ax = plt.subplots(1, 5, figsize=(14, 3.5), tight_layout=True) - ax = ax.ravel() - for i in range(5): - for j in [i, i + 5]: - y = results[j, x] - ax[i].plot(x, y, marker='.', label=s[j]) - # y_smooth = butter_lowpass_filtfilt(y) - # ax[i].plot(x, np.gradient(y_smooth), marker='.', label=s[j]) - - ax[i].set_title(t[i]) - ax[i].legend() - ax[i].set_ylabel(f) if i == 0 else None # add filename - fig.savefig(f.replace('.txt', '.png'), dpi=200) - - -def plot_results(start=0, stop=0, bucket='', id=(), labels=(), save_dir=''): - # Plot training 'results*.txt'. from utils.plots import *; plot_results(save_dir='runs/train/exp') +def plot_results(file='', dir=''): + # Plot training results.csv. Usage: from utils.plots import *; plot_results('path/to/results.csv') + save_dir = Path(file).parent if file else Path(dir) fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True) ax = ax.ravel() - s = ['Box', 'Objectness', 'Classification', 'Precision', 'Recall', - 'val Box', 'val Objectness', 'val Classification', 'mAP@0.5', 'mAP@0.5:0.95'] - if bucket: - # files = ['https://storage.googleapis.com/%s/results%g.txt' % (bucket, x) for x in id] - files = ['results%g.txt' % x for x in id] - c = ('gsutil cp ' + '%s ' * len(files) + '.') % tuple('gs://%s/results%g.txt' % (bucket, x) for x in id) - os.system(c) - else: - files = list(Path(save_dir).glob('results*.txt')) - assert len(files), 'No results.txt files found in %s, nothing to plot.' % os.path.abspath(save_dir) + files = list(save_dir.glob('results*.csv')) + assert len(files), f'No results.csv files found in {save_dir.resolve()}, nothing to plot.' 
for fi, f in enumerate(files): try: - results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T - n = results.shape[1] # number of rows - x = range(start, min(stop, n) if stop else n) - for i in range(10): - y = results[i, x] - if i in [0, 1, 2, 5, 6, 7]: - y[y == 0] = np.nan # don't show zero loss values - # y /= y[0] # normalize - label = labels[fi] if len(labels) else f.stem - ax[i].plot(x, y, marker='.', label=label, linewidth=2, markersize=8) - ax[i].set_title(s[i]) - # if i in [5, 6, 7]: # share train and val loss y axes + data = pd.read_csv(f) + s = [x.strip() for x in data.columns] + x = data.values[:, 0] + for i, j in enumerate([1, 2, 3, 4, 5, 8, 9, 10, 6, 7]): + y = data.values[:, j] + # y[y == 0] = np.nan # don't show zero values + ax[i].plot(x, y, marker='.', label=f.stem, linewidth=2, markersize=8) + ax[i].set_title(s[j], fontsize=12) + # if j in [8, 9, 10]: # share train and val loss y axes # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5]) except Exception as e: - print('Warning: Plotting error for %s; %s' % (f, e)) - + print(f'Warning: Plotting error for {f}: {e}') ax[1].legend() - fig.savefig(Path(save_dir) / 'results.png', dpi=200) + fig.savefig(save_dir / 'results.png', dpi=200) def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')): diff --git a/val.py b/val.py index 2b088dcdf210..f20877e8aa0b 100644 --- a/val.py +++ b/val.py @@ -171,7 +171,7 @@ def run(data, # Compute loss if compute_loss: - loss += compute_loss([x.float() for x in train_out], targets)[1][:3] # box, obj, cls + loss += compute_loss([x.float() for x in train_out], targets)[1] # box, obj, cls # Run NMS targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels From 8acb5734c7f0d1b7baf62b5c5dab6107a37896c6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 26 Jul 2021 00:57:09 +0200 Subject: [PATCH 0434/1976] Created using Colaboratory --- tutorial.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 88adc08c0ef1..831735cc0830 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -1036,7 +1036,7 @@ "source": [ "## Local Logging\n", "\n", - "All results are logged by default to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc. View train and val jpgs to see mosaics, labels, predictions and augmentation effects. Note an Ultralytics **Mosaic Dataloader** is used for training (shown below), which combined each original image with 3 additional random training images.\n", + "All results are logged by default to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc. View train and val jpgs to see mosaics, labels, predictions and augmentation effects. 
Note an Ultralytics **Mosaic Dataloader** is used for training (shown below), which combines 4 images into 1 mosaic during training.\n", "\n", "> \n", "`train_batch0.jpg` shows train batch 0 mosaics and labels\n", From f8e11483df9055a30843162a33a185c5f4b47ab3 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 26 Jul 2021 14:23:43 +0200 Subject: [PATCH 0435/1976] Update dataset headers (#4162) --- data/Argoverse_HD.yaml | 11 ++++++----- data/GlobalWheat2020.yaml | 11 ++++++----- data/Objects365.yaml | 11 ++++++----- data/SKU-110K.yaml | 11 ++++++----- data/VOC.yaml | 13 +++++++------ data/VisDrone.yaml | 11 ++++++----- data/coco.yaml | 11 ++++++----- data/coco128.yaml | 13 +++++++------ data/xView.yaml | 13 +++++++------ 9 files changed, 57 insertions(+), 48 deletions(-) diff --git a/data/Argoverse_HD.yaml b/data/Argoverse_HD.yaml index ad1a52254d74..90721cc0b9fb 100644 --- a/data/Argoverse_HD.yaml +++ b/data/Argoverse_HD.yaml @@ -1,9 +1,10 @@ +# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 # Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ -# Train command: python train.py --data Argoverse_HD.yaml -# Default dataset location is next to YOLOv5: -# /parent -# /datasets/Argoverse -# /yolov5 +# YOLOv5 🚀 example usage: python train.py --data Argoverse_HD.yaml +# parent +# ├── yolov5 +# └── datasets +# └── Argoverse ← downloads here # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] diff --git a/data/GlobalWheat2020.yaml b/data/GlobalWheat2020.yaml index b77534944ed7..58b55114c722 100644 --- a/data/GlobalWheat2020.yaml +++ b/data/GlobalWheat2020.yaml @@ -1,9 +1,10 @@ +# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 # Global Wheat 2020 dataset http://www.global-wheat.com/ -# Train command: python train.py --data GlobalWheat2020.yaml -# Default dataset location is next to YOLOv5: -# /parent -# /datasets/GlobalWheat2020 -# /yolov5 +# YOLOv5 🚀 example usage: python train.py --data GlobalWheat2020.yaml +# parent +# ├── yolov5 +# └── datasets +# └── GlobalWheat2020 ← downloads here # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] diff --git a/data/Objects365.yaml b/data/Objects365.yaml index e365c82cab08..e29803bc9e02 100644 --- a/data/Objects365.yaml +++ b/data/Objects365.yaml @@ -1,9 +1,10 @@ +# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 # Objects365 dataset https://www.objects365.org/ -# Train command: python train.py --data Objects365.yaml -# Default dataset location is next to YOLOv5: -# /parent -# /datasets/Objects365 -# /yolov5 +# YOLOv5 🚀 example usage: python train.py --data Objects365.yaml +# parent +# ├── yolov5 +# └── datasets +# └── Objects365 ← downloads here # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
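The standardized headers above change only comments; the keys each dataset YAML carries (`train`, `val`, `nc`, `names`, plus an optional `download`) are untouched. A minimal sketch of how train.py consumes one of these files, matching the `yaml.safe_load` usage earlier in this series (the `check_dataset()` call that handles auto-download is elided):

```python
# Minimal sketch: read a dataset YAML the way train.py does above.
import yaml

with open('data/coco128.yaml') as f:
    data_dict = yaml.safe_load(f)  # data dict

train_path, val_path = data_dict['train'], data_dict['val']
nc = int(data_dict['nc'])  # number of classes
names = data_dict['names']  # class names
assert len(names) == nc, f'{len(names)} names found for nc={nc}'
```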
diff --git a/data/SKU-110K.yaml b/data/SKU-110K.yaml index 7087bb9c2893..04d8e0819e7f 100644 --- a/data/SKU-110K.yaml +++ b/data/SKU-110K.yaml @@ -1,9 +1,10 @@ +# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 # SKU-110K retail items dataset https://github.com/eg4000/SKU110K_CVPR19 -# Train command: python train.py --data SKU-110K.yaml -# Default dataset location is next to YOLOv5: -# /parent -# /datasets/SKU-110K -# /yolov5 +# YOLOv5 🚀 example usage: python train.py --data SKU-110K.yaml +# parent +# ├── yolov5 +# └── datasets +# └── SKU-110K ← downloads here # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] diff --git a/data/VOC.yaml b/data/VOC.yaml index 3d878fa67a60..40df3d9ff001 100644 --- a/data/VOC.yaml +++ b/data/VOC.yaml @@ -1,9 +1,10 @@ -# PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC/ -# Train command: python train.py --data VOC.yaml -# Default dataset location is next to YOLOv5: -# /parent -# /datasets/VOC -# /yolov5 +# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 +# PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC +# YOLOv5 🚀 example usage: python train.py --data VOC.yaml +# parent +# ├── yolov5 +# └── datasets +# └── VOC ← downloads here # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] diff --git a/data/VisDrone.yaml b/data/VisDrone.yaml index c1cd38d1e10f..e7865c5b44f2 100644 --- a/data/VisDrone.yaml +++ b/data/VisDrone.yaml @@ -1,9 +1,10 @@ +# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 # VisDrone2019-DET dataset https://github.com/VisDrone/VisDrone-Dataset -# Train command: python train.py --data VisDrone.yaml -# Default dataset location is next to YOLOv5: -# /parent -# /datasets/VisDrone -# /yolov5 +# YOLOv5 🚀 example usage: python train.py --data VisDrone.yaml +# parent +# ├── yolov5 +# └── datasets +# └── VisDrone ← downloads here # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] diff --git a/data/coco.yaml b/data/coco.yaml index c6053c984bc0..699a761c6f54 100644 --- a/data/coco.yaml +++ b/data/coco.yaml @@ -1,9 +1,10 @@ +# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 # COCO 2017 dataset http://cocodataset.org -# Train command: python train.py --data coco.yaml -# Default dataset location is next to YOLOv5: -# /parent -# /datasets/coco -# /yolov5 +# YOLOv5 🚀 example usage: python train.py --data coco.yaml +# parent +# ├── yolov5 +# └── datasets +# └── coco ← downloads here # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
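All of these dataset headers document one convention: datasets live in a datasets/ directory beside the yolov5 repo, and the train/val/test entries in each yaml are relative to its optional 'path' key. A rough sketch of how such a file resolves, mirroring the check_dataset() logic in utils/general.py and using coco128.yaml as the example:

    import yaml
    from pathlib import Path

    with open('data/coco128.yaml', encoding='ascii', errors='ignore') as f:
        data = yaml.safe_load(f)           # {'path': '../datasets/coco128', 'train': ..., 'nc': ..., ...}
    root = Path(data.get('path', ''))      # optional dataset root
    for k in ('train', 'val', 'test'):
        if data.get(k):                    # prepend root to relative split paths
            data[k] = str(root / data[k]) if isinstance(data[k], str) else [str(root / x) for x in data[k]]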
diff --git a/data/coco128.yaml b/data/coco128.yaml index e70ad687dd88..91e4bde66465 100644 --- a/data/coco128.yaml +++ b/data/coco128.yaml @@ -1,9 +1,10 @@ -# COCO 2017 dataset http://cocodataset.org - first 128 training images -# Train command: python train.py --data coco128.yaml -# Default dataset location is next to YOLOv5: -# /parent -# /datasets/coco128 -# /yolov5 +# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 +# COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) +# YOLOv5 🚀 example usage: python train.py --data coco128.yaml +# parent +# ├── yolov5 +# └── datasets +# └── coco128 ← downloads here # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] diff --git a/data/xView.yaml b/data/xView.yaml index 5212193a0bf0..0766f9dc8776 100644 --- a/data/xView.yaml +++ b/data/xView.yaml @@ -1,10 +1,11 @@ +# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 # xView 2018 dataset https://challenge.xviewdataset.org -# ----> NOTE: DOWNLOAD DATA MANUALLY from URL above and unzip to /datasets/xView before running train command below -# Train command: python train.py --data xView.yaml -# Default dataset location is next to YOLOv5: -# /parent -# /datasets/xView -# /yolov5 +# -------- DOWNLOAD DATA MANUALLY from URL above and unzip to 'datasets/xView' before running train command! -------- +# YOLOv5 🚀 example usage: python train.py --data xView.yaml +# parent +# ├── yolov5 +# └── datasets +# └── xView ← downloads here # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] From 0ad6301c9603130e020dfe52335dbb3e37210c19 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 26 Jul 2021 15:23:33 +0200 Subject: [PATCH 0436/1976] Update script headers (#4163) * Update download script headers * cleanup * bug fix attempt * bug fix attempt2 * bug fix attempt3 * cleanup --- data/scripts/download_weights.sh | 9 +++++++-- data/scripts/get_coco.sh | 14 +++++++------- data/scripts/get_coco128.sh | 16 ++++++++-------- train.py | 5 +++-- utils/autoanchor.py | 10 ++++------ utils/datasets.py | 2 +- utils/loggers/wandb/log_dataset.py | 4 ++-- utils/loggers/wandb/wandb_utils.py | 6 +++--- val.py | 2 +- 9 files changed, 36 insertions(+), 32 deletions(-) diff --git a/data/scripts/download_weights.sh b/data/scripts/download_weights.sh index 6a279f1636fc..5d74f0266815 100755 --- a/data/scripts/download_weights.sh +++ b/data/scripts/download_weights.sh @@ -1,7 +1,12 @@ #!/bin/bash +# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 # Download latest models from https://github.com/ultralytics/yolov5/releases -# Usage: -# $ bash path/to/download_weights.sh +# YOLOv5 🚀 example usage: bash path/to/download_weights.sh +# parent +# └── yolov5 +# ├── yolov5s.pt ← downloads here +# ├── yolov5m.pt +# └── ... 
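The script body drives the repo's download helper from an inline Python heredoc (truncated below). As a hedged sketch rather than the script's exact body, the equivalent call from Python would be the following; attempt_download() lives in utils/google_utils.py at this point in the series and is renamed to utils/downloads.py in patch 0439 further down:

    from utils.google_utils import attempt_download  # utils.downloads after the rename

    for size in ('s', 'm', 'l', 'x'):        # the set of model-size suffixes is an assumption
        attempt_download(f'yolov5{size}.pt')  # fetched from the GitHub releases page if missing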
python - < Date: Tue, 27 Jul 2021 18:43:32 +0530 Subject: [PATCH 0437/1976] Improve docstrings and run names (#4174) --- utils/loggers/__init__.py | 2 +- utils/loggers/wandb/wandb_utils.py | 145 ++++++++++++++++++++++++++--- 2 files changed, 133 insertions(+), 14 deletions(-) diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 29dd4605341b..e65c8f9fd085 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -57,7 +57,7 @@ def start(self): assert 'wandb' in self.include and wandb run_id = torch.load(self.weights).get('wandb_id') if self.opt.resume else None self.opt.hyp = self.hyp # add hyperparameters - self.wandb = WandbLogger(self.opt, s.stem, run_id, self.data_dict) + self.wandb = WandbLogger(self.opt, run_id, self.data_dict) except: self.wandb = None diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index 581041acbdb7..cd5939155169 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -99,7 +99,19 @@ class WandbLogger(): https://docs.wandb.com/guides/integrations/yolov5 """ - def __init__(self, opt, name, run_id, data_dict, job_type='Training'): + def __init__(self, opt, run_id, data_dict, job_type='Training'): + ''' + - Initialize WandbLogger instance + - Upload dataset if opt.upload_dataset is True + - Setup training processes if job_type is 'Training' + + arguments: + opt (namespace) -- Commandline arguments for this run + run_id (str) -- Run ID of W&B run to be resumed + data_dict (Dict) -- Dictionary containing info about the dataset to be used + job_type (str) -- To set the job_type for this run + + ''' # Pre-training routine -- self.job_type = job_type self.wandb, self.wandb_run = wandb, None if not wandb else wandb.run @@ -129,7 +141,7 @@ def __init__(self, opt, name, run_id, data_dict, job_type='Training'): resume="allow", project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem, entity=opt.entity, - name=name, + name=opt.name if opt.name != 'exp' else None, job_type=job_type, id=run_id, allow_val_change=True) if not wandb.run else wandb.run @@ -145,6 +157,15 @@ def __init__(self, opt, name, run_id, data_dict, job_type='Training'): self.data_dict = self.check_and_upload_dataset(opt) def check_and_upload_dataset(self, opt): + ''' + Check if the dataset format is compatible and upload it as a W&B artifact + + arguments: + opt (namespace) -- Commandline arguments for current run + + returns: + Updated dataset info dictionary where local dataset paths are replaced by WANDB_ARTIFACT_PREFIX links.
+ ''' assert wandb, 'Install wandb to upload dataset' config_path = self.log_dataset_artifact(check_file(opt.data), opt.single_cls, @@ -155,6 +176,19 @@ def check_and_upload_dataset(self, opt): return wandb_data_dict def setup_training(self, opt, data_dict): + ''' + Setup the necessary processes for training YOLO models: + - Attempt to download model checkpoint and dataset artifacts if opt.resume starts with WANDB_ARTIFACT_PREFIX + - Update data_dict, to contain info of previous run if resumed and the paths of dataset artifact if downloaded + - Setup log_dict, initialize bbox_interval + + arguments: + opt (namespace) -- commandline arguments for this run + data_dict (Dict) -- Dataset dictionary for this run + + returns: + data_dict (Dict) -- contains the updated info about the dataset to be used for training + ''' self.log_dict, self.current_epoch = {}, 0 self.bbox_interval = opt.bbox_interval if isinstance(opt.resume, str): @@ -185,12 +219,22 @@ def setup_training(self, opt, data_dict): self.val_table = self.val_artifact.get("val") if self.val_table_path_map is None: self.map_val_table_path() - wandb.log({"validation dataset": self.val_table}) if opt.bbox_interval == -1: self.bbox_interval = opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else 1 return data_dict def download_dataset_artifact(self, path, alias): + ''' + Download the dataset artifact if the path starts with WANDB_ARTIFACT_PREFIX + + arguments: + path -- path of the dataset to be used for training + alias (str) -- alias of the artifact to be downloaded/used for training + + returns: + (str, wandb.Artifact) -- path of the downloaded dataset and its corresponding artifact object if the dataset + is found, otherwise returns (None, None) + ''' if isinstance(path, str) and path.startswith(WANDB_ARTIFACT_PREFIX): artifact_path = Path(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ":" + alias) dataset_artifact = wandb.use_artifact(artifact_path.as_posix().replace("\\", "/")) @@ -200,6 +244,12 @@ def download_dataset_artifact(self, path, alias): return None, None def download_model_artifact(self, opt): + ''' + Download the model checkpoint artifact if the resume path starts with WANDB_ARTIFACT_PREFIX + + arguments: + opt (namespace) -- Commandline arguments for this run + ''' if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): model_artifact = wandb.use_artifact(remove_prefix(opt.resume, WANDB_ARTIFACT_PREFIX) + ":latest") assert model_artifact is not None, 'Error: W&B model artifact doesn\'t exist' @@ -212,6 +262,16 @@ def download_model_artifact(self, opt): return None, None def log_model(self, path, opt, epoch, fitness_score, best_model=False): + ''' + Log the model checkpoint as a W&B artifact + + arguments: + path (Path) -- Path of directory containing the checkpoints + opt (namespace) -- Command line arguments for this run + epoch (int) -- Current epoch number + fitness_score (float) -- fitness score for current epoch + best_model (boolean) -- Boolean representing if the current checkpoint is the best yet.
+ ''' model_artifact = wandb.Artifact('run_' + wandb.run.id + '_model', type='model', metadata={ 'original_url': str(path), 'epochs_trained': epoch + 1, @@ -226,6 +286,19 @@ def log_model(self, path, opt, epoch, fitness_score, best_model=False): print("Saving model artifact on epoch ", epoch + 1) def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False): + ''' + Log the dataset as a W&B artifact and return the new data file with W&B links + + arguments: + data_file (str) -- the .yaml file with information about the dataset like path, classes, etc. + single_cls (boolean) -- train multi-class data as single-class + project (str) -- project name. Used to construct the artifact path + overwrite_config (boolean) -- overwrites the data.yaml file if set to True, otherwise creates a new + file with a _wandb postfix, e.g. data_wandb.yaml + + returns: + the new .yaml file with artifact links. It can be used to start training directly from artifacts + ''' with open(data_file, encoding='ascii', errors='ignore') as f: data = yaml.safe_load(f) # data dict check_dataset(data) @@ -257,12 +330,27 @@ def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config= return path def map_val_table_path(self): + ''' + Map the validation dataset Table entries from file name -> id in the W&B Table. + Useful for referencing artifacts for evaluation. + ''' self.val_table_path_map = {} print("Mapping dataset") for i, data in enumerate(tqdm(self.val_table.data)): self.val_table_path_map[data[3]] = data[0] def create_dataset_table(self, dataset, class_to_id, name='dataset'): + ''' + Create and return a W&B artifact containing a W&B Table of the dataset. + + arguments: + dataset (LoadImagesAndLabels) -- instance of LoadImagesAndLabels class used to iterate over the data to build Table + class_to_id (dict(int, str)) -- hash map that maps class ids to labels + name (str) -- name of the artifact + + returns: + dataset artifact to be logged or used + ''' # TODO: Explore multiprocessing to split this loop in parallel | This is essential for speeding up the logging artifact = wandb.Artifact(name=name, type="dataset") img_files = tqdm([dataset.path]) if isinstance(dataset.path, str) and Path(dataset.path).is_dir() else None @@ -294,6 +382,14 @@ def create_dataset_table(self, dataset, class_to_id, name='dataset'): return artifact def log_training_progress(self, predn, path, names): + ''' + Build evaluation Table. Uses reference from validation dataset table. + + arguments: + predn (list): list of predictions in the native space in the format - [xmin, ymin, xmax, ymax, confidence, class] + path (str): local path of the current evaluation image + names (dict(int, str)): hash map that maps class ids to labels + ''' class_set = wandb.Classes([{'id': id, 'name': name} for id, name in names.items()]) box_data = [] total_conf = 0 @@ -316,25 +412,45 @@ def log_training_progress(self, predn, path, names): ) def val_one_image(self, pred, predn, path, names, im): + ''' + Log validation data for one image.
updates the result Table if validation dataset is uploaded and log bbox media panel + + arguments: + pred (list): list of scaled predictions in the format - [xmin, ymin, xmax, ymax, confidence, class] + predn (list): list of predictions in the native space - [xmin, ymin, xmax, ymax, confidence, class] + path (str): local path of the current evaluation image + ''' if self.val_table and self.result_table: # Log Table if Val dataset is uploaded as artifact self.log_training_progress(predn, path, names) - else: # Default to bbox media panelif Val artifact not found - if len(self.bbox_media_panel_images) < self.max_imgs_to_log and self.current_epoch > 0: - if self.current_epoch % self.bbox_interval == 0: - box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, - "class_id": int(cls), - "box_caption": "%s %.3f" % (names[cls], conf), - "scores": {"class_score": conf}, - "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()] - boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space - self.bbox_media_panel_images.append(wandb.Image(im, boxes=boxes, caption=path.name)) + + if len(self.bbox_media_panel_images) < self.max_imgs_to_log and self.current_epoch > 0: + if self.current_epoch % self.bbox_interval == 0: + box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, + "class_id": int(cls), + "box_caption": "%s %.3f" % (names[cls], conf), + "scores": {"class_score": conf}, + "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()] + boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space + self.bbox_media_panel_images.append(wandb.Image(im, boxes=boxes, caption=path.name)) def log(self, log_dict): + ''' + save the metrics to the logging dictionary + + arguments: + log_dict (Dict) -- metrics/media to be logged in current step + ''' if self.wandb_run: for key, value in log_dict.items(): self.log_dict[key] = value def end_epoch(self, best_result=False): + ''' + commit the log_dict, model artifacts and Tables to W&B and flush the log_dict. 
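For reference, the W&B media-panel payload assembled in val_one_image() above has the following shape; the values here are illustrative, not from a real run:

    box_data = [{"position": {"minX": 50.0, "minY": 60.0, "maxX": 300.0, "maxY": 400.0},
                 "class_id": 0,
                 "box_caption": "person 0.873",
                 "scores": {"class_score": 0.873},
                 "domain": "pixel"}]       # pixel-space coordinates
    boxes = {"predictions": {"box_data": box_data, "class_labels": {0: "person"}}}
    # wandb.Image(im, boxes=boxes, caption='zidane.jpg') is then appended to the media panel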
+ + arguments: + best_result (boolean): Boolean representing if the result of this evaluation is best or not + ''' if self.wandb_run: with all_logging_disabled(): if self.bbox_media_panel_images: @@ -352,6 +468,9 @@ def end_epoch(self, best_result=False): self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") def finish_run(self): + ''' + Log metrics if any and finish the current W&B run + ''' if self.wandb_run: if self.log_dict: with all_logging_disabled(): From 3fef11706c384a9b73e6098b006a57bbee7643c7 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 27 Jul 2021 23:23:41 +0200 Subject: [PATCH 0438/1976] Update comments header (#4184) --- data/Argoverse_HD.yaml | 4 ++-- data/GlobalWheat2020.yaml | 4 ++-- data/Objects365.yaml | 4 ++-- data/SKU-110K.yaml | 4 ++-- data/VOC.yaml | 4 ++-- data/VisDrone.yaml | 4 ++-- data/coco.yaml | 4 ++-- data/coco128.yaml | 4 ++-- data/scripts/download_weights.sh | 4 ++-- data/scripts/get_coco.sh | 4 ++-- data/scripts/get_coco128.sh | 4 ++-- data/xView.yaml | 4 ++-- 12 files changed, 24 insertions(+), 24 deletions(-) diff --git a/data/Argoverse_HD.yaml b/data/Argoverse_HD.yaml index 90721cc0b9fb..e379b1ec99df 100644 --- a/data/Argoverse_HD.yaml +++ b/data/Argoverse_HD.yaml @@ -1,6 +1,6 @@ -# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 +# YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 # Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ -# YOLOv5 🚀 example usage: python train.py --data Argoverse_HD.yaml +# Example usage: python train.py --data Argoverse_HD.yaml # parent # ├── yolov5 # └── datasets diff --git a/data/GlobalWheat2020.yaml b/data/GlobalWheat2020.yaml index 58b55114c722..842456047953 100644 --- a/data/GlobalWheat2020.yaml +++ b/data/GlobalWheat2020.yaml @@ -1,6 +1,6 @@ -# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 +# YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 # Global Wheat 2020 dataset http://www.global-wheat.com/ -# YOLOv5 🚀 example usage: python train.py --data GlobalWheat2020.yaml +# Example usage: python train.py --data GlobalWheat2020.yaml # parent # ├── yolov5 # └── datasets diff --git a/data/Objects365.yaml b/data/Objects365.yaml index e29803bc9e02..52577581d7bb 100644 --- a/data/Objects365.yaml +++ b/data/Objects365.yaml @@ -1,6 +1,6 @@ -# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 +# YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 # Objects365 dataset https://www.objects365.org/ -# YOLOv5 🚀 example usage: python train.py --data Objects365.yaml +# Example usage: python train.py --data Objects365.yaml # parent # ├── yolov5 # └── datasets diff --git a/data/SKU-110K.yaml b/data/SKU-110K.yaml index 04d8e0819e7f..01bf36c0d870 100644 --- a/data/SKU-110K.yaml +++ b/data/SKU-110K.yaml @@ -1,6 +1,6 @@ -# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 +# YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 # SKU-110K retail items dataset https://github.com/eg4000/SKU110K_CVPR19 -# YOLOv5 🚀 example usage: python train.py --data SKU-110K.yaml +# Example usage: python train.py --data SKU-110K.yaml # parent # ├── yolov5 # └── datasets diff --git a/data/VOC.yaml b/data/VOC.yaml index 40df3d9ff001..55f39d852d31 100644 --- a/data/VOC.yaml +++ b/data/VOC.yaml @@ -1,6 +1,6 @@ -# Copyright Ultralytics https://ultralytics.com, licensed under 
GNU GPL v3.0 +# YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 # PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC -# YOLOv5 🚀 example usage: python train.py --data VOC.yaml +# Example usage: python train.py --data VOC.yaml # parent # ├── yolov5 # └── datasets diff --git a/data/VisDrone.yaml b/data/VisDrone.yaml index e7865c5b44f2..12e0e7c4a009 100644 --- a/data/VisDrone.yaml +++ b/data/VisDrone.yaml @@ -1,6 +1,6 @@ -# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 +# YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 # VisDrone2019-DET dataset https://github.com/VisDrone/VisDrone-Dataset -# YOLOv5 🚀 example usage: python train.py --data VisDrone.yaml +# Example usage: python train.py --data VisDrone.yaml # parent # ├── yolov5 # └── datasets diff --git a/data/coco.yaml b/data/coco.yaml index 699a761c6f54..cab1a0171963 100644 --- a/data/coco.yaml +++ b/data/coco.yaml @@ -1,6 +1,6 @@ -# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 +# YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 # COCO 2017 dataset http://cocodataset.org -# YOLOv5 🚀 example usage: python train.py --data coco.yaml +# Example usage: python train.py --data coco.yaml # parent # ├── yolov5 # └── datasets diff --git a/data/coco128.yaml b/data/coco128.yaml index 91e4bde66465..6902eb9397a1 100644 --- a/data/coco128.yaml +++ b/data/coco128.yaml @@ -1,6 +1,6 @@ -# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 +# YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 # COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) -# YOLOv5 🚀 example usage: python train.py --data coco128.yaml +# Example usage: python train.py --data coco128.yaml # parent # ├── yolov5 # └── datasets diff --git a/data/scripts/download_weights.sh b/data/scripts/download_weights.sh index 5d74f0266815..013036978c07 100755 --- a/data/scripts/download_weights.sh +++ b/data/scripts/download_weights.sh @@ -1,7 +1,7 @@ #!/bin/bash -# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 +# YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 # Download latest models from https://github.com/ultralytics/yolov5/releases -# YOLOv5 🚀 example usage: bash path/to/download_weights.sh +# Example usage: bash path/to/download_weights.sh # parent # └── yolov5 # ├── yolov5s.pt ← downloads here diff --git a/data/scripts/get_coco.sh b/data/scripts/get_coco.sh index b3f838f533ab..1f484beee34c 100755 --- a/data/scripts/get_coco.sh +++ b/data/scripts/get_coco.sh @@ -1,7 +1,7 @@ #!/bin/bash -# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 +# YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 # Download COCO 2017 dataset http://cocodataset.org -# YOLOv5 🚀 example usage: bash data/scripts/get_coco.sh +# Example usage: bash data/scripts/get_coco.sh # parent # ├── yolov5 # └── datasets diff --git a/data/scripts/get_coco128.sh b/data/scripts/get_coco128.sh index 26bb8ad09e14..3d705890b56d 100644 --- a/data/scripts/get_coco128.sh +++ b/data/scripts/get_coco128.sh @@ -1,7 +1,7 @@ #!/bin/bash -# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 +# YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 # Download COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) 
-# YOLOv5 🚀 example usage: bash data/scripts/get_coco128.sh +# Example usage: bash data/scripts/get_coco128.sh # parent # ├── yolov5 # └── datasets diff --git a/data/xView.yaml b/data/xView.yaml index 0766f9dc8776..f4f27bfbc8ec 100644 --- a/data/xView.yaml +++ b/data/xView.yaml @@ -1,7 +1,7 @@ -# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 +# YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 # xView 2018 dataset https://challenge.xviewdataset.org # -------- DOWNLOAD DATA MANUALLY from URL above and unzip to 'datasets/xView' before running train command! -------- -# YOLOv5 🚀 example usage: python train.py --data xView.yaml +# Example usage: python train.py --data xView.yaml # parent # ├── yolov5 # └── datasets From 5d66e487236daf4cbf816704453d0cc4905ee463 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 28 Jul 2021 02:04:10 +0200 Subject: [PATCH 0439/1976] Train from `--data path/to/dataset.zip` feature (#4185) * Train from `--data path/to/dataset.zip` feature * Update dataset_stats() * cleanup * cleanup2 --- data/{Argoverse_HD.yaml => Argoverse.yaml} | 2 +- hubconf.py | 2 +- models/experimental.py | 2 +- train.py | 11 ++-- utils/datasets.py | 66 ++++++++++++++++------ utils/{google_utils.py => downloads.py} | 6 +- utils/general.py | 40 +++++++++---- utils/loggers/wandb/wandb_utils.py | 62 ++++++++++---------- val.py | 4 +- 9 files changed, 122 insertions(+), 73 deletions(-) rename data/{Argoverse_HD.yaml => Argoverse.yaml} (97%) rename utils/{google_utils.py => downloads.py} (98%) diff --git a/data/Argoverse_HD.yaml b/data/Argoverse.yaml similarity index 97% rename from data/Argoverse_HD.yaml rename to data/Argoverse.yaml index e379b1ec99df..c42624c5783f 100644 --- a/data/Argoverse_HD.yaml +++ b/data/Argoverse.yaml @@ -1,6 +1,6 @@ # YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 # Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ -# Example usage: python train.py --data Argoverse_HD.yaml +# Example usage: python train.py --data Argoverse.yaml # parent # ├── yolov5 # └── datasets diff --git a/hubconf.py b/hubconf.py index 55536c3a42f3..7ef512655ae2 100644 --- a/hubconf.py +++ b/hubconf.py @@ -27,7 +27,7 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo from models.yolo import Model, attempt_load from utils.general import check_requirements, set_logging - from utils.google_utils import attempt_download + from utils.downloads import attempt_download from utils.torch_utils import select_device file = Path(__file__).absolute() diff --git a/models/experimental.py b/models/experimental.py index 0d996d913b0c..276ca954b173 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -5,7 +5,7 @@ import torch.nn as nn from models.common import Conv, DWConv -from utils.google_utils import attempt_download +from utils.downloads import attempt_download class CrossConv(nn.Module): diff --git a/train.py b/train.py index bd1fa9c74328..020883ce98ba 100644 --- a/train.py +++ b/train.py @@ -35,7 +35,7 @@ from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \ strip_optimizer, get_latest_run, check_dataset, check_file, check_git_status, check_img_size, \ check_requirements, print_mutation, set_logging, one_cycle, colorstr -from utils.google_utils import attempt_download +from utils.downloads import attempt_download from utils.loss import ComputeLoss from utils.plots import 
plot_labels, plot_evolution from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, de_parallel @@ -78,9 +78,9 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary plots = not evolve # create plots cuda = device.type != 'cpu' init_seeds(1 + RANK) - with open(data, encoding='ascii', errors='ignore') as f: - data_dict = yaml.safe_load(f) - + with torch_distributed_zero_first(RANK): + data_dict = check_dataset(data) # check + train_path, val_path = data_dict['train'], data_dict['val'] nc = 1 if single_cls else int(data_dict['nc']) # number of classes names = ['item'] if single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names assert len(names) == nc, f'{len(names)} names found for nc={nc} dataset in {data}' # check @@ -106,9 +106,6 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary LOGGER.info(f'Transferred {len(csd)}/{len(model.state_dict())} items from {weights}') # report else: model = Model(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create - with torch_distributed_zero_first(RANK): - check_dataset(data_dict) # check - train_path, val_path = data_dict['train'], data_dict['val'] # Freeze freeze = [] # parameter names to freeze (full or partial) diff --git a/utils/datasets.py b/utils/datasets.py index 5b5ded4bbc41..fffe39a61459 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -884,11 +884,11 @@ def verify_image_label(args): return [None, None, None, None, nm, nf, ne, nc, msg] -def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False): +def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False, profile=False, hub=False): """ Return dataset statistics dictionary with images and instances counts per split per class - Usage1: from utils.datasets import *; dataset_stats('coco128.yaml', verbose=True) - Usage2: from utils.datasets import *; dataset_stats('../datasets/coco128.zip', verbose=True) - + To run in parent directory: export PYTHONPATH="$PWD/yolov5" + Usage1: from utils.datasets import *; dataset_stats('coco128.yaml', autodownload=True) + Usage2: from utils.datasets import *; dataset_stats('../datasets/coco128_with_yaml.zip') Arguments path: Path to data.yaml or data.zip (with data.yaml inside data.zip) autodownload: Attempt to download dataset if not found locally @@ -897,35 +897,42 @@ def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False): def round_labels(labels): # Update labels to integer class and 6 decimal place floats - return [[int(c), *[round(x, 6) for x in points]] for c, *points in labels] + return [[int(c), *[round(x, 4) for x in points]] for c, *points in labels] def unzip(path): # Unzip data.zip TODO: CONSTRAINT: path/to/abc.zip MUST unzip to 'path/to/abc/' if str(path).endswith('.zip'): # path is data.zip + assert Path(path).is_file(), f'Error unzipping {path}, file not found' assert os.system(f'unzip -q {path} -d {path.parent}') == 0, f'Error unzipping {path}' - data_dir = path.with_suffix('') # dataset directory - return True, data_dir, list(data_dir.rglob('*.yaml'))[0] # zipped, data_dir, yaml_path + dir = path.with_suffix('') # dataset directory + return True, str(dir), next(dir.rglob('*.yaml')) # zipped, data_dir, yaml_path else: # path is data.yaml return False, None, path + def hub_ops(f, max_dim=1920): + # HUB ops for 1 image 'f' + im = Image.open(f) + r = max_dim / max(im.height, im.width) # ratio + if r < 1.0: # image too large + im = im.resize((int(im.width * r), int(im.height * r))) + im.save(im_dir / 
Path(f).name, quality=75) # save + zipped, data_dir, yaml_path = unzip(Path(path)) with open(check_file(yaml_path), encoding='ascii', errors='ignore') as f: data = yaml.safe_load(f) # data dict if zipped: data['path'] = data_dir # TODO: should this be dir.resolve()? check_dataset(data, autodownload) # download dataset if missing - nc = data['nc'] # number of classes - stats = {'nc': nc, 'names': data['names']} # statistics dictionary + hub_dir = Path(data['path'] + ('-hub' if hub else '')) + stats = {'nc': data['nc'], 'names': data['names']} # statistics dictionary for split in 'train', 'val', 'test': if data.get(split) is None: stats[split] = None # i.e. no test set continue x = [] - dataset = LoadImagesAndLabels(data[split], augment=False, rect=True) # load dataset - if split == 'train': - cache_path = Path(dataset.label_files[0]).parent.with_suffix('.cache') # *.cache path + dataset = LoadImagesAndLabels(data[split]) # load dataset for label in tqdm(dataset.labels, total=dataset.n, desc='Statistics'): - x.append(np.bincount(label[:, 0].astype(int), minlength=nc)) + x.append(np.bincount(label[:, 0].astype(int), minlength=data['nc'])) x = np.array(x) # shape(128x80) stats[split] = {'instance_stats': {'total': int(x.sum()), 'per_class': x.sum(0).tolist()}, 'image_stats': {'total': dataset.n, 'unlabelled': int(np.all(x == 0, 1).sum()), @@ -933,10 +940,37 @@ def unzip(path): 'labels': [{str(Path(k).name): round_labels(v.tolist())} for k, v in zip(dataset.img_files, dataset.labels)]} + if hub: + im_dir = hub_dir / 'images' + im_dir.mkdir(parents=True, exist_ok=True) + for _ in tqdm(ThreadPool(NUM_THREADS).imap(hub_ops, dataset.img_files), total=dataset.n, desc='HUB Ops'): + pass + + # Profile + stats_path = hub_dir / 'stats.json' + if profile: + for _ in range(1): + file = stats_path.with_suffix('.npy') + t1 = time.time() + np.save(file, stats) + t2 = time.time() + x = np.load(file, allow_pickle=True) + print(f'stats.npy times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write') + + file = stats_path.with_suffix('.json') + t1 = time.time() + with open(file, 'w') as f: + json.dump(stats, f) # save stats *.json + t2 = time.time() + with open(file, 'r') as f: + x = json.load(f) # load hyps dict + print(f'stats.json times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write') + # Save, print and return - with open(cache_path.with_suffix('.json'), 'w') as f: - json.dump(stats, f) # save stats *.json + if hub: + print(f'Saving {stats_path.resolve()}...') + with open(stats_path, 'w') as f: + json.dump(stats, f) # save stats.json if verbose: print(json.dumps(stats, indent=2, sort_keys=False)) - # print(yaml.dump([stats], sort_keys=False, default_flow_style=False)) return stats diff --git a/utils/google_utils.py b/utils/downloads.py similarity index 98% rename from utils/google_utils.py rename to utils/downloads.py index aefc7de2db2e..00156962380b 100644 --- a/utils/google_utils.py +++ b/utils/downloads.py @@ -1,4 +1,4 @@ -# Google utils: https://cloud.google.com/storage/docs/reference/libraries +# Download utils import os import platform @@ -115,6 +115,10 @@ def get_token(cookie="./cookie"): return line.split()[-1] return "" + +# Google utils: https://cloud.google.com/storage/docs/reference/libraries ---------------------------------------------- +# +# # def upload_blob(bucket_name, source_file_name, destination_blob_name): # # Uploads a file to a bucket # # https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python diff --git a/utils/general.py b/utils/general.py index 
db81f7679cd7..6b00ddf2ff72 100755 --- a/utils/general.py +++ b/utils/general.py @@ -24,7 +24,7 @@ import torchvision import yaml -from utils.google_utils import gsutil_getsize +from utils.downloads import gsutil_getsize from utils.metrics import box_iou, fitness from utils.torch_utils import init_torch_seeds @@ -224,16 +224,30 @@ def check_file(file): def check_dataset(data, autodownload=True): - # Download dataset if not found locally - path = Path(data.get('path', '')) # optional 'path' field - if path: - for k in 'train', 'val', 'test': - if data.get(k): # prepend path - data[k] = str(path / data[k]) if isinstance(data[k], str) else [str(path / x) for x in data[k]] + # Download and/or unzip dataset if not found locally + # Usage: https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128_with_yaml.zip + + # Download (optional) + extract_dir = '' + if isinstance(data, (str, Path)) and str(data).endswith('.zip'): # i.e. gs://bucket/dir/coco128.zip + download(data, dir='../datasets', unzip=True, delete=False, curl=False, threads=1) + data = next((Path('../datasets') / Path(data).stem).rglob('*.yaml')) + extract_dir, autodownload = data.parent, False + + # Read yaml (optional) + if isinstance(data, (str, Path)): + with open(data, encoding='ascii', errors='ignore') as f: + data = yaml.safe_load(f) # dictionary + + # Parse yaml + path = extract_dir or Path(data.get('path') or '') # optional 'path' default to '.' + for k in 'train', 'val', 'test': + if data.get(k): # prepend path + data[k] = str(path / data[k]) if isinstance(data[k], str) else [str(path / x) for x in data[k]] assert 'nc' in data, "Dataset 'nc' key missing." if 'names' not in data: - data['names'] = [str(i) for i in range(data['nc'])] # assign class names if missing + data['names'] = [f'class{i}' for i in range(data['nc'])] # assign class names if missing train, val, test, s = [data.get(x) for x in ('train', 'val', 'test', 'download')] if val: val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path @@ -256,13 +270,17 @@ def check_dataset(data, autodownload=True): else: raise Exception('Dataset not found.') + return data # dictionary + def download(url, dir='.', unzip=True, delete=True, curl=False, threads=1): - # Multi-threaded file download and unzip function + # Multi-threaded file download and unzip function, used in data.yaml for autodownload def download_one(url, dir): # Download 1 file f = dir / Path(url).name # filename - if not f.exists(): + if Path(url).is_file(): # exists in current path + Path(url).rename(f) # move to dir + elif not f.exists(): print(f'Downloading {url} to {f}...') if curl: os.system(f"curl -L '{url}' -o '{f}' --retry 9 -C -") # curl download, retry and resume on fail @@ -286,7 +304,7 @@ def download_one(url, dir): pool.close() pool.join() else: - for u in tuple(url) if isinstance(url, str) else url: + for u in [url] if isinstance(url, (str, Path)) else url: download_one(u, dir) diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index cd5939155169..f4f228df4e24 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -100,7 +100,7 @@ class WandbLogger(): """ def __init__(self, opt, run_id, data_dict, job_type='Training'): - ''' + """ - Initialize WandbLogger instance - Upload dataset if opt.upload_dataset is True - Setup trainig processes if job_type is 'Training' @@ -111,7 +111,7 @@ def __init__(self, opt, run_id, data_dict, job_type='Training'): data_dict (Dict) -- Dictionary conataining 
info about the dataset to be used job_type (str) -- To set the job_type for this run - ''' + """ # Pre-training routine -- self.job_type = job_type self.wandb, self.wandb_run = wandb, None if not wandb else wandb.run @@ -157,7 +157,7 @@ def __init__(self, opt, run_id, data_dict, job_type='Training'): self.data_dict = self.check_and_upload_dataset(opt) def check_and_upload_dataset(self, opt): - ''' + """ Check if the dataset format is compatible and upload it as W&B artifact arguments: @@ -165,7 +165,7 @@ def check_and_upload_dataset(self, opt): returns: Updated dataset info dictionary where local dataset paths are replaced by WAND_ARFACT_PREFIX links. - ''' + """ assert wandb, 'Install wandb to upload dataset' config_path = self.log_dataset_artifact(check_file(opt.data), opt.single_cls, @@ -176,7 +176,7 @@ def check_and_upload_dataset(self, opt): return wandb_data_dict def setup_training(self, opt, data_dict): - ''' + """ Setup the necessary processes for training YOLO models: - Attempt to download model checkpoint and dataset artifacts if opt.resume stats with WANDB_ARTIFACT_PREFIX - Update data_dict, to contain info of previous run if resumed and the paths of dataset artifact if downloaded @@ -188,7 +188,7 @@ def setup_training(self, opt, data_dict): returns: data_dict (Dict) -- contains the updated info about the dataset to be used for training - ''' + """ self.log_dict, self.current_epoch = {}, 0 self.bbox_interval = opt.bbox_interval if isinstance(opt.resume, str): @@ -224,7 +224,7 @@ def setup_training(self, opt, data_dict): return data_dict def download_dataset_artifact(self, path, alias): - ''' + """ download the model checkpoint artifact if the path starts with WANDB_ARTIFACT_PREFIX arguments: @@ -234,7 +234,7 @@ def download_dataset_artifact(self, path, alias): returns: (str, wandb.Artifact) -- path of the downladed dataset and it's corresponding artifact object if dataset is found otherwise returns (None, None) - ''' + """ if isinstance(path, str) and path.startswith(WANDB_ARTIFACT_PREFIX): artifact_path = Path(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ":" + alias) dataset_artifact = wandb.use_artifact(artifact_path.as_posix().replace("\\", "/")) @@ -244,12 +244,12 @@ def download_dataset_artifact(self, path, alias): return None, None def download_model_artifact(self, opt): - ''' + """ download the model checkpoint artifact if the resume path starts with WANDB_ARTIFACT_PREFIX arguments: opt (namespace) -- Commandline arguments for this run - ''' + """ if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): model_artifact = wandb.use_artifact(remove_prefix(opt.resume, WANDB_ARTIFACT_PREFIX) + ":latest") assert model_artifact is not None, 'Error: W&B model artifact doesn\'t exist' @@ -262,7 +262,7 @@ def download_model_artifact(self, opt): return None, None def log_model(self, path, opt, epoch, fitness_score, best_model=False): - ''' + """ Log the model checkpoint as W&B artifact arguments: @@ -271,7 +271,7 @@ def log_model(self, path, opt, epoch, fitness_score, best_model=False): epoch (int) -- Current epoch number fitness_score (float) -- fitness score for current epoch best_model (boolean) -- Boolean representing if the current checkpoint is the best yet. 
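The check_dataset() rewrite in the utils/general.py hunk above reduces to a simple contract: accept a path to a data.yaml or a .zip, download/unzip if needed, and hand back the parsed dictionary. A condensed sketch, assuming zips unpack into ../datasets as in the source:

    from pathlib import Path
    import yaml

    def check_dataset_sketch(data):
        if isinstance(data, (str, Path)) and str(data).endswith('.zip'):
            # download(data, dir='../datasets', unzip=True, delete=False)  # as in utils/general.py
            data = next((Path('../datasets') / Path(data).stem).rglob('*.yaml'))
        if isinstance(data, (str, Path)):
            with open(data, encoding='ascii', errors='ignore') as f:
                data = yaml.safe_load(f)   # callers now receive a dict, not a filename
        return data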
- ''' + """ model_artifact = wandb.Artifact('run_' + wandb.run.id + '_model', type='model', metadata={ 'original_url': str(path), 'epochs_trained': epoch + 1, @@ -286,7 +286,7 @@ def log_model(self, path, opt, epoch, fitness_score, best_model=False): print("Saving model artifact on epoch ", epoch + 1) def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False): - ''' + """ Log the dataset as W&B artifact and return the new data file with W&B links arguments: @@ -298,10 +298,8 @@ def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config= returns: the new .yaml file with artifact links. it can be used to start training directly from artifacts - ''' - with open(data_file, encoding='ascii', errors='ignore') as f: - data = yaml.safe_load(f) # data dict - check_dataset(data) + """ + data = check_dataset(data_file) # parse and check nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names']) names = {k: v for k, v in enumerate(names)} # to index dictionary self.train_artifact = self.create_dataset_table(LoadImagesAndLabels( @@ -330,17 +328,17 @@ def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config= return path def map_val_table_path(self): - ''' + """ Map the validation dataset Table like name of file -> it's id in the W&B Table. Useful for - referencing artifacts for evaluation. - ''' + """ self.val_table_path_map = {} print("Mapping dataset") for i, data in enumerate(tqdm(self.val_table.data)): self.val_table_path_map[data[3]] = data[0] def create_dataset_table(self, dataset, class_to_id, name='dataset'): - ''' + """ Create and return W&B artifact containing W&B Table of the dataset. arguments: @@ -350,7 +348,7 @@ def create_dataset_table(self, dataset, class_to_id, name='dataset'): returns: dataset artifact to be logged or used - ''' + """ # TODO: Explore multiprocessing to slpit this loop parallely| This is essential for speeding up the the logging artifact = wandb.Artifact(name=name, type="dataset") img_files = tqdm([dataset.path]) if isinstance(dataset.path, str) and Path(dataset.path).is_dir() else None @@ -382,14 +380,14 @@ def create_dataset_table(self, dataset, class_to_id, name='dataset'): return artifact def log_training_progress(self, predn, path, names): - ''' + """ Build evaluation Table. Uses reference from validation dataset table. arguments: predn (list): list of predictions in the native space in the format - [xmin, ymin, xmax, ymax, confidence, class] path (str): local path of the current evaluation image names (dict(int, str)): hash map that maps class ids to labels - ''' + """ class_set = wandb.Classes([{'id': id, 'name': name} for id, name in names.items()]) box_data = [] total_conf = 0 @@ -412,17 +410,17 @@ def log_training_progress(self, predn, path, names): ) def val_one_image(self, pred, predn, path, names, im): - ''' + """ Log validation data for one image. 
updates the result Table if validation dataset is uploaded and log bbox media panel arguments: pred (list): list of scaled predictions in the format - [xmin, ymin, xmax, ymax, confidence, class] predn (list): list of predictions in the native space - [xmin, ymin, xmax, ymax, confidence, class] path (str): local path of the current evaluation image - ''' + """ if self.val_table and self.result_table: # Log Table if Val dataset is uploaded as artifact self.log_training_progress(predn, path, names) - + if len(self.bbox_media_panel_images) < self.max_imgs_to_log and self.current_epoch > 0: if self.current_epoch % self.bbox_interval == 0: box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, @@ -434,23 +432,23 @@ def val_one_image(self, pred, predn, path, names, im): self.bbox_media_panel_images.append(wandb.Image(im, boxes=boxes, caption=path.name)) def log(self, log_dict): - ''' + """ save the metrics to the logging dictionary arguments: log_dict (Dict) -- metrics/media to be logged in current step - ''' + """ if self.wandb_run: for key, value in log_dict.items(): self.log_dict[key] = value def end_epoch(self, best_result=False): - ''' + """ commit the log_dict, model artifacts and Tables to W&B and flush the log_dict. arguments: best_result (boolean): Boolean representing if the result of this evaluation is best or not - ''' + """ if self.wandb_run: with all_logging_disabled(): if self.bbox_media_panel_images: @@ -468,9 +466,9 @@ def end_epoch(self, best_result=False): self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") def finish_run(self): - ''' + """ Log metrics if any and finish the current W&B run - ''' + """ if self.wandb_run: if self.log_dict: with all_logging_disabled(): diff --git a/val.py b/val.py index c58bcdb209c2..ee2287644b92 100644 --- a/val.py +++ b/val.py @@ -123,9 +123,7 @@ def run(data, # model = nn.DataParallel(model) # Data - with open(data, encoding='ascii', errors='ignore') as f: - data = yaml.safe_load(f) - check_dataset(data) # check + data = check_dataset(data) # check # Half half &= device.type != 'cpu' # half precision only supported on CUDA From 1f31b7c503867b6e8f493cf76ed2da490f834fd4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 28 Jul 2021 15:04:30 +0200 Subject: [PATCH 0440/1976] Create yolov5-bifpn.yaml (#4195) --- models/hub/yolov5-bifpn.yaml | 40 ++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) create mode 100644 models/hub/yolov5-bifpn.yaml diff --git a/models/hub/yolov5-bifpn.yaml b/models/hub/yolov5-bifpn.yaml new file mode 100644 index 000000000000..f1dd7c601b9c --- /dev/null +++ b/models/hub/yolov5-bifpn.yaml @@ -0,0 +1,40 @@ +# Parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple +anchors: + - [ 10,13, 16,30, 33,23 ] # P3/8 + - [ 30,61, 62,45, 59,119 ] # P4/16 + - [ 116,90, 156,198, 373,326 ] # P5/32 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 + [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 + [ -1, 3, Bottleneck, [ 128 ] ], + [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 + [ -1, 9, BottleneckCSP, [ 256 ] ], + [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 + [ -1, 9, BottleneckCSP, [ 512 ] ], + [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 7-P5/32 + [ -1, 1, SPP, [ 1024, [ 5, 9, 13 ] ] ], + [ -1, 6, BottleneckCSP, [ 1024 ] ], # 9 + ] + +# YOLOv5 BiFPN head +head: + [ [ -1, 3, BottleneckCSP, [ 1024, False ] ], # 10 (P5/32-large) + + [ 
-1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 20, 6 ], 1, Concat, [ 1 ] ], # cat P4 + [ -1, 1, Conv, [ 512, 1, 1 ] ], + [ -1, 3, BottleneckCSP, [ 512, False ] ], # 14 (P4/16-medium) + + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 + [ -1, 1, Conv, [ 256, 1, 1 ] ], + [ -1, 3, BottleneckCSP, [ 256, False ] ], # 18 (P3/8-small) + + [ [ 18, 14, 10 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5) + ] From 2683b180795c134b7bcdcebd515fac8c0e9cc7a6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 28 Jul 2021 16:55:39 +0200 Subject: [PATCH 0441/1976] Update Hub Path inputs (#4200) --- hubconf.py | 4 +++- models/common.py | 4 ++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/hubconf.py b/hubconf.py index 7ef512655ae2..93ea84d69dd3 100644 --- a/hubconf.py +++ b/hubconf.py @@ -115,9 +115,11 @@ def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=Tr import cv2 import numpy as np from PIL import Image + from pathlib import Path imgs = ['data/images/zidane.jpg', # filename - 'https://github.com/ultralytics/yolov5/releases/download/v1.0/zidane.jpg', # URI + Path('data/images/zidane.jpg'), # Path + 'https://ultralytics.com/images/zidane.jpg', # URI cv2.imread('data/images/bus.jpg')[:, :, ::-1], # OpenCV Image.open('data/images/bus.jpg'), # PIL np.zeros((320, 640, 3))] # numpy diff --git a/models/common.py b/models/common.py index 901648b693a3..fc085e22b16b 100644 --- a/models/common.py +++ b/models/common.py @@ -2,7 +2,7 @@ import logging from copy import copy -from pathlib import Path, PosixPath +from pathlib import Path import math import numpy as np @@ -248,7 +248,7 @@ def forward(self, imgs, size=640, augment=False, profile=False): shape0, shape1, files = [], [], [] # image and inference shapes, filenames for i, im in enumerate(imgs): f = f'image{i}' # filename - if isinstance(im, (str, PosixPath)): # filename or uri + if isinstance(im, (str, Path)): # filename or uri im, f = Image.open(requests.get(im, stream=True).raw if str(im).startswith('http') else im), im im = np.asarray(exif_transpose(im)) elif isinstance(im, Image.Image): # PIL Image From e88e8f7a988662fb2d613e1aca3ae89214c84084 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Wed, 28 Jul 2021 21:10:08 +0530 Subject: [PATCH 0442/1976] W&B: Restructure code to support the new dataset_check() feature (#4197) * Improve docstrings and run names * default wandb login prompt with timeout * return key * Update api_key check logic * Properly support zipped dataset feature * update docstring * Revert tuorial change * extend changes to log_dataset * add run name * bug fix * bug fix * Update comment * fix import check * remove unused import * Hardcore .yaml file extension * reduce code * Reformat using pycharm Co-authored-by: Glenn Jocher --- README.md | 0 train.py | 17 ++++++---- utils/loggers/__init__.py | 13 +++----- utils/loggers/wandb/log_dataset.py | 6 ++-- utils/loggers/wandb/sweep.py | 3 +- utils/loggers/wandb/wandb_utils.py | 53 +++++++++++++++++++----------- 6 files changed, 52 insertions(+), 40 deletions(-) mode change 100755 => 100644 README.md diff --git a/README.md b/README.md old mode 100755 new mode 100644 diff --git a/train.py b/train.py index 020883ce98ba..7a8c15a6551a 100644 --- a/train.py +++ b/train.py @@ -73,24 +73,29 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary yaml.safe_dump(hyp, f, sort_keys=False) with open(save_dir / 'opt.yaml', 'w') as f: yaml.safe_dump(vars(opt), f, sort_keys=False) + data_dict 
= None + + # Loggers + if RANK in [-1, 0]: + loggers = Loggers(save_dir, weights, opt, hyp, LOGGER).start() # loggers dict + if loggers.wandb: + data_dict = loggers.wandb.data_dict + if resume: + weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp + # Config plots = not evolve # create plots cuda = device.type != 'cpu' init_seeds(1 + RANK) with torch_distributed_zero_first(RANK): - data_dict = check_dataset(data) # check + data_dict = data_dict or check_dataset(data) # check if None train_path, val_path = data_dict['train'], data_dict['val'] nc = 1 if single_cls else int(data_dict['nc']) # number of classes names = ['item'] if single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names assert len(names) == nc, f'{len(names)} names found for nc={nc} dataset in {data}' # check is_coco = data.endswith('coco.yaml') and nc == 80 # COCO dataset - # Loggers - if RANK in [-1, 0]: - loggers = Loggers(save_dir, weights, opt, hyp, data_dict, LOGGER).start() # loggers dict - if loggers.wandb and resume: - weights, epochs, hyp, data_dict = opt.weights, opt.epochs, opt.hyp, loggers.wandb.data_dict # Model pretrained = weights.endswith('.pt') diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index e65c8f9fd085..027cef4d283a 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -1,9 +1,7 @@ # YOLOv5 experiment logging utils - +import torch import warnings from threading import Thread - -import torch from torch.utils.tensorboard import SummaryWriter from utils.general import colorstr, emojis @@ -23,12 +21,11 @@ class Loggers(): # YOLOv5 Loggers class - def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, data_dict=None, logger=None, include=LOGGERS): + def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, include=LOGGERS): self.save_dir = save_dir self.weights = weights self.opt = opt self.hyp = hyp - self.data_dict = data_dict self.logger = logger # for printing results to console self.include = include for k in LOGGERS: @@ -38,9 +35,7 @@ def start(self): self.csv = True # always log to csv # Message - try: - import wandb - except ImportError: + if not wandb: prefix = colorstr('Weights & Biases: ') s = f"{prefix}run 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs (RECOMMENDED)" print(emojis(s)) @@ -57,7 +52,7 @@ def start(self): assert 'wandb' in self.include and wandb run_id = torch.load(self.weights).get('wandb_id') if self.opt.resume else None self.opt.hyp = self.hyp # add hyperparameters - self.wandb = WandbLogger(self.opt, run_id, self.data_dict) + self.wandb = WandbLogger(self.opt, run_id) except: self.wandb = None diff --git a/utils/loggers/wandb/log_dataset.py b/utils/loggers/wandb/log_dataset.py index b5663c92ee09..1328e20806ef 100644 --- a/utils/loggers/wandb/log_dataset.py +++ b/utils/loggers/wandb/log_dataset.py @@ -1,5 +1,4 @@ import argparse - import yaml from wandb_utils import WandbLogger @@ -8,9 +7,7 @@ def create_dataset_artifact(opt): - with open(opt.data, encoding='ascii', errors='ignore') as f: - data = yaml.safe_load(f) # data dict - logger = WandbLogger(opt, '', None, data, job_type='Dataset Creation') # TODO: return value unused + logger = WandbLogger(opt, None, job_type='Dataset Creation') # TODO: return value unused if __name__ == '__main__': @@ -19,6 +16,7 @@ def create_dataset_artifact(opt): parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset') parser.add_argument('--project', type=str, default='YOLOv5', 
help='name of W&B Project')
    parser.add_argument('--entity', default=None, help='W&B entity')
+    parser.add_argument('--name', type=str, default='log dataset', help='name of W&B run')

    opt = parser.parse_args()
    opt.resume = False # Explicitly disallow resume check for dataset upload job
diff --git a/utils/loggers/wandb/sweep.py b/utils/loggers/wandb/sweep.py
index 8e952d03c085..a0c76a10caa1 100644
--- a/utils/loggers/wandb/sweep.py
+++ b/utils/loggers/wandb/sweep.py
@@ -1,7 +1,6 @@
 import sys
-from pathlib import Path
-
 import wandb
+from pathlib import Path

 FILE = Path(__file__).absolute()
 sys.path.append(FILE.parents[2].as_posix()) # add utils/ to path
diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py
index f4f228df4e24..ba2d830df07b 100644
--- a/utils/loggers/wandb/wandb_utils.py
+++ b/utils/loggers/wandb/wandb_utils.py
@@ -3,10 +3,9 @@
 import logging
 import os
 import sys
+import yaml
 from contextlib import contextmanager
 from pathlib import Path
-
-import yaml
 from tqdm import tqdm

 FILE = Path(__file__).absolute()
@@ -99,7 +98,7 @@ class WandbLogger():
    https://docs.wandb.com/guides/integrations/yolov5
    """

-    def __init__(self, opt, run_id, data_dict, job_type='Training'):
+    def __init__(self, opt, run_id, job_type='Training'):
        """
        - Initialize WandbLogger instance
        - Upload dataset if opt.upload_dataset is True
@@ -108,7 +107,6 @@ def __init__(self, opt, run_id, data_dict, job_type='Training'):
        arguments:
        opt (namespace) -- Commandline arguments for this run
        run_id (str) -- Run ID of W&B run to be resumed
-       data_dict (Dict) -- Dictionary containing info about the dataset to be used
        job_type (str) -- To set the job_type for this run
       """
@@ -119,10 +117,11 @@ def __init__(self, opt, run_id, data_dict, job_type='Training'):
        self.train_artifact_path, self.val_artifact_path = None, None
        self.result_artifact = None
        self.val_table, self.result_table = None, None
-       self.data_dict = data_dict
        self.bbox_media_panel_images = []
        self.val_table_path_map = None
        self.max_imgs_to_log = 16
+       self.wandb_artifact_data_dict = None
+       self.data_dict = None
        # It's more elegant to stick to 1 wandb.init call, but useful config data is overwritten in the WandbLogger's wandb.init call
        if isinstance(opt.resume, str): # checks resume from artifact
            if opt.resume.startswith(WANDB_ARTIFACT_PREFIX):
@@ -148,11 +147,23 @@ def __init__(self, opt, run_id, data_dict, job_type='Training'):
        if self.wandb_run:
            if self.job_type == 'Training':
                if not opt.resume:
-                   wandb_data_dict = self.check_and_upload_dataset(opt) if opt.upload_dataset else data_dict
-                   # Info useful for resuming from artifacts
-                   self.wandb_run.config.update({'opt': vars(opt), 'data_dict': wandb_data_dict},
-                                                allow_val_change=True)
-               self.data_dict = self.setup_training(opt, data_dict)
+                   if opt.upload_dataset:
+                       self.wandb_artifact_data_dict = self.check_and_upload_dataset(opt)
+
+                   elif opt.data.endswith('_wandb.yaml'): # When dataset is W&B artifact
+                       with open(opt.data, encoding='ascii', errors='ignore') as f:
+                           data_dict = yaml.safe_load(f)
+                       self.data_dict = data_dict
+                   else: # Local .yaml dataset file or .zip file
+                       self.data_dict = check_dataset(opt.data)
+
+               self.setup_training(opt)
+               # write data_dict to config. useful for resuming from artifacts
+               if not self.wandb_artifact_data_dict:
+                   self.wandb_artifact_data_dict = self.data_dict
+               self.wandb_run.config.update({'data_dict': self.wandb_artifact_data_dict},
+                                            allow_val_change=True)
+
        if self.job_type == 'Dataset Creation':
            self.data_dict = self.check_and_upload_dataset(opt)
@@ -167,7 +178,7 @@ def check_and_upload_dataset(self, opt):
        Updated dataset info dictionary where local dataset paths are replaced by WANDB_ARTIFACT_PREFIX links.
        """
        assert wandb, 'Install wandb to upload dataset'
-       config_path = self.log_dataset_artifact(check_file(opt.data),
+       config_path = self.log_dataset_artifact(opt.data,
                                                opt.single_cls,
                                                'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem)
        print("Created dataset config file ", config_path)
@@ -175,7 +186,7 @@ def check_and_upload_dataset(self, opt):
        wandb_data_dict = yaml.safe_load(f)
        return wandb_data_dict

-   def setup_training(self, opt, data_dict):
+   def setup_training(self, opt):
        """
        Setup the necessary processes for training YOLO models:
        - Attempt to download model checkpoint and dataset artifacts if opt.resume starts with WANDB_ARTIFACT_PREFIX
@@ -184,10 +195,7 @@ def setup_training(self, opt, data_dict):
        arguments:
        opt (namespace) -- commandline arguments for this run
-       data_dict (Dict) -- Dataset dictionary for this run
-
-       returns:
-       data_dict (Dict) -- contains the updated info about the dataset to be used for training
        """
        self.log_dict, self.current_epoch = {}, 0
        self.bbox_interval = opt.bbox_interval
@@ -198,8 +206,10 @@ def setup_training(self, opt):
            config = self.wandb_run.config
            opt.weights, opt.save_period, opt.batch_size, opt.bbox_interval, opt.epochs, opt.hyp = str(
                self.weights), config.save_period, config.batch_size, config.bbox_interval, config.epochs, \
-               config.opt['hyp']
+               config.hyp
            data_dict = dict(self.wandb_run.config.data_dict) # eliminates the need for config file to resume
+       else:
+           data_dict = self.data_dict
        if self.val_artifact is None: # If --upload_dataset is set, use the existing artifact, don't download
            self.train_artifact_path, self.train_artifact = self.download_dataset_artifact(data_dict.get('train'),
                                                                                           opt.artifact_alias)
@@ -221,7 +231,10 @@ def setup_training(self, opt):
            self.map_val_table_path()
        if opt.bbox_interval == -1:
            self.bbox_interval = opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else 1
-       return data_dict
+       train_from_artifact = self.train_artifact_path is not None and self.val_artifact_path is not None
+       # Update the data_dict to point to local artifacts dir
+       if train_from_artifact:
+           self.data_dict = data_dict

    def download_dataset_artifact(self, path, alias):
        """
@@ -299,7 +312,8 @@ def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=
        returns:
        the new .yaml file with artifact links. It can be used to start training directly from artifacts.
        """
-       data = check_dataset(data_file) # parse and check
+       self.data_dict = check_dataset(data_file) # parse and check
+       data = dict(self.data_dict)
        nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names'])
        names = {k: v for k, v in enumerate(names)} # to index dictionary
        self.train_artifact = self.create_dataset_table(LoadImagesAndLabels(
@@ -310,7 +324,8 @@ def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=
            data['train'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'train')
        if data.get('val'):
            data['val'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'val')
-       path = data_file if overwrite_config else '_wandb.'.join(data_file.rsplit('.', 1)) # updated data.yaml path
+       path = Path(data_file).stem
+       path = (path if overwrite_config else path + '_wandb') + '.yaml' # updated data.yaml path
        data.pop('download', None)
        data.pop('path', None)
        with open(path, 'w') as f:

From e016b15555591be8ee4fc5c164df9436d5916368 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Wed, 28 Jul 2021 21:25:20 +0200
Subject: [PATCH 0443/1976] Update yolov5-bifpn.yaml (#4208)

---
 models/hub/yolov5-bifpn.yaml | 52 ++++++++++++++++++++----------------
 1 file changed, 29 insertions(+), 23 deletions(-)

diff --git a/models/hub/yolov5-bifpn.yaml b/models/hub/yolov5-bifpn.yaml
index f1dd7c601b9c..69f7b5938c58 100644
--- a/models/hub/yolov5-bifpn.yaml
+++ b/models/hub/yolov5-bifpn.yaml
@@ -3,38 +3,44 @@ nc: 80 # number of classes
 depth_multiple: 1.0 # model depth multiple
 width_multiple: 1.0 # layer channel multiple
 anchors:
-  - [ 10,13, 16,30, 33,23 ] # P3/8
-  - [ 30,61, 62,45, 59,119 ] # P4/16
-  - [ 116,90, 156,198, 373,326 ] # P5/32
+  - [10,13, 16,30, 33,23] # P3/8
+  - [30,61, 62,45, 59,119] # P4/16
+  - [116,90, 156,198, 373,326] # P5/32

 # YOLOv5 backbone
 backbone:
   # [from, number, module, args]
-  [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2
-    [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4
-    [ -1, 3, Bottleneck, [ 128 ] ],
-    [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8
-    [ -1, 9, BottleneckCSP, [ 256 ] ],
-    [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16
-    [ -1, 9, BottleneckCSP, [ 512 ] ],
-    [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 7-P5/32
-    [ -1, 1, SPP, [ 1024, [ 5, 9, 13 ] ] ],
-    [ -1, 6, BottleneckCSP, [ 1024 ] ], # 9
+  [[-1, 1, Focus, [64, 3]], # 0-P1/2
+   [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
+   [-1, 3, C3, [128]],
+   [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
+   [-1, 9, C3, [256]],
+   [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
+   [-1, 9, C3, [512]],
+   [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
+   [-1, 1, SPP, [1024, [5, 9, 13]]],
+   [-1, 3, C3, [1024, False]], # 9
  ]

 # YOLOv5 BiFPN head
 head:
-  [ [ -1, 3, BottleneckCSP, [ 1024, False ] ], # 10 (P5/32-large)
+  [[-1, 1, Conv, [512, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 6], 1, Concat, [1]], # cat backbone P4
+   [-1, 3, C3, [512, False]], # 13

-    [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
-    [ [ -1, 20, 6 ], 1, Concat, [ 1 ] ], # cat P4
-    [ -1, 1, Conv, [ 512, 1, 1 ] ],
-    [ -1, 3, BottleneckCSP, [ 512, False ] ], # 14 (P4/16-medium)
+   [-1, 1, Conv, [256, 1, 1]],
+   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+   [[-1, 4], 1, Concat, [1]], # cat backbone P3
+   [-1, 3, C3, [256, False]], # 17 (P3/8-small)

-    [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
-    [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3
-    [ -1, 1, Conv, [ 256, 1, 1 ] ],
-    [ -1, 3, BottleneckCSP, [ 256, False ] ], # 18 (P3/8-small)
+   [-1, 1, Conv, [256, 3, 2]],
+   [[-1, 14, 6], 1, Concat, [1]], # cat P4
+   [-1, 3, C3, [512, False]], # 20 (P4/16-medium)

-    [ [ 18, 14, 10 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5)
+   [-1, 1, Conv, [512, 3, 2]],
+   [[-1, 10], 1, Concat, [1]], # cat head P5
+   [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
+
+   [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
  ]

From 750465edae8a1eb68409377c3bba94a49d3bf196 Mon Sep 17 00:00:00 2001
From: Ayush Chaurasia
Date: Thu, 29 Jul 2021 02:55:15 +0530
Subject: [PATCH 0444/1976] W&B: More improvements and refactoring (#4205)

* Improve docstrings and run names
* default wandb login prompt with timeout
* return key
* Update api_key check logic
* Properly support zipped dataset feature
* update docstring
* Revert tutorial change
* extend changes to log_dataset
* add run name
* bug fix
* bug fix
* Update comment
* fix import check
* remove unused import
* Hardcode .yaml file extension
* reduce code
* Reformat using pycharm
* Remove redundant try catch
* More refactoring and bug fixes
* retry
* Reformat using pycharm
* respect LOGGERS include list

Co-authored-by: Glenn Jocher

---
 utils/loggers/__init__.py          | 8 ++++----
 utils/loggers/wandb/wandb_utils.py | 7 ++++---
 2 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py
index 027cef4d283a..603837d57052 100644
--- a/utils/loggers/__init__.py
+++ b/utils/loggers/__init__.py
@@ -48,12 +48,12 @@ def start(self):
            self.tb = SummaryWriter(str(s))

        # W&B
-       try:
-           assert 'wandb' in self.include and wandb
-           run_id = torch.load(self.weights).get('wandb_id') if self.opt.resume else None
+       if wandb and 'wandb' in self.include:
+           wandb_artifact_resume = isinstance(self.opt.resume, str) and self.opt.resume.startswith('wandb-artifact://')
+           run_id = torch.load(self.weights).get('wandb_id') if self.opt.resume and not wandb_artifact_resume else None
            self.opt.hyp = self.hyp # add hyperparameters
            self.wandb = WandbLogger(self.opt, run_id)
-       except:
+       else:
            self.wandb = None

        return self
diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py
index ba2d830df07b..c978e3ea838d 100644
--- a/utils/loggers/wandb/wandb_utils.py
+++ b/utils/loggers/wandb/wandb_utils.py
@@ -158,11 +158,12 @@ def __init__(self, opt, run_id, job_type='Training'):
                        self.data_dict = check_dataset(opt.data)

                self.setup_training(opt)
-               # write data_dict to config. useful for resuming from artifacts
                if not self.wandb_artifact_data_dict:
                    self.wandb_artifact_data_dict = self.data_dict
-               self.wandb_run.config.update({'data_dict': self.wandb_artifact_data_dict},
-                                            allow_val_change=True)
+               # write data_dict to config. useful for resuming from artifacts. Do this only when not resuming.
+ if not opt.resume: + self.wandb_run.config.update({'data_dict': self.wandb_artifact_data_dict}, + allow_val_change=True) if self.job_type == 'Dataset Creation': self.data_dict = self.check_and_upload_dataset(opt) From b60b62e874e7cf0581c51936e39287c6906a419f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 28 Jul 2021 23:35:14 +0200 Subject: [PATCH 0445/1976] PyCharm reformat (#4209) * PyCharm reformat * YAML reformat * Markdown reformat --- .github/ISSUE_TEMPLATE/bug-report.md | 28 ++++---- .github/ISSUE_TEMPLATE/feature-request.md | 3 +- .github/ISSUE_TEMPLATE/question.md | 1 - CONTRIBUTING.md | 52 ++++++++++---- README.md | 79 ++++++++++++-------- data/Argoverse.yaml | 2 +- data/GlobalWheat2020.yaml | 2 +- data/Objects365.yaml | 82 ++++++++++----------- data/SKU-110K.yaml | 2 +- data/VOC.yaml | 4 +- data/VisDrone.yaml | 2 +- data/coco.yaml | 18 ++--- data/coco128.yaml | 18 ++--- data/scripts/get_coco.sh | 4 +- data/scripts/get_coco128.sh | 2 +- data/xView.yaml | 18 ++--- models/hub/anchors.yaml | 60 ++++++++-------- models/hub/yolov3-spp.yaml | 64 ++++++++--------- models/hub/yolov3-tiny.yaml | 46 ++++++------ models/hub/yolov3.yaml | 64 ++++++++--------- models/hub/yolov5-fpn.yaml | 46 ++++++------ models/hub/yolov5-p2.yaml | 76 ++++++++++---------- models/hub/yolov5-p6.yaml | 80 ++++++++++----------- models/hub/yolov5-p7.yaml | 86 +++++++++++----------- models/hub/yolov5-panet.yaml | 56 +++++++-------- models/hub/yolov5l6.yaml | 88 +++++++++++------------ models/hub/yolov5m6.yaml | 88 +++++++++++------------ models/hub/yolov5s-transformer.yaml | 56 +++++++-------- models/hub/yolov5s6.yaml | 88 +++++++++++------------ models/hub/yolov5x6.yaml | 88 +++++++++++------------ train.py | 4 +- utils/downloads.py | 1 - utils/loggers/__init__.py | 3 +- utils/loggers/wandb/log_dataset.py | 1 - utils/loggers/wandb/sweep.py | 3 +- utils/loggers/wandb/sweep.yaml | 4 +- utils/loggers/wandb/wandb_utils.py | 3 +- val.py | 1 - 38 files changed, 683 insertions(+), 640 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md index b7fc7c5a8838..62a02a3a6948 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.md +++ b/.github/ISSUE_TEMPLATE/bug-report.md @@ -7,21 +7,24 @@ assignees: '' --- -Before submitting a bug report, please be aware that your issue **must be reproducible** with all of the following, otherwise it is non-actionable, and we can not help you: - - **Current repo**: run `git fetch && git status -uno` to check and `git pull` to update repo - - **Common dataset**: coco.yaml or coco128.yaml - - **Common environment**: Colab, Google Cloud, or Docker image. See https://github.com/ultralytics/yolov5#environments - -If this is a custom dataset/training question you **must include** your `train*.jpg`, `val*.jpg` and `results.png` figures, or we can not help you. You can generate these with `utils.plot_results()`. +Before submitting a bug report, please be aware that your issue **must be reproducible** with all of the following, +otherwise it is non-actionable, and we can not help you: +- **Current repo**: run `git fetch && git status -uno` to check and `git pull` to update repo +- **Common dataset**: coco.yaml or coco128.yaml +- **Common environment**: Colab, Google Cloud, or Docker image. See https://github.com/ultralytics/yolov5#environments + +If this is a custom dataset/training question you **must include** your `train*.jpg`, `val*.jpg` and `results.png` +figures, or we can not help you. You can generate these with `utils.plot_results()`. 
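For example, a minimal sketch of regenerating these figures from a finished run (assuming the repo-root working directory, an example run folder `runs/train/exp`, and this era's `utils/plots.py` module; exact keyword arguments may differ between versions):

```python
# Minimal sketch: regenerate results.png for a finished training run.
# 'runs/train/exp' is an assumed example run directory; adjust to your own run.
from utils.plots import plot_results  # assumes you run this from the yolov5 repo root

plot_results(save_dir='runs/train/exp')  # reads the run's results file and writes results.png
```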
## 🐛 Bug -A clear and concise description of what the bug is. +A clear and concise description of what the bug is. ## To Reproduce (REQUIRED) Input: + ``` import torch @@ -30,6 +33,7 @@ c = a / 0 ``` Output: + ``` Traceback (most recent call last): File "/Users/glennjocher/opt/anaconda3/envs/env1/lib/python3.7/site-packages/IPython/core/interactiveshell.py", line 3331, in run_code @@ -39,17 +43,17 @@ Traceback (most recent call last): RuntimeError: ZeroDivisionError ``` - ## Expected behavior -A clear and concise description of what you expected to happen. +A clear and concise description of what you expected to happen. ## Environment -If applicable, add screenshots to help explain your problem. - - OS: [e.g. Ubuntu] - - GPU [e.g. 2080 Ti] +If applicable, add screenshots to help explain your problem. +- OS: [e.g. Ubuntu] +- GPU [e.g. 2080 Ti] ## Additional context + Add any other context about the problem here. diff --git a/.github/ISSUE_TEMPLATE/feature-request.md b/.github/ISSUE_TEMPLATE/feature-request.md index 02320771b5f5..1fdf99045488 100644 --- a/.github/ISSUE_TEMPLATE/feature-request.md +++ b/.github/ISSUE_TEMPLATE/feature-request.md @@ -13,7 +13,8 @@ assignees: '' ## Motivation - + ## Pitch diff --git a/.github/ISSUE_TEMPLATE/question.md b/.github/ISSUE_TEMPLATE/question.md index 2c22aea70a7b..2892cfe262fb 100644 --- a/.github/ISSUE_TEMPLATE/question.md +++ b/.github/ISSUE_TEMPLATE/question.md @@ -9,5 +9,4 @@ assignees: '' ## ❔Question - ## Additional context diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 7c0ba3ae9f18..38601775caeb 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -8,32 +8,44 @@ We love your input! We want to make contributing to YOLOv5 as easy and transpare - Proposing a new feature - Becoming a maintainer -YOLOv5 works so well due to our combined community effort, and for every small improvement you contribute you will be helping push the frontiers of what's possible in AI 😃! - +YOLOv5 works so well due to our combined community effort, and for every small improvement you contribute you will be +helping push the frontiers of what's possible in AI 😃! ## Submitting a Pull Request (PR) 🛠️ + Submitting a PR is easy! This example shows how to submit a PR for updating `requirements.txt` in 4 steps: ### 1. Select File to Update + Select `requirements.txt` to update by clicking on it in GitHub.

PR_step1

### 2. Click 'Edit this file' + Button is in top-right corner.

PR_step2

### 3. Make Changes + Change `matplotlib` version from `3.2.2` to `3.3`.

PR_step3

### 4. Preview Changes and Submit PR -Click on the **Preview changes** tab to verify your updates. At the bottom of the screen select 'Create a **new branch** for this commit', assign your branch a descriptive name such as `fix/matplotlib_version` and click the green **Propose changes** button. All done, your PR is now submitted to YOLOv5 for review and approval 😃! + +Click on the **Preview changes** tab to verify your updates. At the bottom of the screen select 'Create a **new branch** +for this commit', assign your branch a descriptive name such as `fix/matplotlib_version` and click the green **Propose +changes** button. All done, your PR is now submitted to YOLOv5 for review and approval 😃!

PR_step4

### PR recommendations

To allow your work to be integrated as seamlessly as possible, we advise you to:
-- ✅ Verify your PR is **up-to-date with origin/master.** If your PR is behind origin/master an automatic [GitHub actions](https://github.com/ultralytics/yolov5/blob/master/.github/workflows/rebase.yml) rebase may be attempted by including the /rebase command in a comment body, or by running the following code, replacing 'feature' with the name of your local branch:
+
+- ✅ Verify your PR is **up-to-date with origin/master.** If your PR is behind origin/master an
+  automatic [GitHub actions](https://github.com/ultralytics/yolov5/blob/master/.github/workflows/rebase.yml) rebase may
+  be attempted by including the /rebase command in a comment body, or by running the following code, replacing 'feature'
+  with the name of your local branch:
+
```bash
git remote add upstream https://github.com/ultralytics/yolov5.git
git fetch upstream
@@ -41,30 +53,42 @@ git checkout feature # <----- replace 'feature' with local branch name
git merge upstream/master
git push -u origin -f
```
-- ✅ Verify all Continuous Integration (CI) **checks are passing**.
-- ✅ Reduce changes to the absolute **minimum** required for your bug fix or feature addition. _"It is not daily increase but daily decrease, hack away the unessential. The closer to the source, the less wastage there is."_ -Bruce Lee
+- ✅ Verify all Continuous Integration (CI) **checks are passing**.
+- ✅ Reduce changes to the absolute **minimum** required for your bug fix or feature addition. _"It is not daily increase
+  but daily decrease, hack away the unessential. The closer to the source, the less wastage there is."_ -Bruce Lee

## Submitting a Bug Report 🐛

If you spot a problem with YOLOv5 please submit a Bug Report!

-For us to start investigating a possible problem we need to be able to reproduce it ourselves first. We've created a few short guidelines below to help users provide what we need in order to get started.
+For us to start investigating a possible problem we need to be able to reproduce it ourselves first. We've created a few
+short guidelines below to help users provide what we need in order to get started.

-When asking a question, people will be better able to provide help if you provide **code** that they can easily understand and use to **reproduce** the problem. This is referred to by community members as creating a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example). Your code that reproduces the problem should be:
+When asking a question, people will be better able to provide help if you provide **code** that they can easily
+understand and use to **reproduce** the problem. This is referred to by community members as creating
+a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example). Your code that reproduces
+the problem should be:

* ✅ **Minimal** – Use as little code as possible that still produces the same problem
* ✅ **Complete** – Provide **all** parts someone else needs to reproduce your problem in the question itself
* ✅ **Reproducible** – Test the code you're about to provide to make sure it reproduces the problem

-In addition to the above requirements, for [Ultralytics](https://ultralytics.com/) to provide assistance your code should be:
-
-* ✅ **Current** – Verify that your code is up-to-date with current GitHub [master](https://github.com/ultralytics/yolov5/tree/master), and if necessary `git pull` or `git clone` a new copy to ensure your problem has not already been resolved by previous commits.
-* ✅ **Unmodified** – Your problem must be reproducible without any modifications to the codebase in this repository. [Ultralytics](https://ultralytics.com/) does not provide support for custom code ⚠️.
+In addition to the above requirements, for [Ultralytics](https://ultralytics.com/) to provide assistance your code
+should be:

-If you believe your problem meets all of the above criteria, please close this issue and raise a new one using the 🐛 **Bug Report** [template](https://github.com/ultralytics/yolov5/issues/new/choose) and providing a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example) to help us better understand and diagnose your problem.
+* ✅ **Current** – Verify that your code is up-to-date with current
+  GitHub [master](https://github.com/ultralytics/yolov5/tree/master), and if necessary `git pull` or `git clone` a new
+  copy to ensure your problem has not already been resolved by previous commits.
+* ✅ **Unmodified** – Your problem must be reproducible without any modifications to the codebase in this
+  repository. [Ultralytics](https://ultralytics.com/) does not provide support for custom code ⚠️.

+If you believe your problem meets all of the above criteria, please close this issue and raise a new one using the
+🐛 **Bug Report** [template](https://github.com/ultralytics/yolov5/issues/new/choose) and providing
+a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example) to help us better
+understand and diagnose your problem.

## License

-By contributing, you agree that your contributions will be licensed under the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/)
+By contributing, you agree that your contributions will be licensed under
+the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/)
diff --git a/README.md b/README.md
index b4aacd78b0ca..df4e9add519d 100644
--- a/README.md
+++ b/README.md
@@ -52,31 +52,33 @@ YOLOv5 🚀 is a family of object detection architectures and models pretrained

- ##
Documentation
See the [YOLOv5 Docs](https://docs.ultralytics.com) for full documentation on training, testing and deployment. - ##
Quick Start Examples
-
Install -[**Python>=3.6.0**](https://www.python.org/) is required with all [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) installed including [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/): +[**Python>=3.6.0**](https://www.python.org/) is required with all +[requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) installed including +[**PyTorch>=1.7**](https://pytorch.org/get-started/locally/): + ```bash $ git clone https://github.com/ultralytics/yolov5 $ cd yolov5 $ pip install -r requirements.txt ``` +
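After installing, a quick sanity check (a minimal sketch assuming nothing beyond the `pip install` above having succeeded) confirms the PyTorch build and whether CUDA is visible:

```python
# Verify the PyTorch install and CUDA visibility before running YOLOv5.
import torch

print(torch.__version__)          # e.g. 1.9.0
print(torch.cuda.is_available())  # True if a CUDA device is visible
```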
Inference -Inference with YOLOv5 and [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36). Models automatically download from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). +Inference with YOLOv5 and [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36). Models automatically download +from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). ```python import torch @@ -85,7 +87,7 @@ import torch model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # or yolov5m, yolov5l, yolov5x, custom # Images -img = 'https://ultralytics.com/images/zidane.jpg' # or PosixPath, PIL, OpenCV, numpy, list +img = 'https://ultralytics.com/images/zidane.jpg' # or file, Path, PIL, OpenCV, numpy, list # Inference results = model(img) @@ -101,7 +103,9 @@ results.print() # or .show(), .save(), .crop(), .pandas(), etc.
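Predictions can also be consumed programmatically. The sketch below assumes the `.pandas()` accessor named in the comment above and its usual columns (xmin, ymin, xmax, ymax, confidence, class, name), which should be verified against your installed version:

```python
# Load a pretrained model via PyTorch Hub and inspect detections as a DataFrame.
import torch

model = torch.hub.load('ultralytics/yolov5', 'yolov5s')
results = model('https://ultralytics.com/images/zidane.jpg')

df = results.pandas().xyxy[0]  # one DataFrame per input image
print(df.head())
```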
Inference with detect.py -`detect.py` runs inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases) and saving results to `runs/detect`. +`detect.py` runs inference on a variety of sources, downloading models automatically from +the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases) and saving results to `runs/detect`. + ```bash $ python detect.py --source 0 # webcam file.jpg # image @@ -117,13 +121,18 @@ $ python detect.py --source 0 # webcam
Training -Run commands below to reproduce results on [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) dataset (dataset auto-downloads on first use). Training times for YOLOv5s/m/l/x are 2/4/6/8 days on a single V100 (multi-GPU times faster). Use the largest `--batch-size` your GPU allows (batch sizes shown for 16 GB devices). +Run commands below to reproduce results +on [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) dataset (dataset auto-downloads on +first use). Training times for YOLOv5s/m/l/x are 2/4/6/8 days on a single V100 (multi-GPU times faster). Use the +largest `--batch-size` your GPU allows (batch sizes shown for 16 GB devices). + ```bash $ python train.py --data coco.yaml --cfg yolov5s.yaml --weights '' --batch-size 64 yolov5m 40 yolov5l 24 yolov5x 16 ``` +
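As a rough illustration of the batch-size guidance above (the linear scaling below is a rule of thumb for illustration only, not an official formula):

```python
# Scale the 16 GB reference batch sizes above to a hypothetical smaller GPU.
ref_batch_16gb = {'yolov5s': 64, 'yolov5m': 40, 'yolov5l': 24, 'yolov5x': 16}
gpu_memory_gb = 8  # hypothetical 8 GB card

scaled = {m: max(1, int(b * gpu_memory_gb / 16)) for m, b in ref_batch_16gb.items()}
print(scaled)  # {'yolov5s': 32, 'yolov5m': 20, 'yolov5l': 12, 'yolov5x': 8}
```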
@@ -132,7 +141,8 @@ $ python train.py --data coco.yaml --cfg yolov5s.yaml --weights '' --batch-size Tutorials * [Train Custom Data](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data)  🚀 RECOMMENDED -* [Tips for Best Training Results](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)  ☘️ RECOMMENDED +* [Tips for Best Training Results](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)  ☘️ + RECOMMENDED * [Weights & Biases Logging](https://github.com/ultralytics/yolov5/issues/1289)  🌟 NEW * [Supervisely Ecosystem](https://github.com/ultralytics/yolov5/issues/2518)  🌟 NEW * [Multi-GPU Training](https://github.com/ultralytics/yolov5/issues/475) @@ -147,10 +157,11 @@ $ python train.py --data coco.yaml --cfg yolov5s.yaml --weights '' --batch-size
- ##
Environments and Integrations
-Get started in seconds with our verified environments and integrations, including [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme) for automatic YOLOv5 experiment logging. Click each icon below for details. +Get started in seconds with our verified environments and integrations, +including [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme) for automatic YOLOv5 experiment +logging. Click each icon below for details. - ##
Compete and Win
-We are super excited about our first-ever Ultralytics YOLOv5 🚀 EXPORT Competition with **$10,000** in cash prizes! +We are super excited about our first-ever Ultralytics YOLOv5 🚀 EXPORT Competition with **$10,000** in cash prizes!

- ##
Why YOLOv5

YOLOv5-P5 640 Figure (click to expand) - +

Figure Notes (click to expand) - - * GPU Speed measures end-to-end time per image averaged over 5000 COCO val2017 images using a V100 GPU with batch size 32, and includes image preprocessing, PyTorch FP16 inference, postprocessing and NMS. - * EfficientDet data from [google/automl](https://github.com/google/automl) at batch size 8. - * **Reproduce** by `python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt` -
+* GPU Speed measures end-to-end time per image averaged over 5000 COCO val2017 images using a V100 GPU with batch size + 32, and includes image preprocessing, PyTorch FP16 inference, postprocessing and NMS. +* EfficientDet data from [google/automl](https://github.com/google/automl) at batch size 8. +* **Reproduce** by + `python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt` + +
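To relate the per-image GPU latency above to throughput, a short worked example (the latency value is made up for illustration, not a measured result):

```python
# Convert per-image GPU latency in milliseconds to images per second.
latency_ms_per_img = 2.0  # hypothetical value read off the figure
print(f'{1000 / latency_ms_per_img:.0f} img/s')  # 500 img/s
```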
### Pretrained Checkpoints @@ -221,24 +232,30 @@ We are super excited about our first-ever Ultralytics YOLOv5 🚀 EXPORT Competi
Table Notes (click to expand) - - * APtest denotes COCO [test-dev2017](http://cocodataset.org/#upload) server results, all other AP results denote val2017 accuracy. - * AP values are for single-model single-scale unless otherwise noted. **Reproduce mAP** by `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` - * SpeedGPU averaged over 5000 COCO val2017 images using a GCP [n1-standard-16](https://cloud.google.com/compute/docs/machine-types#n1_standard_machine_types) V100 instance, and includes FP16 inference, postprocessing and NMS. **Reproduce speed** by `python val.py --data coco.yaml --img 640 --conf 0.25 --iou 0.45 --half` - * All checkpoints are trained to 300 epochs with default settings and hyperparameters (no autoaugmentation). - * Test Time Augmentation ([TTA](https://github.com/ultralytics/yolov5/issues/303)) includes reflection and scale augmentation. **Reproduce TTA** by `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment` -
+* APtest denotes COCO [test-dev2017](http://cocodataset.org/#upload) server results, all other AP results + denote val2017 accuracy. +* AP values are for single-model single-scale unless otherwise noted. **Reproduce mAP** + by `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` +* SpeedGPU averaged over 5000 COCO val2017 images using a + GCP [n1-standard-16](https://cloud.google.com/compute/docs/machine-types#n1_standard_machine_types) V100 instance, and + includes FP16 inference, postprocessing and NMS. **Reproduce speed** + by `python val.py --data coco.yaml --img 640 --conf 0.25 --iou 0.45 --half` +* All checkpoints are trained to 300 epochs with default settings and hyperparameters (no autoaugmentation). +* Test Time Augmentation ([TTA](https://github.com/ultralytics/yolov5/issues/303)) includes reflection and scale + augmentation. **Reproduce TTA** by `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment` -##
Contribute
+
-We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible. Please see our [Contributing Guide](CONTRIBUTING.md) to get started. +##
Contribute
+We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible. Please see +our [Contributing Guide](CONTRIBUTING.md) to get started. ##
Contact
-For issues running YOLOv5 please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues). For business or professional support requests please visit -[https://ultralytics.com/contact](https://ultralytics.com/contact). +For issues running YOLOv5 please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues). For business or +professional support requests please visit [https://ultralytics.com/contact](https://ultralytics.com/contact).
diff --git a/data/Argoverse.yaml b/data/Argoverse.yaml index c42624c5783f..3bf91ce7d504 100644 --- a/data/Argoverse.yaml +++ b/data/Argoverse.yaml @@ -15,7 +15,7 @@ test: Argoverse-1.1/images/test/ # test images (optional) https://eval.ai/web/c # Classes nc: 8 # number of classes -names: [ 'person', 'bicycle', 'car', 'motorcycle', 'bus', 'truck', 'traffic_light', 'stop_sign' ] # class names +names: ['person', 'bicycle', 'car', 'motorcycle', 'bus', 'truck', 'traffic_light', 'stop_sign'] # class names # Download script/URL (optional) --------------------------------------------------------------------------------------- diff --git a/data/GlobalWheat2020.yaml b/data/GlobalWheat2020.yaml index 842456047953..de9c7837cf57 100644 --- a/data/GlobalWheat2020.yaml +++ b/data/GlobalWheat2020.yaml @@ -27,7 +27,7 @@ test: # test images (optional) 1276 images # Classes nc: 1 # number of classes -names: [ 'wheat_head' ] # class names +names: ['wheat_head'] # class names # Download script/URL (optional) --------------------------------------------------------------------------------------- diff --git a/data/Objects365.yaml b/data/Objects365.yaml index 52577581d7bb..457b9fd9bf69 100644 --- a/data/Objects365.yaml +++ b/data/Objects365.yaml @@ -15,47 +15,47 @@ test: # test images (optional) # Classes nc: 365 # number of classes -names: [ 'Person', 'Sneakers', 'Chair', 'Other Shoes', 'Hat', 'Car', 'Lamp', 'Glasses', 'Bottle', 'Desk', 'Cup', - 'Street Lights', 'Cabinet/shelf', 'Handbag/Satchel', 'Bracelet', 'Plate', 'Picture/Frame', 'Helmet', 'Book', - 'Gloves', 'Storage box', 'Boat', 'Leather Shoes', 'Flower', 'Bench', 'Potted Plant', 'Bowl/Basin', 'Flag', - 'Pillow', 'Boots', 'Vase', 'Microphone', 'Necklace', 'Ring', 'SUV', 'Wine Glass', 'Belt', 'Monitor/TV', - 'Backpack', 'Umbrella', 'Traffic Light', 'Speaker', 'Watch', 'Tie', 'Trash bin Can', 'Slippers', 'Bicycle', - 'Stool', 'Barrel/bucket', 'Van', 'Couch', 'Sandals', 'Basket', 'Drum', 'Pen/Pencil', 'Bus', 'Wild Bird', - 'High Heels', 'Motorcycle', 'Guitar', 'Carpet', 'Cell Phone', 'Bread', 'Camera', 'Canned', 'Truck', - 'Traffic cone', 'Cymbal', 'Lifesaver', 'Towel', 'Stuffed Toy', 'Candle', 'Sailboat', 'Laptop', 'Awning', - 'Bed', 'Faucet', 'Tent', 'Horse', 'Mirror', 'Power outlet', 'Sink', 'Apple', 'Air Conditioner', 'Knife', - 'Hockey Stick', 'Paddle', 'Pickup Truck', 'Fork', 'Traffic Sign', 'Balloon', 'Tripod', 'Dog', 'Spoon', 'Clock', - 'Pot', 'Cow', 'Cake', 'Dinning Table', 'Sheep', 'Hanger', 'Blackboard/Whiteboard', 'Napkin', 'Other Fish', - 'Orange/Tangerine', 'Toiletry', 'Keyboard', 'Tomato', 'Lantern', 'Machinery Vehicle', 'Fan', - 'Green Vegetables', 'Banana', 'Baseball Glove', 'Airplane', 'Mouse', 'Train', 'Pumpkin', 'Soccer', 'Skiboard', - 'Luggage', 'Nightstand', 'Tea pot', 'Telephone', 'Trolley', 'Head Phone', 'Sports Car', 'Stop Sign', - 'Dessert', 'Scooter', 'Stroller', 'Crane', 'Remote', 'Refrigerator', 'Oven', 'Lemon', 'Duck', 'Baseball Bat', - 'Surveillance Camera', 'Cat', 'Jug', 'Broccoli', 'Piano', 'Pizza', 'Elephant', 'Skateboard', 'Surfboard', - 'Gun', 'Skating and Skiing shoes', 'Gas stove', 'Donut', 'Bow Tie', 'Carrot', 'Toilet', 'Kite', 'Strawberry', - 'Other Balls', 'Shovel', 'Pepper', 'Computer Box', 'Toilet Paper', 'Cleaning Products', 'Chopsticks', - 'Microwave', 'Pigeon', 'Baseball', 'Cutting/chopping Board', 'Coffee Table', 'Side Table', 'Scissors', - 'Marker', 'Pie', 'Ladder', 'Snowboard', 'Cookies', 'Radiator', 'Fire Hydrant', 'Basketball', 'Zebra', 'Grape', - 'Giraffe', 'Potato', 'Sausage', 'Tricycle', 'Violin', 'Egg', 
'Fire Extinguisher', 'Candy', 'Fire Truck', - 'Billiards', 'Converter', 'Bathtub', 'Wheelchair', 'Golf Club', 'Briefcase', 'Cucumber', 'Cigar/Cigarette', - 'Paint Brush', 'Pear', 'Heavy Truck', 'Hamburger', 'Extractor', 'Extension Cord', 'Tong', 'Tennis Racket', - 'Folder', 'American Football', 'earphone', 'Mask', 'Kettle', 'Tennis', 'Ship', 'Swing', 'Coffee Machine', - 'Slide', 'Carriage', 'Onion', 'Green beans', 'Projector', 'Frisbee', 'Washing Machine/Drying Machine', - 'Chicken', 'Printer', 'Watermelon', 'Saxophone', 'Tissue', 'Toothbrush', 'Ice cream', 'Hot-air balloon', - 'Cello', 'French Fries', 'Scale', 'Trophy', 'Cabbage', 'Hot dog', 'Blender', 'Peach', 'Rice', 'Wallet/Purse', - 'Volleyball', 'Deer', 'Goose', 'Tape', 'Tablet', 'Cosmetics', 'Trumpet', 'Pineapple', 'Golf Ball', - 'Ambulance', 'Parking meter', 'Mango', 'Key', 'Hurdle', 'Fishing Rod', 'Medal', 'Flute', 'Brush', 'Penguin', - 'Megaphone', 'Corn', 'Lettuce', 'Garlic', 'Swan', 'Helicopter', 'Green Onion', 'Sandwich', 'Nuts', - 'Speed Limit Sign', 'Induction Cooker', 'Broom', 'Trombone', 'Plum', 'Rickshaw', 'Goldfish', 'Kiwi fruit', - 'Router/modem', 'Poker Card', 'Toaster', 'Shrimp', 'Sushi', 'Cheese', 'Notepaper', 'Cherry', 'Pliers', 'CD', - 'Pasta', 'Hammer', 'Cue', 'Avocado', 'Hamimelon', 'Flask', 'Mushroom', 'Screwdriver', 'Soap', 'Recorder', - 'Bear', 'Eggplant', 'Board Eraser', 'Coconut', 'Tape Measure/Ruler', 'Pig', 'Showerhead', 'Globe', 'Chips', - 'Steak', 'Crosswalk Sign', 'Stapler', 'Camel', 'Formula 1', 'Pomegranate', 'Dishwasher', 'Crab', - 'Hoverboard', 'Meat ball', 'Rice Cooker', 'Tuba', 'Calculator', 'Papaya', 'Antelope', 'Parrot', 'Seal', - 'Butterfly', 'Dumbbell', 'Donkey', 'Lion', 'Urinal', 'Dolphin', 'Electric Drill', 'Hair Dryer', 'Egg tart', - 'Jellyfish', 'Treadmill', 'Lighter', 'Grapefruit', 'Game board', 'Mop', 'Radish', 'Baozi', 'Target', 'French', - 'Spring Rolls', 'Monkey', 'Rabbit', 'Pencil Case', 'Yak', 'Red Cabbage', 'Binoculars', 'Asparagus', 'Barbell', - 'Scallop', 'Noddles', 'Comb', 'Dumpling', 'Oyster', 'Table Tennis paddle', 'Cosmetics Brush/Eyeliner Pencil', - 'Chainsaw', 'Eraser', 'Lobster', 'Durian', 'Okra', 'Lipstick', 'Cosmetics Mirror', 'Curling', 'Table Tennis' ] +names: ['Person', 'Sneakers', 'Chair', 'Other Shoes', 'Hat', 'Car', 'Lamp', 'Glasses', 'Bottle', 'Desk', 'Cup', + 'Street Lights', 'Cabinet/shelf', 'Handbag/Satchel', 'Bracelet', 'Plate', 'Picture/Frame', 'Helmet', 'Book', + 'Gloves', 'Storage box', 'Boat', 'Leather Shoes', 'Flower', 'Bench', 'Potted Plant', 'Bowl/Basin', 'Flag', + 'Pillow', 'Boots', 'Vase', 'Microphone', 'Necklace', 'Ring', 'SUV', 'Wine Glass', 'Belt', 'Monitor/TV', + 'Backpack', 'Umbrella', 'Traffic Light', 'Speaker', 'Watch', 'Tie', 'Trash bin Can', 'Slippers', 'Bicycle', + 'Stool', 'Barrel/bucket', 'Van', 'Couch', 'Sandals', 'Basket', 'Drum', 'Pen/Pencil', 'Bus', 'Wild Bird', + 'High Heels', 'Motorcycle', 'Guitar', 'Carpet', 'Cell Phone', 'Bread', 'Camera', 'Canned', 'Truck', + 'Traffic cone', 'Cymbal', 'Lifesaver', 'Towel', 'Stuffed Toy', 'Candle', 'Sailboat', 'Laptop', 'Awning', + 'Bed', 'Faucet', 'Tent', 'Horse', 'Mirror', 'Power outlet', 'Sink', 'Apple', 'Air Conditioner', 'Knife', + 'Hockey Stick', 'Paddle', 'Pickup Truck', 'Fork', 'Traffic Sign', 'Balloon', 'Tripod', 'Dog', 'Spoon', 'Clock', + 'Pot', 'Cow', 'Cake', 'Dinning Table', 'Sheep', 'Hanger', 'Blackboard/Whiteboard', 'Napkin', 'Other Fish', + 'Orange/Tangerine', 'Toiletry', 'Keyboard', 'Tomato', 'Lantern', 'Machinery Vehicle', 'Fan', + 'Green Vegetables', 'Banana', 'Baseball Glove', 
'Airplane', 'Mouse', 'Train', 'Pumpkin', 'Soccer', 'Skiboard', + 'Luggage', 'Nightstand', 'Tea pot', 'Telephone', 'Trolley', 'Head Phone', 'Sports Car', 'Stop Sign', + 'Dessert', 'Scooter', 'Stroller', 'Crane', 'Remote', 'Refrigerator', 'Oven', 'Lemon', 'Duck', 'Baseball Bat', + 'Surveillance Camera', 'Cat', 'Jug', 'Broccoli', 'Piano', 'Pizza', 'Elephant', 'Skateboard', 'Surfboard', + 'Gun', 'Skating and Skiing shoes', 'Gas stove', 'Donut', 'Bow Tie', 'Carrot', 'Toilet', 'Kite', 'Strawberry', + 'Other Balls', 'Shovel', 'Pepper', 'Computer Box', 'Toilet Paper', 'Cleaning Products', 'Chopsticks', + 'Microwave', 'Pigeon', 'Baseball', 'Cutting/chopping Board', 'Coffee Table', 'Side Table', 'Scissors', + 'Marker', 'Pie', 'Ladder', 'Snowboard', 'Cookies', 'Radiator', 'Fire Hydrant', 'Basketball', 'Zebra', 'Grape', + 'Giraffe', 'Potato', 'Sausage', 'Tricycle', 'Violin', 'Egg', 'Fire Extinguisher', 'Candy', 'Fire Truck', + 'Billiards', 'Converter', 'Bathtub', 'Wheelchair', 'Golf Club', 'Briefcase', 'Cucumber', 'Cigar/Cigarette', + 'Paint Brush', 'Pear', 'Heavy Truck', 'Hamburger', 'Extractor', 'Extension Cord', 'Tong', 'Tennis Racket', + 'Folder', 'American Football', 'earphone', 'Mask', 'Kettle', 'Tennis', 'Ship', 'Swing', 'Coffee Machine', + 'Slide', 'Carriage', 'Onion', 'Green beans', 'Projector', 'Frisbee', 'Washing Machine/Drying Machine', + 'Chicken', 'Printer', 'Watermelon', 'Saxophone', 'Tissue', 'Toothbrush', 'Ice cream', 'Hot-air balloon', + 'Cello', 'French Fries', 'Scale', 'Trophy', 'Cabbage', 'Hot dog', 'Blender', 'Peach', 'Rice', 'Wallet/Purse', + 'Volleyball', 'Deer', 'Goose', 'Tape', 'Tablet', 'Cosmetics', 'Trumpet', 'Pineapple', 'Golf Ball', + 'Ambulance', 'Parking meter', 'Mango', 'Key', 'Hurdle', 'Fishing Rod', 'Medal', 'Flute', 'Brush', 'Penguin', + 'Megaphone', 'Corn', 'Lettuce', 'Garlic', 'Swan', 'Helicopter', 'Green Onion', 'Sandwich', 'Nuts', + 'Speed Limit Sign', 'Induction Cooker', 'Broom', 'Trombone', 'Plum', 'Rickshaw', 'Goldfish', 'Kiwi fruit', + 'Router/modem', 'Poker Card', 'Toaster', 'Shrimp', 'Sushi', 'Cheese', 'Notepaper', 'Cherry', 'Pliers', 'CD', + 'Pasta', 'Hammer', 'Cue', 'Avocado', 'Hamimelon', 'Flask', 'Mushroom', 'Screwdriver', 'Soap', 'Recorder', + 'Bear', 'Eggplant', 'Board Eraser', 'Coconut', 'Tape Measure/Ruler', 'Pig', 'Showerhead', 'Globe', 'Chips', + 'Steak', 'Crosswalk Sign', 'Stapler', 'Camel', 'Formula 1', 'Pomegranate', 'Dishwasher', 'Crab', + 'Hoverboard', 'Meat ball', 'Rice Cooker', 'Tuba', 'Calculator', 'Papaya', 'Antelope', 'Parrot', 'Seal', + 'Butterfly', 'Dumbbell', 'Donkey', 'Lion', 'Urinal', 'Dolphin', 'Electric Drill', 'Hair Dryer', 'Egg tart', + 'Jellyfish', 'Treadmill', 'Lighter', 'Grapefruit', 'Game board', 'Mop', 'Radish', 'Baozi', 'Target', 'French', + 'Spring Rolls', 'Monkey', 'Rabbit', 'Pencil Case', 'Yak', 'Red Cabbage', 'Binoculars', 'Asparagus', 'Barbell', + 'Scallop', 'Noddles', 'Comb', 'Dumpling', 'Oyster', 'Table Tennis paddle', 'Cosmetics Brush/Eyeliner Pencil', + 'Chainsaw', 'Eraser', 'Lobster', 'Durian', 'Okra', 'Lipstick', 'Cosmetics Mirror', 'Curling', 'Table Tennis'] # Download script/URL (optional) --------------------------------------------------------------------------------------- diff --git a/data/SKU-110K.yaml b/data/SKU-110K.yaml index 01bf36c0d870..c85fa81d2e03 100644 --- a/data/SKU-110K.yaml +++ b/data/SKU-110K.yaml @@ -15,7 +15,7 @@ test: test.txt # test images (optional) 2936 images # Classes nc: 1 # number of classes -names: [ 'object' ] # class names +names: ['object'] # class names # Download script/URL 
(optional) --------------------------------------------------------------------------------------- diff --git a/data/VOC.yaml b/data/VOC.yaml index 55f39d852d31..e59fb6afd2fd 100644 --- a/data/VOC.yaml +++ b/data/VOC.yaml @@ -21,8 +21,8 @@ test: # test images (optional) # Classes nc: 20 # number of classes -names: [ 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', - 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor' ] # class names +names: ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', + 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'] # class names # Download script/URL (optional) --------------------------------------------------------------------------------------- diff --git a/data/VisDrone.yaml b/data/VisDrone.yaml index 12e0e7c4a009..fe6cb9199ce1 100644 --- a/data/VisDrone.yaml +++ b/data/VisDrone.yaml @@ -15,7 +15,7 @@ test: VisDrone2019-DET-test-dev/images # test images (optional) 1610 images # Classes nc: 10 # number of classes -names: [ 'pedestrian', 'people', 'bicycle', 'car', 'van', 'truck', 'tricycle', 'awning-tricycle', 'bus', 'motor' ] +names: ['pedestrian', 'people', 'bicycle', 'car', 'van', 'truck', 'tricycle', 'awning-tricycle', 'bus', 'motor'] # Download script/URL (optional) --------------------------------------------------------------------------------------- diff --git a/data/coco.yaml b/data/coco.yaml index cab1a0171963..acf8e84f3e21 100644 --- a/data/coco.yaml +++ b/data/coco.yaml @@ -15,15 +15,15 @@ test: test-dev2017.txt # 20288 of 40670 images, submit to https://competitions. # Classes nc: 80 # number of classes -names: [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', - 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', - 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', - 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', - 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', - 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', - 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', - 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', - 'hair drier', 'toothbrush' ] # class names +names: ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', + 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', + 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', + 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', + 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', + 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', + 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', + 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', + 'hair drier', 'toothbrush'] # 
class names # Download script/URL (optional) diff --git a/data/coco128.yaml b/data/coco128.yaml index 6902eb9397a1..eda39dcdaa8d 100644 --- a/data/coco128.yaml +++ b/data/coco128.yaml @@ -15,15 +15,15 @@ test: # test images (optional) # Classes nc: 80 # number of classes -names: [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', - 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', - 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', - 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', - 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', - 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', - 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', - 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', - 'hair drier', 'toothbrush' ] # class names +names: ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', + 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', + 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', + 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', + 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', + 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', + 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', + 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', + 'hair drier', 'toothbrush'] # class names # Download script/URL (optional) diff --git a/data/scripts/get_coco.sh b/data/scripts/get_coco.sh index 1f484beee34c..f6c075689709 100755 --- a/data/scripts/get_coco.sh +++ b/data/scripts/get_coco.sh @@ -12,7 +12,7 @@ d='../datasets' # unzip directory url=https://github.com/ultralytics/yolov5/releases/download/v1.0/ f='coco2017labels.zip' # or 'coco2017labels-segments.zip', 68 MB echo 'Downloading' $url$f ' ...' -curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & # download, unzip, remove in background +curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & # Download/unzip images d='../datasets/coco/images' # unzip directory @@ -22,6 +22,6 @@ f2='val2017.zip' # 1G, 5k images f3='test2017.zip' # 7G, 41k images (optional) for f in $f1 $f2; do echo 'Downloading' $url$f '...' - curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & # download, unzip, remove in background + curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & done wait # finish background tasks diff --git a/data/scripts/get_coco128.sh b/data/scripts/get_coco128.sh index 3d705890b56d..6eb47bfe5595 100644 --- a/data/scripts/get_coco128.sh +++ b/data/scripts/get_coco128.sh @@ -12,6 +12,6 @@ d='../datasets' # unzip directory url=https://github.com/ultralytics/yolov5/releases/download/v1.0/ f='coco128.zip' # or 'coco2017labels-segments.zip', 68 MB echo 'Downloading' $url$f ' ...' 
-curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & # download, unzip, remove in background +curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & wait # finish background tasks diff --git a/data/xView.yaml b/data/xView.yaml index f4f27bfbc8ec..e191188da0f0 100644 --- a/data/xView.yaml +++ b/data/xView.yaml @@ -15,15 +15,15 @@ val: images/autosplit_val.txt # train images (relative to 'path') 10% of 847 tr # Classes nc: 60 # number of classes -names: [ 'Fixed-wing Aircraft', 'Small Aircraft', 'Cargo Plane', 'Helicopter', 'Passenger Vehicle', 'Small Car', 'Bus', - 'Pickup Truck', 'Utility Truck', 'Truck', 'Cargo Truck', 'Truck w/Box', 'Truck Tractor', 'Trailer', - 'Truck w/Flatbed', 'Truck w/Liquid', 'Crane Truck', 'Railway Vehicle', 'Passenger Car', 'Cargo Car', - 'Flat Car', 'Tank car', 'Locomotive', 'Maritime Vessel', 'Motorboat', 'Sailboat', 'Tugboat', 'Barge', - 'Fishing Vessel', 'Ferry', 'Yacht', 'Container Ship', 'Oil Tanker', 'Engineering Vehicle', 'Tower crane', - 'Container Crane', 'Reach Stacker', 'Straddle Carrier', 'Mobile Crane', 'Dump Truck', 'Haul Truck', - 'Scraper/Tractor', 'Front loader/Bulldozer', 'Excavator', 'Cement Mixer', 'Ground Grader', 'Hut/Tent', 'Shed', - 'Building', 'Aircraft Hangar', 'Damaged Building', 'Facility', 'Construction Site', 'Vehicle Lot', 'Helipad', - 'Storage Tank', 'Shipping container lot', 'Shipping Container', 'Pylon', 'Tower' ] # class names +names: ['Fixed-wing Aircraft', 'Small Aircraft', 'Cargo Plane', 'Helicopter', 'Passenger Vehicle', 'Small Car', 'Bus', + 'Pickup Truck', 'Utility Truck', 'Truck', 'Cargo Truck', 'Truck w/Box', 'Truck Tractor', 'Trailer', + 'Truck w/Flatbed', 'Truck w/Liquid', 'Crane Truck', 'Railway Vehicle', 'Passenger Car', 'Cargo Car', + 'Flat Car', 'Tank car', 'Locomotive', 'Maritime Vessel', 'Motorboat', 'Sailboat', 'Tugboat', 'Barge', + 'Fishing Vessel', 'Ferry', 'Yacht', 'Container Ship', 'Oil Tanker', 'Engineering Vehicle', 'Tower crane', + 'Container Crane', 'Reach Stacker', 'Straddle Carrier', 'Mobile Crane', 'Dump Truck', 'Haul Truck', + 'Scraper/Tractor', 'Front loader/Bulldozer', 'Excavator', 'Cement Mixer', 'Ground Grader', 'Hut/Tent', 'Shed', + 'Building', 'Aircraft Hangar', 'Damaged Building', 'Facility', 'Construction Site', 'Vehicle Lot', 'Helipad', + 'Storage Tank', 'Shipping container lot', 'Shipping Container', 'Pylon', 'Tower'] # class names # Download script/URL (optional) --------------------------------------------------------------------------------------- diff --git a/models/hub/anchors.yaml b/models/hub/anchors.yaml index a07a4dc72387..57512955ac1f 100644 --- a/models/hub/anchors.yaml +++ b/models/hub/anchors.yaml @@ -4,55 +4,55 @@ # P5 ------------------------------------------------------------------------------------------------------------------- # P5-640: anchors_p5_640: - - [ 10,13, 16,30, 33,23 ] # P3/8 - - [ 30,61, 62,45, 59,119 ] # P4/16 - - [ 116,90, 156,198, 373,326 ] # P5/32 + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 # P6 ------------------------------------------------------------------------------------------------------------------- # P6-640: thr=0.25: 0.9964 BPR, 5.54 anchors past thr, n=12, img_size=640, metric_all=0.281/0.716-mean/best, past_thr=0.469-mean: 9,11, 21,19, 17,41, 43,32, 39,70, 86,64, 65,131, 134,130, 120,265, 282,180, 247,354, 512,387 anchors_p6_640: - - [ 9,11, 21,19, 17,41 ] # P3/8 - - [ 43,32, 39,70, 86,64 ] # P4/16 - - [ 65,131, 134,130, 120,265 ] # P5/32 - - [ 282,180, 247,354, 512,387 ] # P6/64 
+ - [9,11, 21,19, 17,41] # P3/8 + - [43,32, 39,70, 86,64] # P4/16 + - [65,131, 134,130, 120,265] # P5/32 + - [282,180, 247,354, 512,387] # P6/64 # P6-1280: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1280, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 19,27, 44,40, 38,94, 96,68, 86,152, 180,137, 140,301, 303,264, 238,542, 436,615, 739,380, 925,792 anchors_p6_1280: - - [ 19,27, 44,40, 38,94 ] # P3/8 - - [ 96,68, 86,152, 180,137 ] # P4/16 - - [ 140,301, 303,264, 238,542 ] # P5/32 - - [ 436,615, 739,380, 925,792 ] # P6/64 + - [19,27, 44,40, 38,94] # P3/8 + - [96,68, 86,152, 180,137] # P4/16 + - [140,301, 303,264, 238,542] # P5/32 + - [436,615, 739,380, 925,792] # P6/64 # P6-1920: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1920, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 28,41, 67,59, 57,141, 144,103, 129,227, 270,205, 209,452, 455,396, 358,812, 653,922, 1109,570, 1387,1187 anchors_p6_1920: - - [ 28,41, 67,59, 57,141 ] # P3/8 - - [ 144,103, 129,227, 270,205 ] # P4/16 - - [ 209,452, 455,396, 358,812 ] # P5/32 - - [ 653,922, 1109,570, 1387,1187 ] # P6/64 + - [28,41, 67,59, 57,141] # P3/8 + - [144,103, 129,227, 270,205] # P4/16 + - [209,452, 455,396, 358,812] # P5/32 + - [653,922, 1109,570, 1387,1187] # P6/64 # P7 ------------------------------------------------------------------------------------------------------------------- # P7-640: thr=0.25: 0.9962 BPR, 6.76 anchors past thr, n=15, img_size=640, metric_all=0.275/0.733-mean/best, past_thr=0.466-mean: 11,11, 13,30, 29,20, 30,46, 61,38, 39,92, 78,80, 146,66, 79,163, 149,150, 321,143, 157,303, 257,402, 359,290, 524,372 anchors_p7_640: - - [ 11,11, 13,30, 29,20 ] # P3/8 - - [ 30,46, 61,38, 39,92 ] # P4/16 - - [ 78,80, 146,66, 79,163 ] # P5/32 - - [ 149,150, 321,143, 157,303 ] # P6/64 - - [ 257,402, 359,290, 524,372 ] # P7/128 + - [11,11, 13,30, 29,20] # P3/8 + - [30,46, 61,38, 39,92] # P4/16 + - [78,80, 146,66, 79,163] # P5/32 + - [149,150, 321,143, 157,303] # P6/64 + - [257,402, 359,290, 524,372] # P7/128 # P7-1280: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1280, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 19,22, 54,36, 32,77, 70,83, 138,71, 75,173, 165,159, 148,334, 375,151, 334,317, 251,626, 499,474, 750,326, 534,814, 1079,818 anchors_p7_1280: - - [ 19,22, 54,36, 32,77 ] # P3/8 - - [ 70,83, 138,71, 75,173 ] # P4/16 - - [ 165,159, 148,334, 375,151 ] # P5/32 - - [ 334,317, 251,626, 499,474 ] # P6/64 - - [ 750,326, 534,814, 1079,818 ] # P7/128 + - [19,22, 54,36, 32,77] # P3/8 + - [70,83, 138,71, 75,173] # P4/16 + - [165,159, 148,334, 375,151] # P5/32 + - [334,317, 251,626, 499,474] # P6/64 + - [750,326, 534,814, 1079,818] # P7/128 # P7-1920: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1920, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 29,34, 81,55, 47,115, 105,124, 207,107, 113,259, 247,238, 222,500, 563,227, 501,476, 376,939, 749,711, 1126,489, 801,1222, 1618,1227 anchors_p7_1920: - - [ 29,34, 81,55, 47,115 ] # P3/8 - - [ 105,124, 207,107, 113,259 ] # P4/16 - - [ 247,238, 222,500, 563,227 ] # P5/32 - - [ 501,476, 376,939, 749,711 ] # P6/64 - - [ 1126,489, 801,1222, 1618,1227 ] # P7/128 + - [29,34, 81,55, 47,115] # P3/8 + - [105,124, 207,107, 113,259] # P4/16 + - [247,238, 222,500, 563,227] # P5/32 + - [501,476, 376,939, 749,711] # P6/64 + - [1126,489, 801,1222, 1618,1227] # P7/128 diff --git a/models/hub/yolov3-spp.yaml b/models/hub/yolov3-spp.yaml index 0ca7b7f6577b..ddc0549f50d6 100644 --- a/models/hub/yolov3-spp.yaml +++ 
b/models/hub/yolov3-spp.yaml @@ -3,47 +3,47 @@ nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple anchors: - - [ 10,13, 16,30, 33,23 ] # P3/8 - - [ 30,61, 62,45, 59,119 ] # P4/16 - - [ 116,90, 156,198, 373,326 ] # P5/32 + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 # darknet53 backbone backbone: # [from, number, module, args] - [ [ -1, 1, Conv, [ 32, 3, 1 ] ], # 0 - [ -1, 1, Conv, [ 64, 3, 2 ] ], # 1-P1/2 - [ -1, 1, Bottleneck, [ 64 ] ], - [ -1, 1, Conv, [ 128, 3, 2 ] ], # 3-P2/4 - [ -1, 2, Bottleneck, [ 128 ] ], - [ -1, 1, Conv, [ 256, 3, 2 ] ], # 5-P3/8 - [ -1, 8, Bottleneck, [ 256 ] ], - [ -1, 1, Conv, [ 512, 3, 2 ] ], # 7-P4/16 - [ -1, 8, Bottleneck, [ 512 ] ], - [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P5/32 - [ -1, 4, Bottleneck, [ 1024 ] ], # 10 + [[-1, 1, Conv, [32, 3, 1]], # 0 + [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 + [-1, 1, Bottleneck, [64]], + [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 + [-1, 2, Bottleneck, [128]], + [-1, 1, Conv, [256, 3, 2]], # 5-P3/8 + [-1, 8, Bottleneck, [256]], + [-1, 1, Conv, [512, 3, 2]], # 7-P4/16 + [-1, 8, Bottleneck, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32 + [-1, 4, Bottleneck, [1024]], # 10 ] # YOLOv3-SPP head head: - [ [ -1, 1, Bottleneck, [ 1024, False ] ], - [ -1, 1, SPP, [ 512, [ 5, 9, 13 ] ] ], - [ -1, 1, Conv, [ 1024, 3, 1 ] ], - [ -1, 1, Conv, [ 512, 1, 1 ] ], - [ -1, 1, Conv, [ 1024, 3, 1 ] ], # 15 (P5/32-large) + [[-1, 1, Bottleneck, [1024, False]], + [-1, 1, SPP, [512, [5, 9, 13]]], + [-1, 1, Conv, [1024, 3, 1]], + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large) - [ -2, 1, Conv, [ 256, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P4 - [ -1, 1, Bottleneck, [ 512, False ] ], - [ -1, 1, Bottleneck, [ 512, False ] ], - [ -1, 1, Conv, [ 256, 1, 1 ] ], - [ -1, 1, Conv, [ 512, 3, 1 ] ], # 22 (P4/16-medium) + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P4 + [-1, 1, Bottleneck, [512, False]], + [-1, 1, Bottleneck, [512, False]], + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium) - [ -2, 1, Conv, [ 128, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P3 - [ -1, 1, Bottleneck, [ 256, False ] ], - [ -1, 2, Bottleneck, [ 256, False ] ], # 27 (P3/8-small) + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P3 + [-1, 1, Bottleneck, [256, False]], + [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small) - [ [ 27, 22, 15 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5) + [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) ] diff --git a/models/hub/yolov3-tiny.yaml b/models/hub/yolov3-tiny.yaml index d39a6b1f581c..537ad755b166 100644 --- a/models/hub/yolov3-tiny.yaml +++ b/models/hub/yolov3-tiny.yaml @@ -3,37 +3,37 @@ nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple anchors: - - [ 10,14, 23,27, 37,58 ] # P4/16 - - [ 81,82, 135,169, 344,319 ] # P5/32 + - [10,14, 23,27, 37,58] # P4/16 + - [81,82, 135,169, 344,319] # P5/32 # YOLOv3-tiny backbone backbone: # [from, number, module, args] - [ [ -1, 1, Conv, [ 16, 3, 1 ] ], # 0 - [ -1, 1, nn.MaxPool2d, [ 2, 2, 0 ] ], # 1-P1/2 - [ -1, 1, Conv, [ 32, 3, 1 ] ], - [ -1, 1, nn.MaxPool2d, [ 2, 2, 0 ] 
], # 3-P2/4 - [ -1, 1, Conv, [ 64, 3, 1 ] ], - [ -1, 1, nn.MaxPool2d, [ 2, 2, 0 ] ], # 5-P3/8 - [ -1, 1, Conv, [ 128, 3, 1 ] ], - [ -1, 1, nn.MaxPool2d, [ 2, 2, 0 ] ], # 7-P4/16 - [ -1, 1, Conv, [ 256, 3, 1 ] ], - [ -1, 1, nn.MaxPool2d, [ 2, 2, 0 ] ], # 9-P5/32 - [ -1, 1, Conv, [ 512, 3, 1 ] ], - [ -1, 1, nn.ZeroPad2d, [ [ 0, 1, 0, 1 ] ] ], # 11 - [ -1, 1, nn.MaxPool2d, [ 2, 1, 0 ] ], # 12 + [[-1, 1, Conv, [16, 3, 1]], # 0 + [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 1-P1/2 + [-1, 1, Conv, [32, 3, 1]], + [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 3-P2/4 + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 5-P3/8 + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 7-P4/16 + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 9-P5/32 + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, nn.ZeroPad2d, [[0, 1, 0, 1]]], # 11 + [-1, 1, nn.MaxPool2d, [2, 1, 0]], # 12 ] # YOLOv3-tiny head head: - [ [ -1, 1, Conv, [ 1024, 3, 1 ] ], - [ -1, 1, Conv, [ 256, 1, 1 ] ], - [ -1, 1, Conv, [ 512, 3, 1 ] ], # 15 (P5/32-large) + [[-1, 1, Conv, [1024, 3, 1]], + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [512, 3, 1]], # 15 (P5/32-large) - [ -2, 1, Conv, [ 128, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P4 - [ -1, 1, Conv, [ 256, 3, 1 ] ], # 19 (P4/16-medium) + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P4 + [-1, 1, Conv, [256, 3, 1]], # 19 (P4/16-medium) - [ [ 19, 15 ], 1, Detect, [ nc, anchors ] ], # Detect(P4, P5) + [[19, 15], 1, Detect, [nc, anchors]], # Detect(P4, P5) ] diff --git a/models/hub/yolov3.yaml b/models/hub/yolov3.yaml index 09df0d9ef362..3adfc2c6d2f9 100644 --- a/models/hub/yolov3.yaml +++ b/models/hub/yolov3.yaml @@ -3,47 +3,47 @@ nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple anchors: - - [ 10,13, 16,30, 33,23 ] # P3/8 - - [ 30,61, 62,45, 59,119 ] # P4/16 - - [ 116,90, 156,198, 373,326 ] # P5/32 + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 # darknet53 backbone backbone: # [from, number, module, args] - [ [ -1, 1, Conv, [ 32, 3, 1 ] ], # 0 - [ -1, 1, Conv, [ 64, 3, 2 ] ], # 1-P1/2 - [ -1, 1, Bottleneck, [ 64 ] ], - [ -1, 1, Conv, [ 128, 3, 2 ] ], # 3-P2/4 - [ -1, 2, Bottleneck, [ 128 ] ], - [ -1, 1, Conv, [ 256, 3, 2 ] ], # 5-P3/8 - [ -1, 8, Bottleneck, [ 256 ] ], - [ -1, 1, Conv, [ 512, 3, 2 ] ], # 7-P4/16 - [ -1, 8, Bottleneck, [ 512 ] ], - [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P5/32 - [ -1, 4, Bottleneck, [ 1024 ] ], # 10 + [[-1, 1, Conv, [32, 3, 1]], # 0 + [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 + [-1, 1, Bottleneck, [64]], + [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 + [-1, 2, Bottleneck, [128]], + [-1, 1, Conv, [256, 3, 2]], # 5-P3/8 + [-1, 8, Bottleneck, [256]], + [-1, 1, Conv, [512, 3, 2]], # 7-P4/16 + [-1, 8, Bottleneck, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32 + [-1, 4, Bottleneck, [1024]], # 10 ] # YOLOv3 head head: - [ [ -1, 1, Bottleneck, [ 1024, False ] ], - [ -1, 1, Conv, [ 512, [ 1, 1 ] ] ], - [ -1, 1, Conv, [ 1024, 3, 1 ] ], - [ -1, 1, Conv, [ 512, 1, 1 ] ], - [ -1, 1, Conv, [ 1024, 3, 1 ] ], # 15 (P5/32-large) + [[-1, 1, Bottleneck, [1024, False]], + [-1, 1, Conv, [512, [1, 1]]], + [-1, 1, Conv, [1024, 3, 1]], + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large) - [ -2, 1, Conv, [ 256, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 8 ], 1, 
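The `nn.ZeroPad2d` + `nn.MaxPool2d([2, 1, 0])` pair at layers 11-12 of the tiny backbone above is the classic trick for adding one more pooling stage without shrinking the map: pad right and bottom by one pixel, then pool with kernel 2 and stride 1, leaving height and width unchanged. A quick shape check in stock PyTorch:

import torch
import torch.nn as nn

x = torch.zeros(1, 512, 13, 13)
pad = nn.ZeroPad2d((0, 1, 0, 1))  # (left, right, top, bottom) -> 14x14
pool = nn.MaxPool2d(2, 1, 0)      # kernel 2, stride 1 -> back to 13x13
print(pool(pad(x)).shape)         # torch.Size([1, 512, 13, 13])
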
Concat, [ 1 ] ], # cat backbone P4 - [ -1, 1, Bottleneck, [ 512, False ] ], - [ -1, 1, Bottleneck, [ 512, False ] ], - [ -1, 1, Conv, [ 256, 1, 1 ] ], - [ -1, 1, Conv, [ 512, 3, 1 ] ], # 22 (P4/16-medium) + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P4 + [-1, 1, Bottleneck, [512, False]], + [-1, 1, Bottleneck, [512, False]], + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium) - [ -2, 1, Conv, [ 128, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P3 - [ -1, 1, Bottleneck, [ 256, False ] ], - [ -1, 2, Bottleneck, [ 256, False ] ], # 27 (P3/8-small) + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P3 + [-1, 1, Bottleneck, [256, False]], + [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small) - [ [ 27, 22, 15 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5) + [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) ] diff --git a/models/hub/yolov5-fpn.yaml b/models/hub/yolov5-fpn.yaml index b8b7fc1a23d4..217e4ca6ac96 100644 --- a/models/hub/yolov5-fpn.yaml +++ b/models/hub/yolov5-fpn.yaml @@ -3,38 +3,38 @@ nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple anchors: - - [ 10,13, 16,30, 33,23 ] # P3/8 - - [ 30,61, 62,45, 59,119 ] # P4/16 - - [ 116,90, 156,198, 373,326 ] # P5/32 + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 # YOLOv5 backbone backbone: # [from, number, module, args] - [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 - [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 - [ -1, 3, Bottleneck, [ 128 ] ], - [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 - [ -1, 9, BottleneckCSP, [ 256 ] ], - [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 - [ -1, 9, BottleneckCSP, [ 512 ] ], - [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 7-P5/32 - [ -1, 1, SPP, [ 1024, [ 5, 9, 13 ] ] ], - [ -1, 6, BottleneckCSP, [ 1024 ] ], # 9 + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, Bottleneck, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, BottleneckCSP, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, BottleneckCSP, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 1, SPP, [1024, [5, 9, 13]]], + [-1, 6, BottleneckCSP, [1024]], # 9 ] # YOLOv5 FPN head head: - [ [ -1, 3, BottleneckCSP, [ 1024, False ] ], # 10 (P5/32-large) + [[-1, 3, BottleneckCSP, [1024, False]], # 10 (P5/32-large) - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 - [ -1, 1, Conv, [ 512, 1, 1 ] ], - [ -1, 3, BottleneckCSP, [ 512, False ] ], # 14 (P4/16-medium) + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 1, Conv, [512, 1, 1]], + [-1, 3, BottleneckCSP, [512, False]], # 14 (P4/16-medium) - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 - [ -1, 1, Conv, [ 256, 1, 1 ] ], - [ -1, 3, BottleneckCSP, [ 256, False ] ], # 18 (P3/8-small) + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 1, Conv, [256, 1, 1]], + [-1, 3, BottleneckCSP, [256, False]], # 18 (P3/8-small) - [ [ 18, 14, 10 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5) + [[18, 14, 10], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) ] diff --git a/models/hub/yolov5-p2.yaml 
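Every model file in this batch uses the same row format, `[from, number, module, args]`, and the parser scales `number` by `depth_multiple` and the channel argument by `width_multiple`. A minimal sketch of that scaling, with helper and variable names (`gd`, `gw`, `make_divisible`) chosen to mirror the parser's conventions rather than quoted from it:

import math

def make_divisible(x, divisor=8):
    # round channels up to the nearest multiple of divisor
    return math.ceil(x / divisor) * divisor

gd, gw = 0.33, 0.50  # depth/width multiples of a small model variant
for f, n, module, args in [[-1, 9, 'C3', [512]], [-1, 1, 'Conv', [1024, 3, 2]]]:
    n = max(round(n * gd), 1) if n > 1 else n      # scale repeat count
    c2 = make_divisible(args[0] * gw)              # scale output channels
    print(module, 'repeats:', n, 'channels:', c2)  # C3 -> 3 repeats, 256 ch
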
b/models/hub/yolov5-p2.yaml index 62122363df2d..6a932a868229 100644 --- a/models/hub/yolov5-p2.yaml +++ b/models/hub/yolov5-p2.yaml @@ -7,46 +7,46 @@ anchors: 3 # YOLOv5 backbone backbone: # [from, number, module, args] - [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 - [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 - [ -1, 3, C3, [ 128 ] ], - [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 - [ -1, 9, C3, [ 256 ] ], - [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 - [ -1, 9, C3, [ 512 ] ], - [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 7-P5/32 - [ -1, 1, SPP, [ 1024, [ 5, 9, 13 ] ] ], - [ -1, 3, C3, [ 1024, False ] ], # 9 + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 1, SPP, [1024, [5, 9, 13]]], + [-1, 3, C3, [1024, False]], # 9 ] # YOLOv5 head head: - [ [ -1, 1, Conv, [ 512, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 - [ -1, 3, C3, [ 512, False ] ], # 13 - - [ -1, 1, Conv, [ 256, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 - [ -1, 3, C3, [ 256, False ] ], # 17 (P3/8-small) - - [ -1, 1, Conv, [ 128, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 2 ], 1, Concat, [ 1 ] ], # cat backbone P2 - [ -1, 1, C3, [ 128, False ] ], # 21 (P2/4-xsmall) - - [ -1, 1, Conv, [ 128, 3, 2 ] ], - [ [ -1, 18 ], 1, Concat, [ 1 ] ], # cat head P3 - [ -1, 3, C3, [ 256, False ] ], # 24 (P3/8-small) - - [ -1, 1, Conv, [ 256, 3, 2 ] ], - [ [ -1, 14 ], 1, Concat, [ 1 ] ], # cat head P4 - [ -1, 3, C3, [ 512, False ] ], # 27 (P4/16-medium) - - [ -1, 1, Conv, [ 512, 3, 2 ] ], - [ [ -1, 10 ], 1, Concat, [ 1 ] ], # cat head P5 - [ -1, 3, C3, [ 1024, False ] ], # 30 (P5/32-large) - - [ [ 24, 27, 30 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5) + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 2], 1, Concat, [1]], # cat backbone P2 + [-1, 1, C3, [128, False]], # 21 (P2/4-xsmall) + + [-1, 1, Conv, [128, 3, 2]], + [[-1, 18], 1, Concat, [1]], # cat head P3 + [-1, 3, C3, [256, False]], # 24 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 27 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 30 (P5/32-large) + + [[24, 27, 30], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) ] diff --git a/models/hub/yolov5-p6.yaml b/models/hub/yolov5-p6.yaml index c5ef5177f0c8..58b86b0ca892 100644 --- a/models/hub/yolov5-p6.yaml +++ b/models/hub/yolov5-p6.yaml @@ -7,48 +7,48 @@ anchors: 3 # YOLOv5 backbone backbone: # [from, number, module, args] - [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 - [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 - [ -1, 3, C3, [ 128 ] ], - [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 - [ -1, 9, C3, [ 256 ] ], - [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 - [ -1, 9, C3, [ 512 ] ], - [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32 - [ -1, 3, C3, [ 768 ] ], - [ -1, 1, Conv, [ 1024, 3, 2 ] ], 
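Note the `anchors: 3` context line in the P2/P6 files above: an integer here asks for three anchors per output layer to be computed automatically at train time instead of being listed by hand. Where anchors are listed explicitly, they are widths and heights in input-image pixels, one row per stride. A one-line sanity check of what an anchor spans on its grid:

stride = 32                    # P5/32 output layer
w, h = 116, 90                 # anchor from a P5 row above
print(w / stride, h / stride)  # 3.625 2.8125 cells on the stride-32 map
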
# 9-P6/64 - [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ], - [ -1, 3, C3, [ 1024, False ] ], # 11 + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 + [-1, 3, C3, [768]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 + [-1, 1, SPP, [1024, [3, 5, 7]]], + [-1, 3, C3, [1024, False]], # 11 ] # YOLOv5 head head: - [ [ -1, 1, Conv, [ 768, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5 - [ -1, 3, C3, [ 768, False ] ], # 15 - - [ -1, 1, Conv, [ 512, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 - [ -1, 3, C3, [ 512, False ] ], # 19 - - [ -1, 1, Conv, [ 256, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 - [ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small) - - [ -1, 1, Conv, [ 256, 3, 2 ] ], - [ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4 - [ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium) - - [ -1, 1, Conv, [ 512, 3, 2 ] ], - [ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5 - [ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large) - - [ -1, 1, Conv, [ 768, 3, 2 ] ], - [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6 - [ -1, 3, C3, [ 1024, False ] ], # 32 (P5/64-xlarge) - - [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6) + [[-1, 1, Conv, [768, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P5 + [-1, 3, C3, [768, False]], # 15 + + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 19 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 23 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 20], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 26 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 16], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [768, False]], # 29 (P5/32-large) + + [-1, 1, Conv, [768, 3, 2]], + [[-1, 12], 1, Concat, [1]], # cat head P6 + [-1, 3, C3, [1024, False]], # 32 (P5/64-xlarge) + + [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) ] diff --git a/models/hub/yolov5-p7.yaml b/models/hub/yolov5-p7.yaml index 505c590ca168..f6e8fc7928cc 100644 --- a/models/hub/yolov5-p7.yaml +++ b/models/hub/yolov5-p7.yaml @@ -7,59 +7,59 @@ anchors: 3 # YOLOv5 backbone backbone: # [from, number, module, args] - [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 - [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 - [ -1, 3, C3, [ 128 ] ], - [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 - [ -1, 9, C3, [ 256 ] ], - [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 - [ -1, 9, C3, [ 512 ] ], - [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32 - [ -1, 3, C3, [ 768 ] ], - [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64 - [ -1, 3, C3, [ 1024 ] ], - [ -1, 1, Conv, [ 1280, 3, 2 ] ], # 11-P7/128 - [ -1, 1, SPP, [ 1280, [ 3, 5 ] ] ], - [ -1, 3, C3, [ 1280, False ] ], # 13 + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 + [-1, 3, C3, [768]], + [-1, 
1, Conv, [1024, 3, 2]], # 9-P6/64 + [-1, 3, C3, [1024]], + [-1, 1, Conv, [1280, 3, 2]], # 11-P7/128 + [-1, 1, SPP, [1280, [3, 5]]], + [-1, 3, C3, [1280, False]], # 13 ] # YOLOv5 head head: - [ [ -1, 1, Conv, [ 1024, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 10 ], 1, Concat, [ 1 ] ], # cat backbone P6 - [ -1, 3, C3, [ 1024, False ] ], # 17 + [[-1, 1, Conv, [1024, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 10], 1, Concat, [1]], # cat backbone P6 + [-1, 3, C3, [1024, False]], # 17 - [ -1, 1, Conv, [ 768, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5 - [ -1, 3, C3, [ 768, False ] ], # 21 + [-1, 1, Conv, [768, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P5 + [-1, 3, C3, [768, False]], # 21 - [ -1, 1, Conv, [ 512, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 - [ -1, 3, C3, [ 512, False ] ], # 25 + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 25 - [ -1, 1, Conv, [ 256, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 - [ -1, 3, C3, [ 256, False ] ], # 29 (P3/8-small) + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 29 (P3/8-small) - [ -1, 1, Conv, [ 256, 3, 2 ] ], - [ [ -1, 26 ], 1, Concat, [ 1 ] ], # cat head P4 - [ -1, 3, C3, [ 512, False ] ], # 32 (P4/16-medium) + [-1, 1, Conv, [256, 3, 2]], + [[-1, 26], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 32 (P4/16-medium) - [ -1, 1, Conv, [ 512, 3, 2 ] ], - [ [ -1, 22 ], 1, Concat, [ 1 ] ], # cat head P5 - [ -1, 3, C3, [ 768, False ] ], # 35 (P5/32-large) + [-1, 1, Conv, [512, 3, 2]], + [[-1, 22], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [768, False]], # 35 (P5/32-large) - [ -1, 1, Conv, [ 768, 3, 2 ] ], - [ [ -1, 18 ], 1, Concat, [ 1 ] ], # cat head P6 - [ -1, 3, C3, [ 1024, False ] ], # 38 (P6/64-xlarge) + [-1, 1, Conv, [768, 3, 2]], + [[-1, 18], 1, Concat, [1]], # cat head P6 + [-1, 3, C3, [1024, False]], # 38 (P6/64-xlarge) - [ -1, 1, Conv, [ 1024, 3, 2 ] ], - [ [ -1, 14 ], 1, Concat, [ 1 ] ], # cat head P7 - [ -1, 3, C3, [ 1280, False ] ], # 41 (P7/128-xxlarge) + [-1, 1, Conv, [1024, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P7 + [-1, 3, C3, [1280, False]], # 41 (P7/128-xxlarge) - [ [ 29, 32, 35, 38, 41 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6, P7) + [[29, 32, 35, 38, 41], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6, P7) ] diff --git a/models/hub/yolov5-panet.yaml b/models/hub/yolov5-panet.yaml index aee5dab01fa1..c5f3b4817102 100644 --- a/models/hub/yolov5-panet.yaml +++ b/models/hub/yolov5-panet.yaml @@ -3,44 +3,44 @@ nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple anchors: - - [ 10,13, 16,30, 33,23 ] # P3/8 - - [ 30,61, 62,45, 59,119 ] # P4/16 - - [ 116,90, 156,198, 373,326 ] # P5/32 + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 # YOLOv5 backbone backbone: # [from, number, module, args] - [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 - [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 - [ -1, 3, BottleneckCSP, [ 128 ] ], - [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 - [ -1, 9, 
BottleneckCSP, [ 256 ] ], - [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 - [ -1, 9, BottleneckCSP, [ 512 ] ], - [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 7-P5/32 - [ -1, 1, SPP, [ 1024, [ 5, 9, 13 ] ] ], - [ -1, 3, BottleneckCSP, [ 1024, False ] ], # 9 + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, BottleneckCSP, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, BottleneckCSP, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, BottleneckCSP, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 1, SPP, [1024, [5, 9, 13]]], + [-1, 3, BottleneckCSP, [1024, False]], # 9 ] # YOLOv5 PANet head head: - [ [ -1, 1, Conv, [ 512, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 - [ -1, 3, BottleneckCSP, [ 512, False ] ], # 13 + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, BottleneckCSP, [512, False]], # 13 - [ -1, 1, Conv, [ 256, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 - [ -1, 3, BottleneckCSP, [ 256, False ] ], # 17 (P3/8-small) + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, BottleneckCSP, [256, False]], # 17 (P3/8-small) - [ -1, 1, Conv, [ 256, 3, 2 ] ], - [ [ -1, 14 ], 1, Concat, [ 1 ] ], # cat head P4 - [ -1, 3, BottleneckCSP, [ 512, False ] ], # 20 (P4/16-medium) + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, BottleneckCSP, [512, False]], # 20 (P4/16-medium) - [ -1, 1, Conv, [ 512, 3, 2 ] ], - [ [ -1, 10 ], 1, Concat, [ 1 ] ], # cat head P5 - [ -1, 3, BottleneckCSP, [ 1024, False ] ], # 23 (P5/32-large) + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, BottleneckCSP, [1024, False]], # 23 (P5/32-large) - [ [ 17, 20, 23 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5) + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) ] diff --git a/models/hub/yolov5l6.yaml b/models/hub/yolov5l6.yaml index 91c57da1939e..d5afd7d84100 100644 --- a/models/hub/yolov5l6.yaml +++ b/models/hub/yolov5l6.yaml @@ -3,56 +3,56 @@ nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple anchors: - - [ 19,27, 44,40, 38,94 ] # P3/8 - - [ 96,68, 86,152, 180,137 ] # P4/16 - - [ 140,301, 303,264, 238,542 ] # P5/32 - - [ 436,615, 739,380, 925,792 ] # P6/64 + - [19,27, 44,40, 38,94] # P3/8 + - [96,68, 86,152, 180,137] # P4/16 + - [140,301, 303,264, 238,542] # P5/32 + - [436,615, 739,380, 925,792] # P6/64 # YOLOv5 backbone backbone: # [from, number, module, args] - [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 - [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 - [ -1, 3, C3, [ 128 ] ], - [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 - [ -1, 9, C3, [ 256 ] ], - [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 - [ -1, 9, C3, [ 512 ] ], - [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32 - [ -1, 3, C3, [ 768 ] ], - [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64 - [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ], - [ -1, 3, C3, [ 1024, False ] ], # 11 + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 + [-1, 3, C3, [768]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 + [-1, 1, SPP, [1024, 
[3, 5, 7]]], + [-1, 3, C3, [1024, False]], # 11 ] # YOLOv5 head head: - [ [ -1, 1, Conv, [ 768, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5 - [ -1, 3, C3, [ 768, False ] ], # 15 - - [ -1, 1, Conv, [ 512, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 - [ -1, 3, C3, [ 512, False ] ], # 19 - - [ -1, 1, Conv, [ 256, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 - [ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small) - - [ -1, 1, Conv, [ 256, 3, 2 ] ], - [ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4 - [ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium) - - [ -1, 1, Conv, [ 512, 3, 2 ] ], - [ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5 - [ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large) - - [ -1, 1, Conv, [ 768, 3, 2 ] ], - [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6 - [ -1, 3, C3, [ 1024, False ] ], # 32 (P6/64-xlarge) - - [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6) + [[-1, 1, Conv, [768, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P5 + [-1, 3, C3, [768, False]], # 15 + + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 19 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 23 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 20], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 26 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 16], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [768, False]], # 29 (P5/32-large) + + [-1, 1, Conv, [768, 3, 2]], + [[-1, 12], 1, Concat, [1]], # cat head P6 + [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) + + [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) ] diff --git a/models/hub/yolov5m6.yaml b/models/hub/yolov5m6.yaml index 4bef2e074a96..16a841a0b4b0 100644 --- a/models/hub/yolov5m6.yaml +++ b/models/hub/yolov5m6.yaml @@ -3,56 +3,56 @@ nc: 80 # number of classes depth_multiple: 0.67 # model depth multiple width_multiple: 0.75 # layer channel multiple anchors: - - [ 19,27, 44,40, 38,94 ] # P3/8 - - [ 96,68, 86,152, 180,137 ] # P4/16 - - [ 140,301, 303,264, 238,542 ] # P5/32 - - [ 436,615, 739,380, 925,792 ] # P6/64 + - [19,27, 44,40, 38,94] # P3/8 + - [96,68, 86,152, 180,137] # P4/16 + - [140,301, 303,264, 238,542] # P5/32 + - [436,615, 739,380, 925,792] # P6/64 # YOLOv5 backbone backbone: # [from, number, module, args] - [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 - [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 - [ -1, 3, C3, [ 128 ] ], - [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 - [ -1, 9, C3, [ 256 ] ], - [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 - [ -1, 9, C3, [ 512 ] ], - [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32 - [ -1, 3, C3, [ 768 ] ], - [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64 - [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ], - [ -1, 3, C3, [ 1024, False ] ], # 11 + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 + [-1, 3, C3, [768]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 + [-1, 1, SPP, [1024, [3, 5, 7]]], + [-1, 3, C3, 
[1024, False]], # 11 ] # YOLOv5 head head: - [ [ -1, 1, Conv, [ 768, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5 - [ -1, 3, C3, [ 768, False ] ], # 15 - - [ -1, 1, Conv, [ 512, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 - [ -1, 3, C3, [ 512, False ] ], # 19 - - [ -1, 1, Conv, [ 256, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 - [ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small) - - [ -1, 1, Conv, [ 256, 3, 2 ] ], - [ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4 - [ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium) - - [ -1, 1, Conv, [ 512, 3, 2 ] ], - [ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5 - [ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large) - - [ -1, 1, Conv, [ 768, 3, 2 ] ], - [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6 - [ -1, 3, C3, [ 1024, False ] ], # 32 (P6/64-xlarge) - - [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6) + [[-1, 1, Conv, [768, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P5 + [-1, 3, C3, [768, False]], # 15 + + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 19 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 23 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 20], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 26 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 16], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [768, False]], # 29 (P5/32-large) + + [-1, 1, Conv, [768, 3, 2]], + [[-1, 12], 1, Concat, [1]], # cat head P6 + [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) + + [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) ] diff --git a/models/hub/yolov5s-transformer.yaml b/models/hub/yolov5s-transformer.yaml index 8023ba480d24..b999ebb7583d 100644 --- a/models/hub/yolov5s-transformer.yaml +++ b/models/hub/yolov5s-transformer.yaml @@ -3,44 +3,44 @@ nc: 80 # number of classes depth_multiple: 0.33 # model depth multiple width_multiple: 0.50 # layer channel multiple anchors: - - [ 10,13, 16,30, 33,23 ] # P3/8 - - [ 30,61, 62,45, 59,119 ] # P4/16 - - [ 116,90, 156,198, 373,326 ] # P5/32 + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 # YOLOv5 backbone backbone: # [from, number, module, args] - [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 - [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 - [ -1, 3, C3, [ 128 ] ], - [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 - [ -1, 9, C3, [ 256 ] ], - [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 - [ -1, 9, C3, [ 512 ] ], - [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 7-P5/32 - [ -1, 1, SPP, [ 1024, [ 5, 9, 13 ] ] ], - [ -1, 3, C3TR, [ 1024, False ] ], # 9 <-------- C3TR() Transformer module + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 1, SPP, [1024, [5, 9, 13]]], + [-1, 3, C3TR, [1024, False]], # 9 <-------- C3TR() Transformer module ] # YOLOv5 head head: - [ [ -1, 1, Conv, [ 512, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ 
[ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 - [ -1, 3, C3, [ 512, False ] ], # 13 + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 - [ -1, 1, Conv, [ 256, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 - [ -1, 3, C3, [ 256, False ] ], # 17 (P3/8-small) + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) - [ -1, 1, Conv, [ 256, 3, 2 ] ], - [ [ -1, 14 ], 1, Concat, [ 1 ] ], # cat head P4 - [ -1, 3, C3, [ 512, False ] ], # 20 (P4/16-medium) + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) - [ -1, 1, Conv, [ 512, 3, 2 ] ], - [ [ -1, 10 ], 1, Concat, [ 1 ] ], # cat head P5 - [ -1, 3, C3, [ 1024, False ] ], # 23 (P5/32-large) + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) - [ [ 17, 20, 23 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5) + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) ] diff --git a/models/hub/yolov5s6.yaml b/models/hub/yolov5s6.yaml index ba1025ec87ad..2fb245050053 100644 --- a/models/hub/yolov5s6.yaml +++ b/models/hub/yolov5s6.yaml @@ -3,56 +3,56 @@ nc: 80 # number of classes depth_multiple: 0.33 # model depth multiple width_multiple: 0.50 # layer channel multiple anchors: - - [ 19,27, 44,40, 38,94 ] # P3/8 - - [ 96,68, 86,152, 180,137 ] # P4/16 - - [ 140,301, 303,264, 238,542 ] # P5/32 - - [ 436,615, 739,380, 925,792 ] # P6/64 + - [19,27, 44,40, 38,94] # P3/8 + - [96,68, 86,152, 180,137] # P4/16 + - [140,301, 303,264, 238,542] # P5/32 + - [436,615, 739,380, 925,792] # P6/64 # YOLOv5 backbone backbone: # [from, number, module, args] - [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 - [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 - [ -1, 3, C3, [ 128 ] ], - [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 - [ -1, 9, C3, [ 256 ] ], - [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 - [ -1, 9, C3, [ 512 ] ], - [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32 - [ -1, 3, C3, [ 768 ] ], - [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64 - [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ], - [ -1, 3, C3, [ 1024, False ] ], # 11 + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 + [-1, 3, C3, [768]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 + [-1, 1, SPP, [1024, [3, 5, 7]]], + [-1, 3, C3, [1024, False]], # 11 ] # YOLOv5 head head: - [ [ -1, 1, Conv, [ 768, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5 - [ -1, 3, C3, [ 768, False ] ], # 15 - - [ -1, 1, Conv, [ 512, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 - [ -1, 3, C3, [ 512, False ] ], # 19 - - [ -1, 1, Conv, [ 256, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 - [ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small) - - [ -1, 1, Conv, [ 256, 3, 2 ] ], - [ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4 - [ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium) - - [ -1, 1, Conv, [ 512, 3, 2 ] ], - [ [ -1, 16 ], 1, Concat, [ 1 ] ], 
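Head rows such as `[[-1, 16], 1, Concat, [1]]` merge the previous layer's output with a saved earlier output; the `[1]` argument is the dimension to concatenate on (channels). A toy shape check, with made-up feature sizes:

import torch

x_prev = torch.zeros(1, 256, 40, 40)    # output of layer -1 (hypothetical)
x_skip = torch.zeros(1, 256, 40, 40)    # saved output of the indexed layer
y = torch.cat((x_prev, x_skip), dim=1)  # Concat, [1]
print(y.shape)                          # torch.Size([1, 512, 40, 40])
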
# cat head P5 - [ -1, 3, C3, [ 768, False ] ], # 29 (P5/32-large) - - [ -1, 1, Conv, [ 768, 3, 2 ] ], - [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6 - [ -1, 3, C3, [ 1024, False ] ], # 32 (P6/64-xlarge) - - [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6) + [[-1, 1, Conv, [768, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P5 + [-1, 3, C3, [768, False]], # 15 + + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 19 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 23 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 20], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 26 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 16], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [768, False]], # 29 (P5/32-large) + + [-1, 1, Conv, [768, 3, 2]], + [[-1, 12], 1, Concat, [1]], # cat head P6 + [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) + + [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) ] diff --git a/models/hub/yolov5x6.yaml b/models/hub/yolov5x6.yaml index 4fc9c9a119b8..c5187101072b 100644 --- a/models/hub/yolov5x6.yaml +++ b/models/hub/yolov5x6.yaml @@ -3,56 +3,56 @@ nc: 80 # number of classes depth_multiple: 1.33 # model depth multiple width_multiple: 1.25 # layer channel multiple anchors: - - [ 19,27, 44,40, 38,94 ] # P3/8 - - [ 96,68, 86,152, 180,137 ] # P4/16 - - [ 140,301, 303,264, 238,542 ] # P5/32 - - [ 436,615, 739,380, 925,792 ] # P6/64 + - [19,27, 44,40, 38,94] # P3/8 + - [96,68, 86,152, 180,137] # P4/16 + - [140,301, 303,264, 238,542] # P5/32 + - [436,615, 739,380, 925,792] # P6/64 # YOLOv5 backbone backbone: # [from, number, module, args] - [ [ -1, 1, Focus, [ 64, 3 ] ], # 0-P1/2 - [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 - [ -1, 3, C3, [ 128 ] ], - [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 - [ -1, 9, C3, [ 256 ] ], - [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 - [ -1, 9, C3, [ 512 ] ], - [ -1, 1, Conv, [ 768, 3, 2 ] ], # 7-P5/32 - [ -1, 3, C3, [ 768 ] ], - [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 9-P6/64 - [ -1, 1, SPP, [ 1024, [ 3, 5, 7 ] ] ], - [ -1, 3, C3, [ 1024, False ] ], # 11 + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 9, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 + [-1, 3, C3, [768]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 + [-1, 1, SPP, [1024, [3, 5, 7]]], + [-1, 3, C3, [1024, False]], # 11 ] # YOLOv5 head head: - [ [ -1, 1, Conv, [ 768, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 8 ], 1, Concat, [ 1 ] ], # cat backbone P5 - [ -1, 3, C3, [ 768, False ] ], # 15 - - [ -1, 1, Conv, [ 512, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 - [ -1, 3, C3, [ 512, False ] ], # 19 - - [ -1, 1, Conv, [ 256, 1, 1 ] ], - [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], - [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 - [ -1, 3, C3, [ 256, False ] ], # 23 (P3/8-small) - - [ -1, 1, Conv, [ 256, 3, 2 ] ], - [ [ -1, 20 ], 1, Concat, [ 1 ] ], # cat head P4 - [ -1, 3, C3, [ 512, False ] ], # 26 (P4/16-medium) - - [ -1, 1, Conv, [ 512, 3, 2 ] ], - [ [ -1, 16 ], 1, Concat, [ 1 ] ], # cat head P5 - [ -1, 3, C3, [ 
768, False ] ], # 29 (P5/32-large) - - [ -1, 1, Conv, [ 768, 3, 2 ] ], - [ [ -1, 12 ], 1, Concat, [ 1 ] ], # cat head P6 - [ -1, 3, C3, [ 1024, False ] ], # 32 (P6/64-xlarge) - - [ [ 23, 26, 29, 32 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4, P5, P6) + [[-1, 1, Conv, [768, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P5 + [-1, 3, C3, [768, False]], # 15 + + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 19 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 23 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 20], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 26 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 16], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [768, False]], # 29 (P5/32-large) + + [-1, 1, Conv, [768, 3, 2]], + [[-1, 12], 1, Concat, [1]], # cat head P6 + [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) + + [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) ] diff --git a/train.py b/train.py index 7a8c15a6551a..3f5b5ed1195b 100644 --- a/train.py +++ b/train.py @@ -74,7 +74,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary with open(save_dir / 'opt.yaml', 'w') as f: yaml.safe_dump(vars(opt), f, sort_keys=False) data_dict = None - + # Loggers if RANK in [-1, 0]: loggers = Loggers(save_dir, weights, opt, hyp, LOGGER).start() # loggers dict @@ -83,7 +83,6 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary if resume: weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp - # Config plots = not evolve # create plots cuda = device.type != 'cpu' @@ -96,7 +95,6 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary assert len(names) == nc, f'{len(names)} names found for nc={nc} dataset in {data}' # check is_coco = data.endswith('coco.yaml') and nc == 80 # COCO dataset - # Model pretrained = weights.endswith('.pt') if pretrained: diff --git a/utils/downloads.py b/utils/downloads.py index 00156962380b..588db5170e0e 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -115,7 +115,6 @@ def get_token(cookie="./cookie"): return line.split()[-1] return "" - # Google utils: https://cloud.google.com/storage/docs/reference/libraries ---------------------------------------------- # # diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 603837d57052..06d562d60f99 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -1,7 +1,8 @@ # YOLOv5 experiment logging utils -import torch import warnings from threading import Thread + +import torch from torch.utils.tensorboard import SummaryWriter from utils.general import colorstr, emojis diff --git a/utils/loggers/wandb/log_dataset.py b/utils/loggers/wandb/log_dataset.py index 1328e20806ef..8447272cdb48 100644 --- a/utils/loggers/wandb/log_dataset.py +++ b/utils/loggers/wandb/log_dataset.py @@ -1,5 +1,4 @@ import argparse -import yaml from wandb_utils import WandbLogger diff --git a/utils/loggers/wandb/sweep.py b/utils/loggers/wandb/sweep.py index a0c76a10caa1..8e952d03c085 100644 --- a/utils/loggers/wandb/sweep.py +++ b/utils/loggers/wandb/sweep.py @@ -1,7 +1,8 @@ import sys -import wandb from pathlib import Path +import wandb + FILE = Path(__file__).absolute() sys.path.append(FILE.parents[2].as_posix()) # add utils/ to path diff --git a/utils/loggers/wandb/sweep.yaml 
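The import reshuffles in the logger hunks above and below all apply one convention: standard-library imports first, then a blank line, then third-party packages (the PEP 8 / isort grouping). The target layout, for reference:

import argparse
import sys
from pathlib import Path

import torch
import yaml
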
b/utils/loggers/wandb/sweep.yaml index dcc95264f8cd..c3727de82d4a 100644 --- a/utils/loggers/wandb/sweep.yaml +++ b/utils/loggers/wandb/sweep.yaml @@ -25,9 +25,9 @@ parameters: data: value: "data/coco128.yaml" batch_size: - values: [ 64 ] + values: [64] epochs: - values: [ 10 ] + values: [10] lr0: distribution: uniform diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index c978e3ea838d..66fa8f85ec4e 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -3,9 +3,10 @@ import logging import os import sys -import yaml from contextlib import contextmanager from pathlib import Path + +import yaml from tqdm import tqdm FILE = Path(__file__).absolute() diff --git a/val.py b/val.py index ee2287644b92..06b2501515b5 100644 --- a/val.py +++ b/val.py @@ -13,7 +13,6 @@ import numpy as np import torch -import yaml from tqdm import tqdm FILE = Path(__file__).absolute() From 7820614c40f307308492e28b74df8b6cd1c15437 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 29 Jul 2021 17:23:35 +0200 Subject: [PATCH 0446/1976] Add `@try_except` decorator (#4224) --- utils/general.py | 53 ++++++++++++++++++++++++++++-------------------- 1 file changed, 31 insertions(+), 22 deletions(-) diff --git a/utils/general.py b/utils/general.py index 6b00ddf2ff72..a414b391d24e 100755 --- a/utils/general.py +++ b/utils/general.py @@ -56,6 +56,17 @@ def __exit__(self, exc_type, exc_val, exc_tb): return True +def try_except(func): + # try-except function. Usage: @try_except decorator + def handler(*args, **kwargs): + try: + func(*args, **kwargs) + except Exception as e: + print(e) + + return handler + + def set_logging(rank=-1, verbose=True): logging.basicConfig( format="%(message)s", @@ -114,26 +125,25 @@ def check_online(): return False -def check_git_status(err_msg=', for updates see https://github.com/ultralytics/yolov5'): +@try_except +def check_git_status(): # Recommend 'git pull' if code is out of date + msg = ', for updates see https://github.com/ultralytics/yolov5' print(colorstr('github: '), end='') - try: - assert Path('.git').exists(), 'skipping check (not a git repository)' - assert not is_docker(), 'skipping check (Docker image)' - assert check_online(), 'skipping check (offline)' - - cmd = 'git fetch && git config --get remote.origin.url' - url = check_output(cmd, shell=True, timeout=5).decode().strip().rstrip('.git') # git fetch - branch = check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out - n = int(check_output(f'git rev-list {branch}..origin/master --count', shell=True)) # commits behind - if n > 0: - s = f"⚠️ WARNING: code is out of date by {n} commit{'s' * (n > 1)}. " \ - f"Use 'git pull' to update or 'git clone {url}' to download latest." - else: - s = f'up to date with {url} ✅' - print(emojis(s)) # emoji-safe - except Exception as e: - print(f'{e}{err_msg}') + assert Path('.git').exists(), 'skipping check (not a git repository)' + msg + assert not is_docker(), 'skipping check (Docker image)' + msg + assert check_online(), 'skipping check (offline)' + msg + + cmd = 'git fetch && git config --get remote.origin.url' + url = check_output(cmd, shell=True, timeout=5).decode().strip().rstrip('.git') # git fetch + branch = check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out + n = int(check_output(f'git rev-list {branch}..origin/master --count', shell=True)) # commits behind + if n > 0: + s = f"⚠️ WARNING: code is out of date by {n} commit{'s' * (n > 1)}. 
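The `try_except` decorator added above converts hard failures inside a check into printed messages: the wrapped call never raises, it just prints the exception text and returns None, so it suits functions run purely for their side effects. A small usage sketch, restating the decorator and applying it to a hypothetical check:

def try_except(func):
    # same pattern as the decorator added in utils/general.py above
    def handler(*args, **kwargs):
        try:
            func(*args, **kwargs)
        except Exception as e:
            print(e)
    return handler

@try_except
def check_demo():  # hypothetical check, for illustration only
    assert 2 + 2 == 5, 'demo check failed (printed, not raised)'

check_demo()            # -> demo check failed (printed, not raised)
print('still running')  # execution continues past the failed check
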
" \ + f"Use 'git pull' to update or 'git clone {url}' to download latest." + else: + s = f'up to date with {url} ✅' + print(emojis(s)) # emoji-safe def check_python(minimum='3.6.2'): @@ -148,15 +158,14 @@ def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=Fals assert result, f'{name}{minimum} required by YOLOv5, but {name}{current} is currently installed' +@try_except def check_requirements(requirements='requirements.txt', exclude=()): # Check installed dependencies meet requirements (pass *.txt file or list of packages) prefix = colorstr('red', 'bold', 'requirements:') check_python() # check python version if isinstance(requirements, (str, Path)): # requirements.txt file file = Path(requirements) - if not file.exists(): - print(f"{prefix} {file.resolve()} not found, check failed.") - return + assert file.exists(), f"{prefix} {file.resolve()} not found, check failed." requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(file.open()) if x.name not in exclude] else: # list or tuple of packages requirements = [x for x in requirements if x not in exclude] @@ -178,7 +187,7 @@ def check_requirements(requirements='requirements.txt', exclude=()): source = file.resolve() if 'file' in locals() else requirements s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \ f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n" - print(emojis(s)) # emoji-safe + print(emojis(s)) def check_img_size(img_size, s=32, floor=0): From c2c958c350407b630bbbf063cefbd64cea7d8c81 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 29 Jul 2021 17:29:39 +0200 Subject: [PATCH 0447/1976] Explicit `requirements.txt` location (#4225) --- train.py | 2 +- val.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/train.py b/train.py index 3f5b5ed1195b..250342acff18 100644 --- a/train.py +++ b/train.py @@ -451,7 +451,7 @@ def main(opt): if RANK in [-1, 0]: print(colorstr('train: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items())) check_git_status() - check_requirements(exclude=['thop']) + check_requirements(requirements=FILE.parent / 'requirements.txt', exclude=['thop']) # Resume if opt.resume and not check_wandb_resume(opt): # resume an interrupted run diff --git a/val.py b/val.py index 06b2501515b5..86439b1380dc 100644 --- a/val.py +++ b/val.py @@ -320,7 +320,7 @@ def parse_opt(): def main(opt): set_logging() print(colorstr('val: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items())) - check_requirements(exclude=('tensorboard', 'thop')) + check_requirements(requirements=FILE.parent / 'requirements.txt', exclude=('tensorboard', 'thop')) if opt.task in ('train', 'val', 'test'): # run normally run(**vars(opt)) From 18f6ba77cfbbf060a25d32a657629c2c1d419a49 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 30 Jul 2021 00:37:55 +0200 Subject: [PATCH 0448/1976] Suppress torch 1.9.0 max_pool2d() warning (#4227) --- models/common.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/models/common.py b/models/common.py index fc085e22b16b..24f02c2a584c 100644 --- a/models/common.py +++ b/models/common.py @@ -1,6 +1,7 @@ # YOLOv5 common modules import logging +import warnings from copy import copy from pathlib import Path @@ -158,7 +159,9 @@ def __init__(self, c1, c2, k=(5, 9, 13)): def forward(self, x): x = self.cv1(x) - return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1)) + with warnings.catch_warnings(): + warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() 
warning + return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1)) class Focus(nn.Module): From 083c13da45f02ebbf23eac535cfcdd4c3b2b9492 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 30 Jul 2021 00:58:14 +0200 Subject: [PATCH 0449/1976] Created using Colaboratory --- tutorial.ipynb | 257 +++++++++++++++++++++++++------------------------ 1 file changed, 130 insertions(+), 127 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 831735cc0830..3f3f73ad4443 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -16,7 +16,7 @@ "accelerator": "GPU", "widgets": { "application/vnd.jupyter.widget-state+json": { - "cef5e9351ca743bcba5febac0b096a30": { + "2e915d9016c846e095e382b6a02ee773": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "state": { @@ -28,15 +28,15 @@ "_view_count": null, "_view_module_version": "1.5.0", "box_style": "", - "layout": "IPY_MODEL_ec326c52378f4410920c328f221e0514", + "layout": "IPY_MODEL_cb7fc3a5c6cc4fde8d2c83e594a7c86e", "_model_module": "@jupyter-widgets/controls", "children": [ - "IPY_MODEL_83000c64a11c4ae8abd6f0ef2f108cef", - "IPY_MODEL_0f7899eb719f4a9c9852426551f97be9" + "IPY_MODEL_ac3edef4e3434f4587e6cbf8aa048770", + "IPY_MODEL_853ac234cc2a4236946fc516871e10eb" ] } }, - "ec326c52378f4410920c328f221e0514": { + "cb7fc3a5c6cc4fde8d2c83e594a7c86e": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -87,50 +87,50 @@ "left": null } }, - "83000c64a11c4ae8abd6f0ef2f108cef": { + "ac3edef4e3434f4587e6cbf8aa048770": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "state": { "_view_name": "ProgressView", - "style": "IPY_MODEL_886ac5b18b3c4c82bf15ad5055f1e17e", + "style": "IPY_MODEL_13842ca90c0047e584b8d68d99dad2b1", "_dom_classes": [], "description": "100%", "_model_name": "FloatProgressModel", "bar_style": "success", - "max": 819257867, + "max": 818322941, "_view_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", - "value": 819257867, + "value": 818322941, "_view_count": null, "_view_module_version": "1.5.0", "orientation": "horizontal", "min": 0, "description_tooltip": null, "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_4e67b3c3a49849c7a7ba28b7eec96e7a" + "layout": "IPY_MODEL_f454999c3a924c7bad0746fb453dec36" } }, - "0f7899eb719f4a9c9852426551f97be9": { + "853ac234cc2a4236946fc516871e10eb": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "state": { "_view_name": "HTMLView", - "style": "IPY_MODEL_62c3682ff1804571a483d46664533969", + "style": "IPY_MODEL_f94a7ca8c1f04761bf38fdc5f99664b8", "_dom_classes": [], "description": "", "_model_name": "HTMLModel", "placeholder": "​", "_view_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", - "value": " 781M/781M [00:12<00:00, 67.1MB/s]", + "value": " 780M/780M [03:59<00:00, 3.42MB/s]", "_view_count": null, "_view_module_version": "1.5.0", "description_tooltip": null, "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_599dda3b608b432393760b2ca4ae7c7d" + "layout": "IPY_MODEL_9da1a23b042c41618dd14b0e30aa7cbe" } }, - "886ac5b18b3c4c82bf15ad5055f1e17e": { + "13842ca90c0047e584b8d68d99dad2b1": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "state": { @@ -145,7 +145,7 @@ "_model_module": "@jupyter-widgets/controls" } }, - "4e67b3c3a49849c7a7ba28b7eec96e7a": { + "f454999c3a924c7bad0746fb453dec36": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -196,7 
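The `warnings.catch_warnings()` context in the models/common.py hunk above scopes the ignore filter to a single `forward` call, so the torch 1.9.0 max_pool2d() deprecation message is silenced without muting warnings process-wide. A minimal sketch of the same pattern, with a stand-in for the noisy call:

import warnings

def noisy():  # stand-in for the call that triggers the deprecation warning
    warnings.warn('pretend max_pool2d() warning')

with warnings.catch_warnings():
    warnings.simplefilter('ignore')  # ignored only inside this block
    noisy()                          # silent

noisy()  # warns normally again once the block exits
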
+196,7 @@ "left": null } }, - "62c3682ff1804571a483d46664533969": { + "f94a7ca8c1f04761bf38fdc5f99664b8": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "state": { @@ -210,7 +210,7 @@ "_model_module": "@jupyter-widgets/controls" } }, - "599dda3b608b432393760b2ca4ae7c7d": { + "9da1a23b042c41618dd14b0e30aa7cbe": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -261,7 +261,7 @@ "left": null } }, - "217ca488c82a4b7a80318b70887a556e": { + "6ff8a710ded44391a624dec5c460b771": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "state": { @@ -273,15 +273,15 @@ "_view_count": null, "_view_module_version": "1.5.0", "box_style": "", - "layout": "IPY_MODEL_4e63af16f1084ca98a6fa5a282f2a81e", + "layout": "IPY_MODEL_3c19729b51cd45d4848035da06e96ff8", "_model_module": "@jupyter-widgets/controls", "children": [ - "IPY_MODEL_49f4b3c7f6ff42b4b9132a8550e12186", - "IPY_MODEL_8ec9e1a4883245daaf029458ee09721f" + "IPY_MODEL_23b2f0ae3d46438c8de375987c77f580", + "IPY_MODEL_dd9498c321a9422da6faf17a0be026d4" ] } }, - "4e63af16f1084ca98a6fa5a282f2a81e": { + "3c19729b51cd45d4848035da06e96ff8": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -332,50 +332,50 @@ "left": null } }, - "49f4b3c7f6ff42b4b9132a8550e12186": { + "23b2f0ae3d46438c8de375987c77f580": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "state": { "_view_name": "ProgressView", - "style": "IPY_MODEL_9d3e775ee11e4cf4b587b64fbc3cc6f7", + "style": "IPY_MODEL_d8dda4b2ce864fd682e558b9a48f602e", "_dom_classes": [], "description": "100%", "_model_name": "FloatProgressModel", "bar_style": "success", - "max": 22091032, + "max": 6984509, "_view_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", - "value": 22091032, + "value": 6984509, "_view_count": null, "_view_module_version": "1.5.0", "orientation": "horizontal", "min": 0, "description_tooltip": null, "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_70f68a9a51ac46e6ab7e51fb4fc6bda3" + "layout": "IPY_MODEL_ff8151449e444a14869684212b9ab14e" } }, - "8ec9e1a4883245daaf029458ee09721f": { + "dd9498c321a9422da6faf17a0be026d4": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "state": { "_view_name": "HTMLView", - "style": "IPY_MODEL_fdb8ab377c114bc3b862ba76eb93cef7", + "style": "IPY_MODEL_0f84fe609bcf4aa9afdc32a8cf076909", "_dom_classes": [], "description": "", "_model_name": "HTMLModel", "placeholder": "​", "_view_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", - "value": " 21.1M/21.1M [00:36<00:00, 605kB/s]", + "value": " 6.66M/6.66M [00:01<00:00, 6.08MB/s]", "_view_count": null, "_view_module_version": "1.5.0", "description_tooltip": null, "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_cd267c153c244621a1f50706d2ddc897" + "layout": "IPY_MODEL_8fda673769984e2b928ef820d34c85c3" } }, - "9d3e775ee11e4cf4b587b64fbc3cc6f7": { + "d8dda4b2ce864fd682e558b9a48f602e": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "state": { @@ -390,7 +390,7 @@ "_model_module": "@jupyter-widgets/controls" } }, - "70f68a9a51ac46e6ab7e51fb4fc6bda3": { + "ff8151449e444a14869684212b9ab14e": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -441,7 +441,7 @@ "left": null } }, - "fdb8ab377c114bc3b862ba76eb93cef7": { + "0f84fe609bcf4aa9afdc32a8cf076909": { "model_module": "@jupyter-widgets/controls", 
"model_name": "DescriptionStyleModel", "state": { @@ -455,7 +455,7 @@ "_model_module": "@jupyter-widgets/controls" } }, - "cd267c153c244621a1f50706d2ddc897": { + "8fda673769984e2b928ef820d34c85c3": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "state": { @@ -551,7 +551,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "0cabe440-e06c-48b9-9180-4b4ea1790ff5" + "outputId": "ada1dd8d-e0aa-4858-e893-dc320319ca30" }, "source": [ "!git clone https://github.com/ultralytics/yolov5 # clone repo\n", @@ -564,12 +564,12 @@ "clear_output()\n", "print(f\"Setup complete. Using torch {torch.__version__} ({torch.cuda.get_device_properties(0).name if torch.cuda.is_available() else 'CPU'})\")" ], - "execution_count": null, + "execution_count": 1, "outputs": [ { "output_type": "stream", "text": [ - "Setup complete. Using torch 1.8.1+cu101 (Tesla V100-SXM2-16GB)\n" + "Setup complete. Using torch 1.9.0+cu102 (Tesla V100-SXM2-16GB)\n" ], "name": "stdout" } @@ -593,50 +593,43 @@ "metadata": { "id": "zR9ZbuQCH7FX", "colab": { - "base_uri": "https://localhost:8080/", - "height": 534 + "base_uri": "https://localhost:8080/" }, - "outputId": "c9a308f7-2216-4805-8003-eca8dd0dc30d" + "outputId": "a7a37616-a82b-4bdb-a463-6ead850b5615" }, "source": [ "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images/\n", "Image(filename='runs/detect/exp/zidane.jpg', width=600)" ], - "execution_count": null, + "execution_count": 9, "outputs": [ { "output_type": "stream", "text": [ - "Namespace(agnostic_nms=False, augment=False, classes=None, conf_thres=0.25, device='', exist_ok=False, img_size=640, iou_thres=0.45, name='exp', project='runs/detect', save_conf=False, save_txt=False, source='data/images/', update=False, view_img=False, weights=['yolov5s.pt'])\n", - "YOLOv5 🚀 v5.0-1-g0f395b3 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", + "\u001b[34m\u001b[1mdetect: \u001b[0mweights=['yolov5s.pt'], source=data/images/, imgsz=640, conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/detect, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False\n", + "YOLOv5 🚀 v5.0-330-g18f6ba7 torch 1.9.0+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", "\n", "Fusing layers... \n", - "Model Summary: 224 layers, 7266973 parameters, 0 gradients, 17.0 GFLOPs\n", - "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, Done. (0.008s)\n", + "Model Summary: 224 layers, 7266973 parameters, 0 gradients\n", + "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 1 fire hydrant, Done. (0.008s)\n", "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, Done. (0.008s)\n", "Results saved to runs/detect/exp\n", - "Done. (0.087)\n" + "Done. 
(0.091s)\n" ], "name": "stdout" - }, - { - "output_type": "execute_result", - "data": { - "image/jpeg": "/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAIBAQEBAQIBAQECAgICAgQDAgICAgUEBAMEBgUGBgYFBgYGBwkIBgcJBwYGCAsICQoKCgoKBggLDAsKDAkKCgr/2wBDAQICAgICAgUDAwUKBwYHCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgoKCgr/wAARCALQBQADASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD8347F5pkSP5t38P3ttaFjZzR2rzOMjfs+/wDNVi10+5kh877Gqv8AwfP96tOz0+2b99sw0e1drfxV87HY+wjHm94z4bOZ2WZ4dgV9vzN81Tx6a8jHvu+bd/DV+HT51uHd0Up95Pl21bhtfIkH2ncqfN8q/e21NS0dUbU4/ZMf7Oi52OzMu1UVU+an/wBjlW3w7l2t8y/3q3pNPRl2I+1tn/AqZZ280cXk3Nrub+7v+6tefKtLl5onZGm48qMqbQ3k/wBJeb5lb5PMf5l/2aZcaW6tshhyzffZn3ba3biHzI5USFfmX7tQyWc3zTXltuWPb+8jT+LbXJWxVWO534XDxkchrmm/KZt+d3yvurBm0maHLvu2su1G/vV3OsWsMe5xyWTd5bVh3VikkLJ5Pyqu7b/easaNacX7x6nsYyicrJYws3nom1m/vf3qWC3uYW32zr8v95v/AEGtK6s5I9iJuDMu51aq62827502Nt3Jur6zAylKUTlqREj+0wsiI7OzNuRW/wBr+7ViSPy4/wBzud9+1vm+Wq0aurIJtxdf4qtLayeX8nyusu5mb+KvqMPSlKJ58qnvco65uHaNpvlTdt2fJ8y0kjSbER3Vtq7tzJtqbyPtDLDNtx96nTKjR/Ii7t38X3a9D2fKebUkoy5SHyXjnP75l/i/3amSSVm+0v5joqbfv/Ky/wB6i3/fRrv+9911j+6rUsMMuxvJufu/fXZXPKXLE4OaUuaxPBv3b9n+r/hjl3LVqH9zJ/qV2t823/eqtbwpHGkP+qVn+dY/l/4FVuzZLqRI5plV13b12fdX+GvLxHvF04825p2cm1Ucopdvl+V9taVvDcSSK6fd+ZXrN0+GGS637F+V1aXd/d/hq7b75mX51Db9zMr/AC/7Py14WIqSNadHuaVjNLJCsP2pmTfuddvzNU8jO3yQ7X2/e/iaq8IeGNPLRW+bbu2fdq95n2OZXhhV2b5V3V4dap7+h6VOnHqWob792yI6o6orfLVCZJpPnudrBf4v97+KpmuIWmDzTKsrfdXft+7VCS5dpmR5o3/vq392uJSjztQOlx928hzbIZXSFFLs7fMqf6yopmubzY63jIVb7qrU32OGSP8AhRPveXHSyKluy/J975VXf/FWkqnNqLk5fdEntdy/3vl2eZs/76pU3yQyJsYeX8if3lqwsE0iy2zzfuvl/d/7VVr6O6WTf8yfe/d7/u1n71TRSMK0R8d1cxwrvRQv3dzfdWoprp75hNc3cjtHtSLzG+61OaGaS3RJnV1+88bVVkkRlKWtthlf+GspRhKRjH3Y8rKuoXtvHteN8qy7X/vVga9cXisrpcthkVfm/u1pXk00zAu+R/d/utWDq14+5n342/6rav3a78PFRj8JyVqhj6lM/wC8+8f/AB3dXManN82/fjd/CtdBqW+4bM0/Gzc1Yd48Pls/Vm+Xb/FXsUYy5NDxsVLmiYF9avt+07F21QVXmuNmzb/utW9cWbyR56hVqnHp7rMJvJ8xK9CnKMeU82T5hljlWZE3fN9//ZrodI3x7ntn+Rk2srfM1V9N03bGOdu7/wAdrVhs4I5BGiMk0f8ADJ8tEqhrToz+I1NLtUinR9+fLf5F/wDsa7bQZnjwibU2/N+7X5VrjdH/AHKxBE3f367TRZE+x7E2/wB1dv3mqo1PfOj2fuWOu0W4k+ziF5sOzfxfw11ui6uNyu6Mrqu1/Mfb8v8As1wWk3KOuy28xVVvnb+7W/puqQxsU3/eiVmj+9XZGpzmMoyj8R3Wn6kQN8Myh1f/AEfb93/eatXT9am8ve+1vvbmrgrHWd0iXOcFfl3L/F/wGtCHxB5K+d8wSR9qKq/M3/Aa6OYw9+J2q69C3zpZttX5Ub+9/vUybV4IYd+//WbtzL/CtcqutbYf3fmHc+1/mqvcawk3ybJCu/b9/wC9U/DAfunT/wBtusCv0/2d/wDDWbqGuosbO8jEt91tvystYN9q226ldH2xtt8qNX3f8B3VVvtUm2l3TLsnzLu/i/hqJRjI25vslPxRNDdZm85iv3fLb+GuMvJ3dXR/uK23/erW1PVHuomQXLFpJfkZvur/ALNZGqQ/aFb5G+V/3sa1x1I8x0UeaOjOa1SG2ml85Pv/AMO5vlWqtvbupYOmPLf5d3yturcbTkjdt6Mxb/lm38NQXWnpJcM8iSO38Un8K1nKn7p2RqQ5tTPWFJpD5czIn97726mTWVzIHfez+Z/yz/vVZa1eSTZDCqqqNu+fbSLYwzRuXhxufd9/71cNSnI0lUM2SN1CwpMuyT5tv/stJbxurI/nL+8ba0cn92tXybaOSHyYfuxbtrN8v3qq3Eltu+0+T86tt+VK5q1P3tCoVOXWRbtWdcoltv2tu2t8u6uj01na3TZuAVt27+61YNu7s0jzbWlb5U/hrQ0+aGObzo3bzl+X7/y7q+Ox1GXNKTPewtT4ZI7LT2T/AFM03mt8q7v4a0WuvLUI+6H5v9Wvzbv+BVzVnfTeSH/55q25d/3m/wBmp/7UdpI+Nqt8rbWr5DEYeUqp9DRrfDzG5cXySsN9zuVot6qybvu1m3mpRrD5iO0KSRbvlf5aqSal8zbNuPm2/J8q1Uk1QSM73K
9x84+eNh/sp8y1z6373EiI6Kr7fmaN6sW00KyK6bi2zduauOfNy+Z0x930Or02b95++mX5f71acepTKzBEXfs3Purn9LbanD7y339yfdrUhZP8Alt137t1cXNzTvI6YxlGBuWt8hhUIJF3fL5i/w/7tWVmSaNfJRti/Ju8373+1WXayJH8+xTtXbEy7mbbV2Jk2Lvs2H8KfPt3V1UafNL3iKlSMYmxpLNMqTecrPHtX5vvfLXXaDbpMrv5Ledv+Xb/dauP01vmVEhVNz/Kyp8tdt4fXzpPIRGR2Vd/l17mFp+6eFiqkoysdDo+mvIzb3b5flZl+7XQ6TpO1o5tmxGXd/stVHQbXdbxecmGh/wCeb10liqM4f92rr8u1Xr1KcYx+E4alTlLuk6D8rPDtQSfNu2LXS6X4f+yyKfJWUtF8q7/u03w3Y+XGyPDH8q/LG33mrqNL02e4bzo4VRVXay10RjGJyc5FpOkww42Qq+77m193zfxVvQ+H08xPJtlZ9m6Jn+XbV7S4YYlR0h3SL/dX5m/vVqRqyjyfJkD+V/q2Sq5UHtOU5280VPtD749n975K5/XtP8m8EM3y+d823+9XeXGzy1eZGd9jb/7tcxrlrbQyhNjFNu7c33aj7ZpzcxyOqWaQ7vOudqeVv/2v92uW1eOGa4a88r5FT91I33v+BV2Wp2brIiJzDI/zyKtcv4gjdZzs3bpPlZm+78tLlluddOfLM4XXo0mh+SHarJ/F8u6vM/G0dtaxvNawqV835tteo641tcb3vHbEbN833a8o8YL/AKU6u6tu/h+6tTLk5Top1JnmnjCOb7P+7mjHz7vl+9trhtek3bofP2oqbv8Aers/FVwlx5mx/nVG3bfmrzHxRdP5hQPzXJyzqG0sRFR94yNU1b7+z7y/L8tZNxq0c0b3Jm2Nv27t1VtavX3702/7y1h3GpPu2O/3f71Vy/ynDWxHtInW22uBdohfa6/Nu/hqZta3N+7fb/7NXFQas67k3/K38TVet9Q8xvlm2bfm+ap9nAx9tI6ttY2YdH3Fko+2Jw6df49rVzK6kYlb+PbUi3ySS/I7f3qIx5ZGUqkpe6dPb3m2RpoX/eL8rbv4a0bfUtzJdO67l/4FXI2eqQtHu3/8C/2qu2epTSOux9n+y3y7q2jH3zM73T9akVkm+8v93bWo2tedHvFyzL937u3a1cTY6g4VUmf52+atG31abDJu2p/erqicnxHZ2/iF5sbPLYqvzM33VqVfESBm2ffX76/3f9quTh1CaPbDH/wGRfu1ajZPl+zTbG/iX+8ta+hh7M7bS9YmkiWGafcjLt2r8rf71bVnrT7kh3yP/F/eX/ZrhrGbzFV3vG37Nrs33VrbsY7xIUe2m83c6szMv8NcdSpynVRpx6Hc2erQx2/2lOX/AIlX5ttWl1yaSETfM/z7f97dXLWN1cw/8eyfeTczL97/AL5rQtdShjhDo+2H+Nm/vV5WIqcsbnsYWjzStymxeeIEjbyYd22Ph2/iqncaw9rHvebCyP8Adb+9WU155032lHXZuZdv96oWZJLN7ab5BG3ybf71ePWxHNE97D4X3jT/ALSe8z9pj2+X8sSr91v96oG1SaTb5235f73/ACz/APiqprdO0QdEXYr/AL1f9mmTXfmMsMPzLs+RtlcVOod0aMfsj5Lp7iGWZ3b5k2/LWFeN5jeT80W7+983zVozXFyI2tt6hG+4zfxVkXjfudm9lZvl3f7Nb06ntNEYVKMY7mdqW+H9z23t8rP91ayLqO5uPkSFmRf4f4mrQuriFoU3fvfn2/NVW4vljk8mGHyjt2tu/vV2c0oxujk5feM37DuZkRMNt2/MlW7PiH/x35aYs26Yp8su5fuq3zLUsLPb3C/Pt2/cqJT5dB06ZetVh3ryq/71aGn/ALuVd/3VrJ3PJIyTJ93/AMdrY0G8Ty/JfzpRHu3tIn3qIRlH3jWUfaaHUaDcJ5ao6MzK27cv3a6zQbyQTNInzO33F2/LXD6bcJGyI74Vfm/2q6Wx1J9ypvkEvy7GjZfu162Hqe8eVWp+7ynfaPJbfZ2dHZ9z7Uh83asf+1W7pupTRstz529lT7rfxVw2lal5kZmd2V12/u2Td/wGtnT7ry1KJMu35v8AZr0adQ8ypR5TsLXVPMs/9c0e19rrIv8AF/s1r2uqXkEYvLOZndfllVotqrXFQ6nthE/ys0fysrPu2/71Os9amaYyC55+VWZq9GnLuedWp8x6FD4lhgaJIXYFlZvlT71Nj8T/AGj9yjr+8f8Ah/iWuGbxI9q7wWz/AC/xs3/oK1HJ4shUqnnMiL9zbXbHklsefKMIzO3m1yFrf7TC7AfMPJ/2qwdY1wrCu/gs3+sV/mrm5vFO3c9n8vz7XaSX5VrMk8UwtDKlzMvnK21l/haoqR5dCffl7xs3esJIX2bnC7d395v9qsbWdYe32b3XzP8Alrtf5dtYN14qgt3e2S5jV2/h/irBvvEk1xGzb1VY1+dt1cdSPNLQ6acvdNnWvEENnHNM8ykxv/C33q4fxDrk0k0ib/l37kkb5t26q+teKkkDvNt3t8zMtcnrHiItN5KBdn3k+b5lrgqfynZHlkbtxrVtZw7/ALTvZv8Almv/ALLWVeat5jApt+Z6xJtS3SI/nZX+61Vprh3kZ4Xj/eN97dXBUj7T4jqjI6i3u8Ks29Szf3flq3a6lJH8+/Yfvbv7y1ydvqEyxKnk7vn+T5qnk1Sa2yjzbdq7vm+9urilT5Ye6ehTqR925uyaqk0b/eZmfa275flqS31NGXzkto/m/hX/ANmrn21iZtuyZU8z5k3U+HWHaSN0Ta6/e3PtVq55VJcvwndTlyy1OvtdQmkX54VTc/zMv3q0oLy2jkV0Tdt/4FXK2OuQ+YryXK7Gf/V7Pmq6usYjeTYqqv3W/vVEeaR1HQxyJ5kSIjH5fut/dq/tcXSbPubPu7/mVv71YdnqXmbd7rhU+dmq/Y6pDcXH2YoyMy/I237vy0hx97c1Ifs3lt5jxjci7Fk+arUMHkqIUdX2oyuuz7v+7WRbt5eyH5Svyt8y7q1GZII9jo21fmb5/l/2adT3fhJjT5viGyabCuy5+YfPu2s9ej6AGT4akO4bFlP8wHXl686mmS6YpDuRFT978/3a9G8PRmP4bCNn2EWU3zD+H738q/a/A6bedZhf/oFqf+lQPo+HKThiattuR/mjzMxww2ryeTMrN/DUN1H50ex3b5fl3L8ys1XZLia8t4kTcwVdvmN8u3/aqhdM/l+RDuVF+ZGV/wDvqv59qVpVDmo4eFMxL63eRv30zBI/l2r81ZV3G8dw/G7y/wDVbf8A2auhmjSGR7v767dvy/xVg6pbTNDvcKCzLuX/AGv4amNSPNytnpU8PzR+E57ULiVpG+RYgrsrN/s1iXk3kuFd1y38X8NdBqNu+2RCnz7sfN/DXO6pGjcPtTb8u7+9XfRlEKmEjuYmsXDyKbeHais/zsz/AHqxNQ3sp3uuV+Xd/DWtqkfyKUm4VtqN/drFvmi2jf8AM/3Xb+GvUo+8eTisHLm9
0x71tqt5Nt8i/K7R1m3ioU/2v4a1Zmdd+zbsZfu1lzInmb3/AIW+7XqU4njVcHyyE02HazeTtb5/u11Gg27tlHeRtqfJ8n3VrG0+F4nDui/c+bbXU+H7fdGzvMxGz+5XVGXvExwvvm7o1v5Koj/OW+589dvoduk0LPs27fvqtc34ft5gqOm0bf4tldvodq7bOF3f7X/oVRWl7vMethaMZGrpdhbeXGnaSLdtZPmWun0nS5oWbeiqJk/1n/stZWiqjXCQ2z/Oy7Xb73y11Oh2VzGzpM7OVddjL/drhlzSPSw+Hjct2tn5cn2beu3dt8tX+Xd/vV9A/sg+IpPBPjCwv4YftU32jbFH91Y/9qvErK1s7gpNbQ5Lf3v/AEKvU/2fY9SXxdbPYbXXzVXc0TbVbctZVPdIzTD82Dkj9UPhb4f8K+A/BOr+P9Kud2qas8lxdXDRbVVW/hX+81dp4W8Za94f8Kvq/wBsbdJaqkULL821l+Zq8+0rxtrGj+C49N8SQ27XF49vE7bf3axt97av92qnib4gWzalcaPol/G6R3HleTC3zR/LXm1KnN7p+Zxp/vfeK/iD46P4D1aPUtEg8/WFuvN+2TS/u7eNV/55/wATV8RftLftA+IfiVql/wCNpr+adpNSkuPMkb/Wfw/NXrfxz1y8s7/WrmbaIbO1ZUXzf9Y235trV8m/EC8/taOysIYmjT/WxKv3dteZi4Q155H0WT041KsTJh1vxPfTPcpcyFW+Z1Z/u7v4Vrfsbi5Wa33wrujRv3itVLw/Y+Zbx22z5P8AZT5lrtPA/gua41KG2s7NZY2fc3mfe/4DXzlTEUuX3T9Fp058nvGF4ivtV1ZTYWGlSNbzNueSH5mrn1+Cfxa8SWKXL6lHo9lv+S6vFZfOX+Kvqe88E/C74b+BZviR8QtTjsbGxTfLH95rhv4Y468d8YfGLxb8WNNi8W63pVn4c8G287JYfal23N5H/u1vl+IpRjKLPPxWFlTfMtDw/wAVfDHTdDhTSrP4/XE100StKrRMqszN/wCg/wC1UvgnwX480WZI/DHxRsZEkb5FuL1o2Zv91mrK8dfED4Pw3kzw6VCgVtssi3DeZIv/ALLWDfeOPhXqlu6aR5ltNIny/vd22niPZyj7pphZOnLnke9Wev8AxR8M3STeJPDbXMcLqzSWvzbl/ibdW/q2peG/iF4Ru5nsLO4h3Ku26i2yx/8AfVeM/CX49X+k3iaVN4umu2hiVU+1bV/75r0Tw78YvB+rR3OiaxZQzxzS7kuF+Vl/vV5EuenP3T6KjUo1qRt+H/hn4Y/sWGawS4tfLf8AdLDtkRmr0j4f6TqXw71C21Kw1u4htm2rtWL5mZmrmNB0XwfcWkL+FdSurdJG/wBSs+75v4vl/u133ibxI+j+HdNsrnxDH5TXmyJVt/m+7/eqJS5pe8VGPs5H0R4T8RaDqnh1LbU7m6kuIVVYlkl/9Crs/h7oqX2oRzL9lCTPuX5l3Kv+1Xgnw50PStWtFv57+a4juIt3+tZdzV7f+zvofh24m87zt6Lu3NcS/M1VCM5T5UKtyRpSPpTwCulWumo14kburfu/n21va9rN3b2EjWc21JP+en8NcXpzeDZzHZwTW6vH8rLHPUXiH+1LGxl/sLU/MZdxSO4fcrf7Ne1KoqMOU+Lng4V8Xzv8SbS9VFzqkqxPld+167f4aWNtqGp3KXNvG6KnyqrfxV5HpWpXKyGa8mjhmX5pVr0v4ReJHW7CfKyzN8zLXHg8VGGIi5bcw8/wMlg3ym3400NLXSp7ZBny923d/Etfmz/wVA+I2n/C34d6lZ+IZpmsdeiazg8n5v8AWfLu/wCA/er9O/ia00OiPc20G9/KZdq/3dtfih/wWy+MP9vXkHwom01ZraGy3/ao22yLN5n3f++a+wVH967S0PzKNP2mIUT8tPE3h1NF1ibR7O586GJ9qXUn3pFrNm092KwyP89dVfaLtuij3O/y/m/vMv8As1VTS4VhV9nyr8u5V/hrr9ty+6fVUaMYwOWmtfL/ANGSFtzVRl0lNjJ8qjf8q118mj7ZGMyMyN8u7Z92qs2kl5CnkqqR/KlEanu2iP2PMchcQzQ/uUdSG+4rLVC8sX5eNNy/3WrrbrS3+1fP8rbN3l7Pu1l6lp8Z+4ny/eaumnU+E5JYeMeaTOJ1axRN2+FW/wBqsK4s8A7U+b/arttYsbYLsfbvZG3LXMahawqzp8wVf4v71dlORw1Iw6mDffd2Oiqf4WWvrP8A4Jehho3jLOcfabHbn/dnr5Ru49y79n3m27v4q+sP+CYMaxaT4zjXtdWX/oM9fqng+78f4T0qf+m5nt8Ke7n9Jf4v/SWfPn7SEu39oDxlC/3f+Eju2/8AIhrk7dkWVPn+9XW/tGr/AMX/APGZ+U/8VHdcf9tGrj7WRFk8z5T/AHK+OzyX/C5iv+vk/wD0pnjY6P8AttX/ABS/NnQafJuby3/hX7396tfTpN2Pk3L/ALX3a5eGTlXP3d+7733q2tNm8sId/wAyvury/fOXlOmt7j5n/fMh+9WrY3TsvnI6qW/hb+9XLw3Dqzl3+992tnT7h1Yfdx91V2VEveM5ROnhk85Vm2fw7W21L8kkPyJn/gf3ayLe8RlZ8MqL99quxzIy7IX2bl3bquMebYwlEsq0fmb5vl3fKn8NTKyeSsKbt0f3t3zbqq28k3k75Pnf7u5fmp8Mjsw/fcbNv+9/tVfKjD+6Pnt8Rp++VdqNs3fd/wB2sq7jcR7Plx975m+9WjeXkyv/AK5cL95WrMvl3Tb97Z/8dVaZZn3TJIwRE+RU/wC+qzrqNI5vItk+9/47WrqEh8z53+X/AGU+asm6fd9//Wt/tfw1MTaJja0tyu1PlZN/97c1c9qW9fuD5mf/AJaV0GoO8at/C/3vmT5WrA1SHG15H/4DUSiax2Pari186RoU+9/tf3qg2/u40d/+BKlaq2e64OxGI2feanSeTEvzp87fKrbP4q+Fl7x/QGDqc0DFa3RXXenzSfN5ar96mfZUmUeSkn+xWpbxzI3+kpG3/jzbabfWPlvvSHaqpuT+Go5oxNvaMxWt03vC+5N3/j1QSQvtVf4FbanzVpXlnub+L/gNVGXb9+Flb7zV0KVzkxlSHIVZLjbuR5MMzbWoWYxt5P2fcNv3mf5ahulhhbek25arxuVkd0dXX727+7/s120o8stT4zMKnMbtlcedMm+ZlVU+6v3a0obx5Lcwo+z/AGv4q5zTpH2h+7VrWbPtP2bbu/jZmreEYx1Z8/KU+c6TSbiZpGT5U/hRm/8AQquXH+sfzkZDsVU/76rCTVtqi2mSN9vyrtq9HqFzND52xVH3fv1EZT5tRSlzaF+ZYcv8izM33tq/d21R+yQzLK6blX7qbX+9RNqDxw/u3yGfbtVvu02PyZ1Z0fay/dp+/wDEEteUGWPc0m9i6/eWRf4qpNHbNvuX+dd/y/PuqzfM6yLNJNv3fxLUK2/ys/kqC38LfLSjKQL3vhK0d9tVoXRVZX3fMtaFnfIwZ3hVf4fMX+KqsduJ2Uum7cv
3d33amk8yFRGj8fe2rRKMZe6VHnOj024eRf3KeYzJ/e21prcfZ1PkupZv4f4VrmNPuts0saQswVdz7v4a2bPY0aB5m2x/fb+Kud0ff1Or2nuWOhs7uGE+ciMq/d+WtNLtJFSzhRnK/Mm2sOxWa4+REUo0W5Pl+Zfmrd0+WzjiCJtfb8y7f4a6adLlncxnJVIWia+myTKrTTJzvVV2r92uv8M3lsu/fufcm3c1clpW+OL/AFO91Tckjfdaun8NyfaNs0yNEq/wqv3q9zC0z57Ec3OegaDM9zCNjxptbdKq/dauq0G1tm3XO9pIm+bbsVVWuF0uZLVY4fIWNGXcit8zbv4a67RrqZpbe58za33mXf8Aeb/dr0adP7RwSkejeH4/O8p0f7ybUX+9XZabawsEtk+QfK21V+9XE+H7jy1T54ztbc6xt8q13nh9vtEf77yzKyqrtH8zL/8AE1rGPLEn/CdBo9n5Nr5Lopdmb7v3dtW5l248ubLKn8TfNVeGa8tYV+fP+z95d1WZCjKX8jY2z96zJ96o5oyKjEqagqR7kd1y23Zt+61cz4gjS0j2XLspaXcvy/dauj1DZHH5z+Yd3zRR7fu1jaozRw/ImWaLd83zbaxlLlNIxOV1hUuml/crCm75FZf8/NXH68sMgEqc/e2fNXXa1CGuDC7+YN3mbo/vfd+WuN8QSQwq5d+d7SfMm3d/wKsuc3jznn3jC68uGaZ9vlbN21fvLXk/jpvMjaG2RVRdzRN/Ev8AwKvVPFU3mJvtodu5PnVfutXl3jC3hjjbZbMsvzebub5WqObqzX4fhPKvFKeTI6JuzIm5F2/erzDxRCity+CvzOuz7tekeKP3kjoiMpX5X3NXm3iCP/Xec+7bu2L/ABUub+Uzqe8cF4imdY5dm37/AN5a5ua4/fbH53J81dF4hWZf+WON392uUvF+zsfm3v8A3a2jyyOWUeUtW93tYo6ZVa0bebdH8j7f/Zq52GTdJ5KDFaNvePGqoNu3b/FWkokRkasF4jbod7K3+zUkd47Mdj/ef7y/3aoQ3Wfvj71WM7vuO3y7ay/xBzSNKxutuzyvm+Xa6slalnIkapM6K53/AHawre5RWWXeq7vlT+9WnYs8f39vzP8AIy1UdiZRN7TbjyFMexfu/wATVpWO+SPjcu59y7v4ax9PWFtvnD73yu1a9nN9onXzplfd8v8Ad+7R7QI0zTt5PM+ffkL/AA/3qvWtr50/nPwsf8Tf+y1Rs4xt2JuRa07ePZIg6IrfLTlW0kyo0zW09Zljih/5Zt8u7+KtuwaaFETep2/3X/8AQqx7P95H8j5WP5XVavxzMg8lHX5trLuavNqVvtHdRw/MblvqEy3SzB13r827ftq39qRd3nP5r/wL/drFVnXZNGy/3fl+ZqveZNMpeEL8rqqM38S15OKqHvYOjJF9Lj5Yk2bVb5tzJ/DTbiTbIkKbmf5m2qvy1VhZ23o42Ps+Vd9NW6muFD79zsu568mpyyqns048seUluZpo23w+WnmIvzbvlaqlxff6O80M+/dx9yo9QYMv8OGX7yp/FWfcaj5sapZwNt/ij3fd/wBqnTlt5DlHlkTtqj/wJhVX7zJVeSRHs9iOzbU+XdUdzsWZEh2/7W56q3kkyqzzI2FX5mX5q6Y8vN7phKPNEgmk2ugmfL/88/4apX0cM0ju7thf++d1TeT9oVXhmb5fmRv4qrXVi8dwqbGK/eZa6pc3wnHKn7pRk7TO7JtfY9WIZJlVkMzMq/Lu/vVNeWLzLv2bAv8ACyUn2eaPYibfv/3flo5eaMSffiTWMKK3yPv+fc6ru+b/AHq0bVbZvkR2iVf4W/vf71Z/lzW7s8PG75dy/dq3ayIbcJ50gl+ZtzfdpKXKXGJtaTqDr883+sVPl/4FW9p1x5OxJpm3Sf3U+bbXKW801ufkm+ZV3Ju/irSt9QS4eCZEkRP41V/vV3U5cxzSox+I7bS9XfSZUSZFZZHZfm/hrZXW9yqyXK5k/wBn+GvOYfESMwSdGYq/7rav3a0ofGEzf6HNMpVfuts+bdXo0ZHnYinGUTvl162WfZbTSEyf7P3qjbWHjjd5h8iy7dq/3a4638SJNImybDLu3Nt+VaZN4mRV/fTfLub5d33mr0acjyalM7bUNYeaPZZ367PvbVrNm8Qf6QzunyR7d9csviHTWmVH3Kjfek3f+y1BJ4ieJtqFZWm3N8v93+HdXRGpy7SOaWH5jevPEkPmTfvt25/733VrNv8AxNczKUebZ/FE2z/0KuevPEXzMjxxmWP+6/3qwtQ8SIv7nfIYv7q0qlaPQy+r8pval4qfZvR1DL8zrs+asjVPEUfktbQzKzb/AJ1j/hrmNQ8SbY9jupVW2uzVlXWseWrQo+3/AHXrmlUlzGscPy7Gtq3iCWNfJedt395U/wDHaw7jUNzMiTKq7/kXfuqpNqTyYTf8y/wtVSaZ4/nwu1nrlrSlI19lylxb658sskisW+b/AHqkjmfzN/nR/L8ybvu1kLeOkbTed8v+zUU2qOzDY/LcNtrm+L3S/hOguNSmZVm+X+79+j+0pto+dUaT+FqwZL4zL8/y7futUkdxN5i/6tv9pvvNXNKX2TpjLlNuPUnkkV5E+RV+epmuvNY7IcbV/ibdurGjvEkUQvDz/vbamt7r5UT7399t9YyjynoU6kZG3DqFyjJ8qsi/N5n/ALLWvp+qPt2bMf3JK5iNfm3puI+8vzVbsL7azb5m+b7sa/w1zSjJHZGR2djqD7t6TfKq/MqtXQaPO6wi6d/vRbdzfxVxWj6nuZUd8MrfJ8m6ug03UvO3Q/ZlO197/wANYylM2p0+Y6K1u/Lj+R2xG3ybvmatGO8fzF+T5Gfb83/oVY9ncJfND88cUknysv8Atf71X4ZPMZIXRW+f5v8AerHm5TaEZy+Iv+ZNuTftfy/vN/Fur0zw65f4Y+YV2E2Mxwe3368sfEjRI/yv/ufLXqnhlU/4Vqixn5TZTY/8er9s8C23nuY3/wCgSp/6VTPqMij+/m/7r/NHnt5H+5itn+5Ii7v7tZ8kMyzSp5KhI22r/d21qSWb3CnenCvuTb/6DULbPOh3w/Kr7Zdz1/PkqnN9o1o4X+6Y95buWNtC+xmTzfl+6y1i6hIixu6bR91nWN/vNXRX1u7M+xNyK21v9paytYVIdsH2Zl2oyp5f3v8Adp0/dPVp0/7pyGuRXMzffkVY2+Zvu/N/7NXO6sfLb7NMjJt/irq9Us90n2l3ztXYism5WrmdS86Ni820p/drvo8vxGv1X3DmL3ZIzIgVl3bflrJuo+S/3Ntb2rQpG2P4W+Xy1rKns3DSfuWAVflr2cPLljc4K2D5YnO3lmlwz7Jtzbty/wC7VWOw/wBKeR/+AVrzWe5m/c/w/dp32fy4N8m3/YbZXfGpKMeU8epg483NIi0+zSZvkTCr9/d/FXS6OszRjeioW+VmWsaxhdFX523b/wC58tdFpMflrsRN27/b/hraNTkMJYXlOi8OxoNls+4r/Ht/irudFt/tHluiMzxoq7WX7v8Au1xuhyOrIy
QqhrufDLPJInd2/vfKtVKXNE2o0Yxl7vU6fQbONVLwwySvHFuVY9q11ulRzeVbzIm3+J1b+GsHQVSSNIYfLZV3bpN38Vdbodm81uj+Su5fmfa+7dWTX2melTpxL+k28zWvz229G+b5f4f92vVf2f2ez8XWSeR5paWPyvMT737z7tcFZ2MK2fnbN7q/7j59qq1dz8KbeaHxZDNYW0yXEm1U2v8Aeb+9Xn4upyYWpM6qeF+uTjQl9o/S74m+G4bT4ZS/EzwZ4psLzWtAtYWk06RvMjXb/s/xf7tfMHwX+Jut/FzxlqtnbPJJqlxPJdXEdvFt3MzfdVa+P/DP7SXxy8E/FbxVHZ63cSaRb6pI1/DNuZY/m27a+z/2Q/i54Bk1Cw+IWgww/wBtLexyrG0G1ZGr4fKM3nJSdXY8XijhChlyl7GXNJamT+1t8IfiX4VvNLstb8PXji4+ZpGT93GzL91q+cLjwref29NDqUKxxWfyRf7392v2s+K8ejeLvA3/AAmfjzSLGWBtOxArL8vnMv8AD/eavzg/aO+FvhvQ7BP7N8yaaS6knlVYvu/8Crtz/FUI0oqG8j5zhTC162KenwnhOmrbWypGkPlSyS7du37tegeFfFPhLQYY45LmMTyP/oqsn3o1/wBZI3+yteQeKtUvrWZLO2hbz1+5I275V/vV5r8VvjRqWh6bqei6Dcs1xfQfYpbpW+aOH+Lb/vV8pTjKpLlW5+jYiUMPDlR6b8fP2sPDHxO8SXOt68kieCvB8XkaXpqvsbVLrd/rmX+7uWvh74/ftWePPi14mmnub+4trCz3Jp1rHL8sa/w/LU/xI8XQ6ho6eG7CHyYN/wA/z7mZv7zV4zqWoPcTTwWyb3j/AIv71fQ5bl8YyakfJ5tjJThaMiDXvjJ4q85oZnbZ/vbv+BVF4d+N17Z3W+a5bLfK67qyNQ025jXzr22x5nzbWasi80mGVftKbVb/AGa+jp4XDOlySjY+TlWxUZX5j3bwZ8XptQmFyb/ey/eVW/8AZq9K8K/Eie/uGmS8k27l/d7q+QdNnv8ATXVra4kT/davR/APxKvLEJvmb/b3fxV5WKy5xu4HuZbm84+7UPqez/ac1v4b6lazJc3gtYdzvCr7vmavXfit+1emqaf4Pe2RY45r1ZZZPNbbuZfu7f71fFs3iiPxJqEWnxzcfe2q9bPxG8bPp+l6BpX2m4LWNw0+3zf4tv8AEteQ8JHSx9HDNpSpvnP1y/ZX+Muj61p6/wBvTbYo/mf978y/L/DXvPwv+IfgzSW/4SrVbOGSzXcqyNPtRWr8PvDf7bHifwHo722j6kyySffZvm3f3qitf+Ch3xyWzv8ARNK8W3Ahuk3RRrb7ttYxwmL2ggxObYaKP308Pfth/s1W/iR/Dd5qFvDcyT/6M0gXbGv+1JXpWkfETwP4igku/BviKzlRfmfbdb1r+Z7wz8Wvj34y1ppn8Q6ldy3Uv+rhT/vpa++v2Nf2nvGHw7htPCXjGwvoYm8tW+1W7Kzf8CrGthsdh4c9SzMMux2ExNW0vdP1Vm1r+0rUXP8Aq3ZvmXb/ABV2nwN8RSw+IvJuQqlW+RVrwvwH8RrbxhoMWq28yusy7lkjr0P4U6tdQ+JYXhdg6tudl+avBWInGpFy/mPezTDwqZdNf3T6A+OHiZvDHg6XXLi5W3tTbMtxMx+Vf7tfzQft1fFi8+M37Sni3xiniS4vIW1JrW1jaX5I1jba3lrX7ff8Fef2lY/hJ+yVcbtSEV3q0v2Ow2ffZtvzMq/7Nfz+XSw6lqCXN/c75vNZmmX5fM3N83/Aq/WsJKNajGbPx7BYb9/KZjw6bDI3/LRmb5v+BVLcaXc+Tsmh3L91mj+7Wva6OnmSp5P7tn3I0laVrpcEf7l7Zm3Ju87ZTqVPZyPoadH2hx7abMtr/o1ssifwf7NULjTX8vztnz7/APV/7VegtYpGzfuV2fwL/drJ1LTobdvMd1VvvP8A7tZKvzR1H9VjGRxmoaVM0e+ZGLr/ABL/ABVl3WmzRsyOWV/7ytXZXCosrJ9mZwz7f7v/AAKsDW7Xb5u75GX7rbN1dVOpI46lOH8xwWpaSFaV5k3N82xv4q5fVNJTaHfhv4a9F1PT4JIX2bS38bVyuuWe0SD+H+7Xo0ec8rER5ZXOA1KzmikL9fn+8tfVH/BMeEQ6T4xwxObmyPP+7NXzZrFvt+d4cbvlWvpj/gmlF5Wm+MRnObiy/wDQZq/V/B//AJL/AAnpU/8ATUz2eFVbPaX/AG9/6Sz5w/aTYx/tC+MJFlVf+Kju+G7/ALw1xm1Fb5E4b/brsv2kZU/4aD8YqzFc+JrsZP8A10NcarJu3u643ba+Lz/3c7xX/Xyf/pTPKxijLF1X/el+bLkM5HyId+3+KtW1vnX5ERWb+81YNrNJyiOrD/ZarsN0I8fw7q8s5ZRhE6WzvH2r8isW/wDHa1NPuJI5C8x3fwp89c1b3m1F8l/4N3zPWnp906/O83y/edWoM6lO51lneH78gX5l2+W3/oVXYbjzId6PJ833/n/9Brmre+jV1m87f/DV23vk3BE+X/e/hq+b+U4qkToo7qFVDmFkOzanzUkl87R8Iylk/wBW1ZC6hu/10i/u/wC7SRapDlPJf7qbZfn3bq0+Iw9maUkkednmbdqbd2/5agkvXVkf+Gqbao8jFEMaj/nntqvJqiOrOm1v4du+p5v5TSnEtXl1DIu/5vubU/hrImussu9Pm/2v4qbc3ifxt95qz5b7zJndnz8nzUe/8RrH3iLVrqHa2+bdJ97/AIFWBfSPJIzuGfd9yr99cPMu8uvy/wALfxVlyS7mPzMP/ZaylI1jyH0ZHHIpVJhnd/dp0dv9sm+SFYgqMzeZ95mqfy/LfyX++su7dv8AvL/DVn7Okcab+m5vlr4ipGXU/X8LjJR90zY4X2/O/wA6tt2r/F/u1DqFqkkbHfuZn2s0n3q2ZrF45IoXhVVjbcirVWbT0kkKJH838C1zxO/23LtI52+t5priUfdVU/76qjdWs3mKjpjcu75XreurOaR22W33v4l/u1i3kcbSedt+VflSuyMTixFaW5j3lq8ay7NvzJtfctZ8kbrI0MyrtrauoxGjb4eP7rVSnt3WTf5Kq+z7391a9Wjzch8lmFT2kiPTZNrL87bW/wBitjTlmmKR/d/vNsqnZ26Ltd+Qu1vm/hrWs49q/fbLfxVtKXszzI+9ISOF5mX7NM29X27q0o98J3vBIh/jWT/2WmR2/k/cSPCvt/2matLT7GGNWffu3fM7M3zVzyqFxjEreT5jCTyWLfN+72bamWzuVXZAjRJ975au2tq6xssbyKV+dV2bvmqS1hh+0M8/mAyOqqv+1S5uxfLzGdeRQtMjvHubft2/xbqhu4fJX54Wbb/Fs+Zq9I+F3wgX4kpfz3GutZm0mQKy2+9n3bv9oY6frXVz/so2lzEUm8dTlyc+Z9iGf/Q6+9yXwu434gy6nmGAwynRnflfPTV7ScXpKSa1T3R6mGyfMcTRVSnC8X5r/M8KNvBGqzP8vy7v9r/danRqkkquj
x71+V1Va9wn/ZLs51Cv45lGO408f/F0xP2RLCMEJ45lGev/ABLx/wDF16i8FPEe93g1/wCDaX/yZ0RyHNV/y7/Ff5njCzOrfuduN3z/AN6tS1aZpPkf/tm3y16on7IumqF3eNpSydHFgAf/AEOr1p+zDbWgIXxrK5PQyWQOP/H60Xgt4jLbBr/wZS/+TD+wM0lvD8V/medaau6MJDuD7/nXZ/DXTaUz7kjS23Mz7flTbXT237OFrby+YfF8rgfdVrQcf+PVq23wXtrZdq+I5znhmEeCR6ferop+DXiItZYRf+DKX/yZl/q/m62p/jH/ADOftVS3l86Z1RP4Fb+L+GtnS7iazmaG5T915S7vL/hb+Fa1bf4YWkUgabUzKigBUaDgY/GrEfgFIp3mXVXw5yR5XP55r0KfhDx/HfCL/wAGUv8A5M4a3DGdTndUv/Jo/wCZd0CRJJNt5M2+F9jR7f8AZ+XbXYaDdPHshSaFW2fxfe/4DXJafoLWEm437SJnPllcDP51p2MjWYJYl3LZ3ZxiupeFHHyVvqi/8GU//kzgnwjnz2pf+TR/zPUPDd150my53YkfZ9/5l3V3mi6slqyQv/yzTam35WZl/vV4bpvje604Nts1YtjcQ+3OPwrdsvjbd2YBHh6JyDk75yc/+O0v+IT8ff8AQKv/AAZT/wDkyY8HZ9H/AJdf+TR/zPfdJuHVU4Xay7nZm3KtX9Nvt1usyIxPlbpV3fKrbq8EtP2jLyzgWGHwnCNvpdsAfw21ZtP2nb604TwdAQeubs5P47azl4Tcfv8A5hF/4Mp//JmkeEs9X/Lr/wAmj/me03rLJthhm3MvzPu+bdWLqG+OFvs20fN92OvS/wBmX9kb9r79qHwRa/ETRfB2ieHdA1KJpNO1XxBrTo14oZl3RxRRPIFypwzhQwwVJBBrlv2vP2af2n/2StD/AOEr+Ifw+0u/8PSXSWw8Q6DrLTQpK4Yqro8aSx52kbmTbkgbskCvk6fDOZ184eVwlSeIvy8ntqV+bbl+Ozknpyp3vpY4aeTYyWL9hePPtbnjv2338tzzzVGRZJd+4fJt+X5dtcR4qmRrP/Q0U7fl2t8275azLr40XVzE6f2GFZjkP9rJI/8AHab4Rl8YfFbxZpvw88FeEX1LWNZvY7TTbKKQbppXOFXnAA9WJAABJIAJr3q/hLx9SpupPCpRSu26tJJJbttz0SPZfC+cQi26aSX96P8AmcT4pkH2VoXfYNvyLs+7XlnjiaHy5cvs/wBrZX6WaP8A8EIP2u/Feix6r4h8deCdDubiP95pdze3Ezwj+6zQwshP+6zD3NfK/wC2P/wTO+PH7KtzZ6V8Y7eCGw1OSVNM1vSZFuLW7ZApZQch0YBgdsiqTyQCASPlsp4dzHOsx+pYGVKpV1tFVqV3bV8t5+9om/dvprscWHy6ri63sqbjKXZSjr6a6/I+I/FXnTzNsdgy/wDLRk+Zq4PXrOaSSWf7T95/vbfmX/Zr7z/Zo/4JCfHX9tnVb1PhJqCfYNNkSLU9b1YLDbWjSBiozuLyNhSSsasRkEgAjPefH7/g2U/at+FPhG68cad8RNI8U2dhayXOoReHlb7RBFGpZmEU/lmXAH3Y9zHoFNXjOFc2yzNVluKlShWdvddakmm9k/ftFu6sm03dW3Ir5ViKNb2FRxU+znH8ddPmfkv4gW1+yPsLNtf+GuM1C3jdnf5v9mvqn42fsd2vgT4d6l45h8evdvpypJ9mk00IJN0ioRuEhx97PQ9K+aNWt1Enku+F/urTzrhzOeFcXHCZlT5JyjzJc0ZaNtXvFtbp+Z5uaZbi8uqqniI2bV909NujZzY3qzJ90U+3abc53s3yf36ffRoku9EZf7tQLNMrK7wq7L/D/erzeb+Y8o0Ybr5WfZ91Ktwyfu/O3s/+7WXDO7ZjRFz975f4a0LV42j+RG3M/wB3+7WUo8poadlImVff/B95a19NLtIm9MfJWTYxwrHsR8j+9/drZsVfcqTOqq1R7TlLjT5zWsfOkKee6o6/drbsGhl+/wAHZ8jKtYtrD5hX7y/PuVvvfLWxp9p+7V49yqv+t3JXPUrGsaPLI17FX++EX/gVbumrcyR702qGTc/+zWdpSPtT59iM3yfL96tK1heRTCm4+Z/FXLUxUfhZ20cLItx2aRqs4mxF/wChNWjZr9l3JNCroy7n/vU6xsfMWONwrL/D81WWRIZsQosu1/7lebiMVGMbHsUcDy2Yy2j+ysnkptLPtXy/mqzHM/l73ePar7kkV6HjdsfI2/8Ah3fxf7tSx2ci/uXRtn8S/ery6laVSMYnr0aPL8IKzw2/32fc25G2/wAP/wATUUN1JJ8nkrs/jaP7tT3UDyRlIdq/Nt2qnzLTlsUhhZLaZd8n3fk+9XO46ndGnymRJMkkv2aa22K25Ytv96qrXjtIh6J8ys2yrsmnvCpd7mb/AK5t/DUE1nebfsxf733ZN+3dXauQ55U+WXu7lNVdZV2Pn5Nvy/8AoVSyRpqDFHRg+/8Ah/8AHqsrp+6Rsortt/e7aks7F5I1S2Rl27d235mat40+YzlGdPczZLGHc8RhZG2KqMq7W/4C1Tw6a8cjb0Vvl3bm+9/u1pNY/wCnEXMLB9n72P8Au/3dtTLpqSSb97Lu+626uj2JyuMfekc7eWu23cSblZk3fL/CtRrpaXDQv+73t/wGtqazS4uFTZt2vt3L/wCzUl1Z7pEdH2BW+Zl+61bRp8sfdOepzSkZsenvFIkMztkfM0kablX/AGaLrTdrb5ZWLr8qL/C1bCw7m/czbh/BItM1TS3WT7Y+35ovkbf/AKv/AIDWEo25Wa049DH8mEBXmf59m5NtWWvHtFSZ3+7tVtqf+g1DfF0ZYU24Vdrs393/AGaz7q4eZUmRGWJV27VraMvshVpx3iasmoIyvGkzIVbd81RLrkLbpk+/I/8AF975ayby6hmX/RnZjGnz1XuL54Y/kRlRv4q9ChL3PePJrUftG/JrUn+u8759vzRqtV217C7HuZP93fXOS6g6sPJf5mf+Kof7QkVmd34X5n+Wu+nPm3OSWF9pqdVHrB8xn+b5du9Wb5adea1tt+rfM33Y22/N/wDE1zUd9cou9JlG6ludSn8nY7/N/Ay1ftIxOmjl85QLuoah5ibE3Yb+Jf71Y+o6lNDGNn3mfa7b9tR3GoPIqfPg1l3DPJuM24/73/oVZe25h/2a6fvcoy81CZWXD4Zn+b5KoTXzzMrojMn3UarjR/aJNjuu7ZVZrErCv3vl3VhUrGjy2XxcpT+0TQ7/AN58zfc3J92hrhGbf/s/8s3qdbPbvd0Ulv7zVHJYPHDvRP8AZ+X7v/Aq5ZVv5jnqYKZXWT5flDbW/hX/ANmpqqjSf3fl2/71Sx27xyI7/KrJteSnN8q/Inzf7lL2nN8Jwyw/LLUhjV45G+66s/3aPMfzG3809ofMK/3dvystRv8AMqRvbbGb+Kl8XMY8vNIkW+eN9m3lv4qtwX0MaCEPt/8AZqztr2snnJMzbqfBNbNMjuF+X5V3fw1lUj7mptTlyzNi
1ukZtiI1XtLeKOZnR9qN83ypWRb3HlyK7vub/wBCq5bzO27ft2r/AHa5PflHU9KjU5Tp9NndZt8cyqsjfOtdDZ3Dqv2lNqr93bu21xlnJ5cKzJN8rff3VvWd07Yh+VkjTd83zbqiXw3O6nI7LS7x/PR3+VfvbY62rPyVmCJ0+98yfdrlfDd+P40Zz935Vrp9Pjdo3S68zezLs+X7y152IlLmPQwsYyiX/sM0LDemwTfPFtf71ereHyj/AA7UxggGylxnt96vMrODbcLNM+Sq7drfdWvTvDqqfh+qKMgWkoGT7sK/bPAWTln+ZX/6BKn/AKVTPqMogoTkl2OHWOGORkR5NrbVdW/i+WiS1hGXh2t935f9qrMdvDIqfaXZFVvk2/N81L9l+0R7JkZh83zKjfw1/P0Y+/7p6uDomFfaW8TfOkgDbvljbau6snWo9sZebc+35tv3Wausu4d0zohYWy/KjMv3mrndQhSSN03sWk/vPXXE9Cnh4cxxOrKPnf5lhVNz7V3bf9mud1K12xb32s+3btX7u2uy8Q6PcrHs8tWVvm2q3zVz+paXuj85EZCyb9v92uqnL3YnoQw8eSRx11GkeYd6yOvzN/s1nTWsPlrsds79zru3V0mpW8LRv88YMi7vu/M1Zq26LHv+5tTb9yvSo82nY5a2HhIwJ7PdcbymPOf+GoZLVzJsTdhv4WX5a15LdI3Hk8/xf7VRzfOq7Imc/d2/3a9SjKK+yfP4qjCJSt4Z41+Xbtb+Fq1rH942x4VXb/Ev3apyRpHKZEm3H7q/3qksRtmZ0+9/eX71b/YPGqSinqdX4f2Dakb7v9pq7fQbySRkhuZlii+6rLFXA6Ldx+cYdjI7fLuX7y11uh3zqzJNMz7fl27vvVfLzR94iPaJ6JoN5ZxzvZwOr+Y23ds+aus8P3zNa/Zt671T5IWbbu/2q850fVHh2JGnzr8zbV/8drq9DunY/aXnzufd5e7burm9pynZRqcseU9J0m9eORJpkjQxxbdv3t1epfBfVLex8QLqVz8629rI6bU/2fl2/wC1Xiei6puaO5mmVP4XWvR/hrNc3WoXFhbIsrSRM0TR/wC7XjZ1KUsumo9j18qlzZjBnpXwl+Hfw31b4EXmq+OZlsbnxt4oZX1DULhVk8uNvvL/AHal+Aek/DfwT+1hb/D34deOYde0iNo23W7bo45N33a+bv28viA/hvwT4O8AaHrHlSQ6W1xKsO5fL8xvm+b+9Xt3/BBL9mG7+JXxmvPiLrM8kum6VbLcXjSP93b8y/8AfTV+aZdTxPsddD0uK5UJ81Rn64ftXyxWfwdsdRkhkhghs40SOP8A5Zttr8+/if4sfxZfXOpX7/JH8sUjNtVm2/xf7NfcP7X/AMZbKbwzD4Vhs4/skCHKyL95tvy1+YHxY8YXt14uuf32yHzWVo1Taq114/ERryjGEj53hbAzwuGlUqx5eY6K/wBF0HWLXffw27xxxfPJH8kjN/vfxV8sftHfD/TbWyuU8H3KyXk10yy/aLLb8v8AstX0T8PfEmja1cR2GqpJFa27Mtx5L/NJ/wB9V1Xir4A6b8RtJa80DSo7S3hRmWaZt3mVrl9SDnaZvnEeX3on5CfEiDxDp1vMlzDJFIvyvuSvKrrUPEmk28mxJAsn3pNtfpH4s/Zf0S88SXdt4i+zv5L/ACSSfd+Wvn348fB+z09Z30TQVeL/AJ57PurX2eX4rD/DKJ8DjMLiqseaB8k2uraxq1x5L/O/+1W1rHhXWNJtEuSi/c+7W+3hHQdJ1FLy2hmQszfu2ib5ai8Ua9NeW/2BEXasW3dtr069ZSlGEIniQwteL99nENqn2hfJ8xVK/frT8LyTXVx5KfKfu7qpWHh172++T5gybvlWvVPhT8Jb+6mS8e2ba33NtZ1pUqcC6EatSqd5+zb8Kbnxl4ytdBezkVrh9kU2zcq/7Vev/txfsK+PP2c/h3Z/F3xPpUlvol1PHBFeXDL+8kb7qr/FXa/sj+CX8L+MrDUtSso8Ky/M3ys1fZn/AAW2+DOuftE/8E1vCnirwlF5114T8QQ3l1tk+Zo/L8tm2/7NfIVakp5jGD92LPuJYPlyj2kdT8Qta1zTdPhH2l1X+5urc+GvxM+G+i6hDdaxp8dyyv8Ad3bdy/3q5Lxt8H/G0OrbNS0e4ETfKjSVe+F/7PviHxB4kisH02RRI3zs33a+meAwvsOac+U+XnjatOrFwpcx+lf7E3if9k74svC/gnV7HS9ajfb9jvkVWk/2q+7rL4d+CfHHhX/hG/Emj2895a2+yK6WBVZdtfkX8O/+Ce/xqXULbxJ8GXkhuYXWWJVb5m/y1fol+yjrH7S2m6vZeBvjT4Sm0u8jVVe6jf5bhf4vvfxV8bmmHrUo89KfNE+wy+eGxtK1aHJM9v8Agj4f13wbb3Gj/aZGs/tG1Gkb7te6fB3Xrb/hLLbfGzr9o2su1qw7XwTZw6YNQMLBJnVm3JuZmrT+C3jXSvCfxPx4gt1fT7G3mup7m42r5axqzbq+WhRp1sXCMv5kepXj7PKp/wCE/OL/AILjfteaP8dvjpZ/BPwL4hkuLHwDPJFetDuVvtkn+s/3tvyrXxdp9vDNdeckO/bxLuTd81df8ZtQTxt8cvGHieF28rUPFF9cQSSJ80kckjMvzf7tULfS0ZQ8j4Xf86r/ABV+y0qMaVKMI9D4PBYfmp8xGunpHhHh3jZuSNX+VWq9HYouUTzP91nq1Z2fkybPszOWep44Xabem4I33o9v3awre97p7dOjyx92Jl3Fq726u8K7Nvz/AMVZV5pu5XSYLt+8jKu1q6eaF45Am/5Pu7W/irG1a3SZmd0YN/zz3fdrKj/LIyqUeaGnxHHahZhWabewf7y1ja/HIsy/O23au9ttdTqVrtZ3Tps3O38KrXP6p532hneZnVU2pHtrspx960jyKkfd0OR1a1hk83ZwzNu+WuW1yx3MX/ib5fmru9QsvmkHyotc/qOmja2/j+Fa9GnHlZ5Nan3PP9WsbaOPZtr6O/4J02sdtZeMDEhUNc2Xy9uFm6V4brWk5UnY2N/8X8Ve+f8ABPq2a2sfFYb+K4sz/wCOzV+s+EGvH+F9Kn/puZ6fC6tn9J/4v/SWfLP7TRkX9oHxkEGc+Ibr+H/poa4Rrry/k/ir0X9qC2mT4++LZht2nX7k/wDkQ15xIuJN7pk79tfF59H/AIXMVzf8/J/+lM8fGSi8XV/xS/Nj45ts2+FF2t/Dsq1DeddzrhaobXjX5PmX+9up0M/zbHh+X+GvG5ehz/FL3jbsbxPlLx7l37srV9dSDK3kurN/B8n8Nc5BN5ce+F+W/hWrEdw6yMiblSiXxcxEpcx09nqTzYhhRd33d1Wf7eeNVSba235a5WG8mjP2aN9p+8+2ntfbRlNpP95qfNy/CYVNzsIdaTydjurFv7tDapthVIdvzfNXJLqgaPyZoP8Avlqmh1J2k+WfAZfu1rKRhyo6dtWTeuHwrffpn25IZG8jbtk/vPWJHqXnSLC6KzL83mVMrPIymbbt3/w1EpFRj73ul+a
b94+zafn+bbVdmfyf3m1X+9/stTlL58t0wP4d1P8AJmMj7Id/8O5kqPaGsYlCZdq732qf/QagmtIpMfxbv7ta32Hy1Xjcyt95f4qa1n0TKg/x/L/DWPOaRjy/EfR0djbR3BedN+35fl/iqa4tfMhHkw427tm5fvVYs0hZfkhY/wAKfxNV1bdBahIX2/xOrf8AstfGVJe/7x+k0zG+zpDGk6Bkbf8ANteo5Le2jZ/nYpv3bpPvLWpcWPzbN7Kny7d38VVLuPaqI7r95vlb+Ks5RudEcRymDqASV3869ZPn2/L8y1i3sMZ/49oW+Vf4q6a6s0XY/kyK7fLu27lrLvLV1md3XZ8/8XzV1U/e0MKlTmj7xzF5azec2yHe2z7u6q0lpc28iu6LtX79bdxEjXDud37uX+FflqL7G8m9N7Hd/EtepRqS9lZHgYiPN8Rm2du/2j9yiy/N95q2rG3ufM2Jux/dVKdpeioyvOnlp/e/ire0fS03LJsVQ392nUrHPRo9ypZ2O21Fy9s3zPt2tV6x02GRWx/f+833avx2aSK0MO6VY/mRm+7WhY6TMu534TZuVW/irnlU9nA3p0485mNCkTI8KeU33fvblqxbR3LSfaUTMq/8tK1rjSTI3kum3+8q1J/Zu1fsyI22P7v+9WMqnNE3jT989g/YH+E+qfGT4n2nwi0LU7e0vfEmuWVhBdXgcxxPIzKGfYC2BnsPy61+l0//AAQ68IeBZJV+Nf7amgaAt1fvFoLPpscRvIlxhmE9ymJPmGY0LhePnOePhH/gkfJNYfto+BRbStFIPHuko7RsRkNMVYcdiCQfUGvpL/gs/qOp3v7eviG1vr2aWGz0jTIrKOVyVhjNqjlUB6Au7tgd2J71/VXh/i+KcxyvKMly3G/VacsPWqykqcJybjiZRsudabr5X0vZn0GGnj6lShhMPV9nFwlJvlTek7dfU4z9tf8A4J+fGP8AYo1y2k8VSQ634b1KRl0vxPplvIIGYE4hnDDEExUbgm5gRnazbW29D+xX/wAEyfil+1noFx8UPEHiW28FeBbNn83xFq1sxa6VFYu9uhKLJGhXa8jOqqcgFirKPftN1HUfGf8AwQi1C5+M9/MRpmoCLwfcXczB5I4r6NLdFJUlgCZogORsTG5QMr7h8Q/iH+xz8Iv+Cdnwv0r44eCde8RfD3VtH02GCDRJJZEa4Ft5w+0OksBbLiRsEAF0zsBUY9jMfETi7D5Osuprnxn1qphXVp01LmVNKTnCnKSh7Rp25G+VNS8iK+dZlDDewir1faOnzRSd+VXuk3bma6XtufJ3xy/4I5614d+Fd78X/wBmT496L8S9O0iCWXVrWySOObEYDOIGilljlZUJYxlkbA+XeWC15b+xJ/wTy8d/tweHfF2seCPiBo+jz+GUt0gtdUhlYXc8xYqrOgPlJtjkO8BzuCjZg7h9ifs8/t3/APBO/wCDlr4kP7I37MfxHlvZtJa71fTdI0qa5jlhgDESTBrqVYo13ENKV+VWPXoeW/4I5+Ph4X+Bv7QvxL0HShBc6bAmq20AdRGuy2vpY4wAgAwVIyBjBGFGOZnxh4iZdwnmNSupqrSnQVGpWp04TkqlSMZRnTi5QstlJWunffZPMs7oZdXlNNSi4cspRim+ZpNNJtfM8z+Pv/BKX4Vfs9/CTVvEXjL9t3wvF4w0jTFuJfC01oqmeYgEQRhZmnO4H5W8nnglVXJXS+HH/BGGS28A6b46/ah/ad8O/DxtYto5bHTJ40eSMugfy5XnlhUSqDhkTeAR9418UX/ibxDqviObxhqWt3U+q3F615PqMsxMz3BfeZS/UsW+bPXPNfof4k/bV/Y1/aV8G+GfCP8AwUr/AGffFHh7xXp+jxPYa59iuY0uYJo0P2yIxlJRHKyFwpSRAOVdsmvf4jh4k5Dg8PTpY2piOeUnVnSoUXVglFcqpUnZSjzX5m3KSVvn2Y5Z5g6cIxqud2+ZxhHmWisox0ur77s8I/bI/wCCXvjP9mL4ap8dfBHxS0jxz4Ie4iifVNOjMc0IlJVJGVWeNoi21N6yE7nUbQOapfsVf8Eyfij+1noFx8UPEPia28FeBbNn83xFq1qzNdKisXe3RiivGhXa8jOqqcgFirKPbP2qf2fU8HfsG33j39hL9pnXtd+Dl1qqz+IfCNyFk2EyGOSVZvLSZI1k8rfbOuDnzSTgVo/8FGdR1Hwr/wAEwPgj4Y+FN/N/wh+o2liusTW0zMs8gsxLGkjbRkGXznIO354x8uR8vnYPjPibHZVhcvw+Mi8RiMTOj7aVLknSjCHO1UotKKr9FHWDutbmNLNMfWw9OjCqnOc3HmcbOKSu+aL05/LY4D43/wDBG/xDofwxuvit+y58cdL+KNnpiSHUdO0u2X7U5TaStv5EkyzuFbcYyVbA+UOWC14l+xF+xZ4m/bY+JGr/AA60DxtY6BLpGhS6hLPf2skpcqyxpGFXGAZHQMxOVUkhXI2n2X/ghp4i8f2H7Xt14c8OT3DaJqHhm5fxFArHygsZUwysMEbhKwVScHEjgHkg+0f8Eyrfwvo3/BTn47aF4DuY5dGWHUTaup3/AHdTi4VyoO0FnGBwcDlsBqvN+K+LuFsHm+XV8Sq9bDUYVqVbkjFpTnyuM4pOHMt46arV+TxOY5ll9LE0JVOeUIqUZWS3drNbX7dzitH/AOCH/hnR7XT/AAz8X/2zPDmheMdTT/RdBtLNJVkZmKoIvOnhlmyRjIjXnIGcZPx/+0/+zZ4+/ZP+MF/8GviLNZz3tnDFPDe6dIzQXUEi7kkQsqsO6kEAhlYcjBMHiv4k+OfGX7R1x8TfE/iW6vddn8WLdPqNxJucSLcDZjPAVQqhVHyqqhQAABX1l/wX0WJf2lfB5SCJWbwOpeRYwHb/AEy4ABbGSBjgHgZOOpr6DKcZxhkvFmCwGa41YmGLpVZNKnGCpzp8kvccVeUbS5fe10vozsw1TM8LmNKjiKvtFUjJ/ClyuNnpbda21PhKus+A/gCb4q/Gvwl8NobI3H9ueIrOykhG75o5JlV87SCAFLEkEYAPIrk69C/ZL8Y23w//AGn/AIfeM7y1E0OneMNPlljO77ouEBI2kHIByPcdD0r9JzapiKWVV50PjUJOP+JRdvxPdxLnHDzcN7O3rbQ+sv8Agtb8f/GOgfF/Qv2Y/h7rVzoXhXwz4btpX0nSZGtoZJpM+WpVCAyRxJEEXGFy2OvHSf8ABJr4i+Jf2nPgL8Vf2P8A4sX0viPThoAn0KLWJHn+ziRXjMYZjlVSVYJEAIKNuZcHkeW/8FwfAmp+Gv2zv+EunsnW08R+GrOe2uNp2yPEGgdQTxlfLQkDoGU45ye4/wCCFmh3XhnU/ip8eL6wkOnaH4XS28/Y2JH3NcOi9iQsCkjkjcvTPP4FjMHldHwGw+JoRXtIwpVIySXN7d1I3ae/M5txbve10fHVaWHjwhCpBLmSjJPrz8y6976HwNf2N3pl9Npt/bvFPbytFNFIpDI6kgqQehBBFe7f8Ey/i34E+Cn7aPg/xr8R5YoNLaaexe/nKBLKS4
heFJmLA7VDOAzArtUkk4BB8O1vUv7Y1m71f7OsP2q6km8pCSqbmLbRkk4Gcckn3r68/wCCJnwY8D/FX9qu98QeONFi1FPCfh9tS021urRZYBdmaKOOVt3AZNzMnB+YBgQUFfrvHeLweD4Ix9XHRbp+xkpKLs/eXLZPWzu99Ut9T6TN6lKllNaVVacrTt5q36nuX7Zn/BLT9sb46ftG618U/h78cNMutE1q8WWxi1fXLqCTTYtoxCESN18tDkLsOSOSMk5yf+Cq+saf8EP2HPhx+yN8Q/Hv/CWePIri3vLnUpZBJLHDCkqtMS4LhC0nkxk7WdY2JJ2sp+f/ANpb/gqD+2J41+OGr6r4X+K2s+E9L0zWJotH8P6W4gS2ijkKqs4A/fv8uW8zcNxYABcKPoX4teLf+G8v+CRt7+0F8Y/DMP8Awmvga9aK28QW+moJLpo54UkddoXZFLHKBIq4QSRFgvyKo/FqeU8X5HWyCvxFKlPC06tOEY0kozhOcXGnzvlXNFac6g0m+ktz5aOGzLCSwc8a4unGUUlFWabVo301Xe34lP8AaG8Y+Kf2MP8Agkz8M/h58MzL4b1zx6YrjXL/AE7fBcsssRuZiZAQyyMDBGTnPlqUGFAA8Z/4JJ/tO/EzwD+174e+H954x1K70DxfLJp2paZdXcksRlaMtDMqsSFkEiIN4GdrMOhr1P8A4KM2t18Vv+CZfwG+M2j6Qy22j2ltZXqxK5FuHtFhyck4XzLYLls8svPPPzl/wTD8B6l4/wD25/h9ZWFk8qabrH9qXbqpIijtkaXexHQblReeMsBznB97I8FleN8Mc4rY6EXUnPFyqtpNqcZTtq7u8UouOumljswlLD1chxMqyXM3UcvVN2+7Sx5l/wAFvvhPZfBr4qfF3wZonh6HTdOkvIb7TLO2hKRLBcPDMBGvQKC7AAfKNpAAAwPyc1SHZJ5P3jtZv92v2I/4L7+L7fxv+0H8WZbW2EK6eLHTi43ZkaBLdGY5P94EcYGAPcn8idWs3+d3hYqv8X3d1fkniPiMVVWTzxH8SWCouV97vmvfzfU+T4ldWccLKW7pRv8AichdW7qrP8zCqDWrwr8nylq3Ly1ePbsThqo3Nk24Mj/x18BGpynyHLzGfD5nmM/zYX7+2tO3bC70+Yf7P3qhjhS3kZ/J43Vo2sbybfJ/h/vVEpdzWnT5i3p8aeYqOGbd95dlbdnbvN8k0P7vf8+6s+xj43+R977+1/u1taWs21Y4fm2/+PVyyqR5fdOynRiaFnbv5Zm8lnZfm2763rGPzoD/AKM2xlVmjaszT4UVW2fM29WX5q3tJimaREd8fPtdWavPrVj06eHizY0exdVSHy1Xb9z5vvVsafb7m854WQx/Kn93/gNM0XT0jkRJnYln+Rmeui02z32I3cbn+X+Jlb/eryfrXNLlPZo4X3SPT7VFU/udki/5+Wr66em3Dw/My1YsbFIUe5+ba3y7m+XdV+OxSTc+xldVrgqVuWR6VHD+03MtbPbh34T+L56sJbzKzJ5Lf9dP4WrRktbOOON33L8nz7aj+zzRwo6OyfOy+W3/AKFWMZfDJHVGjy+6Vvsszfvw+xtq7WakXT7byw7/AMTfIy/e3VfWGSSG4/fLsk2/8BqW3s5o4/Jhh+Zvl+V/vVtyzluXGnGPxGNcaXZ3UhdNuxfk/wCBVBL4fh2h7lGLr95W/u/w10v9myKyWy/embdu/wCef+zTZ9Hf7QZptpXZsT/er0aNP3QlGHL7pzVvpaW8nyBi/wAyurfw1ZtdMRYfOSGbK/cjX7zf7Vbk3hmGNVcPt3ff2vuqxBpFtbru82R/Oi2szfer0adGPLoccv5TmJdNfKx/bG27fvTfM1VvsDrtmd/njbb/ABfe/vV2N/p9s80Lw/M2z+Fqp3mlwxx7HRt6t/E1dPs+U4ZU/iuc79khaMo6MvmfM0i/3qh+xJZw+ZCm9Gba3+zW9JY2fmK8MyuG+5uf7tULixdG+S552fNu/hp+z15Tn5ZSkY8apJ/qX3OrfdVf4aLqD9233vv/ADqzfL/s1d8vyZAjvj+H/Zpt3apcKrwuoZvvVlKPKZ8pzerWvlt5zp87feVqwLyaTzR/cVN23Z96uk1SGa3Xf5zFt+1ZG/hrmdUV9xMz87vurXD8J1L3oEEl9MzNB5yoW/26zr7UGVim/wCT+9uq55L7Qj220/w1n3lmdzJM6/f+7W9GW5nWw8qnwkMc3nKju+9Y2/ufepjXTr/rtzFvuL/eqw1nNbqqPbMqyf3n21XmtU3B0h2/7NdUa3N7sTqwuW82g5r55d/z/Pt27qZJNNtVJudv8W6kkXyzvdF2L91dlQv5fzyQpt/i2s33aqVafLofQYfKfcigEyecr7/9ylb98u93kZm/ib7tV13tG3nO29vuMqfLtq9Y277Tc/Nt+61ZSrRjG5p/ZREtm8bbPlG7+89Syae7J86YH+z/ABVct7fzmRNkmf8AZTduq7b6bNMrzvuxGn/fVc0sRp8Q/wCyeWJhyaCkipvfYzfcb+7TJtNjjVvLfjf87f3q66z017pf9Tt+RW+b5qq3mjoJFhhhVW37fuVyfWoy0ZwYvK/d0OVm0hFXG/afN+df9moJLH946P8ANt+VK6W4sUhm2fK3yt/urUP9nJuV5HXDfLWlPERj9o+VxmFlGRzFxZvHs2Q8fdfa21dtM+ywrGqFGb/Z31vSWMKzcpsXZ/y0+7ULaei/OkzFt/8AF91a3lW9w8ephzCm09933Nyt/DvqNbPdcLsT+P8AhrZvLHdI2/gf3qguoXVQibU/iqeaXui+r/zFOOF/vwu3y1djjmkkZ/MVGZ9v+ztqGGHH3/m2v97dWlbx7ZEh2K3ybk/2qr4YGlGM4ljT1aNkTZ97761t6azqph8njeu3/aWsqG12yLsTaWf71benwv5a4hVd3yo33a5vhO+j7ux0Oko/8c0hTf8A6pfl2112n2sO3zo9yvu2vufcu5a5fR03CF4YV2b9svmf3a63T45o1CK+8bvmZvvMzVxVI80z1cLU5Ym3Z2G79z5efutukr0fQURvA6orBla1k57HO6uB0iHy7dZXRW/5616FoMUY8JJDEgKeQ4VVHbLcV+2eA0OTPsy/7BKn/pVM+lyio5VJX7P9DlFtYZdlsjsZNy/u1/harUtrcx/uYfm2uzPtermnx3MM3k3SK39xdvzLU0MPmW/mb921/u/d/wCA1+DU6M+Y3o1pRlfmMDCMqJDbSTIzfeV6wdcs/Ojd3hj/ALvl/wAVdlqEbi3e1RFb/ZVNtYOoWMLQoX+Uqm3/AHq6PZ8p7mHrfaOI1q3fzGuXtlES/L97+Jqwr2zf54fmfd8vmL91a6zWo0jiWF3VQ277v+98tc/q0iRqzvul3Ov8XzV2UqMuU9aOIiclqVrc2qsiQKQzbfuVk6hD98OmHX7y1011++mf5Nv+zu3ViagttbjyfIY7vl+auyEJR6CqVqUTAuF3Sb03CRl2pVFo3t/4Gcs21/71al9a+Wyqi7D/AHaosqBTcv8AIy/fVf4q76cbHyWYV
Ob4SKRPs+7em7d/Ev3qLWP7Pdb0Rm/2lpxkfyxvTa7fw0kW6ZneE4X+L/arsjHmifN1q3Ka+lx+ZcBPO2qz/Mzferp9HkfzfOR/Kdfvsv8AFXK2FqHHKbXZflVfvN/tV0elwvHjL7H+6zK3zLUVOaRH1jlOu0u827fJT+LczMtdLpV88dwr3O3/AGGWuN09XjxsdmH93ftZq39Jme4VURF+XaF+f5t1cVSXunXHFR+yd5oeobbgJc/JKvyxMvzK1etfs/68mn+JvOmm+aO1kVI2+7N+7bateG6LdbtUTem54flWTf8A99V6R8I75G8WQwncr3D7P7rV5mYU51sHJHfluK9liozJvil+z/4q/aE0HSn8Kwx3OoWaNbfZY38z5d25Vr7K/wCCPfhbxf8ABT4b+K9N1jQ9Qsrp7uGGeGb5fl3fw/7K1t/8E8vgq/wZ+JjeNvFOgSXelWSyXpdf3mNq7q+gvhh8eP2ZvjNf6v8A8Kp0a5l1+/uZGubSCJlKFW+Zn/2a+FqqnGhyP4j2MZWqVMU3yc0Opyv7TmuTLvR5pHMMS72X7vzV8RePrHTZNeu33ybppfNlaT5lVf8AZr6n/aG1z7VNeW015JB5bMqKrfK22vnpfDqatp5mvIcmR/8AXfd2t/u15MY+yZ6EfdgeWaPq1h4f1R/sd5u/jlVvu7a9L8G/H+wtbaW1v9YmSwVdu2SX+L/Z/u1538WPhzreizM7wSYb51WNN+7dXz58T5vHmk+IrR7aNgnm7v7q/Kv8S16FHDupLmiefiq1CPu1T608XR+GPE1r9s8MeGLi5E0TM9006+Xt/wCBV8+/Gi38Z6Mvnf8ACt45rf7rSQorMse35d1eY/8AC5PivJJ/yEpv3O7fJHLtX/d21et/jl8YLjT/ACZpFltmfbtuk+WRa9WFPFU5RbR48vqM+ZRZ5d8QfiB4J1Czlhm8HtFPDuX5vlryHVrVNcvPs+j6a23eu6OP5q9u8beGbDxhqj3mq6VHEq/xW/8AFUOh+E/Dekx+Ra2y7YW3MzfeZq9ehW9h70nqeFisJKvP3YnK/C74B3OpXiXOqwthm+f+FdtfSfh34Z2FrpsSabCq+Snzxqv3q5bR9SsIY1sLZIUaP7qq/wAzV7L8IY7PXLT7HeXLJcbP3TRrt3VrWxyrQDD5b7HVfEdd8MNP0qz8Jprd4lmj2rR79z/vG3N/yzr9A/2ePCuifGj9mnVPAvifzXsLu1aN4WXcsn93/wAer4kh+Ad3D4fPim5ufJtoUVmjb5VVt33a+0/2H7rxLd/DwaR4ZsxPA0amRi/yxxrXz+Pjy1YSPqsBH2mAqQmfm7+3l+xHqX7OPxWhvPGmiTP4U17b/Z18vyrDJ/drX+Hf7Bt54i0+z8Q/C7xbD5Eksb/Z5Nr7mX7y1+rv7Q3wR8GftB/Aq68DeObBrnZFIbWRl+aFm/ir8vJvg/8AtG/sR/EpLHe2peGI7pnsrqNmZo1/hX/gVaVa05YWNSn7zj8SPNw2GpQxLo1fkz7P/ZV/Z78beEL63v8AxRCv2aGJfK8mBVbd/FX0p4u0nwlfNYQvokbz+asUU0i/Oqt8zfNXzf8ABX9szxCvhmF/E/hhjFJF/C+2SvY/hz4i1LxvqFtqb3LIrbmit5G/1a15H1irK66yPSxGBlH4djtvHsGmeGNBR41by1i3RM38TV8lfHzxpeaP8KfiH4hsLzyn/wCEXvILeRn/AOei7d3+z96voD4+eNg0Y0RrnZ5ab9sf/oNfMX7Ukltp/wCyj47165mbz5LCG3WFU+WRpptu3/vmuLCR9pmsEo7SOmph/Z5VN1Ox+b2j6HMqok1y0h+zr5s38LN/erdsdJhaEQwzKqb9m1k+Vmqa1tUt5gjurKv97+9WppdvMrfvkz/FtX/0Kv2Bw5ocyPicL7vukMOg3nnCZHjDxv8APCr7W27altdPmhV4ZkjWRl+61bsciSSedNbfL8q7m+9/31U91pKLbvMjqNvzIqp935q46lOctz16fuxOKvLab7Q6WzxqV3Km6Lc1Ys2j3KyPNI/m90X/AGq73VtPSS4e5b5Hk/1S7Nq1kXuhw27M6QzHd8zt/do9nLmsKpGEtjgNc0e5jdkmhZP4tqvuXbXK32mv814m4rv3L/s7a9U1bTkmg87fGsWz/dZv96ub1bw35kf2b5Wdfvssu1V/2a7KcYxPDxVP3vdPNNQt5maR5pvut/q1VfmX/erA1ix3L8nzLu+f/Zru77Q5rWE7Nu1k/wCBVgappaRskyIzp954d/8ADXfTlE8StG3unBatbTqr7Eyy/wATfdr2/wDYQt1gsPE+0/entCR/wGWvKNc092X92jbdny7V/wDHa9j/AGJovKsvEeEA3TWpyO/EtfqvhAkuPsLbtU/9NzOzhdWz6nb+9/6Sz5h/acsIJfjT4qk80gtrlyCAv/TQ15VqkOdyJCv3fk+bbXt/7RNktx8YfE+4DB1u4G4L/tmvI9es3Hzuiqq/xfe3V8Xn8eXPMV/18n/6UzwsZ/vdX/FL8zBTfJ1enq3k53puWmXcflNs8tR/u1CjSeXj+Fq8XlOb2hPDJtk/c8t/Aq/xVYW4dlHybf7+191VrdnG7/fqVZHjbKf7vy1X2Be0LH2ieTYibfm/9Bpv79tvbb92mpbzSbZjDll+XdV6Kzj2h33D/gH3aj+6YyjzFaFXZWfzty79tXbW1uZJFcBXFW7DR0kbfs3LWvY6WzfIkXzMn8NVKQombbw+Sq/Ju/3qt2lm8eX6Lv3bv4l/2a2LfQ0bY4tsszf981etdDMa7/vMzf8AjtYSlKUjanH7RlwwwNh3hb5fl21Yt7abd9/buTbt31t2+g7pPnDJt+ba1JJo7wyb9n/AmrnlLlOunT9oYzWc0J2dV/2aRYX2rDtU/wAX7z5a2X0vy03vSeX+8V3s1Xd8vzfw1hGpI19ifQS6S/nSpE67413IqrVlbV7dWCIoHlK21vm3NWvNpf8ApDvhWeFPnbd95qia32SbH2qF2/N/er52pT5T7Wn73xGLcW91IiQwzKh+9u+9WfNa2wxAkK5bcqsyfxbv4q6G+0/zriLZbKPvKzM33WqpeWqbhBsVvn3IrP8ANWPvHZKMOQwLu1mWPZ95F/8AHmrK1COFmX52+Zt3y/w1v6pH5m7bcsPLT+H+9WXdRpNN9z+Hcy7fu10Q92epw1OblkjAlhMl9smfaF+X5vu1DJayWTbJHZyz/dq7eR/6SU8lcb9rrUa7Ps/z/wDHx/Bu/u13c38q3POlHmJNNt4bO186H5tzbtqr81bmkw20kyTJDIyq+3b92sqzWaNwUG6Hzd27+LbXRaTNDHdJsRtirv8A9n/gVRL93qKnH2kuU09L0n7UY02YRv4f/iq1V09422Dy9v3VXO6ordkuFH2ZFbcv71fu/LWlbxpGqeTAqhdq/N/D/tVwyqc0T0I04RGLpqSfP5PzfwK3y7qhbT0t7VbmH5dz/wB/+KrrW+2N/tL79rrs2v8Aw1U1Jd0zJbPhG+7H/dpUypxj/KfTP/BKnTb7Q/2u/hnrkd2U
bUPHOnKqoeVj8/y2B+oZgfY1+nP7dP7TX/BOXTf2gL74b/tefs86jrfiDwzDbfYdZ0+xV/tFvNAk6ozpPE5CtIw8t9yjJIxvYV+TX7M/xc1T4CXvhb42aLpVrqF54Uu11S2s7wt5U8kEhkCsUIbBK9jXm/7e37Y/ib9uH9pjX/2jdV8J23hj+2Ut4E0ay1CScW8UMSxRh5GC+Y+1RuYKgJGQq1/RfEs8s4WoZDWlSqODwSt7Ks6U1OclUk+dKTs+eS5bWtLyLzKjToV8PUkny+z+zLlabd9/mz7m/by/4KOX/wC1LomnfBr4UeCU8G/DbRCn2LQYkiV7to8rC7rGoWFEQgLAhKqcks+F29D+xv8A8FMPC3w8+EUn7LH7XXwyPjn4dyDy7ELDFJPpsWWfy/LcKJlEm1kberxHJVjhFX8in1jU7iRkS8nH+y0hWmT39x5w/wBMZzs+VlnPy1MvE7g+pkEMnWSuNKMueLVdqcam/tFU9nzc9/tX1Wj93Qbx2WPALD+wtFO697VP+a9r38/lsftl4v8A+Cn37Kf7PPwz1XwT/wAE6/gBceG9Z16Ird+JdVto0e0YYCuA7zPclQX2q7KiM27DZZT41+xx+3T4X/Zx+D3xe+H3jTwdqutaj8RdIMNnfWt5GqpO0U8TGXeMqMXDvvG8kqF2gMWH5ZTa7etEiveS7lb518w/LVO+1u7ijdn1C4Xav3vMLM1GD8Q+FKeW1sG8qnU9tKE6k54mUqk3TkpQ5punzWi0rJWVr6XbZjHHZdSw8qbouXM023NuTaaau7X0Psa1urmyuY72zneKaGQPFLG2GRgcggjoQa/QOD/gpp+w9+0x4O0Ww/bv/Zn1DUvEGhWSQR6zpQEwuW2gSOGSWCSIOwLeVl1BPU1+EF3q+oSycajcK23+KU/40+DVbq4fbLeSP5e07fNO2va4i8Vcl4ndKeKy6pCpSbcKlPEOnOPNZStKNNaSSs07jx+f4THcrqUWpRvZxnZq++qXU/aL9rf/AIKSfB/xT+zzL+yR+x38Grnwf4PuLhGv726dIpbiIP5jxCJC5+dwhaR5GZgpUjnNVP2N/wDgph4W+Hnwik/ZY/a6+GR8c/DuQeXYhYYpJ9Niyz+X5bhRMok2sjb1eI5KscIq/j3p/iG+lVzPNKBu3LtkNWo9e1BVd3v597fLuydy15v+vfB8ckeVvKZOLn7XneIk6vtf+fvtOTmU/NO1tLWbRxf2zln1R4f6s2m+a/O+bm/m5rXuftp4t/4Kf/sqfs7/AA01TwV/wTq+AE/hvWdejIu/EurWsavaMMBHAd53uSoL7UdljRmztbLKfEv+Cd37bnh39jz4z+I/ip8RPC+q+IDr2gT2jNY3KCX7Q0qThn8z7wZ4wGbOVDFgrkbT+YsXiO9LjbfOB5X3VJ+Wr9r4ouzMkM08hjblW840Yfj7hTD5RisBLK51Fibe1nPESlVnbbmm4c3u9ErJdtXfOGfZdChUoPDt8/xNzbk+13y306H1g2vW7eMT4n+wv5R1P7V9m84btvmb9m/bjOON233x2r3b/gpD+2d4Q/bZ+K2hePvBvgzUdGg0rw3FYTR6nPG7yS+Y8r7QnAVWkZQxOWADFUJKj86rPxBfLCqfaZWKtuVo87v92tnSdcaILcxXM7L93czHdur2cX435RUzTD5hUyuTq0IzjB+3dkpqKldezs9Irc9iPEmGxGIhWdB80E0ve72v08j3OgEqQynBHQivJbHU5JV3797bs/eP3q1bXXJHZ3ikcfL/AKljtb/arpq/SWo0/wDmVt/9xl/8qPapZ+qqv7P8f+Afpr8Nv+Cnv7NPxi+D2g/B7/goR8Br3xdP4dtxHaeKbUrPPORwHbLxSxOUWMOyyN5hXcwHSsD9qH/gpf8ACS8+AN7+yp+xJ8G7nwP4W1KXGq6nIyQz3cDDEsXlxlzmTbGryvIzMgKEYNfnXJr1zD9xHT91uX95upW8QvHbur+bhvl6/KrV+d0fEvg/D5lDEwyepyxn7SNL61L2Mal786p+z5U76pfCuiPIisrp1VUVN2T5lHnfKnvdR2/Q+6f+CZn7c3wb/Yv1rxPefFP4SXWsSa3aRx2et6RDDJeWwXO62xM6AQyEhmKsDlBlX+XbxXwf/bY1T4B/te6j+078J/hzp2l6dqWoXXn+DYJilt9gnfc1qrKPkIwrKwXaroCE2jZXyJLrBhCwvPI27+JTVJtRu8PDNKy7X3blb+GvWreNXDVTH4zF1smlKWLgoVU8Q3GUUrJKPs7LTqrNbqzbv0vFYGdWrVlSu6iSleTs0vLofq/rf7bH/BIT42anJ8SPjX+yLrVr4nv2MmqiwtgUlmJyzl4LmESsSTl2QM3U15h+2v8A8FLPC/xu+Dlt+y9+zl8HY/BPgG0ukkljJjSW7SNvMSMQxDZCvmZkb5nZ2CnI+YN+bOpXF3DGq211KZPvbdxw1ZN1qF+gbF5KdqblVXPyt/drzMs8S+FsDi6OK/s2tV9i1KlCpjJzhTa2cIunZNLRXvb11POhiMuwtWM3CUuXWKlNtRfkmunQ/TP9h/8A4KPeHvgP8LNU/Zq/aM+GMnjn4eapcb4rFpI5G09WJaVFilG2VGkCSBdybH3OCS1eqt/wUz/YY/Zl8N6q37Cf7L13p/iXWbGSBta1iJIhakjKEs8s8kqK4RjCCiMVHPFfjBquo3IVsXs/8K7mc/erntT1e+l81GnlX5mZN0h+WujHeI3B+b4+pi6uVVEqslKpTjipRpVJK3vTpqCTeivtd6u7bM8Vi8sq1pVJUZe87yiptRk+7VrH1j8UNOk+MVpq8Hj7Vby9m1ydp9TvXn3TzytJ5jSM7A5YvySc5ya8ok/Yk+DskPkG+1sKTnAvY/8A43XgOp6pqjsVGpXGxv70pb+tc/f3eoxK+7Urgtvwi+c3zL+dfT5l4ucK53VjWxuRxqSjHlTdRaRV2kv3e2rNcTneX4uSdbCKTStq+n3H0jJ+wN8D5Dk32vD5cYF/H09P9VUR/wCCfPwJLlzfa/z1H2+LB/8AIVfKt5qd7BIGTVrovv8Al/ft9386z7zUdUZj5eoT7P8Aanbd/OuP/iIfAf8A0T0P/Bi/+VnE8zyR/wDMDH7/AP7U+uD/AME9vgUW3f2n4i4z/wAxGPv/ANsqcP8Agn58Cwwb+0PEBI6f8TCPj/yFXx02p62zB01af+7tadv++utPh1PW5mXfqs4+f+Gdv8ab8QeAv+ieh/4Gv/lZUM0yd7YKP3/8A+y4f2D/AIKQFSmoa9kdzfx8/X91VqD9if4PWxBhvdaGBj/j8j/+N18f2upapt+fVLn5f4vPb/GtvTNSv5Zkht9TuPm+ZlaRvm/WueXiL4fx/wCaep/+DF/8rNlmmU3t9TX3/wDAPrCL9j/4TxcpcatnOcm7T/43Vq3/AGVfhnbSiWO71UkDABukx/6BXzNo93rB/ez6hK3zfIqzN93866LSrq8KrNJdXH93b5priqeJXh4t+G6f/gxf/Kzrp5hlstsKvv8A+AfQ1v8As8eA7crtuNRO0YAa4X/4irk
HwV8IW5UpcX3y9jOvJ9fu14Xpup6hLJ5qX0qFX/e7ifmrdtr2984+TeyOW+bazn5a5ZeJPhwnb/Vmn/4MX/ys7I4/AvfDpfP/AIB67F8IfCkKqoluyEGF3Srx/wCO1MPhh4byCZLrg5H70D+QrzDRdRuNizXV84f7mybPzVeivLg2kkUch3SfLiRi1Yy8TPDfm14Yp/8Agxf/ACo6oYzB8vNGivv/AOAegN8LfDTsrmS63L91xMMgenSpF+G3h5ZFlD3GV6Eup/8AZa4EXUjSR/O+V6tuP3f7tT2M8zxn7I7ff+75h+7SfiT4bf8ARL0//Bi/+VG0MVhmrqkvv/4B2zfDTw65O6S5wwwVEigfotTL4B0FAMedlVwrbxlR7cVyOlw+YiXL3Mof76qzFt3/AAGtW0S6eNXlimX+Dcw+9/tV1UvEfw4n8PDNP/wYv/lYfXMPLel+P/ANqLwHoUTBgZyQcjdIOP0p6+CtGBDP5rkZxvYHGfwqlawySSLsw7N95VX/AMeqza25nb7Xbo6Fn+RWFdtHxB8PJ6Lhymv+4i/+VjlicNGP8Jff/wAAnTwfo0b+ZHG4OMZ3Dp+VKfCOjkY2OD/eBGf5Vet9DF5aCLyxtZvvsdu2nXGkBLpI5rgfd2qqr97/AIFXUvEDgBfDw7T/APBi/wDlZzrFYSX/AC5X3/8AAMs+CtEIUBJBtORgj/CkfwPosgIlMzZILZYckDHpVwaWzXRS5QlNv9/azVQ1LSr+3iUoiDe/zLn5WrT/AIiBwFb/AJJ6n/4MX/ysyni8FF/wF9//AACB/hh4YdizLP8AMCCPMGDn8KZcfCvw1chRLPd/L6SgZ+vy81T1KwnRmeAzbF/5aSSL92svU9MnSNo453lMm1Ym3D5l/vVMuP8AgGGv+r1P/wAGL/5Wccsxy2DusMvv/wCAbI+DPg8MH33m4HO4zjP/AKDQfg14Q3tIj3alv7sq8f8AjtcbNBcIxRbqVmba33Cq7qpXVreBiIWbePmlZnLfLXK/Efw/5uX/AFdp/wDgxf8Aysc8wy6muZ4Zff8A8A7a7+AngW8yZGvVJGCyTKCf/HaoN+zH8OXBV7nUyCcnN0v/AMRXH6qskqhBeNGv3U3OfmrY+Cv7K/7Rf7SviePRvgz8M9b1x3fynktY3WNWX+JpG+VVrN+Ivh7y8z4cp/8Agxf/ACsn+2MsjK0qCXz/AOAasn7Lnw3kIzd6qMYwFuk7f8Aph/ZV+GRJb7TqmSck/ak/+Ir7C+DX/Buz8Xbq3g1v9o7456f4ZhcsZtH00m8uFX/eX5Vr6D8Of8EH/wDgn9ocMaa/4p8ea5IyrumbVBAu72Vf4a8yv4ueF2GdpcPU/wDwYv8A5WaU8ywtX4MI3/Xofl5P+y38Nrn/AF11qhOc5+0pn/0CmSfsp/DGTGbnVB/u3KDP/jlfq/cf8EQf+CdMg8v/AIRnxdDuRv36+KnZt396vOfGf/Bv7+yjrDk+EPjX490hPu7JZYp1rmXjP4VPbh6n/wCDF/8AKz0aGYUlLSg0fm/P+yH8K7mQyy3msFj1P2xP/iKYP2PfhQCWN3rBJOSTdp/8br6q+Lf/AAbwfGrRklvvgh+0Zo3iaBf9VZ+IEktblm/u/L8tfH/x0/YH/bc/ZzkuE+KvwZ1qK2t5f+Qlo+bu2aP+9ujr0YeK3hnV+Hh6n/4MX/ys9nDYzDV95cvqjXX9j74UJ9261f8A8Co//jdSw/sl/C6AbY7rVgM5/wCPtP8A4ivBIJ71L86Wt5NFKv34Z2dW/wC+c1p2IvlYb9Tuf725pDWkvErw6cdeG6f/AIMX/wArPZoYJV43jP8AA9xP7L3w2IIE+pjLZBFwnB/74qZP2bPh4gws+o4/6+E/+IryJEktlBW5lLSfc2yP8zVoQT6nGDDPqE25v4d527a5ZeJ3hx/0TNP/AMGL/wCVms8vqxdnL8D04fs3fDsNkSah7j7QvP8A47RL+zh4Clk8xrzUx7C5THt/BXlqGaSTyLy7ddr8t5x3VQvJrpTsS4lx6+Yf71S/E3w3f/NMU/8AwYv/AJUeXiMOqbbauesS/ss/DSZy73Gp5Y5P+kp/8RTJf2VPhlKQxutVUjptuk/+Irxa4vb6RnH2112t8+1jVSW6vZGb7NezIP73mHbTj4neHD/5pmn/AODF/wDKj5nF1sHTV5UE/n/wD3F/2TPhhJy95qxwMf8AH2nT/vik/wCGS/hbgD7TqvAIz9rTv/wCvFI31G4VVS9uA2cblkO1qe1ver88d9M5X+HzDWs/E/w6pxV+Gqf/AIMX/wArPKWOyxy/3Vff/wAA9n/4ZH+Fe8SG41UnGObmPken+rqB/wBjf4RuxJudYGRggXif/EV43u1JoykM83zbmSTzD8tZd42o5WP7bKHb7zLKf8aqn4n+HU3/AMk1TX/cRf8AysbxeVf9Aq+//gHuo/Yx+EIGDdawRnJBvE6/9+6kT9jz4UIjILzWMNjP+lx9v+2dfOEs9/aXYZdUnQ7vutK3+NSf2pdqWlTUrlpG+bb5p/xreXiT4eKOnDdP/wAGL/5WZRzHKnLTCL7/APgH0jH+yR8LovuXmsfX7Yn/AMRU9r+yx8NLRNkdzqh92uUJz6/cr5xsb/VJbjzprm4jVvmT5zXRaHHeTRPFPdyOVbP+tO1qUvEjw7ir/wCrdP8A8GL/AOVl/wBo5Y/+YVff/wAA98sv2fPAdgu23m1D6m4XP/oNaEPwi8KQLtWS7PuZV6+v3eteUaIbidQq3TN+6YL++KqrVvaLcX1orPcK5eMqPMOdsi1nHxG8OZO64ap/+DF/8rG8zy2l/wAwq+//AIB6LbfDzQrU5jluD6hnU5/8drWttOtrTTxpsW7yghXk84Of8a4eyiu4lSZp8+Z9zaK7DTVdNAVZcKwibJJz681+l+GfFvCecZli6eAyaGFlChOUpKfNzRTjeD9yNk7p3122PVyrMMFiak1SpKLUW9+mmmwg8Naech3lYHHBfHT6AVYGm26xrEC21egAAz+QrJWULue2D+rw5/8AHqtFQkQfcERfv5NfnUfELw7Wi4ap/wDgxf8AyszjmmB6Ul9//AJxoNiu4K0gDMWI3DGT+FVbjwVotyhjlEpBOSN/f8qfLa3Lzec0BdF+bcx27awvEdrKJAyySB4fRvl2tTXiF4dXt/q3T/8ABi/+VnZSzLD20hb5lm8+EfhK92+d9p+X7u2UcfpVKX4DeCpozG9zf4Jz/r1/+Jrltbnlt4HNsSyjcrLuK/8AAq898Q6tco7RR3Eqsv3VEx2tXRR8QPD6e3DlNf8AcRf/ACs64ZlTteKPXpf2afh3KBvuNT4IIP2peo/4DUcn7L3w3lJMl1qhDDBBuUI/9ArwaTV76NvJhurj5fveZKfu1Vm1TUmkXZdSfN8zfvD92uuPHXAD24ep/wDga/8AlZlWzanCN3G/zPfH/ZP+GUgO6+1fLHJP2tM/+gVBL+x/8KpQQ17rPzdSLxOf/IdfPV5rF8JHb7fOuf4vNP8AjWdd6vqjReVLqE+xd3Kyn/GtFxzwGo/8k9
T/APA1/wDKzya2d4KMbulf5/8AAPpOX9jn4UzIEbUNaGO4vI+R6f6vpTof2PfhTbqFjvdYwO32tP8A43Xypca3eyMTHqVx8v8AC0zfL+tOttc1adkji1ScH+FhM3+NJ8ecA/8ARP0//A1/8rPOeeZZf/d19/8AwD6xt/2TvhfbYMdzqvyrgZu0OB/3xVyH9mr4dwjCz6ic9Sblf/iK+Y9F1q+df315cNIv8X2g/wCNdDY6zdzXCma6kVdm5VaQ1jPxC4BTs+Hof+DF/wDKx/23lslf6svv/wCAfQ0HwE8EWyhYbjUBtGF/0heP/Hamg+Cfg+3IaOe9yO5mX/4mvC4b+/jjEiXbyt95f3x+WtTS9YmbMM1wyurbnXJ3Vzz8Q/D9b8OU/wDwYv8A5WdEc2y/ph19/wDwD23T/hh4e01w8FzdkgY+eVTx/wB81raboNnpWowanZySLLbyiSPLDG4fhXjlnq108CJFM7H/AGWNeqfA7xbpug+PLK/1WTbZxyrvMi53L/FWdTxE8P1C3+rdN/8AcRf/ACsl51l8XdYdff8A8A+lfD3/AAUV+Pfhf4fXPw50bTfDkdpd2pt5bo6bIbjYVKnDebgHB9Kwvgp+2h8VfgBoWp6H8OtF8PQtq+BqF/c6c8lzIoOSm/zBhT3wBmvvD4kftGfs1fFP9kbSU+G0Vkuv6IlvLp6vZLHI0kf3scc18taf+zB8bvht+2d4V/aV+NN3aDTvF9x5ljamQPujWP8Au9F/2a+fxniT4Y0EpLhalLT/AJ+JW/8AKR6WV5zg8x5qc6fI77b3fTseReNP2qviZ48uWudatdKUs+4Lb2jqFPtlzWIPjZ4x3ITDZEIcqhhbb7DG7pX0n8fLPSjq19NBaRrKZd8iLEFIj3bvSq/wk0q08QrNNFpKKscW7aYw3/fVeLPxZ8K1HXhKl/4NX/yo+mp0/aHg9v8AtI+MIbt7648NaDcytAYla4sXYID/ABKPMADe9ef+LjaeNdVOr61pluZD0WNCFAznAySf1r7T8QeKvAvgnVEsLzTLfUXKM0qraqyw1xl74i0DVll26RaM3zPueMbYa9DB+KfhhUhePCtKP/cVf/KjzcbKlRnyyhzHyK3gHwizMx0SLLnJPOemOtZ158IPCV4csbpOuPLmAx9OK7f9pP4xSeHrddG0bTHeOXc3mlwBXxr8Q/iDrN1qRtri8m2yMxz5pK7q9GPif4cyjpwvT/8ABi/+VHnzxOAo6ypJf16H0Bc/s8eCLh941HVY/aK6UD/0Cq4/Zn8AgY/tTWMYxzdp/wDEV8j6n4x1J2kha9nP+15p/wAa5a88ZaxPOiR6hdeWzbX3TN/311ran4i+HFf/AJpen/4MX/yo4qmdZfTdlSX3/wDAPubRf2cfAeh3K3Vvf6rIy9pbpcH8kBrudCsbfw7fLf6cmHUABX5HHevFfhx/wUB8P/sw/CbRfDPg+K0vboWbf2neXdos5mZv9+ty9/4Kk6N4n8FC70qO1h1Bfv8A2e3Cttrkn4k+HUXpwnTf/cRf/Kj0qeLy1K9op+v/AAD6D1L42+OdV0X+wLme3FtkYRIyOn/AsfpX05/wS8/aiubT4lR/APxDooZPENrJBpF7ZqQ0U6RvIfO3PypVWAKjIbHGCSPx/P7WfirxL4qPiPxD4gvrmeWXazXU+EZf4V219u/so/FJ/AHxL8HfFVSD9mMVycNgESQkHn6Oa+jyvFeH/HvDmcOlkUMLUwmHnVjOM+aXMoykrWhG1nHXe60sbYephcdSnToqz/qx+hPin9rrwf4B+IF78IvEOpLb3bRbYmml+6u7b92vP/E3jLSfiRpMvhW8mWaxVvkkZfmkb+Fq8E/4Kha54Y1r4Xr+1F4Pv47TWNNv4/tUa/6yaFvvKteN/s2/tOX/AIkuIEu9TuJ5mdVlWR/lb/dr+Rlzyj7WD92R61HD4eUOWovePqr4E/s9+LU8bPNo+pXFxEt/t+zzfd2/wr838NfUd5rGq+GvDcOlXmiRw3bJuna3Tay/w/LVD9kb+yNb0K31Sa2VZWbczRv+93f3mruPjSumqxSKzb5YmX5W+ZqdaMY0HJfEeVWlKOMVM8K8X302pXn/ABMkaTb/ABNXhX7a3iC80f8AZ1ltoYvJs9Y1GGCBZIt3mSK38O7+7X0N4k0220vRvt80bDd96T7rL/s18b/t2eMLzxJ4g0TwBczSS2Ol27XirDcbo1kkX5fl/vUcNYf6xm8ZS+yXneI9nl7ivtHz5a6RebW/fRhlTd+8StbTYXbZDbP+98r723/2anabazW80XmzfIz7WZU+bbV+G33XSuNvnNL/AH/4a/WZRhL3T4vDx93UntbeFbd32eUy/N++/irSW3ddqP8AeZfn8tvlqGGzcRs83lojP8ke6tOxsy37lHX5tq1PsY/EejGXL7plalpcLbpZtqN/EzfN/wACrI1HTXZfOSaTf/Ev8K//AGNdj/Zb+S0I3RFflaNvmqCbQ5mt1mFm3zJ/EnytWlOmpS0OeVT7J51daKjQO/kwzL8r/M+5mrB17QftDNc71j/56/3K9LvtFhj+eEbxIu1FVVrm7/QfMhZUtsKvzN5la06f2jzcRV908m1vQZomaaHb++2/Nv3ba5bWLV4VdEds/d3N92vWde0N47OW58narPu27drL/wDY1xuqaK8Lf8ey7Wbc8a/3a6adOMjyKlM861bSxHCR8zNGv3mf+Jq9S/ZCs/sdrr8ezH7237YzxJXIavo6bXR337m+SNq9B/Zjtmt4tdLoAWnh4H0ev03wjv8A6+4W/ap/6bmdnDUYxz+lb+9/6Sz5v/aAsUk+K3iQgr82sT7lK/7ZryTxBpPkyH522V7v8b9O874k+IZCmVbVLgHcvfca8p8TaS7Yfzm2/wAKtXxnEMf+FvFf9fJ/+lM+czDm+u1Y/wB6X5s801Sz8ubfsU7qghtWkVnxjbW9q2kvGzb3Vd3zKtUo9Jdm2P8AL8n3q8bl5feOMpw2TyZ+fH+1VuK1eGVJH3fd2otatjo825fITen+1WvY+H0umWaZGT/Z2/dqOX3dC5HP2ukzSSbI9zbf4a14NBmk2pD93+NWrpNN8NqyB4flRU+WtPS/DbztFcp87L975avl5/dI+E52z0Py9qeSw/2q2bfR9pXbw395q3F8PiOHe+1/+Bfdq9H4bm83Yib41/5bK/y7qylzco4mJY2tzCqf6Nv2/NtatjTbFJF3um5m+Zdv8NaFnod55vlodqt9/wAz5v8AgNbmm+F3VVZ7b97/ABL/AA1hKMzppmRb6G9xILxE+RZf4aW60d5GfEOP9n+GuysfD77Um8vCw/M1X/8AhG7U/f8Allk+ZGX+7XLU8ztpy5ZaHmFxpEMg8l7ba33vmqhLo26Rnd1G7/VMv8Nejal4XLSP8mX2feb7tYmpeH0t1SFId7b/APdrKPx+6dMZRl7p7akMLSB5tu5fuR7Pl/4FUFxbv9q2743iZfnZfvVsyWr21r9pSLa6syr538S1W8794k0MOz5NzfJurzJUT7SMTGuIUt5DDN8rSfMu1
fvL/tNWdf2Kecr20ioNm7dJ/DXTahbJcLvSzZhIm5v96srULW2aNYfmZ1/h2/w0nS5dUKpLlic7q0c0cbpZzL/e3L826ufvo0C7PO5b70bV0lxZ7l3ui7du1N3y/LXM+II0s2MyfIrN95VqfYyUrHFUqR+0ZepTfdm379332/u1Ta4/5Y+cp3fxU/Up90jSQp+62/Nuqrat5KfvnjxsVUjVfu/7VbxjynnSlzS8jYs2maNfnyq/3V/hrZ09khh3I7Jt2sjN96uZjvNzeS6cK/8AC3zVu6Sv2p/JRN38XmL/ABVjWjyxKoyjI6zTbxGhS6mm27m2NJWlDqFt/qfmdY93y/drnLOR4YUSE4TYzbf7zVaW8ttxvPJbzW++y1zcvvanVzSNj+0N0LIlsyuqfd/vf7VUby4cR75tzr9146zrjUpmZXeZY23bWjZ/++ai86ZSZpp1x8y7d33WojGZUqkuh7D4eYN8F5GQYzpd1gf9/K+c2uEdlTfsl3fd+8rV9DeF23fAt2kO7Ok3e73/ANZXzVqV1+72I67Nu5l/2q/fvFayyLh6/wD0CQ/9JpnTn7k6OH/w/wCQ/ULhGVjN5jOr/wALVnX+sRhWSxdQn97+KorjUIUVnRFV2+/uesa81ZAqv/e+7X5DCjzHy0sRLk5TZt9SkuNrufmX+791qbc30PmL/pkjt95l/hjrnYdYeOZk879yz7U/u1dutQhmhDw7gm37tdlOPKYSr8pozXUdxGN+3O37yv8Aeao47qaJWS2fZtbayt/FWTDqW24fjejf3m+7T11Kb5od+U/vMtb8suU4albmlc2luEaTZ0H8G5qsLfTKPJR9jf3q52S+e14++f4Ny063v/NV/wC8z/8AAqcpfZFKp7v946iPUsMUd1fd9+rNvqTzM/zrsh+6q1zUNwZG8t3Yj+OT7vzVo2d88jb/ADFwybdy1zVJTN4/zHV2OpbpVCXP3k+Tb8taul6pt2qjybl+bcqfLXJrqCWsMUe9f/Zv9mtXS7yZV3vMrbfm+Vfu/wB6uKodtH4jt7fVJmhT9yoT722N9rVpLrW+3857zczOqvIyf7NcVZ6pDcKyedtVf+ejVYbVHMaOkzMzfNtj+WOvHrLm1Pao1OWB2C6ptVLmNP3jK3zM1Mk1TLM73O/7vmqtc8uvPDZ/67BkTZ/lqkXVpolb/pomzatRGnIupUgdCtw8krI7ttm+5SrdWcarvm+dV+dY03Vi2987xqjvIpV9v+1t/vVJb3CXEh+fzd25Xm27dyrUezI9pD4S3NdJdq800rNE391dtYV800sazW0jJ95XVv71bWyGaFX37hGn+r3feWqN3bvHC7vtZG2/u4/vVcY8vuhJcxzupzX7R/Y3O9933f8AZrm9Qt/mmSaZtq7WRW/irsL6F4W87yZMMn3v9quf1zT0kk8t4d3+0q11UakY+6jklR5tZHL6havDJshdn/2v4dtYmpfKzTbPmX5UbZ8tdRq1r5e77u1fvVgatMgGx+iv8+3+KuuNTmmOOH5o8py19bzbmf7rfwKy1n6hbO8bBPmffu+aty+UyD/U/K3/AI7WXf2TyK3zso+9XbGXumNTD8pjzK8ch9KW1t3jbfNHt3fNVjyUbO/5v4qljtQzbHfedvy1pzcpz+xn8RY09X+byUYN/eb7tdDpdqlrcRbE+b7rMv3aztNtUh2edCwbZu/3q6PS4Ukj+eFW3fcZkrz61aB0U6Muc1dPhRdjpu3fd/2d1dDZxzRsm+RQ+z7ypWPYrCIvJy3zfc2/w1sWcfyMrzM/mfNu/u15sn73M4ndGPL8JrWavHH8k33v4mX+KtXT1DK37rfFGnzMv96s+xaa48uFNp2pt+X+KtSxV2/0ZwuPuurVjKXu8x1R980IUfa8Lw7yzbauTrM0cUybc/d27vu0Wq+XiaaH5fKbbHGn3lp+n2u0lEhb7/3mrl974oHSuWOjkWbPzrVUdEXds+fd81WrNXmZJrZNjbf3sci7lqOzhmhQu8KuJN33m/8AQa19Lsd0iujtsXa77f4V/u1pCXvSOyPN7L3S/pdq8W3y4Y9n3ZW+7/3zW5p+moq/vk+78vy/3araLps1uqrNc74t7NLuXduWugtbNJpFeJFcQ/LXp0o+5eJl8MinHYvJMuxP9Z8vzVp6PpH7v5+rfcXft27atLp6RhU3q5X7kcK7latezsvL2vMjb/7u3+L/ANlrrhKNMcpSlIrf2Z5liNm1U2NvVv4akt9JLfuYduzbu3N/erYt9N87fNMOI2+Vl/ib+7tq3JH9ojX7TtRdi/KyV3R5Kkipe7H3jnV0Pdcb/lZvl27vvVV1DR9sgR0ZQrfvWb5lau1hs7b7Q0yWfnPG219r/wAVMutJdpi8f3lX5GZ/l/4FWsfdkY1pLk948x1Lw7H57hE4X+Ff9r/ZrDvtO8jbbTOsI2/uvlr03UNB8qOW8dP9ptqfe3fxbq5bxBodnJL8/Rv9VtrlxFSPwnnxlCUzzvUNNk8yOKGNVbc3y/e3f7VTeHfh7r3izWo9B0S2a4mvH8q1ht4mkaaT+7tWuo0LwFqXjTWLfw9pWiXE13cOsEEdvFueRmav2S/4Jgf8E0fDH7Nnhiz+J3xLsYb7xdcRLJBG8S7NNVl+6v8A00/2q4oyVSrGETnx2NpYWlJs+f8A9gD/AIINaPdaNafEf9ryBnE0Ucll4ahfDNH95fOb+H/dr9B7X4f+Cvg54XTwV8NPB+n6Ho8UW2Kz0m1WJdv+038VeoMqbCBXnvxm12DRrAtM+0fxf7Vc3EK+rYHT5nzeBrVMXjo855n4x1+3tsrEGl2/8865STxBCzKdi7G+b723a1ZXiv4laJBIYUv4dzK37uR9tcRJ4uttVulvBqXlIr7dqv8AK1fkmI9mpn7Fl2DoRpe8z0m78SQiEfZrjLbf9X/do0/xZpzolvczNE7My/N92vL9Q8bY02Qw3MLlpV23CvuVVqjp/izVbP8A0bUvLzHL961b5WX+GuX2nLC6PVjl9CUfiPZriaCKY3KHzpP+WSxvS/20n2WS2uYVlST5fJkTcrf8BavGbHx5r3h23vtS1XWPt8Mcu6KG3i2yQru+7/tVtW/xOmvI5X8hlVov9HkZvvf7taU6s1Exll8Ho3cpfG79h79i79oBoj8YPgjpbX7RNEuraOn2WdVb+80f3mr4u+N//BB3SNHvH1X9mT43yXULK3kaD4si2tu/hVZl/wDQmr6/1r4qPafZg6NdPJ8sse/ay/8AxVTQfEabT1uEnuY5Ujt2ZPJl3MrV6WGz/HUbwvzGdHDTwsuejOSl+B+O/wAbP2Vfj9+zfNFD8Y/hvfaYm/Yl9Cvm2kkitt/1i/LXDXFuG2wwzKV/jbf91a/dSPxponjrSx4M8c6VY6lpVwn+n6feRK8Uy/8AAq+Mv22f+CS3hHVLCb4xfsS6kbcKjPf+A9Vus+d/e+yP/wC02r6TA5thcauSXuz7Ho0c/qxfs8XHT+ZfqfnrdfuZAg8tlV22M33v/wBmse+WG4bdvVdr/PWz4m0/VfDuvXPhvxVpVxpWoWcu
y60++t/Llt2/2lrHutn2p0f5D/E38Neh9qxljcRSr+9CXumX8m50toWHz7mbbu20v2G5WbZM6qyp/f8Alar9rH5URTbhl+6396plt0mm3o7bf9patS5ZRsfHYyn7TmRX/s879iW+3zNvzU5bPdCzJ8qr8qLHWhDZzbQ7wqgXds+bczU63ieNj5MPyyfP833t1Epe0PI9nGNjEvrHy43mcNu2bVWsXULV418wR7WX+Jq6y83rMd9tyzf71YOoW94uoSvsjxvXb83/AI9XRHl5tCfZ+6c7cH7ZiZEX+9uZPmZqqLHuk+f76/M6/wB2tK8017iR7nzvm3/xf+y1TbT5oZGm2K5k+5/drpjUjzcsjj9nIls43vLhXd2/dsu1Vau40m3RdtztU+Ym35q47SLV47hHkh2uv3o67jwrb7m+0yfNtT/Vt91q1jL3NCffOi0m1RWG92RV+Vmat/SVeQN9sdfJVtiLN8ysv+zUOhafDN/pM0zLu2t5ez71dDp9i8sf75I/OZ/kj2fw/wCzWkTjqxl8UR9qrRshfzHTYu+Rf7392us0Nd3h5FBPKP8Ae69TWHp9j9nkV5oWVlba3+0tdHZIY9OVGIYhSCVOQTzmv3TwPi/7azCX/ULU/wDSoHu8Nz/f1V/cf5ozUhmW6L7/AJpvvtJ/6DVuNoZo1mgRVf5vu/8AxNPhhWGTzpkm+58u1NzbqfpNjtdrxU+f+JW/hr8ajT92585HFDLiN7m3Xypmf7y7Vb+GsfXLebyXd3Zm+6jKn3a6C+V7aFXl8tP+eUcaVgeJp3s7d32KAvzbf4q3jROyOYe9Y4DxUqNayOlyzLv2/wB3c1eYeKrpJLr50ZVVtrxr/DXpfi5PMhlSFI96/NtX7q15h4q3sxn+6rfM/wAnzM1dVGMInTDMOWNkYd5JuXekzRvu+b+KoGum3K/3V/iaq1xdXKyOm3dt++1V5fOZW+fC/e2q1dUfdicWJzKUpDru42yM6PGRv+9WPqU3mSedvZTs/v8Ay1PfXDyDy0fDN9xttZl8z/Km/d/e2/xVpKJ5dbGc3ulZZvLk+cKy/wAVT6XfbZmk+b/4ms+6ZJGCb1H+ytLZ3CfKm/b8/wB3+9WUo8xx/Wpc2h0+mz7ZCiOx3fwt/DXQafqSRxgvz/erjrW6SOPY/wB37v8Au1uaTcfvPJeeuWUeVs6KOI/mOr0+4hgk3w/8Cratb7z5vOeZmZfvfL8rVyNnefMmblSrfL/s1uWOpOZE+dW3LXFUj9o9CjWlsdj4fvPtEfnO6p833V+9XQWerfZ5A6PIybNqN/drhtPupvkwY1Hn/PIr7WX/AIDW9Y6ojM0cm3zf4JJP/Qax5Ym/tOaNj6W/ZF8Vf2l8QtK0HW/EO+1k1SNGhV9sX3q/Xb9r74e+H9Z8J+HvEtkzEeFrWJrWRW+WNWjxX4a/AnUrmHxxbTWcMnmNcQtF/F8277y/3a/Yz4i+NPFWtfstaD4q8T6fOsGsWq2cDN8v7yNfl/8AQa8PN8LX9leEeaJ6GT16EsbBTlyyufLXxx8VQ/8ACSBHeSWW62/dbdtrqfgvZ2Gn2MmL9T5lvuZVavF/id4sfT5Gmd2edpVWWaR/mXa33a7X4V+Mkh8M/b5rZYUZGRmZtv8AwKvz+o58vKfqdFx9pZnOftAePLDwrqypYXmfOn2Iqxf3v4q4m1+MGlaTp8t5fpsiji2v/emrjfj58WJv7cub+51KF910yxLs2syr/FXzr42+MlyrN9muZPMV2bdv2qu6vWwdGXLFI8zMqkOaTNb9pD4wQ61eSXXnSS7Xbyrf7vkrXyz4m8QzXl083nf8tWZVb+Guk+I3jq/1dX+0zSOzP88m771eYa9rgW48lNufup/tV9Hh6fNLlPgMxxyjLkLPmXOrXC2dr5hdn27q9a+GvwF03UrEf29tt3b5vMk+ZVrkvAdvo+kWcOq6leR+dJ/D/dr0HS/FifYXSG88pN23cv3q9KVSNP3IbnnUY+0lz1TE+JX7H9zqelteeGNYhl2/fhWvErj4K+PPDd43+gSFVba7R7mr6x8N+MLaxhR01Nm3L86t93dWlofiDR7fWIRNp9vMjfM7Mn8K/M1XTx8oR5ZRMsRgYzqc0JHjfwD/AGcfiR8VtcTw94e8MX15eQv88KxfMv8AtfNX2bf/AAw+I9/4Lj+Evg/daeJYraGwi+XJimi2rIMD0CPXef8ABLv49+Fda/a41WbVbW1SG4s1gt2ZFVV2r/dr074NXmk/8N8C/wBQt0ktD4x1N2jLfKV/0gjn8q/Y/CXFUa2U8SrltbCTv6ctQ+k4bjVpUa8m/s6fieB/Hr9jX9oHRf2XX0a78YXGqPI0dxq/mbmdo1+7GtfL3wV8Val4B8UR2E/mW81vOq/MzfKv+7X7+fH7SfDHijwf/ZWj6bCkFwu+VY13bv7u6vxT/wCCj/w3h/Z9/aKtNbs7Nrey1iVtyqvyLIq/3q/nn2dCdL2dM6KebV41Y1JS8j9LP+Cf/wAZH1LS7W1Fzsm2f6xpf9Ztr6f8Va/ZeI/9PuUVHZtyyN91a/LT/gnv8YodWkhtt8aOrbEkWX5q/QbRdal+xw2d75yeXErfvPutXz8uajTlCZ9Th5fWK/tS18WJIbXQYbaZ98HzNLtTd/3ytfml4w8RW3jLx9rGtveb/Mv5EiZm+VY1+VVr74+O3i59P8A6hqt5NthsbCaVVX5Wb5f4a/OHw/fvJZwwzbklm3Ovyf3m3fNX1PCGFipTqng8RYiPtYUjbs2hkhV0RXZk2vIv3Vq3Y2r28wMKLI397f8ALtqG3unbZNv3tu3eWv8A6FWto9vux5033l3fc/8AHa+85InjUakeTWRoWOn/AGrEyOy7XVvu/K1aEGnpIzzpMp3bm3fd/wC+aZp9q8iv+53N8rJGzfL/ALtbFrZzKzFE/wBd8m1U+7/u1py+4aSrc2sfskFnpb/fuSqI3yqv+7/FVmTT3/2m2pt2r/7LWhptil1tmufluG/iZ921au29vJJbNM6ZH8G5fvVrTjDYx9p7vMzjtQ8PwyMUhdYi23ytyfN/u1z2peHZrhZofs27b/tV6Hc6a9xJ8ltuib7+75tq1Tbw3NHMvkosKr8ybauNP4TmqS5o+h49rHh3c7w52uybdrfwtXJa54Z+8iJ5bq/9zbur3LU/CrpcSvsb92+5ZF/irlNU8JpcSPC9hI7796SN/DW1OPvHj1qh4xrHhHMZhRFxJ/EtdD8EtKGlDVI9pBZ4c+nAfpXSah4ddt1sk0e6N23KsTU7wxpLaWZy2QZQjFG6r1r9M8J1/wAZ5hX5VP8A03M6+GakZ8QUv+3v/SWfP3xe8NJdeMdYmhdhNLqMhTC/7RryfxN4T2tvuZmO776qvy19K+P/AA3Nc69eTblbzZ34H8K5/irzPxR4UtmWV3tsiRq+Nz+N84xX/Xyf/pTPnMfL/bqv+KX5s+ePEGjuZm5xt/h2VQt9Jm3K6IrN/tf3a9Z8TeD0VWuURh/s1zDeE7lZPkTf/stXhSic3MjK03R52Vk+X5v4q6bS9BRdruiqdu3
50Mf+rXa22vIvC+qeTqDJv/AI9tet/GAGTwu6InKu3zMu3dXhen3Xk33/2dTzfZNYy5vdOo+Lmg/wBqaTFrFnCu2GL5mj+83+9XO/C7xpPoN/8AYHmby5Gwyt92u70vyfEHh99NmdTuT+GvJtf02bw9rcsPksu1vkq5R/lLpy3gfQUapfW5vIfmH8DLVO+sXjYYT7v3P9msn4I+Lodc0/8Asq7uVLr8qq1ddeWm7986MtTGRFT+Uw7GMRzKzorD+CrV1dalpOo23jDwxc+Tf2twssUi/K25W3LVe9hNrKPO2/8AAa2PCujJ4ruho803ktJF8kzfdVqqn7szHEcsqR+lDfG7Tf2hf2fvh98TIHT7dLb3lrqyr1E8XkKSfrnIr9fv2R/2ePhb44/4J6eH/h54/wDDlrd6f4w8GW7anFOinzVkgU1/PX+wvD4i0XQvEnhDV7qR7bT9Qiazjb7gLqwdl9m2Kfwr+ij9jfVpvHX7E3gPwxpF2bTVNO8H2CxqOsiiFcH8RX7fxIpx8IcmXapU/wDSqh5b5G218j+cz/gr1/wRl+KP7CWrX3xd8J6LNqHw/vNTkihvrdNy2O5vljkq18X7s6f+wYl2n/LPwlpJH529f0n6z8I/h3+038GPFP7OXxi8PwXtlrNnJBe2VzFuZNy7fMH+0rfNur+dn9vj4Zx/Bn9nvx38HFnEy+FCuiiRh98W13HBn8dlYeHdSnUynPGtGsNK/wD4DPU+/wCBIVIZdj03zL2Tt90tD4s8H+MI76FP3y7f8/LX15+y/wDtr6D8Afgb8R/h74wMl5p/ibwRfWFnY7fMjkuJF2xr/wCPV+eGh69No98uH2bfl/2a9W0HxNba94fNt8ryLF95q/FYylzRPl6lOPLdHpH/AATd8OXHhf49eIdPZR5T+Fd8bAfe/wBIhr6E8P8A7U/j79mr9r2bxF8P9VS0lRreO8Z4VctC9ugbBb7vBrwz/gndqE178ZtahuGJNv4adI8/3ftENZf7UXiWPTP2uNZtGuioW3sy2P4c20dfseeYeniPAnDU57PFP8qhz4SpVp4lSW6R+on7cX7GfwE/bA/Yd8Qftk/BDQ2sPGHg2wW91a3h+ZLrdtVt235vu/NX42+OrOG+0eLUi+4xru3L/E1fY/wD/wCClX7ZP7Ofwn1j4UfBzxho/wDwjviCyZL3S9S05ZdrMu3zN33vu18oeJ7G5utNuE1W8a4nm8x55NqrukZtzV+IYCnUw2HVGfT4X5HQ43rurF7/ABep2nwD1j7ZpJtodpaRFl/3v9qu71DQ7aWTeltJn726vHf2arq5s9ShheGMLHL5TRs+75a+hJtFuftHk71d/u/7q0qkZU5nv4P97SOMutN2nfN8iL8u2qt1oZ2/uYZAv8G75mrvJtH8yApLCrfNtSTZ/wChVWbQ7lY3/cq+1FXc1YnXUoxitDhJvDMcciJ833tzfPt3UQ6LukfZ/vIq/wB2u8Xw680f76237f8Anp/7LSWvhm2hcoE2Ue/GJUaJxi6O8ceyaHbt/harEOl/vmeL5kb5WXb92ulPhkys7/d+b5lbd81EekvDtT5kT7u2sqkToox5ShY2MNuq23k/PJ8r7a09Ntd0f2Z3ZUX+FW/iqS302BJmj2bv4ovOVv8A0KtbTbFPOd3hjKrL93+KvHxEYy5j1sPzdCa1tflWa5hbCrtZY/71eg/D2ForecncQUj2lmz2NcdYw21vJF+//cx/+hNXbeBwFiuI1PygptBXBHBr9E8D4cnihgLdqv8A6ZmfTZQ28VG/n+RTvrFBqczbnLyTOdg7L/eq/pOmwzNvSb+Hd8v3akcr9qnZlUO0hUEt/Dmr+j6f8o2Q4T5mT/er8v4ol/xk2NT/AOftT/0uRvOMXUlbuXNL0+2t5kT75kb5N0X8VW7q1RmP2OH51ZvNZX+7UkPnRqhR40+T5G+8u2p5vJaQQu+15Pmdo12rt/hrwVHmnzEy92HLymLJZvJCXhttiqu75v4qgWJJl+0wvvRvl+V62Psdt/BMrhvl3M/3WqvJYwqkoTcP4fMVdv8A3zXpUKxx1qPumVJG7wtbOm4r8+7Z95qz5rF7qaNLlFXd9xvur/wKthrPyZGe9RUh2bvv1Vms4Vm2Om9JvmRV/ir1aUuWR5EqMKnumRfaXN5LfY/ldv8AlnVCbR3WTzpkVTt+Zl/hrpWtUZl32rQpG23b/eqnqdq6x/YzMqK38TfdX/Zrsp1vd5TJYSMtWc7Jo9t5exAu+b7/AJi7qytS0+GSHYiKp2fJ5fy7f/iq6m40v5Wh2bj/ALTfvKx7+3SOEOm4FW+VVfdu/wCA1p7SfNZHRHB0ub4TktRs5vkheCRS3yxNH8rUklmlvIsPzI/zN5ez7y1u31j5k3z2bPtfcm1tvzVNb2264b7T5cj+V95vvbqqpU933jL+z4+1MG10ua3tTCiSNu+f99825v8AZrV03TXjYTeSybl2vGrfKtX7fSZvLV96qytu+Wun0XTXnsw6Q7fMl3bm+X5a5ec1+p+4WpmS4jDp5au3ypt+bc1QND9sjMMlttdfmZZF+8tXJY/OUyecqtH8q7v4afGyXC+dD5gb7ryN97bXycacoxufZYepGVU5jULVFY/3Ff5mkTdtX+7WBq1htj86Z1RJPu7fm2rXb3EbyQyTTIpSNd33/vVzer2Kbi88SqWXc/8As/7tdUactj6bBy5Th9Q09GbZDYeaaxdWiudzFLNY/MTc275v+A12l5YJ5ZmQMfmbYuysXVNLvFhWabcHk3KrMu1WolTlI+rwMonF30MNvav8m1vuuv8AdrEltUVuX+X5vlZPvV0msWMyxnlSy/N838Vc83O5Nn3UZf8AgVKnzU46HoYinSlQ5jNvrdGhG9FT+/TI18sr5PKt92rs1i80fk71+Z/4aiVXhV/k+98rbv4a9XD1OblPzzOKPUtaXDD5bGGbe393dWrp91bLcFJEYlk2p/eWsRZpoZGh+5uXan95quWeoOrOjpIP4E3bdzV6FHm+I/Ls0lyz5TpbUf6Ps3/vFT5FZ/vf71bWkzblGyFf9uTfXN2epJ5yRzQsqs+FZq6PR7oQ3AR+Il+dG21FVVIwPnqkrz5T2TwiNvw+hBJGLaXJ79WrN0+zM0guYXbZ/ufNWr4YZR4FicsMfZpCT6ctVOykeNY/szxiJduxll+Zv71fsHjK7ZJw6/8AqEj/AOk0z67M6EqmHwvlBfki3bxwthPsyr8nysq/M1WFs0muGCfIjfMqs33VpNJjcsf3zb9+1tzVqw28Ls6TIuFRfmXa25q/BqlT4YxOOOF5feM2PT03J/tf7VU9WtUhSZ4YdzfxeW/y10kbov750XLJtRdnzNWfeNbXEbpCmP4ZY/71XTkKUY09jmrlblo3+zJHv/jaOqElrtY3O9Vf7rfJ8tbt5pLwt51rCrt/zz+7VGRX8kOEUfIypuTdRUpwLoy5TLurPz1KO/y/7P8Ad/vVX+zorGZ4Nj/9M/4a0biGaGPzH8vP3dy/3agby41875kfZtZv726oUfsov23ve8ZF5HDI
u9Fk3btu1fl21R+zwx7/ACXZXVty/PW3fQzR/ubx2YR7vu/+g7qyZrN9rn+Jv+Wcj/dpSjGJfNzEbb7hi6Phtnyxsvzf71W185ZDCjthk3MzL/FVNmeSREm3fL8zqtXdPuoWWVE3F9rMqr95VrmlGZ105R5i5Fa7Zgn3l3/JJ91lrZ0e0SFU/cx5Vv4U+9VfRdkw+RFxt+dpPl2/7S1u2qoWWySGN0jVm3L/ALVc1STjLlOunHmL1jbrbvHM1tG8kf8AyzZ6o+Hfi/oll4yu9E1KzWaOGDY0cifMzVbbVbCxs7i8ufl8u32oq/Krf8CrzDS9JfVtYudVheNdrM3y/KslfX5Dgeb99I+F4pzSL/2SHzO2/aV8O+Bvjf8ADW80rStBji1G1l3W80lrtdmVf7y18d/Dm8vJvDuq/ArxOjW17Hun0hpIv3ny/wANfW+i+JrbQ9WiS5jmeVtu6ORvu15l+118M3+1Wnxj8DWarqOmy+fcLCvyzKv3t1fUfFSPhIynFHj/AMM9WtvFGl3HgPUvmaRWW3kZPm8xf71cf4o0W/8ACuuTQ9DH8jbm+7V/xJqFtofiiLxnoj/u7pVfbH8u1v4lrb+IWqWHjq3t/EKWCxFUXd/tNWcZcpcY8vwnIR+S1mu/5dvzfM26uU8RXELq8L/NW/qF4kJe2+bd/e/hrjdeuIbiR/O521RcYmLfQ7lkTf8ALVPzPl/d7WFXLry1Yon9/wDv1RjX940CJt/vVXxGsRGh3fPv+X7zK1Q3C/3z/wB8ir01u6xhPmcN/DVS6X/pn8y1JRQk3s77D92o5G3NUt0dzbAnNQfOFJf+GgcdwzJt6fK1Sqs27Z8v3ahLZC59akST5vegQ5mVfkAXO371Rec7fJimyLtaljX5hl6B8rF+8Pkpg3rgB8bqmGyM/PRbKZp0THP/ALLREI7lhpfs9nvT77fKm3+7S2uW+fZUF0yNcHZ8392pLePzG/121m/u0fETI6rwyvlus0O1TXq3he4v5rf/AFyqipXlPh9fsixzTOrbf/Hq7qz8TTNZpZ2Vts/4HWnuxiYSjzFX4talusSk0yuy/crc/YH+Ftz8ZP2jPCvgm2T99q3iO1giVvmX/WL8teX/ABE1OaS82TTbj/Ctfa3/AAb9eA/+Eu/4KAfD55oVMVrqn2h2/wB1d1aYWP7xGOLl7PDM/rI+FnhWw8D+BNI8IaZb+VBpmnQ20ar/ALKqtfhJ/wAHqVjMPih8ANV2KYvsGqQfe2nc0kdfvbpF5i1V33D5f4q/FP8A4PMfh5P4m/Z9+Fnxctot0fhrxXNa3kiruCLcL8uW/wCA1jXhOXOb4KtSpxgjD/4Jd+PLbTPgjpDwzNuhtY/3Ky/e+X71fT3jL9qq6ure4s3RbhI7fZ5Myfd3V+Zv/BOX4uPp/wAEdLtdiuY4tr3S/L8v92vXdb+NU11cMlzcssW/a21fmZa/EsXTjHGzifteCq3w8Kh79qGvfCLxBa3P2+2aK5aJvlVFZLf5vl+WvKvF2ofCy0y/+hyyR3CrtVFXd/wGvK/E3xFdbGYQ3k0QkT/lncbdu37teI+OPiFc3WoTXM2sNLMv/H02/bu/u1pRVX4eYKkqHPzSPW/it4/8K6Pb3l9Z2cMRkf70d1uaNl+7tXd8tfNnxI+NV5rDNpVtPMSu4O1vcbPvf3ttYXjj4lX+pRy2cNtazRTf62O4+9/vbq4Vdck+0PeWdqsMXy/LG1ejh6NWXxbHBUxsPhge3fCi68PeH7W08T+MJLdEhTcnmMzL/u7a4f8AaI8YeAPHGuPqvg/wwthffduri1dlW4/3l+7Xn2s+NLieFrN7nA/55791Zmk6sLy8Uvufc3zs3zVpHByjzSbDEZpT9lyQMjxZp9/cafcJsVGX+633q5fwrHDDfLHsY7fvqv8AFXqfiHS7OTT5P9J3S/d2/wCzXCw6L/Z128yHaNu7dXp4WtH6tKJ8pjpSqVec6ixk8u3QQozfw7ZKmbUZo2f/AENVTftT5/u1haHcTKrbIfl3bvvVLc303lt/CjfeZq55UrS1iT7SPL7xrTalCp3/AHCv/oX96ga4kjffyG+VWX/2auea6SaNX6t93/ZqRdSeFlj37l+66r/DWUsPzRsRHFWkb0379Sjncu370f8AeqrNvkuNzv8Ad/hVPvUumXELRomxd27c7bvu1p3K2f2NUWFd0f3q4pS5Jcp0+05/ecg8L69d6bf/AOu+Xcu3c9fQXwZ8fbrpLY37GWR925X/APHa+Zo5vLuN8abt38TV6D8LtWmj1RE38KyrXnZhho1I8x62T5hOnXjGUvdP0s/Zl8aQzSb7+8kRfKZ0jk/iavs34HePptvnXN5IdPj/AHrQyfIsf975q/PD9mjWnjVLZ0kuPlXYq/KrNt+9Xsni79pTR/DujxeG9Nv98ELK2s3DO21v4tsf+7XzFFTniLRP0mlioYjDF/8A4KdftueKviPdj4BfBTU9Lht4dy6veSy7Z5N3/LNf7u2vyk+K0Pj9r6XStV+0TfZ5WiTbuVWZfvf71cj4t/aA1rVv2h/FHjbVNakX+0PEVw8CK+1fJ3fu/wDx1a9x0/44fDfxV4VtodS1JWuo5du2aL7q/wAXzV9xDBf2dGLnHmf8x8x7SljY/u58vL9k8u+Cfxe174b+Plie5kjWaVYrqFn+Vo2/2a/Wn/gnHcw/FL4q6HpNpYeWt226JvK2xLtb5W/2flr8sfixN4G1rUIde0GzjSaOVf3y/wAS/d219/f8EmPi+3hCfTda0+8ju72wRoVj+80a7t21WqMTXoUXGrym+WxxM3Uw/N732T17/gujrGlXH7XvhHQNNljZrDwbJZCTb97y5F3f98tXx9YtDcXRheza23fcX/np/tV1/wDwWR/aB/tP9uH4dWrXDJP/AMI5dXF/5kvzeXNIu3cv/Aa5SxtbmZleMM67Fb5vvN/dr6/AL61ho1v5j4vH/wCxYl4ZfYtzepoQW8yqgRJPm3bFb7rf7NT2un+XI7Ikasr/AHWXdtqWHT0j8uF5l3tu3sv3t392ren2McfyImx2+baybv8AgTV6tPD8sfdOH2kqgabao8aXPV/m3/JtWr9nNlAiIwRXZd3/AKFTbW10+3ZU3yI33VXf8tWLiG2kjPnTMm2XduV/lVa7IU+aPwlQ5ia3ZI9z+TlNm1mZ9v8AwGmX+yS3ZLZ1zs27f+ebU+RUuGFs/wC8/usqblVdtQzWt5HG8xRkeNNzK38S1p7GUT06EY81jD1BtpO5GVv4JGX5dtYt98siwyXLY37f3afMtbmoRoqs++QN96JvK+81Y+pQpJI0zw/Kv/LOR9u7/gVbRlyyOiWHjy3kYd5G6xMsybv4lb/4mteAlvCcmCAfs0oyO33qrahBbMw8jajbW2Rs/wB1v7tXLZQ/hpk6Zt3z9ea/Y/BySeb45/8AUNP/ANKgehklJRxVW38j/NHk2oQ/Zmx1Pzb1X7y15F+1N8TE8N6HH4M012+0t89xcRt/D/davbPESw6dY3GsXnywxxNLLuX5lVa+KviRrl58RfFVzfw
iSX7VKzRK33tv8O6vyLmlI/Os0l7P3TzrWNY1LVrppnfJ/wDZqteH/B2q6xcL9mhZ9332X5q9x+Bv7IHiT4lXyNDpTGPfu3N8y19R+Bf2J/B/gPyrzWPJdoU3y7v4f9n/AHq09nGMfePAeIl8MYnh/wCzL+zNBGqeKvE/7q3hTzW2r83+7Wh+0Z8ZLOzmfRNKvdkcMXlKsfy/u67j9or45WHhHR/+EQ8Lpa26Q7llaH5fu/8As1fFvjjxlf69qT3E0zFv7zfxVjKXtCqceX3iHxN4qv8AVLp5nmb5vlVf4dtc7cTSSMz/AHfmpsl5tkO8sy/7NNWYNL99sN/epRibR5ZFmGR1hZ/PbP8Ad/hpkjea4T7p+9upPM2q33jz/doZkk3eW/z/AMCt/do+H4g5ZRDa8i7H4ZX+RqI18uTyU+f+JGanMvmK+R/wKm/xA7Pm/wA/LVgWfL+YfPuMlTx2IWHZvwf4FaorCT5d6Q4Vf4a0Y5I2kWd03bf9il8IvckQWfnRtsdNo+7/AL1dN4VtUuLhN7qpZ/4nrFe33ZfZyPu7auWqTWNxG6chfm/3amURf4Tvtahm0uxhuUdmP3Xqj/ayNgPcqzf3qZNqlzfeHXhkdsbF/wC+q52TWt1vE7pg/d/8epylHluL4Ze6ej+C9StmnG+ZflrsJms5labyd+19rs3y/LXlHgvUlhvPIQZDOqt8/wAtenSXD3FuHRIwG+VP9r/aq4GFT4zmvirHB/wjUqb8rXzrqMiR6mU/2/lZa+hfilJ9n8LmF0+ZU+bd/FXzjq0/l6kyd9/zVlyG9OPKd58PdWmWRbZ9uGqt8aPCvlj+2LZPlrO8D3nl6hE4/v8Ay16h4p0mbXvDaJs37k3U485Upcsrnivw/wDEU3h3Xo5t7bWZVr6XhZNa8P2+pWwVVkT5NtfLGs2NzompvAybWjb5a+gP2e/Ey654b+xzfO1vtO3+9T+0FSPNHmLV9ZPD9xPvfw1Hod5Na6ojom397uZmb5a6TWNJ87e8Lqy/M0W5vvVzV1C9rl4du5X/AIq0fvaGMZQ6n2P+zVqFjrGl3+q2kmWljtlmQfdVlVxx+GK/az9kb4v+Hdf/AGQvh38W/h1cq954R09fD3iWzjkIaOW3IQlgOu5QG/4FX4hfsbG3n+HdxewS58y4VSmc7ML0/Mmv1R/4JmfCfxF8K/2efHh1lbm1v/El3Z61p9vdndHd2EqiQSRDsRux+FfuWfUpVPCHJVf7dS//AIFUPHxFSNOUklvsforFqlhq+lad8XfC211aJWu1jX/WRt96v50v+Cvk8culfHG4jb5W8XX7KT6HVeK/oG/Yu8RnXfh5eaDc7cWN15aIvZf7tfgx/wAFMPhn4g+Kms/G/wACeENPkub1td126t7eJcs62t3LcsAP9yFq4fDWD+oZ5Se6w8l/5LM/ROCnFZXjqn2XRb/CVz8gbxRJGJk+Xd/47XR/C/WkW+WG6ucqz7flrnWk8u3NtNuRlTbTdFmez1VXR9qK27dv+9X4zKPJM+YjZo+zP2CNJi0743a3PAi7ZvDTNuVs5/0iGvJ/289TbSv2vdYnj72tluO7p/o0dewf8E8rqLUvFuoX7gecNDZSR/dM0RH8q8b/AOCi0JX9prWZliyWs7M/lbx1+1Y7/kx2F5v+gl/lUOKD5cW/Q634U69Dr+iRvM/3k27l+WovFGiyQtNv2vu+4q/w/wC1XmHwF8ZPDfLp83CM/wB3/ar27XI/7S09LhDn5Pk//ar8R5Z851VPd1Ob+E+gz6X4qea2+ZpGV/lf7rV9U2/h/wAyxttV3rNK0S7mX5W/2q+cfA9x/ZviCPzvLTzGXe0n3a+pPDOmw3Wl2z2G1921X8t/lWscV3PbymVoyUjL/sOGSSVP3ij5n3bf4qhm8K7fk8qNv7zN/wAtF/vV2cWj/O8zp8u/5GX/ANmqWHRZo2Donys/z/7P/Aa4Iy97mPW5eY45fDsIHnJbNj+63zfNVa68NzNM3lQ7g3zO33dtd/b6L8/kukmVZv3i/wANQto1ybzeifMsW2X5PvNWsZF8qPN/+EfupJpPO8z737rb/DVabw26t51zD8yvu27/ALtemXnh942Z54WR12/Lt+9VSTQfOje5RPm+Zt3+1/dquXm94qnzROAj0vKtsmU1NZ6fM100c0y/vNvy7PmWuqvNHezj857ZfvbnVl3fLVGaxhWRvL2gN/E33lry8RTnI9XC/AUre38zZZ+TCu6Vn3SLu+Wut8FYEVwocNtKjI/GuY8txtnR5FX/ANlrpfAkkslrO0wG7eOQc561+g+CcZR8T8F6Vf8A01M+jyjl+tRt5/kSecp1WZmfcquVK/3a2Le4gZXRXZVZ9qfN/wCg1zGq25Oq3DRL1fMjf3eatafrVtb2f77cxWXbtZfm3V+T8VR/4yTGt/8AP6p/6XI6pStUfqdZDdeWwhmeMqu0bt27/gTVat7rztsW9lmZ2+bd95a53T9Q3fvkRl+b/vqtK1uEkuw+/P73cjbP/Ha8GnHlj7ope9K8jUmjRt37mOTa/wB6P+KqrRzRr/pLxoI5Wba3zKy1JHev8sNtCsSLufdG/wA26nyTOy75kUbvl2t8yr/wKuqj7xjWiUNzysyFFVJF3eZ5X97+FqpfY9t1strn5Nmf+Bf71bUlu8m3Ztx93cq1Ev2OGRfnjVd/z7ov9mvUp1IxjocFTD/aM2GR928wxumz5tvyt/wGotQhs2s1S/8Al2/Nu/iZqv3UKtGyPbRhpF/5afwrVKS3jmdI25C/89P4a6aco81xRjywMi5tdqMHhaRm2/vPustZM0KLM9480Mpk/uxfdrodQs0jxbXMG9P+Wsbfd2/w1lyQ/vPMh3IrLt+5826unm5jqjTj7pjQqjeY/wAuxtyptpI9PDKqRWzblT52ZtrKtaDWm26aEOzLGnyNIvzf8Cqdbd1uI3+6kn3lqZS5S/Zd2Q6RYI0iPcwrtX5F3f3a6/TbWz8yK2+6v3l3S/K1YNmsLW/nJbRo6xfIyv8AL/u10WiyWduy74fnk+58n3awjHmmRKnyxsZlkqW+/wCzTb337lb/ANmq/CryRiW5hVfLTc26sqORGm+R12Ku1G3bflresY0aWP596yfLtb+7/tVzfVeWPvHRg8RDnMnUNJ3XGy2dWWRd37n7tUb7T4Vgl3vtaZ9qsy7ttdZHapC3yfM0K7UkVfu1TvLNLiObzrVmVvmt41/ho9n7sUfV4PER+ycBcaNC3yfZm/h3x/dasbVNLf7QyTI0iqjNEzS/davQdS0+bavnfKyurbVSse80W1W38mGzW2VpW+9/49WVSj1PqcPiuXU8u1zQ/tFudm0bvm2s1cNq1rDb3R3wqu6LajR/dr17XLGGNXSFFeJW2LJIm2uL8RaTtm2WyY8v+LZ8rVzSjyy5uU9KWOhKHvHE3NujLs2bR/Gy/wAVZ9xJvk2SIyeZ8yK38VbWqWrpcC2Ty0/vs1Zd1bw25NsjszRv8+7+KunDx5veR8Zn1TlgVwqbm877sf3FZ/mWiO923CTeZG3l7vvfwt
Ud0IoVZEdUP92qzRzXDPeJ8nybtyr/AA17uHjGUD8mzKXNP3TpdLvNsgLurD/x2up8NapCzJv3I38bSfdrgdNaRYd6Pvf73zPXQaTeos7pNCoGzajbqyqPl0OTB4WdSZ9FeELjz/hrDcu2c2UpJK+7dqxtN1SBoRCky75Nq7tn/oNXvAsjn4OwSZJYabNz7/PXGaffNt2Xn3vlWJlr9d8ZFB5Hw7zf9AkP/SaZ9tisPJ0qKttFL8jvPtzSS/vo2Pz/ACf7VbGntDcYh37VWL5P96uJtdYe3aLzpGKtKqbfvbf9pq6O01aFd/kxeay7WT5tu3/ar8G5bdDnlg5RidVH+7j2TTRgNt+b+L/gNV9Qt/Lh875VVvut/FVZNShmj2PlZJEoW8Ty/M3rtmTDM1aR5ubU4sTheWN+UpahNvjif92VjVll/h+Wsa68ncH3/Lv2/K1at5cQLZsjooSN9qsq/NWJeSPNIH3xhlX7uyuv2cJanj1JSo6lW6vprWSSFEVvm2vt/hrP+2PHy6MXX7m5/lp80iWcjJvVmk+b5j/FVKS8e0kV5kWTav8AqW+5/vVhyolVJT1HzTPclER9+35nXf8AdqnI3mQPeZYOrbdzL91qcuoTXUoRE3N/DGv8VZV5cOofzdw+f56JU/d906I1I815DJFdLpHf52/vK9bGkzbRshh3J/Dt+9/tVh+e8Vwuzcm7/wAd/vNW1prXJlXztpVU2p5bVxVo8ux1YWUZVToLHfCy7H3xSMq7W/8AHVrZtbyGzj+eTyW+ZW21z1jbvcSLbQ7ZFZ1+X+7UHjzxRbaLobfabzykunZIvk/iWsMPh5YutGCOvF42lgcLKozk/ih48v8AUGmsNHvNiwqzIqv823+9Wt+zr4q+1aXqUOqvCvk7fu/eZa4hYbaRWv7x/vK33U+aSsb4f+Jv+Ed0/wAQ2fnSb2fzYlr9KwtOOHoxjA/HsZiJ4qtKc/tHV/EXx9Db+IrqPTXba3+oaT5tv+7Ulx44m8TeGWtvtnmM1vsaFvu/drynVPG0OtSx6qjxujf6pf4VqnZ+NLzR4bh/tO9G+Zlb+H/ZrWMZfaMOXljY57xp4Zv9FYeHtZTat1un05l/u7qX4d3TzWM/h7UtrM25oPk+bdXD+LviXqWueKvO1C8kfy2/dbn+Vf8AZrW0XXNl9Hre/bL/AMtVV6qXJKRUecf4i0W50mR0kfO52b/d/wBmuD8QSI0zJ5fzf3q9c8ZW9nrUKarYP8zLuljryDxtDNb3pR/k/wB2lKMTSPwmZb/LcL91tz02aRFun+6GZ/u/xVHpt2i3A85MfP8AJUszJJqT+S+4bv7lTzcpfuiMybS7hs/w1XuIXjjbZ8zN83zVa+zlpFmR/vfc/wBmlmhdfnkfe1Vyi+EwbqPbJvd9x/u1HIu35CmK1NQs0jf59uG+7WfIzqv+7/eqYFRIFXHJpfu/OvVad96Ty+opu5P8mgYrSPu+Y4+amt/fBprM+Pubv9qlb5iv8NVylcrHFvMHNWLMIkZbOHbj/gNQrJtX7i0I5jk2P81Ll7kiyLtbzEf5qns5JGb+HctRzSIsfyfxfxLV7w/Z+dIsju2Vf5l/vVUdifsmzo2xpP38yt/Ftat7+0o7e12PxtTcrLXOeekcmyGH5P46g1rWkmtRDDNs2/L8tHwklHWNQ+2ah533Ru/75r9NP+DaXQ7O8/bi0HWL+FtlnazTpNv+638Py1+Xm52kXZ8y1+rf/BuDpM0P7R0GsI+1YbBtjK+3c27/ANBrbCx/f8pw5j7tA/ps0rUY7nSVuIZg4ZMqy1+f3/Bf/wCDFt+0D+wF498GtazTX1nYNqOmxr8376H5lavtLQdcubfSVe5h2Lt/4DXjn7S2oaJ4i8M3+n6rbRyi4sJoNsn3W3Ltr0qmH5YTPGp1nTqxkfzX/wDBPv4n3kHgCXw3NcyRSw/Kit91dv3q+ktH1K51Ngkj7/L+Xc33l3V8d6v4d1/9lT9srxj8Fp4dkcetyPZxzL96GRtysrf8Cr2/S/FXjCdg3k4Vvm3K/wB2vx7PsCqWMk4R1kft2S5l7TARS6HfeNNak0zfZujJu+42/wC9XhXjbxRcx3E1mkyxJI26WT+Jq0/GnjzxI0MiXj/vY22xSK+5VrxzxLqGt6g0syOz+Zub79cVDD1Yx946cViox94s+ItbtrxWm+VnV/mZX27lrndS162jzsh3rJ8v36y9RuL/AHohfbu+8u+sxVubpmKI277rV6tHD80uaZ81iMZJz9021v3Wbe83+8rVds9STyx8mxVf5KwbGzv2X7M77trf3a6G10dGZY4UZn2Y27a3kocpz/WJS903YdchvIxa22mqzL8ryb/vVj6tY/Z5C8w3bl/hqy1reWeLZNysvzNI33aZrEn2O1KXM0Zdk+Zt9cs4xpStE29pzRMZm8tVdEb+9tZ6RtQS6xvTG35dtV7rUPPt1SF1A37fmqD7VCqsic/JWnLOpH3jmlU+yTSXO3cnzbPvJTdztN58O5k/u021WSSFvvbtv3WpGZI38nzMj+6v3anlZjzc0TX0q8JBSSFt38Tf3q05LzdGX3rtbb8q1gRq8Mfkw8NsrVtWS4ZH+78n3tny1zVKMPiiXGpP4TQ0mxSa6CIjGvafgT8J9S8W30cMNg2/cv8AD83/AAFq88+Hfgn/AISDVIk/56fd+f8Air3rUPi5pvwv8NJ8MfBu1NSuLX/ibXnm7mt1X+Ff9qvncbOpKfJT3Pey3Dyl70j0bxJ8WtB+F+kxeAPD1ztu9ipcXi/eVv4trVW8L+JLHxFY/wBlQwyR7YGSWNpdzKv97/vmvljxR8SPM8RS3szsfn+9u+61ei/AXxlbah4sT7ZeMqyIq+dD83y/7VdFDK6dGMZH1uHx8f4VI4b9qT9hfVdHjPxF8APNdabfNvWOT/WK38W3/Zr50tfDnimPVF0SFrpWkfbtjf5lav3Q/Zd+Cfhv4xSN4e1J7O5s5NNkb7L9nZpF2/8ALT/ZrmfG3/BG/wCCGh/Gq0+LV/4kXR7O1ZZ5dH2My3TL8zfN/CtfZ4bGYf6rH2j2Pl8VlmK+uSdHm5T8aP7H8ZeCvEU3h7Xtakja3+ae3uH/AHke6vob9kD9r7w9+zA1/r2t63eXiyRfuNNtW/1k38P+7XOf8FbPCNn4F/4KF+OrPTbSOCzvILG6sFh27fLa3Vd3/jtfPVncTRtsRF+X+JvvV6VTJsHj6UZT6nhQzrHZZiZcnxRPRPjn+0H4/wDjJ+0TJ8ffHlypmvtsEUcLNttbeP8A1cdfdXwP8Qv4q8A6Zr03lyzLEsXzf7vytX5z/wBnprmi3Wkv8zsu5Pk/ir7c/wCCefie28YfCtLCbafsq/Pu+9u+61ep9VjToRhD4YnmU8bXxGMlUqvmlI97+xwxI6Im8xtulb+7SWtvcsq7XbLfLt/2au/Y5biSJ4XVGjfY235V20q2t3CyJc3imVW3/wCzV0aMT0JVJhDZ3MMhfqvyq+6tJdNf7Ozz7Qdn+rVfvUSQzTQq9y6p/st/Cv8AwGr0apCsX2l5JfMbZ
tVPl+9XoRp/DE2oVoop2dncttcTR4VNqbU+Zf8Ae/vVLJbOyvD8z7l3bmT7tbOn6PYLIJrNJA7Ozyr/AHamm095oXSH7+791/u0/ZxjM9jDylzcxx+r6SnlmZLbcvlfJXJ3djNCu+HzJUj/ALy7ttei65psMkbTIiq3yqkO9vlb+L5a5bUrU25ZIpo2XYrNIqfxf3f96so0+WV2exH3onF39iJlR/sf75k+Vv8AZq3Cgi0J0I4EL8Z+tbdxpttcMl5NbNv2MyfJ92s+5hit7eSJ8lQh3ce3NfrPg6n/AGxjv+wap/6VA9TKafLWn/hf6HhXx8864+H95pNvcxq940aRbXbcy7vmWsP9nn9iSfUJrTxP4qkjtLPazr9ob5v+BU79pD4oaT4B17RtNeGNyzSTuqp83y/d3Vwt9+29r+rSQeG9NvPJt40VIo93y1+RU5TpxPyLOoyqY6UeY+zm8U+A/hzocWieEobWJ412+ZH/AOzV458bv2gNSjsbjTbaSMBvuSRv/rP9pq4q1+IQk0NNVvNY3Oy/dkl+b/gNeF/Gz4xPfM0drNnd8u3+6tTKU5Hl06cYHM/FjxxeaxqUjvfs3zV5pdaghmb99z/H89R61r1zfXUrvMxO2sxLhNob/vqlGJ08vuFuS6eRm2PgP/E1FvI6gd9tV1b5V2fxVatLZ5Gx/wABar+ySW1kQqrn5m2/99U7a8ap8m41Yt9PeGPyf4lqOZUjYohZT/tfdanLYrm7CKvkr5z8t/s1D9q3RqPvf32qTzP3bfdVmWoYVmmk2TTLto/vEe9KBoWrPMR/Cu/+Kr1qu2M+duB3fJVKzk/d4Sf5v92ryvuj8z+Jf71EthfaL1rskjCZ3VqQxwx4SZF3/wB6sLT7pGZ5P/HW/irZs5hIq+Tt+b/bqJf3Q5jet1SbTXR0yVX5FWuFuL6aOaVJnUlX+7/drrYbx7LdC6NsZa4TxRNNa61LC6bQzfJVe5yExly+6dR4T1ZLeRHTd8zfOtey2d5Mvh2F0+f5Pl3fw18+eH9UeGZN6bm3V7Z4buPtHhuN0f8A2k3VRE+bm5kYnxSvJv7FmR0bKru+Zq+e9ZkdtQdtn8Ve6/Fq8f8AseTCbvn+6teCXs264ft81LlRvT3Ok8Et/pip/DuWvd/DbvqGisg6bPusn3q8C8Gs8c4fzPlr6G+HLPcaLEnzKuz/AL6olsTI8a+N3hN7O4S/httqN/FR+z34lXw94si87lJv3TR16l8YPCsOqWL232bYqpvVq8E0ye58N+I1m+60cu7a1HwwJjLmjyn1pqFnCrM6IvzfdaOuS1yx8uRf3W/crN975a6Pw3qn/CQ+H7TUofnZol+61VtSsdzF3T5Wb5KI8pnLmvyntf7Bs0v9jeJLUnEa3Vs6JnO0lZAf/Qa/oz/Zy+Glh8VP+Cffw+bQfIOsRfDy2g067IwUlFuB5bMOdu9a/nN/YSSaPTvEsc0SqRNacr3+WWv2P/4IZft32euaXqX7LvjHWD52ialcLpIlkXCxeYx2LX7XxLVqUvCHJJw/5+VP/Sqhyxp0qladOps0dD/wSS/bYXx/8RPEHwu8cQw2GvabqU2k6tZru+W4hkZflr51/Zu0LSvEv/BXi58M65ZJPZX/AMQfEttd28vKvG6Xysp+oJqT4paEf2CP+C0Gs+IfGem6hbeDPiVq0epadfabEqotxJ95fm+X733qxv2d/Fh0z/gqefGlphQnj/XLpQ3PykXbYP4Gujw/hF5bnNWP2sNL/wBJmfVcBVWsuzOjU2hTl91pf5H5W/8ABaz/AIJz65+wH+294m+GttpzJ4c1mRtV8H3Cr8klrI27y9396Nvlr41+z3OnybJk3N937v3a/qz/AOC+n7CHhT/goj+w3f8AxZ+HtvFc+Mvh/ZSappDQL+8mhVd00H/fO5q/lv1a18lnhv4eGbbu/iVq/Gq8VUiqq6/F6nymDrulP2Utvs+aPpn/AIJkXjT+ONbjJzjRCc/9to64r9v+xa6/aM1zcoJFpZtF6/8AHuldd/wTEtltviRrscS/INAwD/22jql+3vo89v8AG67161wwltbeOZW6cRLX65jnzeB2F/7Cn+VQ1fL9bfofMnhnVJvDfiCJHb5Gb+9X1D8O9YtdZ0XZ9syzRfdX7tfNPjLQUs5BfwptGzcn92vSP2e/GiMwsLl4x/CjNX4rKMoy5jq5Y1D1DVLFFV33so/vfxV7v+zP4y/4SDS/7HvLlvtdr8qKv3mrxbVLdJUf+JP4mjrQ+EXi7/hC/GVvfzXrQ20cv73/AK51GIp+0pamuDrSoVY2PsS102a3XftVU3/3v/Hmq5DY7vKS9uYWWT+KP5W21f8ADa2eqaXDeWj+Yl0iurf71X5rF/tT+dDH8qtsZv4a8aP2on2kOWMIyMZrENNNcpzM3yptf/2Wlt9LmaT99bK7SfM+35fL/u1t2FvcrIm+2/dN/rW+Xczf7NTx6f5fm/Zn2su7czJuX/erqpx5SpRhL3kc/JpPmMqeSobYu/c/zLVG80G4RmdE2ur7naNPu11dxp6Md8aKPM+bc33WX/ZqrcWaLMszoxMbfJteteXrEv2fNvscNr2jw27bLYfe+bbXI6tZ201w/nOqlf7tejeIo/LV0tvur8/+7urg9cX+C5SPfv3Sqv3mrhxETqwtNxlpsZUy7NronzL8v+ztre8GmX/ShNFtYMoxtxxzisGSTbb7LN2WNW+9I3zLW14DkaRLrc+4goC2c5ODX33grGS8TMFftV/9NTPpsqjJYuLfn+RS1S8c6jcxoR8s5VAG6tUFveOqyzPN95tyeW3zLt/vVU8Q3ccetXLLF0uWUr75+9UcdxZxr5c0yp8zN8v3q/KuK1y8QY3/AK+1P/S2U5fvZerOh02+hiZH87ezfNu+6tbFhqSNMyQzbiv3F+7trjdJuIfOl3uu2P5f9qtWO8SO3i2fON7eUv8AEtfOxpxK5jpYby5W2U20OxtzM8jNtVqtrdItqsPks0n3du/+GuSW8fbsSZYxu+dZP4f92rENwkimeZGRV/h3blZa6KcvZil73wnVf2l9nWSN3b5W3LHH/wAs6hkuobi4dPO3DZuij8r7zbvm+asdbqFYVENmv7z5vvfeqaPU3877NNHtdX2p8v3lrshU6nPF+9yl9FhXek3mMPvJt+9S3kiTSIXmYbdvzVUW88xt6chfuSf7NPhkhZBsf5t/9z7q11U+Y0jThLcbLb7b5vMg+Zf+Wn8NUZrW/uJnxwW/2/vf7VaS+TcKfOkk27vkaR6j8l3lH2l8n5vmZq29pFnTTp8xktb7Wi+8LdWZXb73zVFbr5kcT3KSebH99vvVqzW8NsoSFNir8yKq/KtUJp0j+/OqM0vzt/e/2axliPsm3seWXNIfDHCqq8kflq1XNP1JGmTEysFf5V+7urMmvIWxvnVEj+9tbatQ2+rWCs298v8AfXd93bUU6nvcxliKfNEsaTH5jeTI6lvN3I38P/Aq6rT/ADiv2nYp8v5XXbXI6DdWrXw/cq0K/cZfl/3a6XTZJobYpvyixfJ/e3bq
9mOFPksHjuV6m5NFcrG6Q2ak7l/75qK4Xy13xbWG/b5bfw1HHcJDGv77D/3ldvvUbnkZYftKn5NzbazlThy6H2WBx0YxKGsWrzWjJ5LM7fN+7f7tZGpaXDcRt/rFaNPkVfm3N/eZq37eHbuM8Mm6Rvnkb7tI+jp5jPDNx/47XLUpxie9RzCR59qGjzMuy/SMp/C1c3rWhwtG++Ndm/7rfLXoN9o8y3CvcopRXb5VT5WrN1bS0+z7Hhyu/wCbd91q4akYfaOx4zqeLa1oNzDOERF2/wC0n8NYOpaO7SeZbJIyf7ler+INHhZm+9tX/VN/E1cxqGivIj+SmI9u5P7y1zU6kPhPMzHEe2gedSabud5pkYFf71VPsfnTL53Cbv4a7XUvDcLRiRPut99Wesq60eaMb4YVO37td1PFRUeU+MlhZyqmE1uI22Q7VZfv1cs7mSOLe8Pytt3r/wCzVNLZ/ff5Vaoo7d1Xe6ct9/8Au1MsR7h7mW5Tyy5mfQ3w/d5PgbE6Mdx0y52nvnMledaHs+0IsKyMjJub/er0T4flk+BUXmDaV0u5BAPTBkrzfR7xIrgyfN8ybfv1+0+Mkr5Bw5/2CQ/9Jpn1uCwUa3MpLbQ6KzaS33TO7B/9n5q6PTNSfcJndnXZtdWX7tcYt0I4xv8A3Urffb+6tbGjXU0ca2c028Q/fb/er8HlsXiMvhGB2NvfQtZpcwuxaR2/dt96pZL5/MPnWys/ytBGv93/AOKrI0/UHZoVe5kxJFslkjf/AFa1ejuJoZEuXT5N7bv7zf3aujKcZe8fMYzC+zEupHYB3fcW+bb/ALNZ1zFCv+kpM0L793y/Nuq9cMFk/fP8qxfKzVnMYbiNLkeZiNWr0cPKEY8x8Zjo+8Z+qIkyvCk251bc6/3t1ZO65t4fJRIy8j7fmrXuLX7RcCaZ1Yfdba+1qpLazeQ/nPtlX7qsu6meVGN5cqKK/u5ilu+197DdH826qV1C/mbEePYrfvVVdzf7taFv9pdvO+b938y+X/FUN5Cl1I6IjIfveYv97+61ZVJcp10/eM+GNIGCXMLSIr/Ku75V3VrW7Iy7O2//AHaoNC8mwb2Dq/yzNV6z/wBVvmdVZV3O2371cU/fnyx0OyjPkgbenWd75rzJu+Vfn2/dX5vlavL/AIja1f8AiLXDc3kzOti7LBD95P8Aer1TVtesPDPw9ub+5eNb24XZEq/ejX+9Xh9xq25pnfa7N/rf71fVZTlqwseefxHw+e5tPG1/ZQ+CJLca1Dx9mflV+f8AhauB8QaxNayav5N/+8ktWb5k21sXGsQ/an875dv3WkXburjvE+oPJcSOU37lZNu3+Gva5keDGXMc74X8TyX2mvZu+3y33LVbxJr01vZvDC+3cm165LQ9S+y63cWzuqrv+bb/AA1Z1nVHm3Ojtt+7U/Ea8v2jntSkK3hmL7h/drQ0fxE9rsff9779Y+rXEzMfT/x6q0Nw8Mmz7o/vVXvDPSl8bTTWqIk/8G3atcR4q1Z7i6Z3m37XrO+3urDfM33/AOGq11cb2MzPv+ep+yVFdSezkjmmCdBVv54p2fft3N93+9Wfp94isf3K/e/iq5DcJNcStvXf91F/hWqiEi6sPnAp8zL97cvy/NTrhvOJR0bd92ls5t3yI2P4an2/aF8nDfN92iRJlX0ICj5GO37tULyPZIX2bS3/AHzWpJI8LOm//gNUbqNt29ujfw0cvuDiUWjEY3r/AHf/AB6omVBtf+7Usy7JsO7fLTX+6amBYxsK3+9TY1SRfmFO2p5fzPxtoVkA/eL92q/wmhLbxou6SnyRoyq+aiWfbJj+KpFkRmbZ8q/wU/hMyGRvmCfMK19PuPslq+yT52X71ZTHkO/PzVJJIVX5Pl20vhAueXMq+cz7T/HuqrcR/vPv7v8AdomumkkV3fK1Esg3BE+7Rze+KI61jMjq7v8A71fr1/wbr6GF+Ik9/Mv+rtY1Vt/3vmr8hbFWkulR143V+y3/AAbo2MMnifU0eNUElvCu6Rvut/Cq11YL+KeZm3N7DQ/d+G+vG8M9Wc+Uvy79zbq8H/aCh1K40+5hEMgDJt2s3/jtetabrW3R4ZvtKv8ALt+X+KvOvihrFtNG6Xm5EZGXaqfM1etWlGUbHz655H4Uf8F0PgjeeD/HnhX9pnRIdzLL/ZustGm3y/8AnnIzf+O14z8P/ipqWuaFDZ2t+vmbdz+X8tfp3/wUi+Fvhv4+fA/xX8N0hWaa4sJGsNy/NHNGu6P/AMeWvxh+B/iCbQbiXwxrULQ3lncNBP5n3lZflZa+Dz7CxrR5o/ZP0HhvHyivZSkeo+Ntems7x7OZFdmRX/2f/wBquI1rxEn2fZD8rf8AoNdL4sv5ri1d0jVw38X8S1wGqSbpN77lG7+Kvmox93lkfS4itzakEl0j/vl/4EzVWs7qbzC7u2yopo2bckKfNv8Am3PSSXDxxhNn3U+fbW0Y3PHqS5joNL1az8z/AFKsN/ztXT6fq0PmK8KcbNu6vNPOdWZ0f5fvfera0G+nuF8n7SxH91WqpU5Sjy9DOMonS+JvGlhHCkKQ7m+5uX5mZq5TVr57pdjou+P77Vt3Glww/wCu8sbvubfvVlappVsvHkbhJ8qbayjGJpUqS+Ex1keNvs2PmX5qmhyyqMr5rf3qmaxKqnyfN/ufepjW8xXfNt++yuq1fuSMSb5Gz2Zflp8MTsfLSLPz/wAX3ahjjdJmdPu7P4f4astJbSMqu7Db8yMrferP/CVH3S3Zw/vGz/F8tdHpOhvcXUNnHD5u75vl/irDs1e4/wBG2Km5Fauz8LskNiPJttr7/vbvu1wYyc40rxOnD8kp+8dVJqln4B8Oslncr9rZFVdq/NG1cHrXiSaxie5v5mNzM7PLM332b/4mtLxE2pahJ5yWy7Y1+9/eavK/iL4uTQ9Sks9Sm33iou23j+7H/vVz5Xl1Ss/5mz0q2KlGPLD4S5Lr1zcTPc314w3P/DXf/D7xF4k8MtBqulWczN95dybVZa+en13VdVvA8shyW/dKteqeBNd+Jul6cdSe+kks44NryXS/u41/3q+ixeXVI0uWJhTxWIpS5oM+w/gd/wAFfPGf7H3iKy1RfhbFqDRov+kQX/ltt/iXb/EtfYPgf/gv/wDsX/tDwPZfGfwvdeD7hYFi3TKTHNub5tzCvxK8UeNptc1BpjMs25PvL93/AIDWV9smuG+fbtb+7So8NwrYflm3GR6dPjKrhf4kFNn0X/wVd+Onwo/aG/bq8Q/EL4Faw174Yh0uzsLC6aLarNHH823+8tfPI3huZNy1FGr7lfzF/wCA1Lbwo0jpvbb96vrcNRVChCn/ACnxGKxEsZip1nG3MzqfA+oPa3SoNvzJ/F/DX2D/AME69DfT18R6U9s0aRtut/n+VfM+6y18XaLdQ299E7plPl+Va/RH9h/w2lj4FufEKJGEvoo03bPvbf4d1dfN7vKclH+PE9ks7P7Ooebc6L/Ft3Nuqe3t7OOTf5y7Puouz5v96rl1aw2Nos1m8m1U+dlp8ln9om85HbKpu2/w7qd
OPunr8w/TdJTzXQ3KqjfNEq/erRtYUkuG2o22NP4k+bbUelf6tUR+WTa+162NO094pt7ou77sSt/FXXTjbQ2oykP0ezhjtVdEZ0k+bzF/i/2qufYZrhWeZ13Mu3y4027f+BVdsVSSNXZNn/TPZUqKiqs3zJu+5Ry8vvHu4eUzmdY0XaoeFGx8v75vux1yGoabCyzpvxGzs+7Z95q9D1RfMt/9Yynd80a/d/2WrjvEFrjMPy7vv7m/iaspS6nvYVc0oxOaax3bfs0TKNi/L/eWsHXrdYtTnt4yFGQAc9MgV1Fn9p85POf5W/8AHa5vxYEXWbkwtuXAKk9/kFfqfg3K+a49f9Q1T/0qB9JhMP7Jyfkz83/2zvH02tfHTWNKhvWa30eKOziXZ/F95q8z+Gun3OueKLazQbjNKqqzfdq1+0Brb6p8fPF04fcs2syfN/u/LTvAMkOi2dzr1y+Ps8X7pf70lfkPwn4bjeaWKn/iOu+MXjz7DeTaPpV5ugt08r92/wAu5a8f1zXJtQkMzuzNTvEWuPqF89y82fM+ashm8xi+/NL4viMOUb5j+WXfrUkK+Yi92/u0kcLyJn73+ytaulaO9ww2Qtmr+IciGx07ciuXZt1bWn6SzSb0Tcu2tzRfB7rD5zwbl/2q1W01NNVvMRc/3a15eWPKY/FLmOcuLN7VS4RtzVmzruk3gfdrX1q6RlZ4Wwyp92sCaWaSRn3rtrKUiox5iG4mkZhsRm/2mpbNXVmR0/2makaT92u8MWp0Nu7HY7712bqPhH9g09PX7SypvVVX+8lbDafM1rvh+Yt9+sXT7jbPs8v5P42rpLOS2EOzftDVXMTLYzPsc0LDzE2urf8AfVaeks63I+78r/OtLdJDx5PPz7X+f7tNjkh3L5Pyms4+8HLHqei6Xoej6tp6bJtpVNvy14/8YNPfR/Ewtndl+X/erudHuHjtmSzdlZdyv8+5WrhfjJNcTX1tNO6lvK2s1IcY++Zfhi58y62TP/Hur3jwTNu8PiPzFYLt+WvnTQbt4boP8pr3X4bXf2rw7M/zDyU3Oy/eq/hCXPsY/wAWtQdtLebqjbv+AtXicm+SYu235q9M+MmpbbYW3n8Nu+X+9XmKcMKZdOPLG50Xg9Q1xGm/Zu+81fQ3wzkeTR1hhTPyfNtr5/8AB8byTRun3d/8VfQ3w3t4Y9PfaPk8r5f9qjm+yYy+Mf4wvIZIz8kgMa7NrfxV4d8QvDL3DS6lbWzZVq9j8TR3OoTM9ykg+fbuaslvCr3y/Zns2f8AiWTZTjyGXvc/MWP2cdck1Dw7LpT3i+Zb7WWNv7td1qVhuje5R8n+JVT7teT+BY38A/E6G3mdhDePt8xvuq1eztcuF2b/AOPb838S1Pw+6aS5Ze8ep/sSwywW/iZXl3KZrQp7fLLxVT4X/tKeL/2Z/wBqnVfHnhm8ljFv4hmaeKNv9bH5p3LWt+xzapbJ4meOYMJLm3baP4OJPlrxj4vgp8YvEUpkwx1mcYP93ea/aOJkv+IN5Kv+nlT/ANKqHDDm9vI/oq8afD74W/8ABZf9hDSvEXhnVhF4m0y1+1aDqqsvmQ3ar91tv3dzLtr8/P2aNW1zwB+1rZ3vj7TrifVLK71WDWLeGLMhuTa3MUuF9Q7E49q80/4IH/8ABTC5/ZL/AGiYvgJ8SdY2eEPEk+2CaaX/AI85m/8AZa96+HGNd/4KZ6jNpl2TBcfEHXJWlgj8wvb77pn2juTHux9RUeFNWdTKs4ozfurDyt6NSPtOFqUaeBzSsvidGX4KVmfcn/BPr4z2HinxBefDfxBdE22u6cYfsrtuVm2sv8X+zX8zH7UXw70TRf2jPif4M0dFa20Hx/qlratH93y1uG2qtfvx49h1T9jb44WXxavrdbDQZory98OtM/7xLVYW27v9qv56de8ZXniT4yeJ/EOpXLSN4g1y8vJWZNvzSTM3/s1flmNjLD1H/LI+Cy+pCtTjzfFG56f/AME1bO5sPipr9tJjYNBbaSuD/r4q6L9sXRIdX8Y62hhYyiG2MbKM/wDLJaq/sCWhtfi7rpOfm0E8s2T/AK+Kt/8AaHla5+MGp6YzrseGAYbt+5Wv1nG/8mPwv/YU/wAqh0yl+/b8j5LmtodW06awuUZXj3LuauY8GapL4T8VGGZ8bZfl3V3HjDT30HxhP8i+XcPtRV/u/wB2uF+IGlfYNQXV7ZPk3/O392vxeXve6dlGUubmPp3w7ep4g8PR3jvhW2/Kv/oVVNQheO4kmh/hX+H+7XD/AAF8YPqGlrYPNv8A9lm2/LXfaxE9quxE3tt+8r1HN7tgqR5Zcx9Zfsb/ABSfxl4Jbw9eXO6fTX2eX95mj2/LXsPluxdPL+Zvu+dXxB+y/wCPH+HvxSs7y8m8uyuv3V02/wC7u+7ur7qlKXE3mJNHLbyIv7yNflk/u1w1o8srH1WU1vaUOWQ7S4Zm80ImV2VPbJeMpMKfMvy/7y1ZsYYVj2bNif3d9WLXTUjXfCjK+/am2iK5j1PaS92JnXdrDJhJLXarfxM+1V21QvLV1kkSa23ity+t3l2o6K4VvkXb92q11Z/u33zttb5vMrojT6oqNWXNyxOG8RLZxyfacNmRfvLXnPiK6htbgw2yMXb5kaSvSfE0CKyPCkgSFG2RyL8u7/erzPWrGaEn/Vum9tvzfNu/3q4sRGHKephfeOcnWa3uNkL7xu+eRf8A0Gum+G0iML5Yx8u9GB9c7v8ACuUure23TI9zMiKu6VVf/wBmrf8Ag87GG/jJyFaIqc5yCGr7nwXa/wCIm4JLtV/9NTPfyqP+0xl6/kZfiWbbrl7HK7H/AEhiMduazobwrMwhf52T+JN26ovGN3cL4nvoowNpu3ViW461jyakJpvO+bbGzKrL8qtX5lxPHmz/AB3/AF9qf+ls5qvu15erNuORI9QzvX5tzfLVyPWIfM2JJIzL8yfw/wC61chca48K/uXwyttT5vvUN4mgaHY8qpNs/h+bbXgxp8xh7Q7NtY2qJppv3rbt6/e+b+KrNj4iRnb9yymOL91Mz7V215/N4gFxGib/AN60Xz+X8u6mf8JFNDt+dn3fKnz7qJU5S0COK5NT0218STblT5UWPdvb+9/u1Ja6tNIzu9yu1vk3L8zbq81t/FkLKttM/wD9jWp/wk7283yJGw3bf3b/AHmrf2cuoU8RCUz0nT9Wha4EKOzpGm3+7tX+9VzT9YSS3k8l2Ks33d9eZ2/ix4ZXMG4rt3ff/wDHWrQ0vxM/meek23b8zK33ttZ806Z3UK0JTuekfbHmjVN//LL5lb+GrEkltfWfmQux3fd2/wB2uJsfE9t9ohd7zajL821tzNV+38WJbyLsfYnlfeb5W20qeI5XoejTj7xuXnzSI+zcv3fvVk6lNbSTR2LTK7fNt+Xbt/2t1Yt94geQM9lMq7n+Ztm6sW+8aeavyTN+7fa6/drKpU97midsY81K5ratrXmQtbIG+VPn3Rfe/wCBVlRa55wi8mZmTZtRW/hrB1TxVNc7kR1Z1T/V79u3c1ZU3ibbEUNzsRfmRv8Aar
pw9Y8vFU+U9M8O6pbbYrZHUv8Ae/3q7DR9W+7C8yoGX5mX5vmryXwzr0LN8833f4f7tdrpuqboWRHX5m3V9nKn7vMfklPFTjI6231Sbz2SaaRfn/e+Yvy7f71aSTJcSRBPu/e2x/LurlrS83eZs8xl+6jTVrWt15kavM7Yjbcu37q1jKjGWx7mDzCrE342+0XT3PneaGi2su/7tTeWkjOiOu3725U/8dqto86Ru8z2ysv3X3fdZams45pLhb+Ha6L91furt/irzqlO1z6fD5lLlTKF9awy3Hzv/D8rfdrF1C1s5ISly7EN/E1buofvl37F279qN93bWXcKkcyzP8z/ADLt3V4mIjyyPYp4z91c43WtP2Mu92f+4uysebR1uFWbZsGza7L91q6y+jRpFs03D97ubdUH2G2TciPu3M3zL/FXn1JcoU63tjg9U0F5GaZ/3S/elZvmXdWLfaLtgZ9mG+7tWu+1O3mjjeF03KqfKrL95t33qxdW0ubzC8yKFhT/AFap93dR7T3Tow9OMp3OH1TR0t5Mum1fl+WqbaS5Zk2SJ+9+7t3V2OoaeJGXz02fw7f4lrOmsU3b33fN8yNRzS5OU+xy+nCMT07wTbfZ/g0tsw6afcjBP+1JXmmm6fM0e/ycsr7VVm2/NXqnhWHb8LVh8sD/AEKcbR9XrgdG0/zPkhfHz/Osj/dr948ZZcuQ8N/9gkP/AEmmejlKjz1v8X+Y3T7GaVk851cqu35vu1o6bbzRscOxDfKm77tWIdNMcEcMNtt/i3f/ABVWlsfOzYPM2N+1vL+9/wABr8EjU5TfGU/c5mSaWzyQqVRUDJt2yferRVXjs1R3VV3qz7m/h3feqvp+l7Wb/WMPuurLV6GFJDsRI2+7t3fd2/71dXtGfDZhLlG3Vim24mmnzt+baq1R8x1VHhh3+Z8m5V2/8Cati6bzGbZHJKrJtb/Zb+7WZfLM3zzOzKu1fLZtrbq7aM48tuU+JzD4uaJl3W+a8e2Ta5V/3W19rVn/ALmSR5vmDx/MjK3zSNWrJCjMkybWLPt27Pu/8Cqmtv5l0yJCyMz7VZv7taSlzQ908fl+0MZXt5vtk07R+XtX5V/1n+9UV5HbXUY3+ZFt+8rfdZq0ZrRGhXy9u/ft3bGakWxm8vZ8ruvzfu12+WtcNSTlqjro0zHa1RZkSF2H+z/Cv/Aas6LYPdagltNM0sXm7pd38S1alt4W3u/mJ8/3m+81P8A2r+MtW1+wsJlf+ybJnn2t91v7q/7Vd+W4f22J16Hl5xiPquG5Y/aPOfid44e81SfSrPaI7d2VV2feb/ZrgdLvB5k1s8y+Yz/dan+KLpLfxNf23zEtu+VvlZa5KPUraHWvs1zuUbN26vs4x9nG58DzTlPmZU8Xao9rM+98fPt3N/D/ALtY2oap/aWn79/zbdu5Xql481aO81BpERm3bvmrmrfUJrXd3T7v3vu1EZfZOj4jl/E8n9n+IpJId21vvL/tU2TUppoT2/2dlM8YNuvldI+W+Zv9mqlrcbY97vurWOxXMNuJMzP/AAlX2tVa4mhY7HT7tLcSOzM4f/Z+aqsr7lFL7Q4j5JnEe9Pu/wB6o5Wdfn/ipIW+Y8fL/dpxUbd79f71Ei/hFs5N0y1d0tHlmkjT+9urPs5Ns/P96r+jzeXeGZd33/m20v7opGpDHtkK9NvzVZjkfOUm27m+X+9UM0YwNnP+1Sx3XlxmBP4fv0pcsTP4hLyFJG3pu+X+L+JqpTyeaV/ib+7sq150zfPs+RU+838VQSKh3eT95qAM+4jT5nwuWqm0bj+PdWj5O5m39F/iqrNG+3eiUR900K6HaPu/8BoLbWb5P+A0/wDiaPf/ALtRSfe3ZzTjIBwERG807zHHyJwP7tRxkg5xkU5sKcZpAO3Sffcf8CoaTzDv3/71RMH6sKVW+UjtQBatI5bmeO1QBjO4RGJ7k4FfaXw1/wCCDX7a3xg8Ka94u+GsGjaxp3ha0F3r13Ym4ZLWP8YgZGxltiAttVmxtUkfGehyb9YskZP+XuP5v+BCv66/+CXf7HHxG8I/se+PrjV/Emgv/wALb8NhdBFjqBuFtAbe6hBneNSoOZlJVC5XDA4YFR9jklPhrD8OYzH5mlKpCpQhTi5Simpyl7T4Wm3GCcuyt12cz9q6sYx21v8Aofzy/s9f8EJv2tP2kPivp3wp+F3i3wndapfszkyXNzHDbxKMvNK/k/KijqcEnICgsQD+hf7M/wCyR8av+CK/xTfwJ+0uLLVRqNnHdWN94TuWuLa8iB2lo2nWJhhgVKsqsMZxggnvPEP7MP7Tf7Hn7Yfhr4R+AfHWlf8ACeXNzbHw9qXh7WU275/kVJBKFMeclWSVQHU8B1YZwP277b9pnSv2jdV8O/tZeNY9e8V6fBDE17a3EbWxtyu+IwpGiLEhDbtmxDliSoJOf3LBeGPBuYcUYaWAr05YKpQdRQ55+2k+ZJTjrbkV0nfW91ZvWPkV26tBxqJ3Tt5f8OfSdv8A8FYfgxDYx2Y+HnilNhydi2/P/kWuJ+In/BRb4a+Mdy6f4S8RQBgAWcQ54+khqP4Of8EWP2uPip4Mg8Z65deHvCSXkaS2en+ILuX7U8TKGV2SCOQRZBHyuQ4IIZRXh37TX7I/xy/ZI8WQ+FfjL4VFqt4JG0rVLSYTWmoIjbWaKQdxwSjBXUMpZRuGfUy3hPwXznNJZdgsTGpXjf3Y1m27b8vSVuvK3bqcs8FOFPmlFpGp4q/aF8J+I7meVdE1CNZHJXCoCM/Rq+BPjt+wZr/jH456p8TPhT4i0vTtM1ZxPcWWpNKJFuD99hsRhhvrX05SxxvLIsUalmYgKB3Ne/V8D/D6qrSoz/8ABkjTDV6mFmpU3qeS/Av/AII2ftwftRPIvwc8N2eswQyGG51JWkhsoZAqsUa4lVYw+GU7N27BBxzS/tC/8EF/2+P2d9NbxF8WvB9jZaSpUyavaztd2kW5gqiSWBXWIlmCgORkkAZr9f8A9uz4z+J/+Ce37Lnww/Zf/ZvvX8Malq2lNe6/qdowN2Nqp5pDkZDyzyOxcYKiIKu1eBm/8Er/ANrf4g/tQ+JfFH7In7UniGbxnoniLw3cS2j61JvnXaQs0PmDDsGRy4JOUMQKkZr+eqnh3ldbJ6nFeHwEHlsJS9x1av1iVKE+SVRO/s09HJRa2W70v9DLM8RKaoyn73orX7dz8LJf+CeHxPdht8ZeH9oOdpef/wCN1ufC7/gkj+0r8cPH+n/DT4V3+j6trWpSFLSxt5JRnAyzMzRhURQCWZiFUAkkV9g/FXwXL8OPif4j+H0ySK2h65d2BEzAt+5maPkgDJ+XrgfQVneGvE3iLwbr1p4p8Ja7d6ZqdhOs1lqFhcNFNBIOjo6kFSPUGv2qfgF4d4nL3VwVKXNKN4OVSbjdq8XJJptbXSadtmeV/aWK5/ef4HFP/wAGq/8AwVPcH/infCPK4P8AxVkHP61R8Tf8GzH/AAU9+F3ha98b694Q8Oz2GlwNcXi6br0dzMsajLMsUeXfA5woJx2r6Qj/AG7P21J5Vhh/a
d8dO7sAiJ4hnJYnoAA1faP7b3xj+KP7Lf8AwTz8L/Ab4g/EjVtY+I3xEtWfxJeanqMk9xbWjYe4iDFjtUBktsdGBkPXNfkGaeDeLyXNsvwVeOHqTxVXkUYfWOZQiuapPWpZRhHffVrQ7YY/nhKSurLy+XQ/Gj4Jf8Elf2qf2iPEraD8G/D0HiG7twgumshMIbYPnaZpWQRxA7WwXYZ2nHSvUPGv/BtV/wAFQfDemXHiOf4f6LdW8C7ja6XrkV1OB/sxRku59lBPtX6jf8EqLnXvEn7AHxL8D/s2a1Zab8Uo9UmlS4nRQ/7yGMWzbnJGCI50RiAqvkkdWaz+xr8Kf+Cv2kftGaJqPxq8XeIIPCdnen/hIh4i8SQXtvcW+07kjjWVyztgBXUDaSCTjOfL4h4K4Vwmb5lTw/1fDwwTt7PEVqqrVrQUrwSlFWne0LKTel+hrTxeIcIXu79UlZH4K+Of2Bviz8PbTV5vEGsaZb3GhxTtfafOs8U8bwhvMiZXjBVwVIw2MEYOK+f7r5rjZ/ef/vqv2f8A+C0Ou/D7xJ+038Wbz4d+Q1vHpUtvqUtsuEkvo7PZcEfMQSJAVYgLllbgnLN+L16rSMmw4bbt+Vq+a8UOFMh4ew2U4rLKEqP1qj7ScJycnFvldtddL28+x0YKvVrc6m78rsSSSQy4REVf4qZHDBJcb+nyfP8AL/47UCyQ2+1Eh3t91/nq1YW9zeTLGkLbm+7X5LGMTv8AiNfwrYzX2qJbJtb/AGWf5q9Hg0dtNsW2Ju+ba23+Ks7wB4PvLOFdVv7ZovtCMq/J91f4trV0nibXrPRdFfVbny38uLbFG3y/NXkYqt+95YRuejhcPzayNT9nzwC/xB+IiabebZrPT7W41G/h27ttvbwtIzf+O18YeINRuPGni3UPEz8ve38ku1U+6u75V/75r9MP+CTXwr8T/EjxB4/1vwT4ek1XWm8IXVrYWqqzfvJvl2rWh/wW6/YD+HH7N/w6+BfjPw/8N9P8L+KtWt7yy8U2emuqrcLDGrLI0f8Ae3My7q9XKcdQw9aVGXxM9rH5VVnTw/s/tHxH+yP+zxqPxe+IFnYP91pfkVk3LurY/bk+J/hnVviI/wAHPhRZW9poHhVFtb+4s5dy6pfKv72T/dVvurXtvhLw/afs4fsSeKv2gNURYNVmiXSvDTRsySNdXHy7o/8AdXc1fDtmXkgyzMXZ90srdWb+Jv8Aer38u5sTUlWnsvhDi7D4fJcJRwcP4so80v0QCN45Nny7Vq1DCi/cfb/F8tJGqL8n3y3+zUjRvu2Rpt/v7vu17R+cS2FaTj+L/eqS3O4+Xt+9/Fv+7UO6Ers8n5Vb7y1csLczQ5RFpykKPPEkST7LNH5Ltu+9X0B+zV+1V4w+BPjTwx9p17b4P1SdrfxHbzLuW13femX+7tr5+aNFmVs7i3y1r+Ire5vvh5Klna+dLb3Cskkf3lVvvU+Xmiac0r+6fr/4Z1nwr4ys01P4e63b6xYXn+qutPuFkWRdu7d96rjOlvMl46Mo+7t/9mr8XPDvjXX/AIa6na674Y8SalZ6la/8eraffNH5Pzfwqrba+0fgR/wVM0Sz+FM2m/G/SvtXibS0/wBAmtU2/wBoRt/z0/uyLV060afxHXCUJH2/p8KTTfJIpMjbXb7rKu371b+k5b7+5Qz/ALpVT5mrxD9l39pXwB+0locuseG7mSw1G3+a60e8lVZ1/wBpf7y17ZY6gjTJNc+Yjr8iR7a1jW9p8J6NGMpQN2zi81pbmG23/L87bvu06SJ45tiIv7x/n8z5VjqGxkmhjf8AiWT5tzP8qr/EtTFopv3yPlVT5l27t1axkerh+bl0KWpWsJkKPuCfd3L83zVyniSzSHfvmVvn+T+9Xa6hCgs3+X5GTdt/irifE1y821Nnmqv3I2+X/gVctapLofU5a+aRg6aqfaGRIVZ2bd8qferkvGsXl6/dQmU8Kg3rxgeWvNdx4bt3a4MKW22Vk/h+Zf8AvquR8axMfG9xEF2s0sfB7Eqtfq3g075zj/8AsGqf+lQPpsPOLlJeTPx++NENxafHjxLaAfN/bEykt/vVB4q1aHT9Ft9EQbWj+dv96us/aY8OyaX+1H4qgvDwNRa4DL/EteZeINQ+338k+zf89fkkfgPwnF/71Nf3inL+9lO+iGNGb2pY7V5GxXTeG/Cd5qEyeTbb/wC8uyqjHmOWU4xIPD+g/aGX5flr0nwn4JRYVuZoVC/w/wC1Wn4H+Hr2savdIr7vm+Zfu1Z8UeKLbw/F9jR18yNdqsy/drb4fdMeaVSXKiLVprCxt/J8lV/hdq5TXdcj2l3uWO7ms3WvFlzfM3nfxP8AeV/vVjXl49xGOxrPmmXy+5oQ6lePJcvM53bv7tVGmIXZTppvm2eXz0qPy3ib95Ux934iveFEkZk+5xtpbNvm8j73yfPTWj3b9n+7VixjHmK+/bt/8eqhRl9ks26zM5T7q1tadI7Qqj7cR/3f4qorbzXEf7lNm3+L+9V2FXhjGxKCZe8WWt5pmZ0RlLfe21DIs0JPkp82/b8tamm/MuX+bb/e/iqxHo6TSLCkjAs+6l/dJ/wlPSdSns22IWxXO/FWXz7aGb/prXZ3Hhm5tY2eHcR/47XE/En5bZEmVi6t/wB81HL7xpCWpxtmSlyAf71ez/C3VHXR7iz87/WRbvlrxWP7w5zXqPwzvoYdKldH/wCWXyrV83LEqsc58Vr0TaosL7fl+/XKWse+4WtLxhfPeaxJv+Yq+3dVXSLd7ifYlMfwxOy+H+nvNcfOmNu1v92vVLfxZpvhuNYXudu377R/NXnmhwzaRpiuIVYqn3lrO1bULm6lZ9/8X3t1RL+6ZfEeo3nxQ02YN8m59m7buqBvihc3H/Hgiwp/d2V5jDHeTTD52+Vfuqta1qHsVZJH5VN1Eeb4iuX3eUl+IHiK5kvLS/uXb9zLvVY/4a9v8JeIn17wpa6lsWVvKVWZVrwrUoX1axkTZtCruf5a6T4C+OvsdnP4Yv5stC/7j/ZWnEnl9w+yv2OZHe38ReZtz51sfkGB0krxn4yxBfit4jYwHadYnLEt1+c167+xTefbLfxIxfcVktMnGO0teSfFBHvPjD4mijAcprVx8p/3zX7TxNHm8HclX/Typ/6VUOBXjXkcT4g1DUvDuoWnifR0aO5t3X9591q/Wn/gklrU3i/9sf4Va94luN82riSW8kkPLyTadOWJ9yzfrX5ReJrF9Q02aF/m2xbkVf7tfb3wJ+PGqfsx+E/BPx50dz9o8NWel3I5xlSsUbj8Vdh+NYeGKSyrO7f9A0v/AEmZ97wk+bBZh/16f5SP1N/bF/Z21X4w/Anx38GoL+W48Z6TfNFoytulurq3b7sca/8APHa3/jtfzMfG/wAE+LPgr8bNS+HvjLTZrO/0nVJLWaGZNvzK22v6pviX40uv2gPgt4b/AGxP2d/GE9nJr2jLpeuzaay+aqyfd+b/AJZsrfxf7Vfjn/wXK/4JreIfCWpeF/Gfh1LfUvF2rJJJqmh6fO11eLGu399Lt3NuZmr4
fmpY7KOaUo80du/mj8jo8+BzbkjGXLL7vI+cv2Dtk3xC1G8yC7+Hzlh0/wBdF09qT9ou7Ft8e9UYEsRBbfIP+uKV6N+x7+xN+1R8CfBa/G340/B7VvD3h/Vov7N06+1S1MH2i4YiXaqNhsbI3OSAOK+z/wBnP/ggzYfttaVbftReOv2g7Tw9o+sTNDbaZaaY010jW7GAl2JCAExkjnoa+8zKrCj4F4WUnp9af5VD6SnRnWxbhBa2PyC+Lnh/7dpf9pQ7t8Ls+7bXB6hp7+IND3+SzfuvnVkr+mzwZ/wbbf8ABN3whpEt/wDER/F/ijam+X7RqXkR/wC1+7jWqOp/8EYf+CFc6p4Mvvgr/Z0906+VcR+IbiOTc33drM3/ALLX4U84wSlqz1qWW42rH93HY/mO+FmsTeHfFSQyrgM2Pmr6JhP9pQwukOVuIN22P5q/fn4Of8G7/wDwRTl1GbxZ4b+EGp66ltcTQSrqniOaWDdH95tq7a6HUPBX/BFb9kS7m8Mah+z54K0+e0kVbO0bTWvLiX+795mqK2a4Kjyzb0kdOHyXMsZzU4QcpR8j+e/w/wCC/FuoTJN4b8PahcPHLtRrGzkkZf8AgKrX3f8As96X8VPH3w5017z4e+JP7Rtbf7PKraDcbpmX+Lbtr9Y779uH9lr4K+EtG1lfhfoXhuXWIWk0nw9a6NCmoeXu2qzxov7v/gVc18Lv+CvPhXXfGWseGtf8JQwJYzqbeaGRctH/AMBrircQ5fGav+R7+X8J57Ti5wht5o+KfC/7OP7RviqHzNK+Bvii4RbfdK0mjSL5n+7Xf+Ff+Cfv7WPiSFUh+COrWisyr5l48ce1f+BNX3Ha/wDBUT4Jy2rTCKYvGjHyU4b/AL5rjfit/wAFk/g74B0K6ns9JuJ7rZ/o0ef4v9qpjn+W8t0/wNZZHxDKfJ7K3zR4FD/wSg/a9mj8xNC0NE27vLutcXzWb/gK7azfEH/BKb9shLNifBWj3Kf88bXXo9y/7X+01Ubj/g4D16bR45H0LTneKWRZWjuPmb5v7tcR47/4OFfiJLpV1b+HNHs7WVp/3V1v3PGv+0rVhLiOlKN4wZ6EeG84pytOcIlTWv8Agmt+3RNbtbJ8AL+UrKyxbb23b/gX3q4DxV/wS4/bvsmd5/2YNZnC/cks7iF//Hd1TaT/AMHB3xe0fxn/AGjc30dzA1nJb+U27/WN92SqviH/AILu/tAeKrZdHsdcl0rdKrNfW8i7v8tWE88jKPvUpHpUcgx8Ze7Whb5ngnxY/Z6/aE+EvmH4nfAfxho0Sy48+88PTeWrfxfMqsu2sT4Nzw3K6pLBdRyATopC/eTG75W96+vvAX/BbD496bMLfVvFlprFsqq0qX0aybl/iVt3y1zX7W/xp+Gf7QEvh74m+D/hvoeg61eQ3KeIp9EsFgF6wMZiaQJwzKGk56/Ng9K/T/BLGUK/iZgoqNpWq/8ApqZ9Fgcpx+FqRqycZQW7Utb27Hxd48vZI/FOowLKwBvpDk/7xrmLrWtkeYXx/Duatfx5Pb3HizVpba4Yj7fNGy7v4hIQ35EA/jXG61JtZXR1+X+Fmr4HiaF+I8Zp/wAvan/pbPCxk25ya7siuvEm3akMLN/Cn+1VaTxVbJu/c43fxLWHq15tkOxGXd96suaSaWNdiZSP/b21xQw8JRPna2InE6qPxY+0+TNtbdt3N8u6nN4wn2o7pGqfdRv4mrjbSWZ2L/N+7+Vfn/hqyrzLMnnfxN825K1jh4bHH9cqyOwtdcS5jffc7d3zff3Vbj8RPHJE6bnWNv4f4a46NraFv3PmO/8Ae2fdq8rTMrPv+9/C38VRUpyidFPEc0Tq4/FTyMyCbf8Axbd23a1WrDxVM22Hev8At/3v++q4/hY96J977m6pLe4fcYXfYdy/eauGpRnL3j28LiPh7noGl+Lrlplttit/d8v5ttaK+ILmaNUvE8z+6u75t1ee2s1yrGazT5t+1GV/lrVtdUufLbfNw3y7d+7/AIDXBKPLM+lw8jpr7XJVWZMyQt5W92X+GsnUtYmmxG958u35P9qmRzSWrND8yxtt2bvvL/e3VBqEf7sbIdv9xWX71KXKej7SMYlGS6htpPOd8bn+bbUH9pJIy/O2Ff8A1bfxU7UkTbvhmbd8rfN92qMjeTIsJRsbPn8v+Gt6MeaR4OMxHLzHXaHq0yyN53l4ZNz7f4t1dr4f1SaNVd3V/mX7r/NtrxrSdcms45PORj/stXa+Hdc3Wqs7shZPm/2f7tfcU5e6fj8j1vR9aE0gfylXy2+7I/zf7tbWj300zb0uWi85vu/e2steb6DrUCx+TD0bbvb7vzf3q7DQdVdS77Iy/m/d/vNRKUEdOHrS2kdtY3H2NUTezvH80si/8tP+A1ajvN0z3UIbzJnVdqv/ALP92sOLUppLPZt2yNuZ2/h+9VhpraNneH50XazV5tan9o9zC4qX2S3qV55bAOm4/d3L81QNcTXNwHmgjlC/61W+8rUyRnaMW0PDsm6JpKfG1zLCba5K/Ku5mX+9Xz2IVKR9JRrVeSJVWDzpBNDCv32V/nqNY87ntl/2fLb+9WmljM23emxVXdt+7uZqtLp/+jojou/71edWjCOqPVw8u5xWoaa8P3E3sqM3zbt22si+09Li55MgMiqz12mqWSQt/pO1X+4zf3axb6z2/OEmabb+93P/AA1lGXMerRl72pyN1Yuryu6eavyqnyfNVObT4YVbyVb/AGFrp7rT4Zd7w7o3/vN826svULFI0/vHerbttLm98+oweKOp8MRGP4bpEyuSLKUEN1PLVxmkw/vPnTK/88/4q7vTNp8DsEcsPskoDL1P3q4Kxknt2H2aFsfNu3N826v3fxoV8g4a/wCwOH/pFM9TKqsFOrfq/wDM3bWN47dUS22Oyt95ttSwh2mP7vZt/wCWiL8rf8Cqpp90ZoTvRtn3t38W6rthv8sI77Nz/PGzfK23+7X4PzcpOZYqPL7pbsYUk3lJmi3PufdVpY4Vj3Wz7V/gVU+7UCx7YHme4h2r/CqfxVYj85WZ7aFVVvlrWMpx94+IxmI9tLlBf3Hl/afvtudVqpcWe6P7Z/Gvyo38Natvbu0fFsrPHuWKTduamXFujWqO8MyBvmfzH+9/wGto1re8fM4inKpPYwbu32q6XKfJJ8yMtQR6akr/ACIzDYuxd/yrWncWLyLIdjJtZvmk+WrGm6K9vH9pdMrNt+7/AMtGWn9Yly+8cv1ecZGfDZ5V7aaZsbN+3+7/AHasNp7tiF0ZG/iZmrVXT/lXcixKz/xJ8u3/AHquJpMMjNC8yq2/dEv8LVwe0fPv7pvToy5rM5C80mGO3e5mO1dvzRs/3q4j4P8AjWHR/EnjuZ4WtlaeFd0O1lbcu1f91q6v4nao+k6hHpsKMrLEz/u3/u186eG/FD6b4o8SWcyTFr61ZvLjl/5aK3y19vklCpTw3tX9o+D4grxni/Zx+yVvilefZ/GVzC8ckXmSs3mSfeavPvFkj2N9FeQ+Ydzbf3lbXjzWvtlxb63skzt2StI+7c396uf
1q4m1Kz+0vMpRkZtv97/Zr3IylI8L3TlPF2oTTaozptCsn3qzrySFbYzTbfl+4rfxUniK4VZN7p8+2ud1rVnkh8k7g397+9V/aKH+Il84iZ0wrf3axIpvJZofmrZgc3uijzDny2rMkj3Sb4PvUfCEfeIpP3f393+9TJB5kf3OP71SXCBl+5yv8NQTcMqb9w/urRI05eaRAx29ak2Oyq9RuN5yaVfu7PMpc0S+VCx7Nxy3SrWmSPHL9/738NU6ktWCzLu6UhSidJbSSeTs7L/FTZFeNm2fNu+9UdnM8kOxNtP3eThFTKf7VP3dzEb947M7W/vNUF0zxr8nB2U9piy4mk+X71MkV5FP75WWl8Og5EDL5ihEfbJtqG4DpDh91WrhflR4X+eqszO6N87FVo+If2dSvI2G+cq1RMoZ99Sts279nzVFMvaq+EuO42HO7bu2nNSbUZN9Mt13ueean2umUdFqhy3I8pko9NfZ/DUjR7V87fTRJkYC4NZklvQ5HOuWeHyPtcf/AKEK/qX/AOCSd7eL+yJ+0aq3coEHhjdABIf3Z+wX5yvoeB09K/mI+CHw08TfF/4p6P4D8JQK95d3itmR1VY40+eSQkkcKis2OpxgZJAr+iH/AIJf/tqfD39lvxV4m8EfG6PUJPBvjSwjt717OIyrazKSnmOgIbYY5JAxTL8LhW7fsXBeS5tmnh5nDwdCVR+0w0opLWbpVPaTjHvJRtp5pdTlq1IQxEOZ23/E8y/Ycurm9/bV+GF1eXEksr+OtNLySuWZj9oTqT1r7R8ZeCfDfjr/AILyWtl4muFWOws7TUbWFkQia4g0tJIl+cjGGAf5QzZToOWX5/8AHF3+wH+zj+1j8M/iV+zN8VvEniLQdI1221HxPHPYGQWqxTqwELyLCzsVByhU4AB3knaMz9rj9srQ9Y/4KETftY/s5akb2HS7mxk0q41TT3jiumgt0hfMZKyeU4VhzsfDHhTX7Dm+BzTi7P3i8BRq0YVsuxFKMqlOUHCpKpFRjJNe63a67x95HJCUKNLlk07ST07H2n+2frP/AATy+JfxmutN/aG/bE8aaJrXh5ltv+Ec0rULiC106QKCWREtGG9shjJuYngZwqgeMf8ABT/9rL9kn4sfsseGPg58KvixfeOvEGj6vBJbazqFrM1xFBHE8bvPO8cQd3DKCQrFipLAHDVq+M/jF/wSS/bumsvjF+0HrGs+A/GcdtFBrlvAJkN6URcZeKKaOZF5RZMRylQAwACgeK/8FAf2yv2f/ih4C8O/syfsp/C+y07wP4TmZ7bWL3TNt1LLkr/o7OzSJE4w7vJiWVtu4Dad3wnBHC1aGcZTQq4bHKphJXmqvs4Yei1FqThNU71YzltGMryTvKWmu9eqnCbTjZ9r3f8AkfJ9XPDupR6N4gsNYlEhW0vIpmEMmx8K4b5WwcHjg4OKp0V/VU4xnBxezPJPvb/gu3bya545+GXxNsHkfS9Z8JzJZvvymVlWXIGOpWdMnPIA9OeB/wCCJ/hzUda/bgs9WsxL5OkeG9Qubso2F2MiwANxyN0q8eoB7V3XwR/bd/ZH/aS/Zn0b9l3/AIKEf2pDeeHpQmieLrW3c7Io02Qu0kO6RZgjNGd0bI6orMSxrbuf2wf2A/2DPhV4m8M/sHXWq+I/G/iKzEUfia/tnkjtmBIRpHnSMYjDu6pHGVZlUPxyP5spy4ky3gKrwLHLa0sU1OhCoof7O6c5u1V1dklCWqa5rrZX09N+yliFiOZW3t1v2sfHv7ZXi7T/AB3+1f8AEXxZpTyNbXnjC/aBpJN5ZBMygg4HBA4HYYHOM15pXv3/AAT8+JH7Kfg/9oKfxZ+2j4d/tnSrmxmNpealYtf2sF6zAma5twrtPuXeAdr4ZgdpOGTjv2wfE3wB8YftDeIfEP7MvhiXSfB9xcKbC1eMxIz7R5kkUR5hiZ9zLGfug9EGEX9pynH1cFmkMgjhKqp0aMGq7S9lK1o8ile/NbW1r6O6Ss3xTipQ9pdXb26nq/8AwSV/Zph+O/7Tdv4z8UWit4Z8BRrrGqyTD9286k/ZomPu6mQ54KwsD1rgv2/P2lp/2qf2nNf+I1pdtJottL/Z3htCeFsYSQjAdvMYvKe4MmO1en+Bf2yPgL+z7/wTR1z4SfDHW72D4jeLr2ePxXPd2HlJa2jcSSrPynlfZ18tRu3h2dyqjBPxKvxk+ELkBPir4bOemNct/wD4uvmsno1MXxtjc9zVeyVP/Z8NGp7r5ItOpVSe6qTsoyX2Y22ZrN2oRpw1vq/0XyP0/wD2V7/wB/wT4/4Jvj9tXSPCdnrfjzxrM1lp1zc+YY4g08iRW7cqVjUQPK+zaZGAXdgIy8N+z7/wWr/aTg+L2n23xxOj634W1XUY4NRtbbSEgmsYXbaXgaPBbbkHbJv3BcZBO4c9+xh/wU1/Yn8Qfsy/8MS/toatBqPh4XZ/sXWdP1JJxBEZDKquIn86No5MlHjD5V9pUKp3eg29t/wQt/ZYuNM+O2r/ABm1rXRa3qvoVhq7z+Rd3cZDqsYe3gjldSAdrybP7wIr8jxv+rVHMM2p8S5ZVxmKr1ajpVYRVVOk9KMadRStScFo9murey64uo4w9lNRSSuttet11PBv+C4X7M3w/wD2cPihr0HwysYdP0jxT4KudVXSIGfbZzMJ45QgbIWNmTeqg4XcygKoUV+Is1vMq74du77rV+sn/BUj/gob4C/bQ8X+JviDYeLdNsdHtPDFxp/hrSbjWoJJlgEch3sqMR5sjsWKrnGVTLbQT+UckyTWju4+Rf8AvqvzDxhlmNPJshoZjVU8TChJVPeU2nzKyk03eSVk3d3aer3O/L+VzqOK0voU7PT3mkPz7G/j/wBqun8C+GbrVtUi022Te7Ovy/erC0eJJPn+7/F8v3q90/Z48MwtcjUprBnb7z7f4VX+LdX4TiJyp0pXPbw9P21WJT8Sa5b6Pp8Vgk0bvZp8/l/wrXk/j7xdc65Jt85hDG3yba6r49al/ZesXFjbTY85md12/dX+7XllxeJdRNt3Y+7t/irmwWFjpPc76uKlT/dn6ef8G/Pxavvh3458QajaWE00MGkfariTz9v+r+8qrTP2wtJ/aB/4KJftXTfFfx/4buJfC2jxNZaXpNnudLGz3f6zb97dI33q+e/+CP37QulfCL9qLSdP8Q3VnDp2oBrW8W++6yt/DX7R+HvFnwd/ZMste/aS+J3xK8JaL4L0pLjUPKSeMz3m1d0UMafxfN8tTSwl8zcXufreR43J6WSfW6qvUhH3f8j8Xf8AguDceHvhr45+H/7IXgPdHYeEfDMes65DG25f7Qul+Xd/tLGv/j1fDW/d8mzc392vU/2rv2mL/wDbA/af8fftIeIdPW2HjLxBNeWdqv8Ay72/3Yo/+ArtrzC4037O29/+AV+hYOjGhQjA/C8+zKpm+ZzxNSWrC3Z2X5+v8dTySf6xPl2L9yoY4/3ex3w396rG1/4NrfJt+aurlieSNXZuXYjfe+dmrY021eWFk2Lj+HbWNJN91Jkwu7/gNdj4P0lL5VQr8z
fw0yJe6ZN5Yzww7/vvt/uVom4+x/DvVbn5d8dvub+Fq2te0V41/cpu/h+9WT4zP2X4S6mEh5by1bd/D81ZyKpnlEep/Z4mvZ5llmb7sbVd0+SZVa5cfeffuasKxtZrqUMseRW62+KFkz/wGg1+E7f4d+Ptb8J6tBrGg63cWN5ayq0Vxay7W3f/ABNfdn7MP/BU12mh8MftIaas8TMqReJLFfnVW+VfMj/2a/N+x1SaFldE2stdR4f8RTLCN/8Avbdm6plH+U1w+Iq09j90tB1vSvF3hu28YeDb+G/0q4+a3vLeVWVv9lv7rf7NSyTTR3O9zIiMm/cv8Nfkz+zX+1l8YP2edSe8+GfiFYra4+a8028XzbSb/aaP+Fq+pvhp/wAFWJrqaHTfip8K7X7NM/m3F9oNw0bbv91v4f4qX1iUdGj3cHmFCMfe0Z9htffavuQ/Lt/hbbWBqVj5ly+9F2fLsZpfmaofhz8X/AHxq0GLxF8NPE9vfpJ80Vm21ZYf95avSRvJCLZztk83c21Pl/3azrVos+owNRSipxkV9Nt92zybZY2b76r91q868cxovxPnijXj7XDgE9PlTivVNBtZvtSQfZpNy/Lub7teZePFeP4uyhhgi9t+q/7Kdq/X/Be39sY9L/oGqf8ApUD6fL6/tKko/wB1/ofmd/wUi0v/AIRv9qLxBd20OwX1lC33Nv3lr540rRrzU50jRGYv83ypX2p+3R8HfEnxs/a+m03R7Dzo7fSYV8uFN25v71dT8Gf+CZ+q6W0Oq+PYfscEn31+8yrX5VTgvtH4pm9aNPMJwXc+QvA/wR1vXJoXhs5m8xtrMqfdr2/wj8CbPwtYrea3/ooVW3s33t1fUXi7Sf2df2edGmRPJvJIYtqxyfI33fu/LXxh+0J+09N4o1Saw8PWy21uv/PP/wBBqpVI/ZPN9nOp6D/id8StN0WH+x9A2rt+Z5P4q8c17xJLqFyzu+//AHqx9W1q51SZnvJmZmb7rNSRq8j/AO03+392op+8b8vL7xN5010yuif8BpWh2wt3ZqmsbJI4w7uy/wC1U81qir02/N8jVRPxe8Zd1Dvfd977vzVJ85I3sodv4anmt4Vh2bF2/wDj1QvIki/+PPS+2VKMSNlO5v8Ae+8tWYV2zb3Vcf3lpkaom5U+633P4qtMx8pMIv8Ad/4FSXuy94Udje8Pyw3MCo/8P+xV6exTzNkJYLsX7tY/h9d1xsfcxb+61dPb2bpyibv7rK1HLGQ5PlM6xaSOTe7sqf3Wb71alncPHdfO7Mjfd/2agnsY2kV03SM33l/u0jW7rIH2Nt/h20fCTHmlA6/TptNmj2Pc7mX5nXZXlfxskha6RLbjc27bXVWupTWbbPOYGuF+Klx9ouYn3/71OPMVT+I5BPvCu88D332XQ5nd8FU/hrgq6jR5vsPh65ldP4dtEo8xrU2Od1Kd7q8kkk+9vre8G6bPNMjJ/wACZv4a5+CJ7ib/AHmr0Lwvpz2dj9p2fw/dojsKp8Jf1aVLa1CQvjctYyxwzN8/K0mtaptkaJPmLNurPt752Xe7/Lup/ZMeX7R0FvdQpCqJ/D8qN/FUsbSXUmdn/fVUrFXmVT5O2tyxtvs8fz/Lu/hWlGH2QlV5S7pOmpHbSuE/gavPJ9Wm8M+NHmhm2rv+Za9Ek1ISN9jS5UL/AHf4qwtH+BHxd+MXi6LQfhj8OtW1q+updsEOn2DSyTN/sqtVKIqcoyPpr9jj9obwj4HmvLDxhKba11ZI2S+VWdYXjD4VlVSSG3dexHvke2R/GH9lLVr+S8XUNCnuZmLyzNojF3Y9WLGLJPua6b9gz/g2G/4KA/GfTIdY+MOnQ+ANEuHV0k1yfbc+X/1xX5lr9Hvgl/wam/skeBraGf4nfGbxRr10sOydbHy7aNm/8eZq/TOHfFrOeHclp5WqFGrTptuPtIttczba0klu272vra9rGM8JzzumfmI3j/8AZhkh85xoLJ0DHRePp/qq9D8JeDR8UH07wZ4O8JHXF1ZY49M0iz08zfaFIBjVIQpyMAEDHGO2K/UyT/g2u/4J0NJbFIvFgSDbvj/tv/Wf+O/LXyx+wv4P0L4a/wDBV3RPAHhpHi0zQPG2s6fp6yuWZIIIbyJASepCqOe9fqPB/iZjc/y/Mq9TC0IfV6MqiUItKTSk7TvJ3jpsrdT7bhDCOng8wTe9Jr8JEHgz9gD/AIKeeE/CX/CKeBPhb4u0bRbhNz6Rp/iSG0gYEdGgW4UA+xXNPtv2Bf8Agp/4dvW16x+HXiuyuUT5ryDxZbxyBf8AeW5BxX6t/F39qv4b/CK3P9vazGsm/atfGn7U/wDwVv04eHLrRfh5f2s1wZZF3LL8zR7fu7f71fk1f6RuYUk1DL8K/wDuHL/5M8jA8JV8XZttL+vI+Ividpv7VWuasfhr8WPFXiHWLnT5g40nVPFP24QSYKhgpmdQcEjI7E1+nH/BOH4S6J8JP2ftD07xd8SYZ3RZZvsvmCNIHkkaRoypJzgsRnvjOBnFfj54i/a0m8J6hc/Ga5uYbwX11Ikscy/6THJu/irFs/8AgqT4ztbyO2sL+4jRnZkVW2sv+z/tV+ecb+MnEvHGVU8txOGo0qMJ8/LSi43lZpN80pbJva1763srfYYHg/AYWTlSqyUmrcztovKyR/R5B418HwKlomsW0ny9pFNc78TvC37O2seG5/EvxI0HQprSzj81ry6hQFQvo33q/Cr4S/8ABVHxnNcW1lrfiS4V5rqO3gj3MzNIzfw16/8AtOftvfEvwHotnpXjDUo5XhiW4TS7iJpFmbbujZl/2a/MqWb1KfuzpnYuEKcZc1Osz7y0f43fCLT/AAZe/B74ZWt14Z0zV3kS3ubOVnut0jfeVW+7ur83v2j/ANnj41/sMftTXfxd+MXiS38Z6P4gtW/4QjxBq1vtttNb+Lz4/wDn4VfurW5+xT+2xpXjjxY2sa3eMbi4ut3mMnzR/wC7/dr6p/aUvvg1+018DtX+AvjCdkttQ/fadqN7tlls7xfmjm/76/hrhp47nlKNZ/4fI+qw2AlgpxlhvhfxefzPz1174ueA9e1a88f+KvEk1zPqC/uLrULpnvL7/dX/AJZx/wCzXl9n8Rpv+FlQ6r4A84W0zsjMvyqy1698Cf8Agk74z0mbUfiB+118XdPs9Nsb2RLVtLl8+W8j3bo/L/hjXbUn7R3jz4LfDmzTQfgD8E76+TR4Ge41S8ibc3+03y1206PNFa83MdNbMqVGteH2e5Mmm/FTS5X1vxJqSwwMi+RC3ysyt/EzV86/tGfEzXrdryCz1hXdXZd0b7mVa9W1LxN4w+K2i2sPijxzefYJrJXSz01Vj2qy/L81ZGj/AAA+C0dxFc3+iXl+6/LE2qXrPub/AGlX71d8MjxMve0R8niOJI+1l7OVz4lvPiN4q+2LDC7YklZf3KMzSf8AfP8AFWjbt8TtcUPpvgzXLtZH+9b6XM3zf981+gPhXwj4A8Jag03hvwHoth5nzOtvYRqv+y3zV00etXMby+TqrRLt/wBXb/Km3+KvYpZXh
qceWR5VbHY7Ee85H5wN8Ef2h9S8nUtK+D/iS88yX7sdlt2r/wACp/iL4S/tUaPH51/8EPFEUUe3fItluVW/h+61fo7NeXlzcD55pXjT+/taobi+vDCUS5uFXr5fm7f/AB6r+p4WJnGtjPszPy+vrj496PeJDeeFPEln/wBM/wCzZG+bd/u19Vfsp+J/GviPwLcR+MtKu7VrW6CW32yDY0i7eWx+Ar3bXPtLrNeQXLYb/W/PuZv+BVyFpuN7cyMWILLhn6nrzX6V4NYShDxMwVSO6VX/ANNTPpMhxWNjifZVJ8ylf8Fc8H8f6Vb6X4p1ee3yTJqcszMDkeYzN8v5EVxGpYM/z+Xhm/hr0b4uW0za3qDEzAG6bH93Ga87urWaaHyfJ+b+Dd/DX53xJH/jIsU/+ntT/wBLZz4uUnOXq/zMC+s3mco7sy7/AJ/k+as24s9rGzTzPm/h/irrxpe5lfYqj/po1TLoO1mSHan8btt3K3+zXBTqR2PnMRTlI5X+x08svv2bdv3qaunXMbG5mm3pv3J/FXeWPhl5YXmmtm2bN3yp92kbwjcrIf3Oz+L5l+9WntoRkc31WUoKSOMsrF0jhd3bLfNuVf8A0Kr9vo811MEhdtypu3bN22tubQEb5NjKGWpF059yO/y7vllVflpVKkJS94ujRnExV09/7/8AHtfzPu/8Bp/2F23zTJsRf4mSuh+x+X5UL2zbG+R2ZPu1Jb6Hc3DFPup91q4KlaEo8qPawuFn8Rg2envb7Psybov+ee/7v+7WlYwQsQwTD/Mvlr97/gVXLfQ4ZIfMdGX+H+6y/wC7RHbvbs88Ls6L8vlt8rNXBI+jwdOUR9vCW3zOjDbtZ2kf7tOuIXW1cuNw/jX+7Vux01JpvJ2b/LX51b+Kn3Gn+dZiHY0Qb5vlqeX7J7NGnzR0OaurF2mKImxFi+9J826siaz/AHgld5Ei/iWN/mautbSXaPZM6n+Gs/8Asl51dHeMBVbbXRRlA8PGYOXPzM4C11IzXDed8pb5k/urW5o+seYyo9+wH8Fcku9pN7vmTf8AJtT71La6k8fz71xX2EJfyn5BKPL8R6xpuvcFPOVlZF3+Y+3ctdz4f8Q2ybZkmZo2b5WWvDNC1nYyeT8/95pH+9/s13Gh+KnXc/n4ST76q/3WqK2Il8I6cep7HpmvJdSK+y4cs+3y4327f9qt/T7xNrJcozOzfw/xV5Z4f1y2lX/XNlX2/K1dZo+tM0eyG5YOrr5sleTiMRPWCPcwtP4ZHb2mzkzQs5mX+Fv9Wq1fhazaNd75ZnVU/i3Vzel6k74hS/8A3bff2/xV0ujzukZd3X5v4V/hrw8RufUYXXc1Ft0aGO2eTe7ff8xflWrs32beuEZF/hbZ/wCg1UgkEcYdH2/N8zbPvUv9oeYuxH8xl+Xatcso80PdPToylza/CZWpx2crN9mTj+Ld97/gVc3eW9nawsiTTZV/4vmZv/sa6HUrzyYVm3xn+KX/AL6rB1a8hVXmmlVmkl2p8v8AF/CtEf7x206kYGVdM8OLnzl3Mnz7vlZV/wBqszUJJ5rX5J98X92tS8+xpvmT55pPlf5t3/Aa56+kmkjdESRdv/LNaz66HqUcV7OZ2ukBIvARCsSq2koy3turgIJJmtTcwOvzP8m77u2u+0qRH+H7OCxX7HNyep+9XmU0bvtRH3fP97f92v3rxnhKeQ8Npf8AQJD/ANJpnVPMZYRp9zoLFprGFn+3rs8pWZtm1a1rWOG4zJvyyv8ALtf7q/3q5/TZvtMiQv8AMipt2tW5Z/uLlLx32wr/AAtX4DKM+blOCtmntomlYyPCG3jeqv8AIv3VZa0VsUmZ/nZWkT5P97/dqlZzQ3C+dC+NqbmVl/8AHau2sc0g2O6yLIny7k+Vf/sqXN7usjy5VOaRPb27x7NjruX+L+H/AIFSHT0m2wwpJhW2qrP8q/8AfVXrDTLm3gRNi/K3977y1qw2dtcRibZHvZP4v4a56dYytzfZMRtI+1Mr+TIFb7zb/wDx2thtLtmjj+zI2xflT5PutWla6L5bRvCilWfdtWtKGzkuLcW03yfP/q2f/wBmqeaMmk5Fxo+5zSObXTZvsYWZ/MG7ay7PvU29s4dNs2d9sSKrPLN975dtdCump88MKK6N9xWf7teeftReLD4F+Hd48Lq8twqwRRr/AHm/+xruoQlWxEYHNi5Rw+FlUfRHkln4k/4Trx7rGpbFW3tbVktfOuNy7dv3q+dfGmoTaB48a5SZQs0zI7Rvt+Wux8D+NE0PWNVsIEWJLi3VZZF+Zo683+L0iXX+nwo2/czK1fpdGn7OjGkfj1WpPEV5TcjM1jVJLy4vNGm+XvAv8O2sHS9chjmbTbybake7ZVG41aa8VLxOXX5XXd/drnteuJluH1KB8Kz7tu+tRR/lL/i6NzcS3ML7g38NcfdTPdKUf5TH9yt268Rf2lpOxNu9fuVzl1Huk3pu2f7VHvyHEv6PdJGrQ3L5Rk+6v8NVJLhIZsmZvl/hqKNvs7B9/wAtMnBlcuE20FlopDcfvkf5f7tVriNFZvKTaGqOKV4n21Z+1QyITs5quYPhKW35s05PvClk+VmSm1RXxCP901ImSy1G/wB008fIyf3anlCRr6e0a437l/vMtWZFh+dN7f738NVdNb5dm/duq1cyiSEp0VU/hrOUp/CZ+5zkEkkbbtn3qj85Gbp/wH+9SPOnl/u3yf8AZpi7Np+fbVFD92z7gxUVxGjSNIU/3/npzbNuzp/tU/anlsjv/wAC/vUE/aKkzfvOOPk/76qu/wB01Zmjzl/u7aqfwfjTj7pcYj7VtswOauTWs0kn3/vVTsXzcL8ma6RYEmtg+za+3alVGMxS5UYFwkiybJui1C0bL9zpWtdWqeWibMP/ABLVDy3iIx12/PUC5j27/gnAhH7W3h8t1+yX3/pLLXr3/BS34y/Fj4b/ABZ0LSvh/wDEbWNGtpvDommt9Ov3iR38+VdxCnk4AGfavI/+CcygftbeHc9fsl9/6SS12v8AwVjx/wALr8OFv+hWX/0pmr9zyfE4nBeCWLqUJuEvrS1i2nqqXVanFNqWNV+xyHwH/aF/aE8V+MV0/VfjJ4iuIlhZmSXVpSCdvpmvY7/4rfGmwjW5HjnVJUQZfF6/6814H+yppqXmvX135O6OG1Xd/e+Zq+gVs4W2xp92RG2K38VfiWN4mz6ElbGVf/Bk/wDM48Xyqpoj7g/4J8+LvCfxP8G2R8ceH4NSvA5jke5t1keRgvQ5HWvtfw98CvgvqGnxNc/CnQmnfh0TTo+P0r8q/wBiv4np8M/HEWgvNNFb3E+9I/u/N/F81fpz4b+PHw38G/De58VeP/GWn6Pp1nBuuta1KXbFbr/Erf3m/wBmvWwHFedTw1p4qpdf9PJf5nl1FUlU5YFzxx8B/g7pMLsPhxosA2ZVVsEDH9K+U/2v/jt+yr+yjo6X3xNk0iwvJ4WMGg29sr30hH3Ssajcqt/eavCv21v+C6Wr+Mrq/wDhj+xtYf2fZSRNa3XxC1aBvtd0v3Wayhb/AFa/7TfN
X5V+O/FPibxX4uvdf8X+I7zV9Qmnbz7/AFG4aWWX/eZ6ipxNn1aVli6v/gyX+Z6uCy+75qjPrXx//wAFNdc+IurzHwjp0fhjSYnUWtvCA1zIP7zyDp/u1na/+0n428W6BHeWnxV1fTNQj5R7HVJFikX/AG0zw1fIO98ZD45qwup38YCJdvt/368+pmXEPtLxx1X/AMGT/wAz1FhoxleKPoXT/wBoD9pnWIT/AGD448VXmDhpIb2Zv61U1D47ftgC4a1tfE/jHOcKVmmb+teLab8RPGujqE0rxNdWwX7vlS7avD41fFvem34haplfu/6W3y1pDNs/hvjKv/gyf+Ztyp/ZX3H6PfCm58Z+KP2LTN45vLy41m88Makt1LfMTMzEzqu4nvt2j6Yr4+8KfB+2haKF7m1kmZv9THPG0i/7TKtfVXwJ1DX/ABH/AME+0vtTvZ7rULnwhqwM0jkyO2bkLz69BXxj8JfBOq+BfElt4qv3k+0w/N5e77395Wr9W8YfaV8myCc5Xk8LFtvVtuMLtvuzz6FRU/aWetz7S/Yv/Zfv/Fvi6yeHSvtFusvzbW+Zm/2a2P8AgsdqFtdftDeGP2afDvmNpfwt8Mq10u7cralefvG3f7Sx7Vr7X/4JY6H8Pbr4Tv8AtCaqkNto+j6XNf6lJt+WFbeNpJPm/wCA1+cnijXtT+NHxC8U/HLxI7Pf+MvENxq0rSfeWORv3Uf/AAGPbX4rTj7OhdnHTqPnlVkeN3ngWa+0/fCjKY9uxq4vUvPtbiazmdkVX2vu/ir6D1DSYVXYHZEX5UjrkfFXwoTxpH5EP7u5bau5fu7f71clbBxxf+I9LD42cPi+E4Dw7dQ2tv52yRP4fubq+uP2Q/Deg+JtBks33STSKrL8m3d/s18neNvh54q+Gd5BZ+IYG8u4T9xIv3GX/wCKr2z9iX4tJofxJ02zvJo5LJZW82OT5dvy/wDoNfF59g8RTpuB9TlGKoVKsXL4TU/ai/ZN8aeIvGH9o/D3Qbi+Zkbda26Mzf8AAa5T4M/8E5f2iviZ4nt7b/hANS03TWlX7RqWoReUsa/xMu771fob4V1azj0C38VaVcRm+a4ZZfsqfL97cvlt/u1u+LP2irDQ47zXviR4wkt9I03Tftk8zRfu4VVfur/tNXJgs1n7ONOEfePpa2V5fOXtec+Bf+Cxnhv4afsxeK/hf+zZ8DfDFnpl54b8KLq/iDWIVX7TdXlx8v7xv+A7q+M/iF8Y/in8ULK203x38QdU1WztW3W9pcXTNFH/ALq16B+1H8adb/ae+OPiT4161JIF1S4WLS4bj/WQ2cfyxL/3z83/AAKvJ5rU28hfZuTb8lfo+Gw0XShOovePhsRjKsas6dGTUH0Es7jawRH4/wB2r7SPJGA7b3/g21mQq8M2/d8rP86tVy3uETMz/Kn/ACy2128vunnSkS28myb98i71/i30s10NrP5NUdUZ4ZEvPlZPuvt/hqBbnaQjzMf760uYZr2snmSbH3K/91q9l+Degp/Zb6rcpt+Vdn+1Ximh3KXEyJMfuv8Adr3v4e+Rb+FxMJtz/wB5qfLzGcpFLxhZpDdvsRpUXdvjVvmriPi8ws/hvJZ/KryXC/Ktd74mk8yMvt2p/e/2q80+Nl0kfh6KL5t32hV3N91v71Eub7IoL3zgLKKHT7Vf3eX27qZNdJcfP/C3+1UdxIjRqibm3Lu/3aqx3AZv92o983j8RbhkRlZ9m0fx1qWN1ux5U3y/e/3aw47j+NHbG+r0Nwn2dE3baA97nOw0HXLm3uAEmZVb5dtdz4f1yFdsaRqTv2srfNXk+m3STTbAjBdv+sb7q1q/8JhDplwn2BGeRf8Al43fLuqvdKPZ9P8AFlz8P9Qj8Q23iG60qeGXzYri1naOT/gKr96uv1T/AIKkftSyaOmg+HvG1udq+V/al1YK9zt+7Xy42sXOpahLf6lfyS3DN/rJHrc8O2v22ZX+81R7GlKV2VRxOIofBKx6Bq37Q/7Q/iaZtS1740+JJZW/5537RLu/3Vr7L+BHxF8SW/7PmlfE3xHdTazqNjp095K17MS1yYZJCqMx5wQgXJ7V8H6tdWtisNtBcqWk++q19qfB9Fh/Y6jWZyqjw5qG44GVGZ8/lX6/4NU4wzbHJf8AQNU/9KgfacH4qtWxtdyk3+7lv6xOQuP2zvDFn8ZJPip4b8NzRLeRRxPZ3EXzW7fxf71dH8WP+Cgm3QWtbDclxNE29tnyt8vytXytJ4i0qzXZpqb9vG5vvf71UL64sNcVvt9t5is+394+2vyH2cYnwlaXtqvPLWRzPxi+PHiHx5qjzXl9JJ5m7f8AvflrzS5vri+m86R2J/hr2K8+EvgnWbX5JpLQqm3dH81YF18C9Y0+YPYOt7C3+qWNNrVUY+8HwwOGsdNubhRv+b5vvVtaf4f8yRcv92u20f4SaqrLbJpUxO/7qr91q1tN+EevPcNbx2DBv71b+zI9p/McTFpsNra/Ii4V6z9UmRV+R1WvTrj4FeP75fLsNKZg393+9WQ37NPxguZFR/BkjIzfPN5qrtX+9S5BRrRkedXMe1TvfLN/eqFVTn/vnbXsln+yD4tkk/4mviTSbBNqv5lxeq23/eq237N/wx0dvtPiH4tQv95mjsbfd93/AGqy5YfCXzfaR4psc/3dq/LV2zt3WP54WU/7Ve1aX8KfgCyqltealfTNKrRL5qqrR/xfL/er1P4f/sm+HvHV8mm+DPgteTNcS7Eur64ZlX5fmZv4VX/eq40+Y55Yjl3PlLSLe5hbf5GTv+9XW2du81uj/edl/hb71ffXh39mf9mb4Q6PPbeLfhjpvibxJ9n8q1jVma0s22/eb+81YWg/s2/DTVNY+2ar4YV3ZVaXTbOLYka/7P8As1XLTM/b1f5T4l+yvaxrv3Dc1MjiRbrY8O4/d3fw1+iUP7PPwKtbo2z/AAl02K3hZW3KjeY3y/MrV5t8WvgV8H/Mlu/CvgOFIlbbLIr0/Zlyrcvu8p8V6hYoreckLNt/u15t45mM15sHRa+7dD/Zl03XN7p4VWNPmZZvmVWrTu/2KfgtY2Ym13wxb3N20W54YWb5qXLHlCFbll8J+dWk2JurgJiui8QWM9loax7P9lttfeOk/sE/CvVNUhmTwfDZwSL/AM92Rfl/2q67w7+xP8AdJ86HUvAcepvv3RW9xKzL93/x6nCMf5gljJSlpE/NDw1pH2i6R50wiv8AN/s139xZXjWYs9MtriZ1X5Vt4mbdX6KWXwf+FHhqFE0H4N+H7OX5lVZLJZG/3vmrpPDPwjub6H7Xc6bp9nDbory+XbwxxW6/xMzKv3aUuWOpP1iVSZ+V0fwp+KniC8CaV8OteuVk+55OlyNu/wDHa+ov2Vv+CFf/AAUc/an0yPX/AIf/AACvrLTJuftmq3C26/8Aj1fof/wSY+Cz/wDBRb9py/8AD2l+cnwt8Cyq+rXkabV1KRW/1at/dZlr97vC3hHw94K8P23hnwtpMFlY2cQjtbWBNqRr6CuiNShQjdwvITWJxWkHaPc/nY+EH/Bod+2
jqzw3fxL+LvhnRIpP9bHHK0zxr/wGvp/4ff8ABoF8GbfT4/8AhYf7S+sveNFtnfS9OVl/4D5lfs1swPljH4GuR+HPi9/G8Op+I4Jt1m2qTWthhfl8uFtrNu/2m3Vnic2nTpuUYRj6L/O4U8opuXNUnKXz/wArH5zfB3/g1P8A2Dvhx4ng13xx468UeKoIJAyafcNHbLJ/10ZPmavvX4Ffsn/sw/sv6RHpHwK+Cvh7w3DCm0XFjYL5zfWVvm/8er0jc3c1ka1bzXnyIjbPvNXxONzrEyd4HuYfC0o+6WrrxVZKjGGZWEf3m3VxGv8Axshs5pIIbmNpFbair/eri/j78QrfwLobusjIkcTO23+HbXyav7V3/CG/b/GetnfbNKrQW7JuaT+LatfPVsdmGIu3I+nweWUILmlHmPtPWPjVcaP4fXWtV1FbTzpFiiVm5aT/AGf71fkV8L/iI/w8/wCCiF58Rb66jZrLxrrU0srnarlvtQP0BLfrXtmm/tbeAPj94ulv/i1qt94ZttPv1ntfMRl/4DHXy7d3/hu0/ae1zUb7Fxpa+ItUfLHIkj3T7Sf0NfvHgxUqzyHiNT/6BJ/+k1D6zJ8NTo0a6jFK8X+TLn7fX7bt54k8VXdsl5NbPay/PCu7arN93/8Aar4q8YfFabWGW8m1Jre9mf8Ail+VvlrR/bi8babq3jee5s9RZ0kddkkdxuZVX7q7v9mvmjVvGk0l0yJc5C/d3V+M08LSlC6PJWIqUpcszsPG3jrWPFXljWLyQXML+Vu+6rL/ALX96uKuPEGvWEjQxw+dubbEyv8AMrUxfEH2hvJd1DyP8rSV9mfsd/8ABHzxd+0P+z4f2tviz8aNB+GHw4S5YWviTxLbtLPfhflb7NAv3lVvl3NW0MLSStI9KWMoRjF31Pnbw74R+Iug6LZ/EL+29N0prN1uLCS4v1Z/l+b5o619H/aY1X4xeJtXPj/xlcX+q3Uu9Fkl3Ky/d2r/AHa+mr79i/8A4I1+HrBf+E2/bk+InjL7KzLOug6dDaW03+7u3Mq1xnj74I/8EmvDscWt/BaHxRDeKrfZb661xpG8z+FmVa5JQy2Xxz947q1XHxpRUIWj5nI/Cfxd4q8I+KUfw9DIf4W8lNv3q/RD9iHXtNsdcitvjxpv9sahMm+10u8bbHa7vuSN/e+X+GvyQ1T4gal8NfGe+bW5r+2Vma1uN33l3fLur6r+Ff7cXhXxVr2meMUmaz1lbCO11L7ROqpIsa/Ky15mKwtOPvRReFzD937Pn/xH7YfDWz0nwt4rstW/4QzSr/QZv3U9g8XmNbq3/LRd33q1f2k/2RfAPxH00/EP4f2dr9ssoma40m9TbBeQsv7yP5f9mvkH9kP/AIKC/Ca/8PC78R+KYb4WaM0sfn7YlVfvbmb+Kpf2af8Agof4hvv2ltV8H+Jri9vfCeu6jI2mwPLtW3t9vyrH/erLC46UI8jiRmWUyrVVVpz+z9/kfAOuX2ifCX4i3nwZ1vWI47631SZ9Gs9nl7rVpG2qv97b92um0u8t1VIf4tzMjM38VeWf8HFfh/w5Z/t+WNt8Lbq609v+EZtdRtVtfla3aSRv/iab+zn4i8bap8MtMvPHepefcqyq8yrtZl/vNX6LhZVqmDjKfU/Mak6dLGTproz2qLU5+P8ASVk8xP3qrF92ry3k1qoPksHk+V137VWuVs9W+zzboSu3Zu8tU+8v97/ZrX026uZLrfvaRG2ruX+Fv9qrlHl1O+nW+wdXp+9o2h+0szfL93+L/Zp+5JJAknnRJJuZFb7y7W/iqhpa3ir5T7Y1jbc/8Tbf/iq2Psr3G3fPvX725krlqS5Tvp1PsmNrGmw3EMyb+G/iWuRurKSzuXDmT5mPDnvntXeTWv2WN9m3Kt8ism1V3fxNXL+LorWK9C2YITc2cSblzx92v0rwYd/EXB+lT/01M9vI3J5lTv5/kzxr4i6Ib7U7uEkoHLEqDgcn71cJceHUtWebfkM33W/hr3TxV4VN7a+a0bASKHJ/2d1ef+JvCqNM3+hqkf8AeZ6/L+J6kpcQ4yP/AE9qf+ls1q/xpX7v8zgE0lGV32fOr/M3/stWbXT7mE+ds3rtXcu//wBBrebQUMizTJ86ru+X+Jakj0Gbzt/k/Lt3ba+ejU5dDhqR5veQ3SdJhuFebyNrtt2fP96rS+H/AC1WZoVmC/dZX+9WvoOhpayOk3ludu5V/u1uaf4TMcRhm01Sjfw7tu3+LdSlV5ocxtTpy/lPOrzw/wCTF5zo22RvuqvzLVJtBuZJNj/M+z7zN95a9R1Dwj8yySIzBdzfN91qzpPCaNIkiIxH3dqp93+9TjWnL3WRKjLnsonCLodyrjy+qv8AxVdXTUt2CTQ5RV27o/mbdXSyeHYYwiP8zMu3b/E1WbPw7IFWb7S25V+dmSsZSh8Uj0sNTlzcpyseizSQtCiMq7/+Wi/d3VlXWl2cbO7/ACqv8Lfe213WqaXeMuya5XYvypI38Vc7q1vctJNtmjKqq72/2qzp1OY9yjHlloYJXy408v8Ah+ZP722pGuJo5tn2WRxs3fL96mX023Z/Ei/M7L/eqH7Z5x+020i/L9xd3zba1jKR69GnGQjTf6OEKSB/vbZP4Vpk1mkK/wDLNmb50/2qYs1tNCNiZWNNqbqTzIfOyj/dXav8TNWtP4jjx1PqeXarYPY3Don8Kfe2bWrFmkdt8aDB/wCedepeJvCvmKs+xV/i+X71cdrPhSazk85Nv7z/AGa+kp4jofi+Iw0o6nPaXcTWbb4UY/Jt2/71dZouoXTRh9+7au3aqf8AfVZEemzRMu/aGZa29JsZo5UtYUx91mb7v+9RUrHNRpcsveOt8PyQxqtzM7fN8u1W+7XbaTqWJPkdn3J97+KuA0uxeGNnjfzv4krq9Nvfs0Pnb8PH99VrzqnvS5uY9nCx9n7x3ei301nt3orf32rr9C1hLhV/1aLs/i+WvLrHUoZI0S28xTvVvmetqz16Xar3Lw7N+1t3yyM38NcVSjKUz1KOI5T0tdWtpoW8lJBHu+VlTau6o7rVEWb55tiN9xl/vLXJaX4gmaN3d22fdRo/mXdUOra69vdb/tPzfwNG/wAv+7trGOHn3O6OK5o+8dBq19Gsn75+W+VG+9urIvrx5DseZif7sf3d1Y114mc5+07W/hRf4qo/29bTb5EdnKv8m37q1UqMuX3zT65SjLQ0by++aWGb7yt87fd2rWRdXkMz537Sy/eZ/wCKoNS8QW0K7/Ok+b5fLb/0Ksu8vpmb7iu2/d83y0/qvuxKlmUUeq6C0k/w43eblmspsOQP9rmvMVjdmEMKLj5VlX+7XpPhlwPhaJC5IFhOST1/jrzyx3tKPkV0/ut8tfuPjGpLIuHEl/zCQ/8ASaZ3ZpiEqVCT6xT/ACNXS1+zqET5nZvmZvm+X+Gt2xsz5becjCON8o2/duasfS3RYfM+0qkv/PNvvNWrDLCsyQu7O0L/ADf3dzV/Pcqkuc82GK5ocpsaT+8keHpJHtZ/O+626tuzCXClPJZJWT+F/lWubtbya3
VU8jO5tyVehuJPJ/4+WZ1b/drmrU/3vOduH5pSlI6G1uob6PZ50Z3ffVvvLtrWiurZdttNbbH3/wAXy/w1yEN+8kkWy227f4fu/NWtZ6s65TyVfb8qbn+7XFKnKPvROyn7szrLW4hk2wj5Ek/hZvu/7VasLW0zb4PnSNNv95q4iHVkhZnM+f73ybm/75rVtdVdXR4V4Xav/AacYx57m3N/MdPH8reS8ypGu7bJInzV8n/t6ePEvPE2m+A0dcWe28umVPm3fw19G6n4kTTrW5uby+VGWJnRvK+VdtfBHxa8YXnjDxpqnieaZm+2XTfe/hVflVf92vqeHsL7TF+1l9k+T4rx31fAxpR+2clpmvFfE1xamZYkuLdk3NWD4uuHkhe2fkxqqvVbxRffYNUS8875f9ml1TVIdQt/tj/P5y/Mtfe8v2j82+E88m1AafcPDcrwrttVflpl3B/aFr5Py7WX7y1L4s06PdK6bUXf8n+1WNp946/upvl2/KlPlQSlMz7q3urHd2RmpkkjzbpFdvmrW1S3+1Wp2P8AN975awnieBz6Uy4/CEgKBlxTJJE28VMpWQh/mP8AvVHPCit/8TSlIqPmR7fMzv8AvUwllb56e33zs+7Ss26P56UTQbTWV2bpTqKctgCpGj2hNn/fVR1LJJuVS6YK0yJblzT1/ds+/dt/hq55iLjZDlf4kqlZyJ5Owp81WtrrIQj/AHan4jIim+Vm/wDHaZJIm3Y7sp/u0sjOzb6Yzbmx/F/eqTQfHJwqJ/6DToIYWVqjSR4+j/dqTzn8obP+BVfoZy+KxHcbVjLtxWezbqs3zlhhvl/2aqnk5NL4jWmPgO2ZCP71dhb28clur7MfLXGhtrq5/hrs9JndrON0m3bk+61UTWKl1b7lyn/fVZslt97ZBuNdFcwusfmPDtrLuFfcdibaDH4fiPXP+CdUZj/aw0BXAP8Ao19tI/69Za7H/gq+qn40eHGIJx4YHA/6+Zq5X/gnnE6/tX+H3f8A59L7/wBJZa7L/gqmm/40eHe3/FMD5v8At4mr9lwH/Ji8X/2FL8qRg/8AfI+n+Zy37H+jvJoesar9mXYssaRSN97+9X0P4J03R7q087yfPuPvJ/D5deYfsg+A7aT4Nw6lczTRNqGqSbWjib94qr/er2XS9JttHX7NDYSSt5W7dJ/6Dur8FxFGUqtzgrc0q7scb401a50PxMr2G2B4/nRlb7tRftBfHzxz8btH0rwl4k1DZomiwK0Wkx/6uaZfvTSf3mqb4jaK8MKarqVmyNcM33lrz/Wr5I0SGzTG77y1hToe7bmLpxieeeMYbOC1mv0tlTy03LtTbXhV3I01y8rvuLOx3ete0/FRrm18Pzb5vmk++u+vFXh8v79ejh48sD1MPyqmMeNVUU5Y3kpT3/2fWpbeGRmDhcf+zVudHMyHyXLKnZq9E+DfwdvPHN8LmaBvs0LfOzL97/ZrO+Gvw51Lx54mttEs7OZ1kl3SyRr8qx/xNX2T8Jfgrf8AiDULb4dfDfT22RyxxXEirubb/eXb95qiXPKPLE4sZiZUz1v4W6Bb6T+yxH4d0dBGseg3sUKp0ViZun4mvk6TQX+2fYIUwscux5N25t275q+5fE3w/f4O/DrUPAcSSCTSdFlGJvvFzCZDn8WNfLnwt+GviHxprS2em6bIqM677pvlVf8Aar9m8V4t5Rw9f/oFj/6TTPKjU0cj6w+G/wAYNX+E/wDwSK8T/CLRHkTUfiF4th0G3bzV3R2O3zLuRf8AZ2qq/wDAq8Ek0m2sdL8iFFhWNFSLy0/hWvUvjF/YOg6L4Y+Hugozw+HdNk+0XDJu+0XUn3pP/Za80uI7zXJPJeFj8/3fu7mr8WjH20vdL9p0exz39izX14+yNnaR1Wu68N/DvTfDGi/8JJ4kh8lm/wBV5n97+81dJ8O/hfZ2Ni/iLW91vEvy26/7tcR8dviw8zP4b0p1XduX5f8Ax6lWxEMLS5Y/EKXK/Q8x+N3ii28a3klhDbedbR/OjbP/AEGvGLvUtS+H/ihP7Bm/eKm9f92vUraz8y6+0/Nll27ttcD8RNFe28QLfww7oZk2oypXku2I/ivmOqjWnT96J6B4I/4KKXvw68KDwn8QtE1K/aDdPZrZ3Hlp523au5q8p+Lv7Z3xU/aI+yaP4huY7LSrX5V0+z3L9ob+9K38Vcl8QNHS+0+Qwwtvj+ZN38X+7XncUz2dxt3421tgcqyylL2lOFpHtRx2Kr0uVyPT1ukn8v5GP+0qfLWdd2aNC29PmrH0PXJtvzu2G+XdW3HcJMrQ9vvbq9nmOSXPGZhXSPbr5LorfxJUc2yFU2btjf3a1dQhQR7HmVqzZInm/c+XsZXpy5hx194lhkmmj+yzJ8jfLWXNvs7h4XnUlW27v9mrLB47hfOf5VenapbvfW/nBF8yH/x6iWxcSx4XZFut7oq/PX0N4XkL+C7a6+0svmLuaNk+61fOHhu4f7ZG+z5m+XbX0Bodx5fgdZnfd5fzRR0RlymVSJpXkL6hYyo7r+7Xd9371eOfHiTy9Jtrbfs/f7mhr0iPxM62YT+KRN25a8Z+M2sXmoalFHN9xXZkpSJw8feOSjvJmjKO9RvJ82xxTKOCKfMjq5R63DxR7A9Wobh5I/nm2p/s1QTp+NPaT+D+7THyo1G1J/uJ8sO37qUsNw8u1Efhv4f4qzF+Y/J96r1jNNHJ+5g8x2/u/wB6szLlOkhj03TbUXl47E/wr/E1amk+Orm482HQ9HVVX5WauRnjaE79avMOv/LFW3NUkPirWBYvpWnzfZrVv9bHH/FV/CHxHb3V9pWiyJf+JL3zb+SLclrCu7yf96vt74LaiLz9hldSMW0N4W1Ntuc9Dcfma/OW11D5lcI3/oVfoZ8B3B/4J9I4XA/4RHVsAf71zX654N/8jjHf9g1T/wBKgfYcEpLGYhL/AJ9S/NHxXpusPdXzoj8yfc21ryLpmn7H1jW22N96GFNzLXAJqdyrDyXk/efKixpuauw0JvCvguGLW/HkK395J/qNB3/Krf3pm/8AZa/IvhPi+U6nwrpOuasq3Oiab9mtPNVP7S1K42r/ALy13eoX3wl+HLQ2Gq+Kpta1hdz3Cr+7trdf7qr/AMtK8M8RfFTxb4w1KOa/vMW1u6/YrGH5YrdV+6qrXOX2ralcXz3N5eM8kjbmap5plfF8R9J2v7QHgy3meG2hj+zq25o1+8zVUvP2sNH0Ni+m+G7eVm/ik+avnGK6mjjLo/zM1Ps7O81OZHSGRi38WynyzlpJhywPbNe/bE8Z3CyW2lTtbJJ91Y/l/wCA1xV98dPiLr0vkvqsy7vv7X/8dp3gn4E+PPGd5HZ6bokxaTbt+Wvqr9nH/gl74n8TTRX/AIw8vTrdZVaXzvm3L/Ftq/Y296RjKtTpy92J8v8AhvRfiX8QrxNKsIby7Mj42x7m3bq+n/2e/wDglb8afikyX+vaDdafbLt837VE25q+8/hD+zT8Af2S/Bdz4217+y7e2s7ffLeahtjkb5vvLur5o/bE/wCCziWsN54D/Zmf7HFNu
in1Bn8zzP8Aajp81KHwmcVUre9ex6Rpf7Gv7LX7Mc1s/wATtY0+81WS33Raasqs/wB77rN/DXfah4g0258OjRPDFtZ6PZXHzxR6XF96Nl+6zfxV+Xvw5+IniTxZ8QLn4heOdZuL+9m+Z5rqVpK99b9pzXVsYLC8muES1i2xSK3y1HPVI9j7x9dQ+BfhFpOi/wBveKvFUKf3odu6Vv8AgVcr4m/aM+Ang2NU8Nw3V3NuVNzbf++v92vib4tftRa3qEkltYXmfk2xSM3/ALLXEWnjrX/Fmool5eMfM+bc396sv38pGvs48p90t+098H9SuHFzY3lt95Uh/d/vP4vlqfxB+0l+zfqGivYJo91EFiXz1aJd3/Af73zV8cW+nvfN/wAtMbfvK1al5pdh4d0s3Mzx/c2p5jfxVpH2sY3bJ5YylyntV5+014DXW7iz8JeHry20tdyxSXnyu3/AaJP2lvDcMcKeH9NkaeFtss00W7/K182trH9sXvlaVN/s7v71dd4R+G/iTxBNDbfvNjN8/wAv3t3+1R7OUve5glyU9j27/hfmpapH9ms4VQru3Rxr/rNzVt+E9Q8beJmltrCFlmkXduk3MsfzVH8N/wBnn+zbUfbLDY6qru275lWvfvAeh+G9Ls/s1hFbonlR/wCkN97dWkacacfiMZVPabI5X4ffBvWL6N9b1t/KVZVV5JPmaT+9trwD/gqB+1Y+gIv7HvwguFgn1JY5/Ft9Zj97a2/8Nv8A7zfeavo79qf9pLQfgD8HNT+J1/rEYuLVNul2MMH/AB9XDfLHGv8A7NX5l/AHQ9Y+Knxqi8VeObuafUNY1uO41Sb/AGpJPu/7q7ttRDkky6dH2ceaR/TV/wAG737KWnfsyf8ABO7w5evp3k6n4ukbVL12TDNH92Ef98/N+NfeJJOMV5t+ypYWHhv4BeEvDFmipFp+g28Cqv8AsxrXo5mRByRV1+f2judWFlTVBWMD4r+JIvBfww8Q+LWmMX9n6NcXCyL/AAssbFf/AB7Fct+zZpLeHv2fvClrdERzyaJHdXW7/npN+8Zv++mrj/8AgpR8QJfAX7CfxT8UaXtknsfB9xIqbu33f8a/L/xj+3n+3H8WvAOg+GPBetR6FpS6Xaosml3X79Y/JVVX/gVeHnFSVPDKNviPUy+nTxdVx57WP198X/HD4QfD6DzvGXxD0qw/uCa9VS1fI/7Tf/BeP9kz4La23gnwPb33ivVt7JIthF+4hZf7zV+bt98HfGHiC4+3/GP4tahNbx7mltbi8Zm8yuT03xl+yj8LZLzVZvAd14n1j7R8+5WjRf73/Aq+Tti37spRXotfvPo8PgcvpyvJSl+CPp/4lf8ABSP4oftD3R1O+0GLR9Ml3fZbGA/M392uE8VfGqaHw69tbaIuoXUkqqkc33lb+9/31Xzf46/a+8c65rESeCfhXZ6RayOsEUP3njb7qt/3zXo/g3xlf+GPCt9q/iqa1h1VYIWt45vn2xt/FURw0YbHqwxMKn7uJYsfG3jDxBrmoQ+LbaGGJUjfzpk2xw/7rV5n8evHmofD7w14j8c+FdRjuJbWSRra6kbCzI8uwsT/ALSsefeuW1L4u63481zxP4J0bW7i6abbO8ccvzbW+XatdL4p+EGq/ETwNcfBy0jMd3cWi2uy5l2FWiwSGY9PuHOa/ePBqEHkvEdtnhJf+k1D6DLZtUa1t1F/qfJnwr8A/EL9sP44aH8H/BOlySaz4q1dbPTreP8A1bSN95pG/hVV+Zm/2a+5vjb/AMENf2VP2ara20P45ftT+M9f1w26/wBo2PgTwzC9vYt/Eu5m3SbW+XdXhf7HPgv4i/sD/tYWXxc8S3Nm9vpuh6klncWsqyNa3EkLLE23+Jqwfjd+3V4w+KHjy38eX/iS8iuVsI4vmuNqqy/e/wB7c33q/F6mIlh4eypRPHwuEo1H9YxMv+3T17wb/wAEj/8Agnf8UL6Sztf+CkHiDRrlv+XDWvBsKyR7v4fvfer374//ALbnwr8A+Ebj9kPwlqMepeHvhjpen6To1v8AZVjguI1j/eTeX/eZvmr80r74669qnio6xDeSRSq6sm19qs1c78Xviff+LPFUnip9v2+6iWK9k3f6zavy7q4KtXGYmPJPb+tz0o1snw0ZTo/F/e/Q7H9qTxH8Or7W5db8H6Ja6bNMzNLHZptX5v4dv3a8cj8TX8iukMzJ8m1dr/M1Mg0/VdfmSGYKjbt3zLXT+HfgD8SPFFu15pRhwv8Ae+X+KuqjBOHLPc+YxWZYipVk18Jm6B4F8beLozczQTLZ7lTzpPurXs3wh/ZHspIf7e1LWFuIo4v3sK/drj9N+CvxR0XUIvC+rePIdN8xt/ls25W/utX0/wDsG/sTyftIeMtY+Hmv/tE6xpt5ZwbUutLVfL8xl+Xd/u1x4xVoJy5oqJlhac8RU9xSuY/xSm8GfD34f2WjeErOx8P20MWy6mbdumb+9u/3q1P2Rf2rIdN8bWXizxpq9nFo/huLeuqK27azf7P+1X2f8M/2SfgJ+wva6d4c+KOveGfG+p61p10mt6l8QLJXgtY925bhVZv3bKqtX5a/tofGD4Y/Gz9qrxn4n+COh6bp3g9bpdO0aHTbXyILiOH5WuFj/wBpt22uLJcLHNcTOlf4ftG+PzbGZTaUv/ATf/a2+Pk37c37X2u/Hia28q0uorew0aHbtZrO3+VWb/aZt1el+F7pNN023htodsUcGxFj/h214Z8GRZ2t41zPtaaNP3W5Pu163oN9H5azI6rt+Xy1/vV+mU6P1eEYI+Kp4iWKryqz3kegaTvureL9+qps2zqsX3v+BV1mlTPblEtoWVPupt+6tcB4f1qOJ0R7lYV+95ddto+qblE6bWaRP3rLL8u2s6kOvMehTrHbaTC7XG93Zt3313/L/vVtwt9nsxv+9t+Td/drldL1hJrcpM+14/7r/wDoVXjrXmb0e2aJF27FZ9yyVwVI856dHEc0NDQuLn9ym+2Vzt/esv8Ae/hWuV8YrslhGQSWkJwMckittdSRWdHTG751jVvl/wB6sLxbc/aDbndkgOchMDGRX6P4MXj4l4NPtV/9NTPpuHpKWYQt5/kyafRjf6LAix5Vo0Z2rldc8PpdJsSFU2/Kzb91dtpk62+lQlZWwYhvRl9v4ar6hp7pdfZoZlf5V8plTb/wGvyriv8A5KTGX/5+1P8A0tnRzXqzXm/zPNrvwfZyTM6bWb/2X+9TY9Bfb50KSEb9v3PvV6ND4bRrhv3Kt5m3fI38NOfw7PHMr20LLt/2PvV8pUxHNLlbOiNE5PRfC9sk3nQo0x/jVovlWuls9F+1W6O8Kqn3dzfK1bGj+G9rK8tzJtX5W+fctdfpvh+GS3+zfZY32vuRpP4fl+7WMq0Y7nZGjLocC3gndCdnlhG+bzPvL/wGs/UPClztaaaFkdf4lT5a9X/seHyV8m2V/LT/AFa/LVW48NpNH5m+N42f5/LesJ4qXwhUwsYnjV54XdWZEs4Wf/llI3/sq1QuNJmWPzvs67W/8er1XVPDtt9oYpZrhV2v
IvzbmrkfEelvbsyI/wAi/fatadbmlymtOjKMOaRw99Z+ZI0KJH+7+aVWT5V/+KrjtatUDTIkKqG3NL5cW2u/1eZ41+R9kSv8y7PmauM8SfaW8wJwJP4mT7rV20ZHVh5csveOE1aO2hw/ysyttT5m+b/gNZrRusz5RdjfLub+GtrWEmjtGd4t7N8q7flWueW4eNvLwrfO21lfctdq5pR909ijJe6TfIsbQu6j/a27dtSwzPNGJlhXZGnzSb/utVSTzpl/ffKi/K+3+KrdvvaFNiR/e/hq483UjFRjKMjqdY8PoY5JHhkT/gG7c392uR1jwrDJsT5d7fLtr1TWNNjbfbJcyIvm7lVW3LXOaxos0bMnk+YkPzfu/wD2aumjWlL4j8wxGHhc8xuPDsMjB0+YK+2Xcn92ren28McK7NoZn+Rm+8y10l7p81xJ8+52jTbL+621Xt7OGGYwvbLjflW/irT6x7vxHH9XlGr7pDZ2Mit+5h3hvm21pWtjNuXfFkN9/a/yrU6x+TZhETa0fzfL/Fu/vVJBNMLeI/Zmf+H7lKNTmjZGkafL8RNHshbyYdv7xv7v3aFnhhYI83y7927+7Wa3nRy+TDOx+f72/wC7RcXX753R1dP49v3du2tKcb+8pGcqkTej1Z49vkps/wBrzflZaoalrlyoZ5pmZf4d1YP9oTN8kyM8rIq/K/y/7NF5ceSwT7T935fu1tGMY6GUqkuX4i+2sT3jM81z8v8ACu/duapI9SeRXuXuVTy/uq38Vc62pJHMfLdaZ/aiKrwzeW8W1XSOtJU+aFjm9tOMjdk1J1X7U94vzN+6jk2/N/s1BHMl1I9zNMzOv3tr1lzXiXEiTXK703fuvl3bWqeG6SOQvvVU+Vfl/hrKpyqBEcRKUdT3Pwgqf8KlRYxgfYJwMfV685t1dUXUk2v8/wC6jZdqrXongxv+LQo/X/QLg8fV68xhu4ZP3szsy71X92tfsHjK5/2Jw64/9Akf/SaZ9fnDTw+Fv/IvyR0+nybpo7bZmVvl2yf+hbquyXSLN5LzLu37lrmLfUn+1jyZmlbYy/N8q1fm1D5v3Ykyv8O3dX87Vuf29+XQ5KHJKJuw6m7r53lLGy/L8z1I1x9oVIUuVRtn73zH+ZlrmZNQ8yYJ9+Jm+ba+1lq39rmZv3x3rH/49WXLKUeY9WnW5fdR09vq1tDDCnzSv/e/vf7VWrHXIYX37d+75dv97/armY9Q2xxJ9xv4G3/Kv96pLfWkh/c3LeUG2sjf7X/Aay9jPp8J2RqX6nUtrky/JZwxqv3vMVvmWrWna08bF0mUhovmZvmrkI7rzJv3j7wy/My/KtWo9T8m4CIn8H8NbqnzbE88lIk+Nnjx9B8A3lzDeNHLJB5SNH8zfNXxtrV08MjfOzp975vvV7J+0J40m1DWk8PQv5dvbxbpdv3v96vGdWZJGbuq7v8AeZf9qvvcjw/1fC80vtH5pxLjPreO5Y7ROV8WxfaLUps37l+7XL6PrG1WsJpGUfd2/wB2un15d2770q+Vt3fd21wfiCOa3vGuIX5/javaj7x4FP3SzrUz3jGF+K5a8t3gm+TcwrdtbxNThVPmR1/i/vVT1axm8tkR927/AL6olErmM61vk8wQu+f9mpdQt0ulD2yKrf7NZMy3MM3z8H+7UllfeSzbn6/3qfwj5SOSOW3kKFv96nrsmj+T5WWrkkCXy70dfuVmyRvby7G6rU/EUPlhEZ2VE/3jU8bJPD84+ZagZXR/n+Wq5Rx3Eoo8zeaKocRkf3x9anm56dqiC7WWpJt7S+1TyhIu2EZjkTZt+b79XJF2qdnX/wAdas+zDsv3K0RvZN833f7tTAkgmkfazyJ81VfM+b5Eqe+Xg/O2P7rPUKqnlr8n/fVP+8T8Q9XT7mz7tO8xF3b3/wBxagj2LN9/+CpJmhxvzupBIq3Tb2AFRs3lr70s33/wqP761US4/COrr/C8if2fEjov3PvNXHq2eDXV+E2RtPEaP8/+1T5kFQ05pvMZvn+VvlqjNDvZsfKPu1dmVI5/nG7bVSdvN27H3bd1RL3dDH3JHrv/AAT4iCftW6G+/cTbXv8A6SyV2H/BU0MfjR4dIUn/AIpkdP8Ar4mrkv8Agn2f+MqtBHX/AEW95/7dZK7f/gp1bC9+Nnhy1Emxn8ORorf711KK/asu/wCTGYv/ALCl+VI5J/72vQ9U/Z58Pw6L8C/D1mEuE8y189o5P9quj1DULbTVed03Iy/xPu3f7tPi/wCJT4V07R7OZW+y6Xb2/lr8vzLGu6uL8dahc6fp+HdmmmfZuX/lnX4HUlOUpNHn+9KfMcx8QPFd5rmoNDCGeGH5UaR//Za5ePSUt7eS/vNv+wsj/erUu5kt1k+0vtdm/u0uk+Bdc8XTp+5mwybkX+FqujRn8ZtGpyv3jxD42NH5aWcybGml3bV/hWvL9Ut0VTs+Xc9eiftBW/2f4mT6Jbah5v2GCOJ9v3Vk2/NXAXVjeTR7ztdl/hWu2nGfKenT+Ey1QtWx4d0G81u+hs7CGR5Zm2RL/eb+6tVrHS5vMCOrKzfd+Svt/wD4Jw/sl3Orf8Xv8VWCtBb3HlaNayRbt0n/AD2rojR5jPFYj2MTW/ZV/ZD8SaTo9ho9hYSS67qksf2hV+bbH/zz/wDiq/WP4K/sP/D39kP4Hv4w8YQxjV7iJn3Kv+rbb5jKrf7Ndl/wTz/YRtvBOk/8Lo+LVh9nubhGewhvItrRx7fl/wC+q8p/4KVftUal8SvFMPwz8Hzf8S2ziki3WbKqxt91v++q0rcuGhdfEfPSqSxHvTPm74n67a+Ptf1jWTGTBqDSDbI2SU27OT9BXHQ2KaTp/wDZulafDbwtAqu0aqu7b/tVsW9tFaaZ9nDllWM5Zuc9c1xPjLXftFydN0e/k/eJudmT5Vr9Z8WIueS8Pt/9AsP/AEmB1RheJzMnnaheN50Nw8sm6N2+98u6u9+G3wuh+XWNbTZEvywLI3zf981B8P8Awe8khurrcWjbbtb5f++f7y1r/ETxtZ+D9JbyZleZovkWP7y1+J1sRSwVK4SOf/aS+IVh4b0u30HSrlo5mfZKsdfNl5Nc6tNJ5yMXb52krqfHHiy68TapLqV5czM0kv3W+7HXMyR+ZJvR2RN25o1r52piPrEuaQ+WUoxGTf8AEo0z7a/yiRNqN/C396vO/GGvQ+c0Oxs/wKr1v/ELxgkcaaVpiSOzfKq7vlWvPtaZAzO+7zm+bbvq6Me5tD4TJuLf7RM6O+5pN3ys1cB4/wBBfStU81E+ST+Ff4Wr0SLyvtnnTblH8FYPim6ttShmtnTO5Pkb+7XqYeUoSO+jLl944Kxu3t22/wB2t2x1TzF8nr/Fu31zdxH9mlZD/C9W7G4/g3/NXqfYOw6aW6hk3R/eb+JVqBpHbbvHP3ty1V+1P1R03qn3tlWrX5UV3dWf/drQzj7pG0b3HyOjD+41EPnN987uzf7tWmj+X7/Kv/DTZLPbcGbewb/ZqdfhHKXUr6bZvY6wsKchvmir2/R7zyPh6jv823au7Z/s15NNpPnWcV+
isz2/y7l+9tr0m1kh/wCFftHMinbKuxv7rU/fIkY11fT28Oyb7uyvKviBM82ulN/3Vr0PVL5P3rzPuG/5d38NeXeILh7rWbiV33fPt3VBpRKVFFIrbqr4jYFXApaKKoCRY0Xl3x/srU6ahc7fs9nujVuy1X8z5XcnJ/2qWO6mVfkpfELlRKLG8lk3ujf9dGqRlhhUpczMf9mOq8l5cyffmZh/dqJiWbdml/dFyls6htXybZML/F/tV+iX7PTNN/wTojPUnwdq+P8Avq5r85beF7iQIn/Amr9HP2f0WD/gnWiK/C+DtX+b/gVzX7B4O/8AI3x3/YNU/wDSoH2HBkUsZXt/z6l+cT4GtZodFhZ4Zle62/NJ/wA8/wDdrNlvXuJDNNNudvmdmf71QtM7D7/C0kbJJ8ju3zfcr8dmfFe/9o3NJ+aze8mh2hvlWo7WxudSvFhh+Z5H+7VqSFI9Hhtkmw/8a16Z+zn4M0S68SR3/iRI0gh+d2kfau2nGIjY+Af7F/xF+L18JtN8N3D26/62bym2r/tNX0Ev7Mv7OXwJhhtvH/xCsbrUlVfNsbfayxt/dZq439oD9ve/8I+B3+HXwfmj0mO43JLJYuys0O35VavkiTxtr2tak+pXmpSPPI/zzMzNuolUlKPukSp825+kHw/+OnwD+HccM2m2sN1cNLul27dqrWp4u/4Kg+Hvh3o7zeGNNjiuG3M9rIisq/3a/OJvG1xptiYYbmRX+8+2uY1nX7y/kV5ppC/97f8Aw1lKM6m8hxpwUT239p79ub4tftEapN/wlXjC+ubbzWX7PJLtRV/hXateP6WlzfXS/LisiGHzm2Abmauo8N6b5MyNMjKG+WtIxjEfuRPTvA7WtnpG/ftK/f8Al+9TvEHiy/WMw280ixfd3bvl21V0Ev5K20L79v3Nv8NXrHwjqWs3z2yQsxk+9troMvf5zC03RNS1S63+W0hZ/k3fNXrXgn4bPb26zXW0Ns3bv7tdR8Kf2f7mGz/t7VbPEUarsZnrttc0fStH02TY8YMabVXZU80Sebm+E4q3hs9JhFy8O5I13M275mb/AGa8v8eeKL/xVrn2LTd3l72/d10fxK8XfaJ/sdttHzbdsbVT+H+g6Da3D63r1zHiT5kj+981R7TmJjRnH3j0P9nv4Pw+IFhl1VIbdYf3sv2ivprwfpfgDwjpkUO+H7Zubzd3/jtfJN58eLDw+zw2dyscX3d3+z/drB1T9pPWLqRUfWG8hX3bt+1qy9p9k19jzR94/QlfGHhtpt6bYopPlRfN+78v3v8AdrA8UfETStOZ7mw1ViYWVkVZflX5fvV+f2oftVa3bvvtNYmYxp/z1rDvP2ovG2oRyo+pSbG3ebt+61KPNLRkxo8p1n7aXxO1P4v/ABWtPB8OqtPp+h/v5Y1ZvLa4k/8AiVrqP2P7Gz034jaVf+R80OpW+5WXd8u75mrwrwGz65NLqty6vNNO0r7q91+CrfYdSS8s/leGWNk2tt3Mtc1Sp7OrE58R/Kf0x/sl/tFWGp/DvSLO9uWdo7VV87d95dtez33xt8N2dqlzM+5W4VVb5mr8nP2M/jxef8Ivav8AbNm2Jf3ay/dWvpi1+Jl5qkYs/t7NF5XySL8u6vbpyhUhdxPM5p0/d5juf+CiPxPs/ij+xx8XPBPh6xk2XHgHUAkjJ96ZY933v+A1+SnwB+OtnN8G9A1u5v4UH9g26SqrbpGZV2/NX6Z3lm/irw/rfhjUrzzINU0a6snVn3LJ50LLu2/8Cr8Avhv8QvEPhXQ9S+F2pTNDd+GdevNLuo1+XcsczKtfO8Rx5sMpRXwn0XDtaNKrI+vfiB8etKufOhhud+75/Mb73+9Xifi74kW2rTzXNntiaSX/AIE1ebar4qvL682fatm5dyfPXO33iS4t7r7T/aSokKbdqpur4WVacj7KnjIy0Pa/C98+rXiarretxxtHu+98q7a4342fHy51LxE9homqsbaO3WBfm+9XmmqfELXplWztr+OFG+//AHq53Ut8zM9y7Ft3ztv+9WtOVWUbSIljIxj7h75+wL4ihs/jxeXOsfZ2ims1l86T7u5W+7Xvnxh8ZjTtL13x1AuAZpLlVRuAHkzjPp835V+f+l+INe8N6out6DfyWlzD8rtvb94v91q+wfiXrMkn7LTa5ekO82g2UkpPcuYs/qa/e/ByMlk3Eb6fVJ/+k1D6XhnG06mFxClvGN36WZ4j4y+LHifxZJse/mKKvyeZL/D/AHa5+38B+Cdd8C6xqupaxdQ67avHLpFvb7fLkX/losm7/gNYl14ks5I9iTbFX7yr95ql0W6triORJpv9Z8yLH/8AFV+IfBq5HL7alWlrK5w2pXlzY3DGF/49u1vvbqfpcmpahJ5E1nvf73y/xNXT+NPh/o6TLeaPqXnSsm+eFv4WrL0fXE8O3EdzNb7drr95K6ZKlOHu+8ePUUva8sixcW+uaTb/AGx9BvNv/PRbdm2/8BroPDf7Qn/CO2/9lfb5Eb+7cIy17T+zz8fvDf8Ab1tD4h0e3lRn8po5ol/eLXvWvSfsZ6feRXfjz4XaTqVtJ8zqsSxNG38O1lrzY1MJU9ypeMkbU8PWTvB80T458NzeLfjlrlrB4EeTU9RkfZFb2aM7f981+mH/AATC/wCCdH/BQH4RTXHxG1n4J6e9tqEvm2q3mvQwSs395v8AZr2//gln8Uv2Z/Dnjy3+Hvw7+Hnh3RzfJJcNeWtnD5/lqv8AFI3zfer7rn+Knhu1uv7Nt4Y2j3fLJCqqq1yYh4Tlt0PbwuGxeElzw3Pxn/4OCfhD+0l8HPAXgnx18XfFuiyv48164sL7R9DRvKsYYYd0cPm/xN/6FX5h6LCkLGCBNkW9dsa/w1+qP/Bzr+2J4P8AiX4q+H37HPhKaO4ufCt7J4h8UNCys1vI0flxQt/tMvzV+VOn3kDats+XEn8Sr/47X2mQYOhhMDH2ceW+vqfnud1KtTMJqcuY9G+HupTabDLB5ylmfdub+7XceHfFybfk3LtT7zN8rNXk2m6hJaqyIm1WT7y/w1f8M+MMRpveQur/AHl/hWvaqfAedR933T6H0XxVDuSZ3Xatvt+ZN3/Aq63T/Eb2scU32lVfZu2qvyr/AHa8F0LxIt1dIiTfMu35Weu20fxnN5LJ57bl+V91c0eY64ylGR7PY+IIV3+VuDSLulZW2qzVqw61Db27v9tkzH/qoZJdy15N4b8VI9uYZvk/6afwtXR2+sp8n77O5FrGUftHXh6x31hrlzbzF3m+6v8AF97c1Lql3HdJEVO1lB3Rf3Olcxa63N9+Z13t97/ZrT0+6a7DydQDgMepxkV+i+Dcb+I+Db7Vf/TUz67hef8AwqQj6/kzo7K7lNpG8gZhEgXy1bkr7VdjmtpE+0+dIrsn8XzN/vVzNpq9ukjQRSEGP75WTGW/u1a03XEYzJDPHu3/AHWf+L/er8p4tt/b+M5/+ftT/wBLZ0KpKOKm13f5nV2dun2MpDN975v96r0drNHh/wDWN8qvCzfKtY1jrCWqql593Z87L83zVr2uoJ
+5SEs4m+avh61OXNzcp7OHrcxu6bY2s376G2U7v96tnT7e5jj+RF+ZmbdJ/C3+1XPQ6sm1PJjYN/d3/LWhDrx+0JbQzKGZWZ7ff/7NXnSlKU+WR3+05jaaJ1mFzCn8Py/3Vaqt8u3dMgUbk/u7arTa4kLMiOpdvuqvzbaz9S162tY2mubloyz7WaR/++ajl5dipVOb3WQaps+xzXmyRPMXY6r/ABVx+uSPNC1t/AsStWvrNxc3CunnSOPKV3Xzf4q5m+1J03ec+G/5ZMr/APoVdMYy925cZfZOX8TbJJlhxIxb+Jv71cV4ksZmhkeaZv3fzO1d9qUcxm3p87/wf/FVxHiaSF43h8n5t3zfN96vVo/vdjGM+WZ534hZ5N+x2Td8yL96sKS3mhDwj+FN27Z8u6up1uBJbpdiNCqpt/vbqx7uOOP93Cm0/wAfl1304np08Ry6lGO3ePdPN5hP+1/FWhp+n+cf9Svy/wB1vu/7VNWR4YQ6WzKn+18ytWjpMfltD8+yKZPnbb8kn/Aq09/4i62JjytHockKQ4hSdZWb/lo1ZGoQySM9s+5m27vM2V0l1YuJGj+4n3t1ZOo2ryW7vchjt+5tb7tT7sZe6fE1PeOYuNNe4VXebLN/yzX5vlqhFp80amYOreZ/d+8tbC21wrIlzNMsW/5ZNvzMtFro8LXUPk2EmJEYsu/5l/3lrblhGXvHPKPu3iUrOzmuI/kRdyv+93fNVn+zXkhim3r/ALS/drasdBe3jldH3MzfN5abVWra+H4bqY74Y/l+8zS+Wq1nzRfwmdSjKJyVxpaRrJ+5be3/AC33/LurBvNPeGGP5M7VbYv8LV3+oaDDGfOSGRiyN/qW+Vq5TVLN41be7A/M3zP81dtPljDQ4KkeX4jkbyZ4WXztqLu2/L/DtqlqWouq/uXz/Cm5v4as6sba3aWaZJG3J/vVzepalNCzJsUt/Bu+7XVTjKUos8mtU5fdLv25E2uk3K/Nu3/eok1KFwdgZpNv3lrnm1rbv2JtT7u3ZV+3uplm2O+dybt23+H+7XdKny+8c/NKRr2+oOtvseFtq/6pVqza3UNqXlf5U+81ZtrJIrJNH5ixs33f7rVfh03z5Bvl3N83yt93dXHUjCUrMr35aH0F4GlEnwSjlkPH9mXPPTjMnP5V5at08d0mzd5W/cjf3vlr1PwOxPwURmiVf+JbcfIg4AzJxXmNrY7k+R921NyNH81frHjRLlyPh1f9Qkf/AEmmfbZvrQwn/Xtfkixbt5cm/equ0X3W+7UqxzPh3Zf+A/Nupmnw+ZCN6b9z/d21Z2+ZtdIliX7u1W+9X8+xnJx9w4I+7GJDJbzQ22+F1R/4f/iqns5kb7k3yqn3du1d1RyQozPjbH5e5WVm+anx2rw26PBM2P4Vk+as51JSjy9jrp1JR90kkmufL875W/iT593/AHzUtrqDzNEnkso+/tb7q/8AAapN/o/3DIrfd+X7tSWm/ck3nM6r8z7qco80IxO2NaMYaG3askkDzPud9+5VX+FqmupnsrV7l7xVWOJnfc33WrOW8RG+R2V2+/8AN8tYXxW8SRaTov8AZUMqr5yN/tV34XDe2nGETnxmMjQw8ps8j+I2uQ6xr1/qtzC37xPvN/FXm99qaW8jvHMw/wCBV0HiyZ2V4U+cK7bJGevPNSu3eb55l3f3q+9pU4xpcsT8vqT9pVlN7li8vkkZvvb/AOJW+7WFqMCXXm/w/wC1/darElxHLJsR9pjXc3z0tts8tt7/ADM/z1vymXunJ3Vvc2N9+5fipY9aQt5Nzyf9muh1TSIREZvJ5b/vmuHvrhLe+l2HaVb+5T+2OPvF3UrOzumEyQ/O393+KsS6tZIpG/dbQtalnqyN/rguV/8AHasTWcN4vyP96l8RfwmBBczW3yDpVmZDfL52V/8AZqs32iuql0f7v3/krMVnhk+R6kv4gIeFsZpXm81drJ83rVvEOoxAqwEu3G31qpPC8LlHGMUAMf7xprLnkU5m3UgbdzQWLHhm4NKykmmKu2lrQCzZt83rWhbs6/I7sQyVm25KkfdrRRt1vn2qYmMiO5kf+4p/2qgLSN8vZf4qmkjRW+/j/Z2VUkYLnY+P4fmqZS5vdFykjNs+5Iuf9qmtJGFZGT7v3KZCybmR0zQ8if8A2NPlgWQyfN8+ykoop/ZKiFdR4LXzNPZEG5t9csxwOK6TwT++tXtv9ujmFU+E3bpfLj/cyL81ULqHZ8+f4P4av6mqW8ImTd9/bt2VQmk8tc9C38NHMYSPX/8AgnuGX9qjRAX58i93L7/ZZK9Z/bp0BfEf7VHgXS5Y1McunW6yMy5wBdTH+leU/wDBPuNP+GqNCl3Ek2t71/69pK9+/aY0yLVf2t/CaSSOBb+GfPdU/i2zy7f/AB6v2jAP/jRmL/7Cl+VI4qq/2len+Z2OqalbRyXDp5ezd8it95dtcJ46l/tC8FrNMrMvz7pH+7W3c/6l3MPz/wC1L95mri4rXUvGHiZLdJt0K/K3l/Nur8Gp0+afKcXtJRlzGl8Pvhjc+ONcie23SwtLsVdjNu/2v92vuf8AZ1/YMhm+HWreNvFX7nSdJ0i6v7242fLHHHG0jLu/u7VrL/4J+/sn3/jnXLCyTTbrbcSr5q7dvlx19vf8FZJdB/ZK/wCCRfxY17w9M1vcXHhePSIm+6/nXTeSu3/gLNXv08PGjhbnLF/WMZFH8wvinXE8VeKdV8QRzM4vtUuJ4mZv+WbSNt/8d21nxx3KyKidP49taGjaO5tIS+07YlH/AAGrnkwwzDemAzbdypXGfSc1jpPgd8Kdb+KXjTTvA2iWEj3mrXUdrZqq/ekkbatf0rfsFf8ABN/RtL0nQbnxVoNrFpvhfSbe3+WD91cXCr+8kVf96vzA/wCDaT9l3RPj1+3NpureJ7BrjTPCekzavIpT920i/LGrf8Cav6CvjJ4ts/Cfha40Dw3bJbwKnziH5fM/2VrsoyjGJ4eOqTrVf7p88ft0fHt/CPhW88JeFbxbG12bHaNdu5VXbtWvy48c6ii6lePM7YkuN67n3N81fW37ZHizUtSkENtdSPEr73Vl/vfer498YWc17q8t5bRrsb7zL/FXNWj7SV5GMfgM6eV5tFmkVTuMD4BGDnBrj/DPh+FZPOd5LmX5l+5u+au503Sbi4EWjsS0k77BvOSS54/nWr4q8O2Hwst4rB5F+0yMyxRt8zK38Nfr3i7Up0MjyCUumFj/AOkwNOZLQ47WtYh8G2rTXkK/a2Xavz/Nt/u7a8W8deKNS1q+e5vLnarfLFCv8NejeLmudUhuHmmWW4kdm87b8qr/AHf96vNvEmmW1n5r3Nzt27W3L/FX86YiU8RLml8I5S5vhOWvoHkX532/w7W/i/2q4/xV4qmVja6bu86Pcu2P7q1qeKvESX15JbaVu2/daT7tcffN/AjyMzf3vvVxU/5Xsa05Rj7sjEvZnW6d33O7J93+7WLfiFVLybXb/wAeX/ard1Q7oykdsyLs+eT+9XD+PNfsNFtHgtXxK33mr1KEZOR004zqe6Zmu+KId
PjOyZX/ANquR1PxPPLlLPdjP3mrNv8AU7jUpjJK5x/CKgZscCvZp0Yx3PTp0VAGZ5GZ35NSRvtPXH+1TKK6OU0kbOn3SLGEHzf71aELPJD86NXPWMgjkXY9dHpqm8XG/lf4aOUktRK6ts37v7tacVvJIqv94/3m/hpun6akki/Jkr8ybkrYktPKVJoXXay7WWnze6YS+L3g0G1S4hlsn2v5iMu6P+GtuS1ez8AvZ3PmZjlX7v3vlrO8GyQrrH2Z9qbl/i+7urpvG32ZvCMt5Ci75Jfm2v8AdpRIkeY+JNQSOxld32t/drgJH3ylg9dJ4w1FJYfLR/vfermQDnLVJ00/hBhkcUKu2looNQopGOBxS0AFFFFVygFLHhm4NCx7l+em8sPSnHYmRaiuEjj2Inzfx1+iX7PmD/wTkT38G6x/6FdV+ce75sV+jn7Pv/KOJP8AsTNY/wDQrqv13wb/AORvjv8AsGn/AOlQPseDV/tlf/r1L84n50xk7tnf+9uqxDIkd0oyvy1T3t605ZnXPz1+Pnxso8xuTaw7TL/s/LV+L4gaxYw/Y7O5ZNyVySyOn8dL5zsfnfijluHKy1faveahM8tzNvbd95qktbpLZV/vVQbZjilE0i9GoCUTRuNRnui6Oyr/ALtJa2fmSCGY/wDAqqQLDK338VZW6VF+dvlV6v4RcvuGzpOmoI1m2LuXd8tdHosyKqF5vl/u1wn9pzRt8kzfLVjTb52z515Mw/uq9EdjLlmet6HfbrpfJvI02t/FXsvw18R/DTwCn9q+NvE9ncTL86wxv96vlttQ0q3tWmuZrpfk+SP7Rt+asDUdViumzskd1+6zS7qzlGXQrlj1PtLxt+214CgD6boN4qRqnyL/ABV5r4w/au/4SBX8m/bDJ91flr5wS4ST/XQ7v71Ss1mql0jVWap9n7oRjE9Sb4oaM159tub/AHDbu+/96mXnxSs7iZPs1+sQb7+1/vf7NeTNePHI3yL/AHals3825Akfcn3sGrgOUTuNe8dPeXAh3rs/urWReeJPM3/v2X+H71YU198v8P8AwGo/Odl42/N81Ll/mJj7ppnVHbKbtr/epJNSmjt2S2mbe1UIrpyu/f8A8CqW1vPnZH2tTCXunrXwhjabw/Dsdd6/e/hr2z4c3X2dvM+Y+Wm99qbtq14H8FdSddLkskmYlbj5VZv4a9r+Hd463ywpuCTLsb+GvLrR9/3jz8RH3j7x/Y58ZPJDZwojSIr7FVV2tur7g8ByTXlv/plyqIy/Jt+9ur8z/wBlvxIlrqDw+cybvL+WOVvmZW/u193/AAl+LiQ6ampWEO9422SrJ8yr/tba9LA1vdtM8qpTlL4T2zwPpOsf29CsNyyQrcf6xn+8v+1X4W/tqeCL/wCEP7eHxg8GeT5cbeL5L23VflVobj94tftp4d+L81nqx/sG5hieaJt9xJ8ywsy/3a/M/wD4KsfC1Na/aw/4Wp52+HXPD9vFcXjRbfOmh+Xd/vbanNPZVsM4HpZTKdPERTPj/UtSv9vzp5RV9u7d96s+4k/cu77Q7fMPk+9Xd3ngHUNQuBYaPA1zMzf3P7tc78RvDmp/DB9Li8d6NdaU+u2bXmjNfWrJ9qt1ba0kO77y7v4q+Jll9WpG8In1Uq3s5ayOcvI5reUvs+Vk3fNVNm2sN6b1hXc7N8qtXG+LPjpZ6XI1no9q1w6/K8kn3a838Q+PvE3iOeRrzUpFjkP+pjbatdmFyWvUj7/uoyliP5T03xJ8WtB0e4ltoU+2T7tu2F9yr/wKvtn4mXIuP2LlvHULv8K6c+30JEBxX5j6VuOoR4Xdlq/TH4s7rf8AYdIjxlfCWnAflBX9BeE+CoYbJc9jHrhpf+kzPpeGqknhMe3/AM+n+TPiOz8aTNceS9tlFl+WRq0rPxwin76oN+3bXC/PDcNs8xPn/ibctXoWdpNiP/tV+OyyvDVN4nx9PGV6fwSPRbfxNNeL+5maV/4/9mqlxffapPs3nfMrf3v4qw/D+pXNvKqCH5G/5aVZ1iz8mZrm2mwu/wCasKeS0KctDSpmVWUfekdn4A8J+M/FmoJp/gLR7rVb9tzxWtn80n/Aa0tY1D4x2M7+Htb0HWra6t5dz291YSb93/fNcx8M/iN4k+F+uWHjzw9eSQzabeRzqyvs+7/u19p33/BQ8+MPDFlr1neTXVzdf8fVrbwK0kn+zuZflWvXwPC2VZi7TfLI8XH8SZnltpUo3izK/wCCd/7QVj8F/ii/i34u6DeaUsem+UuqX1u0UTKzbtys1fTf7Vn/AAXI+Hvwz+Hd7o/wQ1ux8VeMb61b+wbfT/3lrp7f89p5P9n+Ff71fFnxq+LXj/41aLcaV4h1W1s7a+t9iabY2+75d3yr81fPHiX4d6x4Lwtz4emtrTbuVvIZV21lmfh7Qy+rHEKfNB/ZO7L/ABEx2YUvq8klIdrXjDxh468Ua18SPiJr02s+IvEF011q2pXTbpJpv/ZV/wBmqMbzQ6kroihG/hb+9SRxPGreS8bPv3f8BqPd/piQoiu9aRhy+7EzlOc580jo7hvJ0+W5dG3bf4q5fRdceC7ZN+5d/wDC3zVv3myaw3o/3l+fbXCrcwrfMj/K+9vl+7tp/EaUdz1Tw74k/eJDI/y/3m+9Xb+H/Ej42CZWeb5v7teLaHqnkyBw+3/a+9XZaPrUqtsdGdtn3v4aylR5jo5uU9e0vXHuIUm+aJWdv93bXV6D4kd4US2mjRG+bzJH/h/iryXR/EMMMiohYMybnb7y7q6PR9QgaFPPfJZPnVX+VazlR/mHGp72h6rY65cySCF5o3Vm/wC+v7tdp4MvzfJckk/Ky/J2Xr0rxvT9ciWZNkylFRlb5a9P+EMolsbt1nDqzoUx2HzV+geDtOS8RsHLyqf+mpn1/CdTnzqn/wBvf+ksl1C6a31e4isZ1QGbfcFV+YjPNaunatuuEdNuxXbdI23/AIDXMa1Kttrt3LNtIaZwGZ+nPTbVq11S2jmX/Vh22s7bP++Vr8x4rp+2z7Fxf/P2p/6Wy3W9njpv+8/zO5tdV+0XDv525m2t5i/8tK14dUhtLj9zC277yNv/APHa4OHWnjYPbPslmdt7L81WYdam2pHDfyF403N867mr4zFUfd5Ynp4fGe8d7b+IGhuInR97bP8AU/e3VpW+tzMxhjm3bvlfy/8Ax1a85t751VJppv3X3maT71a+k6oiyeZC7Mv8NePXw7lVPVp4qMtUdlJrVzMu93VJV+Xav96qt9qUMcfnTfN/e8z5tzVi3F0i3m/ZvCoreZv+8v8AdqGS+eONdu3+981Ycvs+Y2lU5pFjWNQeNvOTy1Zv4Wb5l/3a5241KaaUP5y7d3zNUt/qSfZ5ZJn3bW3IzfN97+GsDUr6GOHY6KvzMj7V+6v96tKPNLcunUjEmvNUjhjcu8aDzflZX/hrm9evbaGxdH275Pm8tvm+aodV1yGON4bN2x/Hub5t397bXJeINcmvpjZpeRh1+ZpGf5m217GFoy5jmxGI5eUr69ND5zwQ+X+7Rfu/Kv8A9lWPJH9ouHTd
kt91l+9VbUNagupC6bf3fy+Wr7mWqf8AbTo0XzfumfdE0f8Aer144fmjGxH9ocupuwzJDb/fYoqf99f7ta+m3CR2apMm6JfuR/3a5eC6eO4KO64k+bd/drWtb6OTa7vJ83y/NVSo8vwkVMzjOWp7hcWc0cju+0/N/vfNWbqmmpdW/wAkPHlfvdvy7a2lZxcP9m8yFW3Inzfw1JDb+dK6eTuRlVX3fd3V50pSjqZxlCRx3/CMWat9pSSR/nX5qn0/w6i3Hmw+d8v3m27mbd/erp20/ddeT5Me5m/hrS0vRdzNv+R12/Ky/LJSlKIcq5eWJiW/htI2/veZ/eX5t26rj+GhZt52xcM/zbq6eGzRdjmRS2759zfdqWTTpreGSZ7aMvN8rrG+5VrQipT5TgdQ0Xy4dn2bZ87b1ri/EmizKsvkptT5vlX/AOKr1PWh9njWGGFVZk+6zbt3+1XEeIoXmjdPJ5b5vl+61XCc46nn1ox10PGddtPszKjovzfN8rfLXD63ZvNcPNav8sb/AD/P96vSfFVi6q/ksqCNvkbyvmrktU0lJG8t/wByW+438Ne5h48sOZnzleO5xMNu8f79PmG7czSfw1oafcXUm77TGzL/ALX8VWptNmWTyblFYNS2Mc0jvbTOuF+ZW3V3y5ZHDL3eU0LG1875NjRIybt275VbdW5odpc3UyQud6t/y0rN0+3DKba5dj/fX+Fa6rRbHy1S2hZW2/d2p92vPqctM76fvHsXhG1nt/hGlrIxaQafOCT3OXrzq3tYbWNfs0G12RW27NrV6f4ZTyfh0kcZ+7aTAZPu1eeXkUN1dDzplj/dfKzJ95q/W/GOi6mS8PSte2Ej/wCk0z7LN5RjhcL/AIF+SH6TshbyURiWb5W27VWmx2pt8Q7Msu7aqp97/eo+0Qt8kM2x2/h2/NU8Pk2wKJNJuZ1bdtr8C+rxjUPMp1I8vLKIq6bNNtd0V327kZUqG6jLwjfOzxsnzqvy7a0lj8uOWG26/K3zN96s273xsYUuYyW+9Ht27aylQ5feKjWjTK0kjwyJ86o2z7u/dVaPVolYpCjff3f7zVV1K8CSMIUX5trP8vy1lw3ztM2xF2M3/LOuqnRjL0I+uSjLQ3o7raxuUkVE3fdb+Fv/AGWvNfiZ4k+2axtd8RKjKu1PlZq6nUtSh0/TZZt8jMyfIrJ81eG+NvF1zcXU1zNKy/3Pmr6HKcHGnKUjws6xk6lJQKmt3TzedNbTZ+bbtrhda3rI3zrvV62rHWHaN33s27+H+7WJr0yTS+d/6FXvQPmZGb53+3y1Tw3SRyBN/Lfe21n3Fwkcfyc1TivEkmZxuDr8u3dQUavijXktdNeKGZt7LXBzSSSSNMXyzVratcSTN/u1msjt1StBxlcrZOeXY1Zs9VubOZXSRtq/w1C0Pl7eflamsh27kWpkafEbsOuJeK6TIuGqhq2n+SouYU+RvustUPnUVpaPqkKt9mv/AJ0b5V3fw1IuWW5mxO8Mnmd60murbULEo8aiZfuNT9R8OTAfarSZHik+ZdtZcgmtpNjqytQP4huHVvnopWbd2prNjgVfMiwUYWlooqAHwLubIq9bsnl799Uo1/5Zn+KpoWRW2P8Awv8AxUTMZE8kjK3/AI7UEyoy79n8dLNJtb5JN1I0u77+3/gNARIoup+tNkG3kvk0rYx8lMf7xoKEooooNBH+6a6T4fyJFJNvT/gVc2/3TXRfD+VFu33pVx2M5fAdTqFv5kO9H3fw1k3Vu/nP/s/N9yujms0ht12bfm/iWsi6hdm3u7ZpfEc56t/wT+jK/tSaAw2qDa3vyr/16yV9O/HpYNO+OEXiG5jUiPwnFBGf4gz3M3Svmj9gGJ/+GntCfbwLW85/7dpK+i/2tL/7F48sI1KqZtJjBO7BIWWU4/Wv2XB+74E4v/sKX5UjixH8dehgnXLm+mM00PyLEzvGyV7L+wP+znrHxe8ZQpJpUnk30qv+7Ta0a7q+cobua4ki01LlovOlWKWRX+ZV3V+1H/BG/wDZR03/AIQ3S/F1h5awwzxw/f8Amb+KvxvLqVKVTnkeViXJU+WG59ofsb/sX6B8FfBNtf3kMYnmtV807fm21+WX/B2d+0XBq/wO8N/B/wAI3Ey2OqeL4kuvLuP3U32dWb7tfsp+0p8X9E+FXw51C3h1SGG5WxYBWfDKv3a/mq/4OAPiJ4Y8WfF74b+A/DepXReGyutUv7WS682NZGbbGy/71d1ScqtPnn8jvw+Fp0K8Yw6fEfBun2M0caP5KudnzVKghkmCX9hu+b5dtaVnZeYq/N8rfw/3qsx6LukCI7b91cX2z0JW+E/bD/g028Gy2E3xW8XxQxxQNolna+dt3SRs0jNt3V+iP7R3i5LfNnZzKFj+RGX7tfnJ/wAG0PxUtvBfgX4teCby8hRrrS7HUUZfvK0bNGy/7vzV9gfFL4oWGrXks1tC0vnL8i+V/wCPV20fePDxMeXlPDP2gpvtVnNqt7M32nfsVv4VWvEdB+Eut+LNW8mzs5EST7snlbo46+i7vw3qvxA1SZHsNsTf6pl+Wovjd8Sfh1+yX4PhtoJIZvEF9Fs07T12yTs2370n+zU4ipSpwvMy5XzaSPkrx3oy/DX4ny6XcwmUaVcW7yR8DfhEcj8a888ZeNte8VeIpvE+sCNrmZ22Rt/yxX+7XQeJfFuteObm+8X+IJS95emSSYsPqAPwAA/CvOtU1CG1jm/fZX7rfP8ANX6P4z1H/YXD774WP/pNM1+ymyO8WG3Vrm5uYUXbu27/AJa8O+KHjD7drDw2c2yPbtWFX3LurpviR48RbR9K0253vs2quz5dv97/AHq80j02a+unuXVvm+bctfz1GtKvsXGXNuZt2yXErbLrG5t33Pmps2ivGz3OpXKt5f3P4f8AvqtSaztrWF5rmFTt/wDHa4jx14uKwulteRpDv+eT+9/s1vGjE2jT5pmP8SPF1nZRultMoVvmfb91a8R8R65c61fPLJMzIG+TdWh438X3OuXjwxTN5StXPKMDFe9haHs4XZ7VGl7OI1V3U5V20Ku2hW3V2cqOgFXbS0UURAkVvmyP4a3PDt1I0gT/AGq5/cfu1e0e+eGZE3/xUpRIlE9Z8O25WFfuhvvL8n3atX1h+73iHd8vzSfw1neC9U3xo833W+Wusks4bhfvsibfkqvscpjKJx1nvt9W+0oigq+5GWuj8aXv2fwSXTbhn3PJv+ZflrD1axfTZvMhh+VX3basa1Nc6t4BvLOGHbtt2dt38O2oJ9n7545qN295cM+eP4ahjh82THrTWOFr0r9k7wn4S8dfHXQvB/jOwkubC+uGSeON9u75WoqS5Y8x1xjzaRPNmBXgiivsX4lf8E6NB1Oea++GPiGSwLXDCKx1D5olX/erwjxj+yR8b/BzO914PmuoVf5prH94u3+9XNSxmHq7SNp4WvS3ieY0Vf1Hw7rWmStDf6ZNC6/eWSJl/wDQqqfZbnbu8lv++a6ozRhcjop3kuv3kxTeAKQuZCsfmz6UlFFBQrNur9Gf2fP+UcSf9iZrH/o
V1X5y1+jX7Pn/ACjiT/sTNY/9Cuq/X/Bv/kcY7/sGqf8ApUD7Dg3/AHyv/wBe5fmj85k+8KGG00lKx3GvyL3T48Siil3fLtqSeZCUHPeiigdkSK3ylD96mD5l2fw0csv0o+98iD5qBRHrzt/2vStKyVLWHf8ALtV/mqhHCjf71TXV4qxeVC/P8VVzESVxNSvjeXG/HyDov92qrOQ386WRm3mmVIy1bs8ak72pl1Ih27P7tQq2T8v8NDNu7VXxFcrF2D1NWbf93C29P4Kgjbc3z/eanzSbcIjttqSZIduRVxu3UrNlV2cn71Vy20kUsTMrbw/NAFqbfCn38j/ZqLzAv3Hx/fqNpy2Pm6U3zB/cH50Adf8ADXxA+l6wltv2pI/8X8Ve9+CNatm1S28l2c7/AO592vly2umguUuU3Eq33lr234X+LIdUhhm+04dU2uu/5lrjxlPmic9anzQPrv4L65c6Lr9lfvMu2OX5W2/LX1V4N+LFta2ahLnaWdvmZvlkX+LbXwT4J+IlhCI3v9VjhRf70qqq13cX7XfwF8AWq3/ibxrFd3EL7fsNs25v/Ha8b2uIheMInlexq8vwn2w3xo+15fTUkb/Zjf8AirnNU/Zr1v8AbmuLn4Y6V4wh0bxxHpdxP4Ih1BP3WpX0a7ltWZvu+Yvy7v71fGXij/gsB8OPD6TWvwy+Gt3Kyrtgnm2pHt/usrV5P8Qf+CtX7SHjG4SbwTBZeGp45d9reaeWaeFv4WVv4Wrow9HH1JxlKBtSw+JjNTXun3/+w/8A8E6/ivD8QbzxP+0/pWpeCfD3g+Ka7+Jeva5b+Ra6TYw/NLGrN8skkm3av+9XwF/wVJ/bw1r/AIKA/tm6n8adA03+zvBmg28eh/D3Rdm1bPRbf93D8v8Aek/1jf71WP2pf+Cq3/BRT9sX4b2HwS/aP/ar17XvDdjbx/atFjSO0ivmX7rXLRqv2ll/6aV4HDEjQ/PCq/Jt217kIxjK6iepzSUPi1OV8V/NqDzJ0Z6y62fFUSRXHl7MCsatTSnIuaDG8mrQqn96v0m+Odwtn+wnNPKdoTwppuSO3NuK/OLwbZ/atchR3xtfdX6JftJyGD/gn/eyKenhTTO/+3b1+veGH/Ipzz/sGl/6TM+v4Y1wmPj/ANOn+Uj4WWZLiH5HXbVmxjjmkZEfdtri7PXpIVMPr91j/DW5pesfdSN/95l/ir8dPipROphRFwmdyrWza2qX2nvbTf63duRq53T75LhhMnH9+uhtbh1nWG25Vk+8rVpT3I5oX5ZEcMf7uXSb+HI2fe317j+y5+xz+1T4806O58LfDe9bRr5mlg1CC3Zo9qqzbty/d+VWrxq7s+ft9sm14/4f4f8Aeav3K/4N7/8Agqh+w/4c+Alp+zN8bfENt4V8ZPc/2fK+qhVs72Nt3lMsjfd3bq6cPjfqVWNSx52PwksbS5Iux+Qmv/tTeAPh2raR8P8AwaPEGt2dzifULlP3Uckbf3f4vu1+1v7J/wAPv2IP2zf+CW/jLx/8btM8Pt4gtPAd9qN79h2pc6XD9lZlby/vKyyKa+Mf2TP+CWi/DL/gqB4ttfj54GhvPhvdeJLi5t9Us4Fls2t5rhvL/e/dX5WXb81fVX/Bej9mz4Z/8E7v2XNc+NP7Jvh6405/iFpyeCtUt7eTNpZ290dxuN277zKrKq0sfmlfMa0Vz/CceCy+hgY88Yb733Pwc0FU/su1/wBJmcNEzeZ/eX+Gq02oPF4khttn+si+fbWja6Wmm2KQ9oYtm5n/ALtcbpOqTap463o+7a2xPn+7WcfePZR6T/rNNJ+9t/u159eLt1B3TdnftavQ7dvMsf3L7tytv2pXGXGmus0yb87Zdzt96spbG9GPvO5DY3j+YUT5GX726uw0PV4FjVJpm/3o65SOF4mRNnzrWlpsvk3B+7/wGtYxHU909B0nVE2q8L7i25dtdHo+oTSMqI8bIz/e3fNXnljqvlwhERmP8ddBo99DIqwsnlbfut/DUyp/aM41OWVj0Wz1gOyfvtu379ez/s8zRz2epvHISC8J2kY28PXzrZ6ttkZH2srfd/2a91/ZRuFm07WVA5EsBY7s5yHr9B8IqfL4gYR+VT/01M+u4Pqc3EFJf4v/AElkuvarAfFl9aTP928kC5X7vzGrH9uWklqnkuqP9193zNtrhfHWvmx+IGrRm5XZ9vnBU/7xrMXx4kcfyTr/AHdrV+a8T4WU8+xTj/z9n/6UznxOLgsZUj/el+bPSrfxA8PmbH+Rl2o0f8NXLfxZbxqEeaNGX5V+626vK5PG32iGKf7Sp/2Vaj/hLv3b7IV+V1+6q18tUwfNGXMa4fGcp7Pa69C3lvNNGV+9LGv8Na0fiKS1d4d/3fm2xv8ANXiOl+LplLbzJ8z/AHWbd/wGtuP4gJb3D6g9z9odk27m+XbXjVsHyn0OHxkJQjKx63/wkieWfkkiaHavlt825f71VdS8cWyyM8J2Bd3lRs25tv8AtV5n/wAJ07Ls+0sf3XzbX/h/iqje+NvMRJhMpSNdr7vvba5I4Pl956nd9eh0O/1DxY/lvczXKoNm7y/4v93bWPqviJ2jR3mYq3zOy/K1cNeeLnjkFtbOvzfL9371Zd94svW3Qo6u/wB7dv8A9WtaUcDUlyyPPqY/llY6PX/EE2353w+7564nVPFFzud98OPu7f4t1UtW8SujN51yu9v7r1yl9qzztLDDMqn7ySfe+9XvYXCy92MkcGIzE2pNc3SNc3L4Xf8Ad3fxU/T7pLiNn3qvzfeb/wBlrkpr2ZW2QuqKvzMv3t1bGm3k0j/I/mps2o2zbXrewjTieX/aHNI7OG+huoVT7Mvy/fVv4lq/a6gkeZHTytvzbWTcq1zelyTeX++uW3b9v3K1luLqTcnVt38X92s6lGEYF/XJc3NI+kIdQeSRHTdsV/7n3lrQhkST95cou2Nlbdv2rXDWviCaOEO/mOjPsSRm/eLWzp/iAyTfutzRrxukf7zf7S15VbByPbw+Mpcp2NjdObjzvJhd5v8Almvy7V/2a2LWRLdmk35bb8y7N23/AGmrkNP1q5dv+PlVWNvvN/DWjY6lbLIs2xk27v49u5qwjhPetLY6Y4vqzrIblLqM3CQxu6p/47/C1VppJWZzbNhmVm/efdZqrafqELfvPtmw/wDLWqV9qz3Tf6HtUSfdaZPmpRw3vPlFLF+7qUfEW+FWzMrlov8Alp8vl/7NefaxJ5Nr5kk292TZ5i/w112qXjtepO/l+XH/AAt/y0rn9WgSWGSOF/MCuvyx7Vruo4flPNrYiMpSZwesWsMl0z+Y37yL5N38X/Aa5rWNOQRlIbZijbmdv4d38S13Wp2e2Ty327V+Xy2+9/wGufvtPnWTYifOzs37z7telGjGR5NaS2kcLfWqeXl+GX5tv3WqmbflEhRc7d3mL93dXT61p/2xm2Qxuyt97Z96s0aX9on/AH0PzRv8+19u1q7Y0Tz/AGkuYk8O26bd95w6/wDj1dVo8CQzfvkY+Z/t7flrJ0fT5ftHzpy3zbfvMtdNp8KW7ZebJZ
srtT5drVjWw/NO5tTrRien6CVT4a5hQ4FjMVU/8Crzu786Vvtlzt2/KrtIny7v9mvR/DP/ACT5MNn/AEWXlvq3WuN/s3zMJC6hfv7mT5a/X/Fmk3kuQX6YWP8A6TA+yz+rbC4N96a/JGVHap5i+cjfL8y/JtVv/sasLN/pMW+bdGyfLGv3d1LNYTNMtz9pVnVNvzfe/wB2mLb21r8m/wCVvk2/7W6vxGpheXU+a+uTHTfafLmlRGQLt2M33W/vVR1C+hWH7MHb5X3KzfK26rF95253SFX2vtl3P/D/ALNZWrSwq+x0Zzt/5af8s6qnhf7opYozbr7ZIpSGZdv3fm+6y1HY2s00n2Ysyfxbf7tTxxwwxvsmX+86tVm10+2kk89ptkzRb9rfdrf6qowI+tHMeOmgs7H7Nv3vJuVPn+avAvHGmvZ6hPbdXb7+2vZfih4mtrbXrPRPOjQR7mdpP4mrzL4hx2010LxJty/eZY678HTjTieRjcQ61TlPOILx7Wb+L+78z1T1qZ5GD722t/CtWNZmh8xnRPu1lX115kY2Phdv8Kferq5eU54lO4m8z50+U7qpSSPv379u75qmvN67kfd8v+zVCQ+Wdm9tqtSFH+Ukm2Mx2Ozbf4abHEWX/b/utUDSN5Z2fK38fz1Pp7edJ9/5v9qnGRUv7pXkXaNjpwr0zenmBNny1b1W0eNVd3/2flrP+dGHyUviCJJJb7t3l/NVdkZD861btZOf3xxVqS1SaFUoDm5SrpOsTWVwm58p/ErVtalp+la1Z/abOZUk+81c/eWZtmHerOl3H7l4XnxVRlylSj9pFCaF4pWhz92ihv8AXH+KijmNApsnanUMN3WpAVWx9/mpF+Zh3FRqu72pyBOfn7f3aCZbkzK6MPN5Vvu1EXOA3y4oZn27KYx+bPpQSDH5s+lNf7ppzL/t0BS1BoD/AHjSUUjHA4oAWt7wDIV1FzvxtXdurB74re+H7/8AE48sorGRNvzUES0iehra7bOLhtrP96qV9bpHNvG3C/w1oyXUMdr5OzezP8i/3ayb5nf7/wAzM9Bzx00PVf2EFiT9qHQvKxg217nH/XtJXu37ZMhh+IOlzl0GNGAjyuTuMsleA/sF3DyftU6ChdTm0vdwXt/o0te8/tooJPH+lh1ZlGjjAX1M0lfseGbj4D4u3/QUvypHBiJ/7RfyOF+HSwXPiK0e5+ZvN3bdu77tf0Of8EmNF8WP+wRF46+GTWf9t3sszWrasVSFpFXbGq/3a/ni0OH+w7q2uXfyp2dd0bPtVVr9ef8AgkZ+1Lovw5+DlnpX7Rmq3lh4It5ZLiy1KO48uO3ul+Zl2r975a/FKWIq0o+5ExjThKr755/8QP8Agop+0V8XNU174aeJ/A1jd6w3iCazv7e6vGVrdoW2su6vyP8A20virJ8YP2zvEusJCttbaWy6Xa28b7ljWNfm2t/vbq/XHxB40/YM0z44ax8e9N8bX1zDea9qWoy2MkWzdGysytX4h2mtW3i74j6/4th3GLU9burqBpPvKskzMv8A47XoSceSJ14eNRc0pHTxrlvsybXP95VqSNblbxETckX3tzfxVNaxvGq/PlW++1S6bp/2zUmkfbu/2nqfdLlGMT9LP+CEeqXLfFDxLo8MO3+0PAcyyqv3ZFWZfm+Wv0R/4V3c6lceYnmBJIvmj2fdavz+/wCDeNbCH9ojW7C/v42iX4fag3k7vu/vFavr39p79udPDX2zwB8FvLmvIf3V5qkafu7fcv8AC38TVvLERpxPGrx980f2hv2mvAH7MmkvonhLTbfWvFs0G2K137Y7P/ppNXwX448ZeJPHXiS48YeMNYk1XU7p5HuL6R9yx7v+Wcf91a1PFFxeXuoXGpalqsl5f3G55by4ZmaRm/2mrzjxJ4iNnILCwm82Xdu+98sa/wB7/ar53FVp1J3kClGXwnS/axD4WlvmXaI7WR8DtgE188+MviBqupXFxbWyKieay7o5f9Zur3eOaeT4a3MzMryHTrg5HQnD182yxpYyHzkV32bmj/h3fxV+veNMZSyPhtL/AKBI/wDpNMdpOSsVF0r7Urpf3O2JdzPJC275qp6tfW1vG3kusSLEy+X/ABNS63ryWy744eG3bI9//j1c3q11NJbx3+pIrK3yo2/btr8Lj7vum0Y8xl+KtecWro77Itm7az/M1eF/Evx0+r3j2NmyhF+Vttbnxg+I3nu+n6bdNu+622vMCWLfMcmvYwOFly88z1cLh+WN5BSMueRS0V6vKd4UjLnkUtFSA2L79OpFXbS1XxAFPt5HjkD/AO392mUm4qwokB3Xg3XHjm3u+dv3FWvTdP1B7yEb33btv/Av9mvDvD999nuNm/FeqeDNWeaNETafn/ielH3TmqROh1TRzqlk3+h5K/xf+y1k6bCn2O50yb5PMRo/ufw12tjJItizvt+b+Gub1zT3t77fZ/IG+Zt38S/7NXL+6KmeAataGw1Oe06eXKy177/wTr+H2s+K/jta63bR4ttLs5rq4btt27a8d8bacbnxncw2y8SOrbq/QX/glD8GrCL4S678S5kbfeaothYfL8skca7pPm/3q8/Mq31fCyZ6ODUZ143PRpPDMdrbx74WYb13sv8ADVy3sZrSGSZJt6/3VT+9XoWoeDXa+GYY1XZ/D93/AL6rOm8P2dqpt0tmMi/LEv3lr42o5z5bH2mHnCx5V4k+HngnxJH9m1Lwlpt5u+aVri3Vmrz3Xv2T/gVqrb4fAclqzbt7W9wyt/3zXvl/oLqqbLZUSN9u1v7v96ua1rTYYbh0O6Ir/DG27d/drSjjMQrxUth1MDhanvOJ8zeJv2E/hdqf7nRPEmoWMn8XmKrrXlnjL9hnx7pe+fwxPb6lEqfOqPtkZt3y7Vr7M1DTUt5GjdF/22X+Kov7Ff8AcokLI+zd5n97dXVRzTEwd5S0PNqZJhpS9z3T84vFnwk8eeDLw2fiHwreWx7eZbttrBk0yeFtkylTuxX6dT6PBHIXvLX7RuVfluolf/0KuW8Qfs//AAl8XGVNb+HVmC3zy3FunlSM27+8texRzilKPvHm1Mkr/YZ+dL27xln2fdr9F/2fQf8Ah3GgPX/hDNY/9Cuq838YfsGfDfVpJpvCWvXmnPu/1Nwu9F+X/vqvbfA/gC58C/sXXfgBrlJpLbwrqkQkj+628TsP/QhX7h4K4ujic3x/I/8AmGqf+lQPe4TwtfD4yv7Rf8u5fmj8vyMd/wAqGXdir13ot5azPbTQtvjfa2KgksblIw7wt/wKvyc+GUkQUU9oXU/PTSpWgsb/ABr9acu/HFGxvSjlTQT8Qd/nzQp2t8lJTQS2aCiTzvL+6cUskzSSM/8AepjDd1ooFyoVvmYmkopeWNBAsmVfikX5fn2Zpu75sU7+D8ar3TQX5Nu9P1pJm3/O4o/hLUYVvvmjmARVfvT9ybt1MBxyKEVzUi3Qr/eNCt/B0/2qMfwfxUHIBWq+EXMxY1zz61d0nWtY0kv/AGbctGZPvtVEsW60cqakSjzF/UtZ8Q3Ehh1DUpnPdTJWfuPIY
8/3q0bXUklhFtefNt/1Tbfu1YXQnvJPtNnfwzIrfxPtb/vmiPILm5TH+8n3/lrY8OafG0h1K7Rtkf8Ad/vVfjsfD1nb+dqUMbO3/LOP+9UI1I3G2GzTyoV+7HVSJlItWcj3UzS/wt9/5614dn+p34+WsjTV2zfcU7f7tdFY26SR4RMt/tVUfeI+E47xlvWSJN/zL8tYVb/jtUjvhGm3b/s1gUG1P4TqfhfavJrC3Ozcq1+gn7Q9m17+wXdWi9W8LaZ+jW5r4K+G8f2azuLx32BU+Rq+/PjBcRN+wv8AaZMFG8KaWevqbev1zwufNlOd/wDYNL/0mZ9dwt/uuP8A+vT/ACkfm9d+H7yFm+T5f722qhW5s5G+dlr0aSOzulKeR/uMv8VZF54XS53ZTYrV+S8sT4mNSX2jC0zxLcxzLmb/AIE1dr4f1hJsbHXaqferi9Q8M3OnyF4UZlX7tLpOoXOnsu+baq/7dSVKMZHsdvdQyWYh343L/laqX9tYXUL2bvsX7q/3qxfC/iK1kgRLl1Zv71b9wvnfvkO7zH+Zv71ZmXwnNeI/2hvj9baTF8MYvjV4sXQYbhZYtI/t6byEkX7rKu7+GvozUv2v/wBrf9pb4S+Evgh+1L8Zda8ReD/DN59o8L6PcN8vnN8qyTMvzS7d3y7vu18vfEbR3juItbttqmNvnr9lv+CMvwF/ZC/bk/Yh8SfCXVdEtbbxjpOrW+o3HiSaJmlj02P5plX/AJ57fu/7VZVYxX90qpz+z90+Bv2qPhH8N/hH+yroPxOsPiFZt4q8QeI5rVPCKwN9pt7OFf3l1N/dVpNqr/er5T8AWV7f6u1xbDLj59tfv3/wU2/4JZfsjXH7IMfj7TfHepa94r8RWi2HwqtrOz2NIy7Wbf8A9M1XdX5z/sT/APBJP4tftDeKNUs/APhaS5n0fzP7bkvt0UFuse5mZmX+FvLatqco0qW9zjhWlL3ZqzPn2z0/UtLUQXkLIZIFbaybW21TtdD+0NI/k7R95K7/AOOXxWtvi58VG17SvAum+GNP0/S4dI03w/pbsyRx2+6Npmkb5mkkZWZm/wBqsfR9P/0V5JkUFn+796rjrHmZ6OFlzfaONk0HypPnf59/92o20ncrOm7d/u13lxoaIp875Tt+Vtv+srHk0J45G+Tb5j7nXdVxjyyNKn8pjW0bwrvmf+Kte1mfzN/+396mR2KQj54W+ZPlX71TW9juk+fdtj+5urblhI8mo+SoaMOobV2O+1f9n+GvoT9i68NzZ+Io2X5o5rYMfwkr5yhj2szp1b7m7+KvoT9hwyHTvEhkj2/vrX+Utfo3hNC3HmFflU/9NzPruCK3PxFRX+L/ANIkeX/GvWZLf4peIAjNui1WfP8Au7zXLf8ACWPgDev3KvfHNj/wuDxKqO2H1mdX/wBn5zXGMrxtsRMqv/LRa+Gz+nH+28U/+nk//SmePjakv7QrR/vS/NnRx+KppFCPCrKvzfNVy18RbrlZt7f3nrkFun8zZCG+b5mkWpIb65X99vZVVtrL5tfOVMPGXMXTxEonb2/iZI5N6TMFbcu7dU0Pi5BHs85TtfburjYdWRWT54/9U33qgTVJFjXhf73y1wVMHSl9k76ONnH3Tum8YTNIro7MnlbX+ek/4Sgbhsm+Vv8AvmuHhvnaUoX3M1WIdScrseZlCv8ALuSsY4Hk91HRHGc252MniR5FSbepC/f/AL1QXeuPNb/J8jN83365htYRoh5Zb5n/AOBVDcX03khN+fn+9V08HGOhz1cRKW5qahrEzfOdr/8ATRv4azrjVHuJNnyqf7y1Ukn27nRlLL/tVVWZ5Nv3t396vRp0fsnDKtOJoW949w33N38KyVv6PvjjWFNrrs2v81YGlwu8ez+NvuLXQ6LDcyPskhwsf975fmrSVP3TONSR0GnrNHsT5l+T51atu3t5lZEhdkOz59v8VYmmvcwxvDcpGdz/AHt/zVt6WzwyJNtY/P8AxfdkWuSVP3veNY1pHpl59pi2TPDsWTds+b5asRatNZzb0vGEWxWZf7zVl3urL5LI7qQy7kZU+7WXHq03nB4X2lf71aywvMdMcVyneabrX7tLlIWU7922Rvu/7VdFa6pHcxpN52/zHZXVfvLXmtjfQ7Yt8yt91tv8P+7XT+H/ABAkbfO6yRN8yKv3VasZYOPNsdMcZLuegx3UKrG7vIjt821f7tQ3jJJ87ybGV/k3P/DXOWurQ3O+Ge8kd/8Ad+6392lvtedo1SF2jf8A2ko+reRtLFR5CzePNqcnk+dt8tdibn+Vqz7y38uTGxoj95P7rLTmkdfke53Hb87SJ81VtSuP3aQw3KttlVWXb81dEcP7pySxRl6h53mHydv+q2/vE+7/AHax9UheFUme5Z/MTbtX+Fq3bmPzI5p/tPzq3yrWXNbpNcNN++RGVdnmfd/4DW9PD+8ccq3Nuc+ummZYv3KruZtjb/lamx6OskpdLPcf4/n/APHq6q30NJm2WtnxHuVvl+7/ALVX7HQ0hUbE3mT5Xk2bflrtjRgc/tmcjb6Pcr+5hdgVb5Nq7a39Jt3sU87e29Zf9X/z0XbWzb+H42f9yFKRy7dq/wALNWja+G3VNkkPm7X+fcn3v92j6sP6xE1tEhCeDBDjy/8AR5OB/DktXKeSkO9Emk+/95vm213QgZdLa3XJPlMB8vU89qw7rS/LjmfYqPD/AHn21+s+J9FyyjI12w0f/SYH2nE82sHgbf8APpflE5iSFGk3p/wL/ab/AGqpXCw7rgwpz/B5n95f7tbuq2qANND5bI332X73+1WDqUbyQumyNx8qxbXr8g+q9eU+O9tEydQv3baiTqHbd5/l/wALVkXF1iJcw7W/3q0NUmRoTDCi7V+aVl+X/drCurxI/Nd33vtVVVvurVxwfu+6ZyxRZtbhIZWjeCMrJ825k+7/ALNTLI9nDJePCr+TFu2/8CrPs9Qti62z2zD5FVlb7tZ3xO1qHS/Al68LtmOJtskf3l3fLWcsLOMveia+25jwD4tfFRNY8aaleJNytx97d/6DXOyePH1SHZ526ua8RL/rZvl3s/zN97dWPZ6pNDIfkqIx5TG/N7x0V9Ik1wz/AN77/wDtVmyfM4R/4f4akt7gyw/vPl/2v71RXi7tkzvhqCOWERk2z7jjlv4azry2dlb+9vq08iNMu/buX7lNupt3zpD/AB0ehcfdMqQJDnKfLup1vcOs6zBcU6ZnVnKbQWeqzb1X7/NOUh8p01vHDqlnwnK/wrWFdW6WuYXRlP8ABV/wjqyWt0IZn4Z60fGWgzW+L9Ifkk+b5aRPwyscuY2jkHzZ/wB6tnSbX7VAUf8Au1ntC9wq702lan0uR7e4/efdoCQzULWaGMpv+X+Cs+OTy2ztrodZVLi23oi7dn8Nc9JvWSguPvETM0jk5pVXbTU6/hT6qJoFFFFH2gClVj9zfik/4Bmj/geakBd3y7aSikY4HFXL+6Am3b82adRStyu+lEmQlFFFHKUFbfgWR49aV0P+zWJWr4P+XWEcpkrUkVPhPQ7xnaNfnX5f4l+8
tZdxdJGp/c4Zk3VoXF09yqwv92sHVJsK3yYXb8qs9X7kTmPYf+CfCNd/tU6TM6bRFZ3jr75t3H9a+jv2sLeF/iRptzJGCyaKNrHt+9krwL/gm7pQk+OtvrzqAWt7hF+bn/UvX0L+1dOIvHGlqJGJOmgmLPGPMf5q/YMNb/iBGMt/0FL8qRwV05Yj3TyLxJC8MUd4j/3W+9X15+xPrz/FD4I3/wAKL/VY47j+0pEsFklZlVmhZV+WvjvxJvvf3KIse1NvzP8AK1dv+zX8Vrz4d6pczQ3LM0d5bzrHv2t8rfw7a/FsLU/e8pnLllSNv4sWHif4I/Cb4haN44SO31fT9BuINsifLM0jeWskf/AWr4x+FUDx2jFPn+X5fk+7X6lf8Fndc8B+Pf8AgnHo3xpsIoU1vUtcs9L+0Rr800bbpJFb/d21+ZXgSzSPTo977N33l/iruqRpKXuHXhuaOHXPudXu3WqfPtZv/Hqm0d3XUndtquz/ACf3fu0kCo1vs8nC/dWP+Kn2sKW0YmdPMPm/KqvU8oe9sfYH/BK/Xtb0n4vanqWg6xJb3Fx4PuoJWjZlZo2Zd3/oK19D+OL618LWLPcvJmTc7t93ctfJH7APxCm+H/jjVtYfSftzzeHLiCC183aqszL8zV6pr2tar4y1I6x4nuZEaTa3lq/yR/7K15mOxHs5csdzzsRHm1iS+LvGl54g86bSt0MX8Ks/3l/2awk0ua4maNIcK2399/e/2aluLqztbf7Tqu1FX5ov723+7VW61x2sRc3k32O1+bZDt/eyf3dteZ78Y80jKNM62OBYvh1dW4IAWxuBkNnH3+9fLWueJvPZ7PTX3FX2vJsr6U07UIdT+Cl1fWsTQq+k3YVSeVwJB+fFfLFx9jtYXMyNmP5ot1ftHjRJ/wBh8N2/6BIf+k0yo6Fa8W2sYftNzNu2y7v3n8VeT/F34i+RbTQR3Pzbm2r0+9XRfEzxtbW9tNsuWRF+b5v4q+ffFXiK58R6q9/NwD9xfSvxjL8L7T3pHrYPD/bZUurqa8uHuLl8u33mqOiivoPhPSCiiiqjsAUUUUSkBc0+GFoi7puqO4sHj5TmptKu4rdZFuXONvyIKn09od3+kuoH93dWMvdkZe9GRlEbOGGKA27mtPVI9NkuGENyr/7VUprN41zvX/gNVGXNuacyGwSFJg6jNd/4C1aG4u0R3+feu35K88VvLf5K2vC+ofZbwfPj+41XykSifQWm3yfY98zrtb79YXibVPtEMiPeKnl/dXZuZqoaX4ge40N33r8vy1zt9qzzTtvdh8u16XNzHPHmiZGqNCNQa837ZVT5GVPu1+0v7KvwR/4VD+yf4F8AfYNlw2jR6jfyRr964uP3jM3/AH0tfld+xv8As6ar+1Z+094P+BuiOuNW1aN9SZvvR2cP7yVv++Vr93PE3hu2jmfR9NRktreJYLJd+7y4412r/wCOrXkZpK8LM9HAy5avNKJ4dqHhPzlbfu+X+FvvVjXGkuuIURQ7N8jK3zV6zrmivbzbERU/h2q3zM1cjq2g7V4hz/F935q+ZrSlGWh9RRrR+KJ5jr2n+ZI8c22It8u2T+KuK8QafCzK6eWHX7vl/wB3+7XpviTS7xZNn2ZRD935vvNXF65a2yrLZwzQr/DtX71cUf3nvLQ9iniPaQOEuLOb7Zvj2lZNzbflpq2P+tDvIrLKo8uRvvVoapa7djw7d6uyxTNF80f/AMVUEzIsaJMjGRdv7xa09pCUrlyjzRuV2s/uwvMrsr/uvlpF86HzIZodq/xs3y/N/s1ft5I2khTydpX5d33adJavPuNzeK4j3bVZ/l3VcanLuTKP8pg31rPJHhLZt2xt7ferY1hFtfgRq4hHCaBfED32SGiHT5rq3GxG2/eZWXarVo+JrSGT4V6tZiIRpJo12pUdBmN8/wA6/oDwE5f7cx7j/wBAtT/0qmd+Uc/1ivzfyP8AQ/Mbw34Xv9a1p/tkLHdL/rF/ir1vSfhH4YXSnm1iwjkTytyNt+7XXfDv4V6bp2lrrFzHH5S/N8y7dv8A8VXK/F74hW2lrJbabc/d+Vdvy/LXwq5IrmPxv35zPLvil4X8Daey/wBlWDRP/Ftf5a8/lsUWT5Pu/wB5q1/EGtSalcO/nMV3/wAX3qp29q903mCo5uY6I+77pUh0ma6/1Kfd/vUv/CM6kq7xDuFdLoul7m+40qfxNWtdR2dpb796jb8q/wC1VcqJjLseezaTc26/vrZh/tVXa3f7ipXX6xqkNwGTYrLWRb29t53qzfwrUy/ulRqGMUfhKVY2YZH92t9NIs2/1iferRtdC01o13w7lp8oe0Zx6283UJTvss+MeV/wKvQ9N8M6JcYT7M3/AAGui0TwfoMMqv8A2bDt2bd01HKTKsePx6ReTfchZto3fKtOGh6kzbPscmf4dy172sem6XZva2Gm2+1vmdmiXdWKuh3PiLUEcQ72V/4Up8sQ9pI8butJv7OPzrm1ZV/vNUUaxPJhn42/3a9H+MmgpoukxLs+fdtZq8+02DdMsv8ACv36g15vcLdj4ZmvId+9VH+1TL3Q3sF/1i7WFdFZ/u7Mv/33urC1y53Myb8/w7d1BnGU5GUW2sSetM3O336c4yd3rTaDaIoYrR5jr/q3pKKuMSiSNsHe/wD+1Usb7W3pxUC71+THzVL87Y+7/utUGZMD50gff1+/VmE/88+WV6qRqi/f3VatvNZgifKy/wAW+jnIlH3jc0k+Wy70+999a6Cx2La750z/AH1WsDS5EmZP3e7+Gt6SRILAyOmAqfJt+VmoEcL4yuPO1Zk/hWsqJPMkEP8Aef71SalcPd3zzP8A36seH7f7Vq0cPbfWsTX4Ynaw2b6L4RZF+9JFur7c+PErw/8ABOYyxk7h4L0jB/G2r401fUbbTbeGzdG27fusvy19q/HuzOof8E/ZbSDjf4Q0vb+dua/W/C582U55/wBg0v8A0mZ9bwmmsJj7/wDPp/kz89tH8WTW6t9pfctdTpOsQ3UKO/zL/tVwl/ot/p8jJMjUyx1K8sZB87Y/u1+Pnx3LGXvRPS2sbK8Uokasuz7tYmreEUjj3pD/ALX3ab4b8Xoy+TM6ru+V2rrPtFtcQ+dDt+593furSMiLTOK06P7KyTRpg13fhnU4bixa2mT738X92sfWNH3Ik0MOC3+zTNHknspt5mYL/d20EzNfxdpfnafKjorBk2oypX0d/wAEN/jI/wANv2vPD/hjXvEOpW2i61fx6dq1rZ3TItxD97ay/wAS7v4a8EvJE1DTVTzsPs+638VZPwD8aXnwk+OGleKoXxLY6jDdRbv7yybmqJ04VKckKXMf0QR/tP8A7HnxW/bE0fwfqvhLUNHtPBeuNomh2eqaivkQ7d0lzcNH/CzNtVa8u/ZO8FeLL/4qeLPA+hftAar4N8NeOfEN5YX8mjxKslxp7TNtVWb7rMrferxj4nfDvwNJ8dvD37QmleNtJ1uHx9YTa59jsZ1Z9L8uNdzSL/DubctdD/wTh+JXh74+ftHRaJrviT7NYSSzNpax/K00y7tu5v4
V3VxYiE48vIzzuWrWxPNLSx8bf8FZPgN8OP2cv+ChXiz4QfBjw5qFn4Y0XT9PTSnvl3fa28v99cK38Ss1eS6DYpJpYe5RkdXZWX+Kv1Y/bB+FPw9/bq8B+J/iXr2jx3njj4b6yunf2Lotwvm6lp8LfvJPMX+Lbu2/7tfmNoWr+G/F2paxqXhTw9Np1guqXCWFjcXXmyxwq21VaT+9XZGp7Slc78vlKVfkZl/YYZo2CQ8R/N+8rP1TS0ZmSBP7rbVrrG01GZN/7oqitu+9UV1YpcKr/ZmJX+Ja1jL7J21v5jg5NNmjbfs2/P8AJHt+7U9rZhd3nPs/irdvtLmjnPyb9yfMy/dqFdNhhkbzoWKqv3lrqjseLUqe9eJgLv2h7ny9y/L833dtfQf7F0AgsPEKZyfOts/lJXiq2KyZQ87k+9Xt37G9s9vZeICxB3SWpyvTO2TNfo3hR/yXmF9Kn/puZ9RwGn/rLQb/AL3/AKRI8K+OtqW+LPiTK5Da1Oc+nzmuJaPy12b2/wDZa9F+N1u0vxT8RyBE2rq8+5j/AL5rh7q3kVNibflT7zfxV8Xnn/I7xX/Xyf8A6UzxsdUlHMa3+OX5sypGeHdvRt38FMaQMp2bdy/eq59nuYY13ybfkqvH/rDv2/N97an3q8XlkRGUSLzkXr87LQziMN5O13k/h/u1JDbuyum/bt+41OmhRsOm7ev8TfxVyyjyyOynLmhzCQtNHGvo393+KpVuMQ+fs3M38NFrGkakPw33qGhRZEhO4q3zbv7tZSidEZe6PkuNq/uU+VU/h+akMjyN5+9fm+VPmpI4CqnyduPu/LU1tp+795sUfxbVWp5UORDHHt+d92V+6uyrml6XLfAbE+7/ABVYtbOa4m2eR975dypXXaDoMMcA/iXZ/c/iropnJXM/SfDLy5eH+FdvzfxNW5a+Gd02xJmlMfzL/vV0nh/wpDcSfaPI4+8i7P4q6Gx8IpNGNiqrt825Vq5ROL2n8pxNtotzCuzyWZv4NqfdrVTT5rfHnTbGVd3+ztrt28J39q0bpbMzNF83l/dqhfeEzGrzOkY8v7qyKzfM38NZ1I83wm0ef7Q/Vmv2nZ/JXDL8/wDdrAmv5vtR+dcbP7ldHfRzNDK8xZdz7drfw1y+pQw27fO+/d/dr6GOFhyeZxRxUuYlh1Y27Rpsk+b5vM+981bun695duNjt8r7trf3a5FZ0hU/eR/lVGarSXUxQIn3l+bctU8vjL7JrHHSjqd3/wAJQ8jb0mj3qisir/7N/tVaj8SPN5rpc79qf6v/ANmrgIb+ZFZ34M33G+9U32zYzb3xIrrsVf7tFLK+aLsVLMLwudxD4u/1aB2Zf4/Lf5m/2aY+oPIrOZoV3P8AdkbbJ/vf7Vctb6gLrbczp5bt8rq3/staulrHFL/pO0q3ypGv3lWtP7N5TL69zGwtwt7NshRQ/wB1Wb+Kpre3uVki865Vw3yvCqfw1Xt7OFfK2O2FfcjN/eratbNftQtpnbH3mbZVrL+U5/rnvF3R7GaSaP51bb8yLv2tu/2q6a10kqwhd42f73yvuVd1Q6Do/wC5aZ0jZF+42z5ttdhpGnpax+TMjNFJtbdJ96pjg77DljOWPvGHH4bTy2eSH545fkbyvvf7LVch8PzCP98jf6rduj+6rf3a6L7HDt8mT5n81fKZv4Vq4ui/M7wou/5vmaqjQ5Yk/WOY42dJHvjHMcMzgHd2qvrGk7W3w/N5i/e+8u6tHVIjB4geLptmHJOfTmpdSsYY9nkuz7tzMv8AFX6n4h0efKcnfbDx/wDSYH3XGOI9ngstfeivyicDrFnthz9yJkZX/irmbyHzIf3G4bk2MzJ/49Xe+IIf3a7EVdu5tq1xepxvCGkd2Mrff/dfdr8zp4fsfC1MRynH61b7pD+52IvyszP97+7XPX0LR7Xd9y/KrqtdZrVrumaHyd8ez59v3d1c7qEbsySG23fPt3bv/Za6Y4HljZRMI4iXxGX5EzKz72Rmb71c18Wle78NzQw7R5n3t33ttdXtuftHz7T8nzR/3a5P4sSbdPTZuiPzfNs/8drkzHC+zwrkduFrx5uU+bdc0+2maVC/3Xrm7yzSPc6bcLWlr2pPJqUqu+G3/dWol/fJsyuG/wDHa+U+LRHpR+H3inDK6qr7FVVT73+1VtmS8gYJJudv4qimtXDfvCxVvl21CsgjkRNjfL9xd1OISiQXCvHNs2KV/vVG0ybdjp8q/dapLp0kLbE/36oXk7x4kR/mqvcCN/hLMkL3C5jT7v3WqpcQtGuzHzfxtSR3jxYdDy33vmq1HIky7+rf3TWZcpcupnQsYZN/da9E8G61pvirRZtB1P8A13lbYm/u1wd1YzIvnJytO0fVbnQ75byHcpoHpI0tU0m60XUJLO5Rl2v95v4qrTQ+SwdOVZK66+jtvHGjjVYfluY12s33a5Vle1keGaRsr/C1HL9omIlrcZj2z7v92s/UI/LkLp93/aq1eSJuZ0T/AIFWfM+6TO/5auMiojA27mikVdtLTNQopD8vz4paACm7dvzZp6feFJU+6AUU3P3aU/N8maIgLRRRR8QBSMueRS0UcoBWr4PV21hNj4rKrT8J7/7VXYmSv8NSKXwnaXDItqPm/wBZu+b+7WBrEm3ds6/3fvVt3kjrA0f3VWsDyZtS1aCzhhYmSVV+WiX8xgfTf/BPOzew+I+hRykF547uVyOx8h69p/a0Rm8daYf4f7LAf2HmvXD/ALHei2lh8VNLa3UKkdnKEQptKt5Dbq739qtW/wCE+0uQu20aV90DOT5j1+xYGUf+IEYxr/oKX5UjyZ1IuvzHjmvW8ysiF1likT5F+61V/BNi9x4otJtNds+btbb/AOgtVjUo7/VNSWzs0x8+yLb8zV9Cfsm/sm+IfGmrLPbeFbq6upmV7O3t4tvzf89JP9mvxXC0Z1JmcsRClE8k/wCCkPxB8T2/7MPw8+C+qQ3CQXXiKbUovMf5f3cfl/8As1fPvhfENnDshVVjT71fUv8AwXD8Dn4Z/E/4X/DfUb9brU10O6v9S8l9yQtJIqqq/wDfNfMuixosaJD86Lt2K33q7uX3z0Kc5OlC5u2ph8vfNMzD726lWZGhEJT5v9yrvhnwzf8AiTVLXQdKhmuLm6uFjit44t25m/hWtD4lfDHxn8J/EX9j+LdK+zTb22x7lk3f8CX+KnGXKV9s9K/ZT/c6pfzQtGv+i7d0i/L/AL1e1LeXOrXDw6UnzRvteaZPkXdXjv7JOm22qXmow38MjpHbrLKv/Avlr1zxJ4qs7O4/srTYI43X7qx/eX/erycdKlGrzHl1pctWxFq1xYaAN7zfabz5l/vIv+6tcT4k1C8EM2sPeK00MX/AF/3a2vst1cyb7y8VFb5vO+9trkviJqT2+mw6Vsxudmlrx61aX2zHm5j1HwTI0n7OrSzRk7tGvSUz1GZeK+QPGnipY43s7ObcjfMzN/DX1x4PkD/syzOjdNBvgCPbzRX54fGPxzFYPL4e0yZmuZPluJP4VX+7X7v4uUJYjJOG1/1Bw/8ASaZ1Ya
jKtPQ5n4meNn12+/s61uWe2h+Uf7VckuMcUrAt1NIq7a/LKNONOHLE92MfZx5QVccmlpGOBxQrbq1+EsWiiinHYAooopgOjj8yVUP8VDRPtZ9mQv8AFTaWOR49yJ0aswEqSGbayh32rUdFXyoCSaRGbeny062nMUoZRwKhqS3jfd/s1ApRO+8G6w81q9tM7bWT+H+Kq2qXjx5m2fd/u/erG8O6klrJvmfb/do1jXHmm+5xvpy90x5T6p/4IveOx4O/4KZ/DCXztg1a7utLn/3ZoWVf/Hq/cjXtBSx8+wSH5o5WRlm+996v53/+CfHiF/D37c/wk1p5mZrfx9p/zL/tTKv/ALNX9InjSzRdW1ATQsrfapG+Zv8AaryMdR5pG9OpyxPKfEuj2d0r+TDJCy/K275mWuM1aGGOeZHTcsfy7fK+98v3q9J8Sectu0bzMrN/F/d/2VriNUs90LTWz5Xyvn8z71eJWo8vuuJ6dHFcu55T4is5o5Hh8nasn/LST7tcLrlgm6XfDsdflVf4f96vUfFFsk0jpH5ibvvs38VcJrVrDDHNM7/OrfeauPlpS91Ht4XEcx55rFrmN8vu+8v91qobphGjzWfKqvy/xNW9qlrNZ3H7na+35fmbcrVlSWsLTJN5zLt3fK396sp0+V2jHQ9eNT3SrDG87SwzJHs2723L83/AWq3Z2/mKj9WX5vu1Fb6f9okea8RRIz/JtrWhV1s/3MOdvyvH/dq+WMpxM5S5feD+z3jkRJvu7d+6NvmVqZr8DL4K1GDdknTpwCVx1Rq19Nhdlj+Rc/3tny0zVLe1eGe2UEwtGVwR1Uiv3/wGp8mdZg/+oWp/6VA9LJKntK9Vf3H+aPiDWvH02n6HPps15t2ysGjX7qt/s14D441681a+d5psru/v16/+0xbw+H/GV3ptnbLDFM7Mir/vfNXlFv4dutQk3vbKV3/xV+e017SlE/JqkfZ1ZHJw2M11J5wh+X+9XQ6XoW2Nbl027W3V0Fv4ZsdN3b3Xev8ADWZrmtWdirJDNg7PmrYz+IS61CHTV2QhQfvba5zWNceRzBJNz/e31R1LXHumd0Rs7flaqLfN87/e/wBqlzcxUYltrhPMCI+fl+WrNv8AOo2J8/8AG1U4Y3kkCJ0rW03S5ZtrojZ/jqOWYvhC1WZhv2fL935q0tNXzm+dGVd3y1Nb6P5G15nY7vvbv4mrQ0+z2y/O6rtrQjm5dS7o9v5arvm/2katRtW8lVd/vb/vbP4qzluraGPZCm5qb/aEClt/y/8ATPd97/apSCXMdBp+nzaq2z77t8v/AAKu60HwnZ6DYia5dd/91vvf8Crz/QfElnp6rNM/zL821XrTuviJc30DW9u+8NubbJ/dqZe9rEOWUuU4X9o7Ura4voLa3m37f4l+61cNoVt8u/yd27+GtP4mX1zqGvL9p+UKn3ah0eNLdWHyk7Pu76Rr8MNSbWr57WFYYXbDJ861zV5MZmxs4/vVf1i+eaT7+Qvy1lSOm7ATbT+H3R04yEc7Dg0jDI4of7pp25NuacTRiUUUA7e3P8NSUOX5m+c4qXzFPzpubb/epiqjLn5s09f/AB2gzJI1jaNX+bP8dW7Pf5nnGNiN/wB2qm5Fj378n7vy1e0xn4Kbg3+1VcpMjf01Xl27/u/e2qtWPFGqJDpJTGG2/I2/5qk0W3O5Xf5VrG+I12rSR20fy/7NOK5SPtnJv9010fw9037VqyzOm7b93/Zrna7X4d2otbGa/k/u7aZtU+Eh8Yapu1BofO3iNdv3/u19/fFuYQfsDRTEDA8IaT1/7d6/ObVm8y/km+bDSs1foh8bWCf8E9N3UDwbpP8A7bV+t+F//Ipzz/sGl/6TM+t4VVsDj/8Ar0/ykfFF1bWGpWrNIin/AHqw9Y8BecrPbJg/wruqbTtVeT/lhs8v+9/FXT6TfR3S4MKlv4F/u1+Rcx8T/eieVtbX+l3DI4ZWWuh8NeLHtYwk3zfNt3NXT+JvC9hqlq9/bKu7+7/FXE3mi3mmt53ksqr92nL+aJftOY9F0vWrbVF8l3yPvbV/vVJqGj741ubbaP4dtcBoesXNjMvzso313Hh/xBDqGIZn+6+7d/epxl7pEy1o9vcs374fKvy7a5fx3b/ZdUhmSFsLPXZTW/kyfabZ2+Z/k21g+NtPmvlivLlG+X5mVaqMf5RS5j7/AP2JZpvin8K/CNnc2cNmNJ066066ks2/eTfeZVkrC/4J8eILPS/2gLGbXrm4g01tUmgezs9ytJuk8vb8vzLtrs/+CWqeAPF37F/jnRPDsN1N4o0nxbp9+8kny+Tp+1vOkX/0GvMvh7eal4P/AGltetvD141s1rq7T2TK3zeSzbttEY81GXIcsoylXPufxT+0T4i/4I+/HHxJ4mb9muC5sPiF4QuZ/Cr6zOIvs8kbMquy/wAWGb/gW6vzh+E8mpapoN9rGq+T9tvr+S6uo4V2xeZJI0jKv93burv/APgrh4y+PvxA+Pvhjxx8aviVqeurceGo7Tw+s4WOK1s1VW8tY1/2v4q4z4I2rzeF5Em2sv2hfl/2lX71c8aMqcbnVhKcadU3mtUaQPM+3cnzxr93/dqCbSkWQ2sXzq33tqVu/Z/Lb50+X733futVaRXt5Hm37tu7bt+63+zWlOPNM6K0uWBy81m7SGzeFfli+8vyrVRdPRV81PufxMtbt8uxdhm/1i/Myr/47UMmmpDMzpyNu3/Zrt+zZng1Jc1W8TBW1hb5ETb/ABOzfw17F+ypbJbwa6FdTmW3zt+kleX3EfyvM+7ezKqt/s16r+y3EUtNccOCr3ELKQMdnr9G8J/+S7wvpU/9NzPq+BJc/FFB/wCP/wBIkeNfGW1B+JOvYTKnVZmdf73zmuKvLFJZk2QqW+Xau2vUfidoUkvxD1qZZdobUpnw3f5jXPXHhmFmV5rZk/utXxmeR5s7xV/+fk//AEpng5jWtmVb/HL82efSWfmQvvjbc3+q/h21VuNN2sg2Ln+Na7u+8LTW43ptdPmVFase60OFgmxPvP8APuryvc+Ewpy974jlmt5lykKN8zfd/u09dPfcnyfL/F838X+7W1No77vJ87au/wCXy/uqtRLprtMYX27925f9quWUT0sPU93lM2PT32b3Xb833Wf5qmWyeNdi7S33ttaUMKRtsSFSv96nQQvGyo6Ns/vMtYyiehT3MtbF5NzQow+T7v8AtVftbF5lRIQq/wAPzfxVLAschZIYW+ZvvMlbml6Wkql7bk/d+aol/KORNoWk+Xb73+7/AHdld34Z8LmT959mjYf+PVR8M6Huj8mdGHmJt3bdzbf9mvV/BvhdDl/J8uLYqPHGnzNWlPkictT3iDQfBrraxQmFmEi7t237tdboPgd2V/MtmH8PmeV96um0PwrDZwtNclvmZdkbfw11lnpNtaqHeHEUjbNuzdtrOVb7Jj7CBwX/AAr/AMxWtjbSb5Pmikj+b5qyNc8D2y5SFN38X+7XslrpNnMrfPvVX2pt/vVQ1DwXYLC8KWzY3fN/dojLm+IUqZ836hZ/Z4wZrZtuz
7q1y+uWLrG6PDgr92u81zTXmkaBIcqrs21f7tcdq1uitshTllbbuf8Ahr9F+r+6fKU63LI5Zi7XCpDMr+Z8u1v4dtPWGaFVgdGETfM+1/mb5qdcK8kjwlGYL99tn96pLeGFdltbWzbY12/vK6KeHtC8Tb23tPdHSWaKpdHZmaX7u77tOW1IzN827Z8vl/NSWilpPubhu2/K/wB2nstst2Y4XkKr8u77u2uijh7fZOeVb3CzpNnC0nnGZn8x922uh0mRFmf51+6pX5PmVa5qzW5kYfPuH8W5K6LTWmuE3oivtf5mX5WqpYOUfjJ+sc3unQ6XHtuP3zrtZflXZ/DXQ6fGjXEU32lmO3y33NWFpbIjI6P5vyfvVk+Wuo8OxldkM9tGn/AqwqUYRNIy9w6/wfZvFD9mk2na7Lu/vL/vV2GmWME0KI6SbI/9Uslcr4ZaGH+CTbJF80a/ers9Lk2wo824qu1fmf5v+BVw1I8srx+EuMoyjYuNYpNuCQxuy/xN/eqRbV5IxD0ff87LT0kT7RNG7xsn8P8ADtanIyXFul5M8jHZs3b9vy0uXsRKXKcFr8Mdt42eDAdUuYweeG4WtXWrBGaeb5S+z5VX71ZevqqeOmWNcgXMQAYdeFrb1KTzI2kgTJh+4qxfMrV+nceJf2blKf8Az4j/AOkxP0Hjdx/s/K3/ANOY/lE4TXrW2ms3RI2WSTciq3y//s1x+sXX2NUfZ80e1XjVd3zV3muWsbSOjvh/mZVZfm3VyGpWn2Jkd3Vk2/dVv++q/PqdM/Pub7RxusPDeySzO+X835l+6zVyOpRwNIz7JIy27fu/hrttUj8n5EkmL/3W/u1yuoWb3i+d9mVVZv8AWK//AI7XoYWPxKRnze+Y7w2SyG5fd/Cv+9/tVxfxesoWs4kSFg7O37xn/wBn5a9Alh3TH5GRWTbtauQ+L2nvb+G01B0V0tbqOX/gO75m3Vx51h1Uy+fL9k6cHW5cTFSPkHxBa/Y9Uld3+ZZW30+zZA29EY7vuVvfGbw9Np/iKa8hhbyZH3xf7tc3pbbmbY7f7tfmkf7x9R8RamkO3+6v8Tf3az7r+JML/vK9WbyZEU79y/7VUJpPOY/7X/j1P4ZBKMSORvNC7EVV/wBmqNwok27+Garc33VQPgf7NVmciTZsZqfL7pUSpL/rClCzTR/x4NXG035WkfpVV7dlByPu+tLmNOaMi7Y6s8kohm+ZP9yrl5psN3++T+5/crDU+WM5rqvAz2epI2n3O3zNvyM1EiJR/lM7w/rVzoOobN+6Nn+f/arY1aOz1KP7fpu3Lf8ALOsjxFov2W6KJtG3+L+9Wbbahc6fwjsv+0tTy8xXvDtQV1B38f7NU1+X71Wb65+1S+bvzVb7/tiq/ulREVtpzTw27mm+X70qrtoiULRRRTlsAUUUituo9wBaGXdiiimArM7cvSUUitvbFZgCrtpaKKACtjwWv/E0WaP7y1jK2eDW14Ng3XDzdlq47Cl8Jv6szrCzydG/u1r/AAD8IzeLviBbbE/dWrNOzM33dtc94gn/AHexP4q+0/8AgkP+xn48/aEk1nUvDGgzXO5lt4pPI/1ar8zM1T7OVT3EefiqnsaHMaf7OOnT2vxYsW8ghBDPliMf8smr1jxx8FfEXxs+K2keFvCeiXuqajc2fl2en2EO93k3sVJ9Bz1r374g/wDBO7Uf2e/DJ+J0sbyTWEEcepCdQptWkcRqBjqzFuR2ANbH7LviDU7bxFp+jeGNXbS9TXWVnXUbeH97ghFRN/8AdyHO33r9ryrDxpeB2KjP/oKT/CkeB7R1KXMjE+BP/BInQfhHq1tqX7Q2txprk0S3D+H7FvNntZGb5Vkb7u6vq3wn8PdK+Gvh2Wz8K6DJoVqsX/H1ffNcyL/vLXf+JJrDT/F194kjhuPEmrSOqXV95W35lX+Fvu1558Trrxnr0n2a9maC2k3K9naxNLLu/wBpvu1+URiqceWETJUf3vMz8bf+C3Grf2l+31p2g/aZJY9L8HWbRMzbvmkZmavCNPtXVvPf/lp/er07/gpxZu3/AAUZ8T6VePIr2OmWkT+c25t3l7v/AGavOLOHzpAj3Kr5bVwy+I+ljH91FnuH7JenfYdZ1Pxgk0aT2tk0Fv5z7WXzF/eSL/tKtQftHx22reE7DVU1KM/ZZ18pVl3yeXu27m/vbq5v4P8AxY0T4a6tNc+PLBr3SZE23Edvu3r8vysv97/dq/8AtG/Hrwx8VrrTLDwNo8lrpdnYQpPI1ksX2iRfu7V+8qrUS96pYyjz/Ea37Ndxr32e7s9NSR/MTa0kf+9/6DXrjaVZ6T5t/MitMybt0n8VeVfst3D2y6jPbOq/ul+bzf738Nek3Fwk0j793krF+93fw189mcX9ZvE82tz8w2+ZLiN7+aGN7fY3y/3v96vGPih4y87WpLCzTzdqY8yN/lWuk+JXxNext5tE0GaSG5mX/XKm5fm+WvIPGGpJ4P0ebUtbud02z7sn8TV5nL7afKRTjzep9T+D559N/YzvLuJ8yQ+FtUkRgc8gTsDX5c3d5cahcveXMrPJI2WZq/Sj4PavJr3/AAT4n1iXOZ/CGstz6ZugP0Ffmh5ntX9LeKMFDIuHu6wkF/5LTPZwEeVzXZjg27mikVdtLX46eiFFFFTzAFFFFUAUMdvWikZd1AC0UUUAFFFFTaYC/dapYztRnZ//ALKlmhRbVJg/zf3aYvzff+796jlMx6yTffBw1Cs8jHe+TTJGy3yU9Ng46VIHd/syatPof7RHgLV7WXy3t/G+lyLI3/X1HX9QvxChS41698mzVUW4Zom3/wCsr+Vv4fag2leN9G1VGw9nrNrKrbf7sytX9TnjDUEuLq3v9i7brTrWf/eZreNq48VT5uWREpcvvHBeIJt0bJIm4Rt821/u1xfihYbfda745vl+X/ZrstcuJlWXZCsvmM3yx/LtrifEXk/8toVT+4y/w15tajzaG9OpKUjgdcjma+3j5Xji/wCAtXC63bv5jvvVNy/vVZ/mVq7/AMTTIzP5KLsb5fMWuE8Qb0hXyX85mVllVv7q/wAVebOlGjK57GHqSicHqEMK+a802yJX+61ZMyzSKnyKrrLt2s/3q2dUvIYf9S+zc+5P4ttZLNE3yO7I7P8AeWKo9jzS5uY9SniPcCzhmvIUmSFQ6pul8n7tXNPtZt3k+ZJ8z7Nu3726jT7O2SHa/wAi/e3f7VXbXfb3CukzOn3vJX5f+BNSp0+WroOpU5Y8xb0+NLWzms4bnHlv8jM+5lb+6tV9XbyJJnmbAVMsfT5c1es4ZnjjTfGHm3M8aru+X+GsTxrP9g0DU7hgD5FhKxBXrtjPb8K/evAxp55mCX/QLU/9Kgexw1Uc8VWb/wCfb/NHwl+0RqFtr/xSu0d5H2vtT+L5q4q4utN0G18m5dR/tL81W/GWvPda/c6lN/rZpWavN/El9PdXDfvGC72r85hH91FH5nUl7SrKUi94m8bvdGVC/wDB8rL/ABVx11qE11Lvkm3Nt21O1reXDL5aN/3zWto/gXUr5kRLZn3fxbaq
Mbkxkc5BbzSfchZq19L8LXl0yfuW+Z/u7K9J8D/APVrxftNzats/i2rXZXHgfQPBOmrc6rAsUS/KrN96tOWFOXvGftPf908x0P4b3Jj86/RlT+P5f/Za1pLXQ/D9l9xd7P8AI38W2q3i74rabbzNb6JC2F+Xds+9XFy61rGsTec/X/aqJS5jT3pam5qWvQySecnWqEmvXPl/IWDfd2rTFtbWEGa/dl2/3q2vB9x4evFcw2DS7U+9J95v92p5uUj3zAW88Q3G7ZDINv8AeT71QNB4n3b/ALHIv+01eoWOraHZsAmnR7F++s1X77V/CV9CpfR1R/vfu/us1Iv3vsnktnda2v8Ax82zf3vmrVs9aeZRvfb8nybf4a7xbbwHfE+SlxEP9pN1VrzwHol5bG50y5xt/h27a0+H4SJHmfij/iaa4JvLbCr/AN9Uy6keGEH5V/vf3ttXdY2R60/k9IX2bqxNW1BGmcb2/u/dqPt+6aGbdS+ZIRvYhXqCl65NJT+I2iHBFIq44FCrtpyfeFUEgZdu6mKNq7yKk+8xSmqNvSgOYdG3zBEqS43x7k602nq7qmwfN/vUEj4W8xW7N/erS0mBJNvz7mrNt1cM+9M1saDGhmRP7396gmR1Fuk0dutyk20Km3dt3VwvinU31TVHn37gvy7q7XXdQTS9DZ/lQ7PkXdXnRdnJduu7mojzjgLCpllCJ/E1ekaXappvhuJAn+u+auA0S2+0X6J/tV3Oragn+jabHtTy0Vt1P7QVJfZMnVNK+bzpOGWvvX47Ar/wTxKqQP8AijtIA3dOttXxEslteW+9EZ9rN95K+4vjzEJP+CfkkQXg+ENKAH421fr/AIYa5Pnn/YNL/wBJmfXcKaYPHv8A6dP8pH5+6fcMW2P1+9WzZzPGyuPlb7y7XrEjhmttu9P/AB/dWrZ/M2/fX5DHY+M5jo7K++QI77tq/wBypNQ0m21CEuiK52/drHtZHXc5f/c+etWzuvLK/d+WtTLlkcnq3hu4sZj95N3zVJpd2beRE+ZStd/qGl22tWXyQru/hZa4/WNDezm2Q7i38W1Ky5UaR/lZ1tjq32jS4kd9/wAv3V/hqDWl+1W/91GrD8N332VhC+3av3K39QkS4s3dHUfxKtVGX8xnUjOR9if8EWdW1XVPiN4z+CFheeUPGHgi8giaP5WaSP5lWrf/AAhqL4p/4SqzSNJrVvKut27zWZW2t83/AAGvDf8AgnH8Vtb+EP7W3gXxVZzR25bXo7O6aSXav2eb923/AKFXv/xgk174L/tReNPh7co0ljp+tzeVDJ8q7ZG8xW3fxfe+9W1GPNKSOWp7soM5v/gplJpXiZfAPiHTLyaabT/D6xXCyS7tsjSN/wB81zH7PsLr4G+0zQ+an2j5l2fdar/7QVunijRfO8+PyVg3RQxpuZW/u0vwH002fwrs3udyPJLI7r/EvzfdauepFxid1GMvanS3Cv5P2l7bb82379ZuqMkKpsmxF95l/utVy/ZDIZodoVk+X5/mX5vu1zuuagkDSJ52359u7+Fmp0Y/akTipfylW6unVvOm2/M+3dUUl5t2pC6jb99t1UZtQRnOxNqfe+akMiNJs3/e+bdXYeJzTjL3i8sUN43ko/yt8zt/8TXrH7Nlutvp+qJGfl3w7cnJ6P1ryq1byvnTc6K235vvbdtes/s5MHstUdIiil4cZXHZ6/RfCdW47wvpU/8ATcz7DgP/AJKih6T/APSJHDePjt8Z6rstvNYapKef4fmNY91HbNmN0Ztqfe/hre8dW7r411V0kfedQkOAvBXdWfZwwz4mRN6/e2r/AHa+OzqX/C3iv+vk/wD0pnzuacqzGsv78vzZkXGn+c6pBCoP3/Mb/wAeqjqGi2zY32y7vu10clvC2XLzfu3bY0ifwtSjTXkgKPbfeXcu5/mryJS5djgj7vvHBXWgujfc3bn+ZqpSaTCq75o2PlvtTbXZalofkyJs+VG+Xy93zVkXFn5e5IYWST7v+zWFSM+Y9rB1OaJzv9nwxt58O4vv27V/vVN9heXdvRnH3dy/dWtBrG5+V3di/wB3cvy7lpiWkMaPvG5/vblfbWEonrUY+8VLOx8mTYjthf4pF/1ldH4ds7ZcJHD8v3l+T71Y9vbozZ2b1/2v9quo0CzhW4E25sKq7/8A7GsJfGbyjGO0TvfAunorLNHC29fldZE/h/2a9i8J+H7O4t4X+9u/iX71eceB9NeRVuftLLuVVRZP4f8A9qvZvBdnNJGiCCNl2ru/vK1ZyqGNSMOpsWemTQrsdPM27VSPbWxHZ7pE85/nklbzWX7rbaks1e0Vpprb7ybUVn+ZasRw3Kr+5EcZk+40kX3f71c0ZSlLmOeXu7iW9nDJ5Uz/ACIzbkj+6zNU02nzXUchWHa0P3V/iZf4atxr+5eaBIwu/Zt2/eq1ZrtZnW2kRdi/d+6y1tGpymfLynyJ4gb7Qd/3trr919qt/vVyeuQ7ZHmfl97fKtb+qahmzZEdVC/cb726uX1SREy/k8Ltby/4v96v2Gn72h8NHlUbsxZPOVtibS6/eZvvLT4bdLeF3eFvm+b79X49Nma9d/JXLbf3lW7fw55jFJ0kA/vfxVqpUoijzxMaRWg7xpFt+8v8VSw281xtR/7nzyRp8rV0LeFXmhTybbcuxV2tTZPDM0O1GhkXdu+Xf97+KtvaUvskL4jAhjeOQwpu+X7+7+GtvS5PMVZsKPmZdtRS6W9vHvlhmZ1fd9yraWr2sg85G/ePt+WorYiMoijH3/dNvTbhGjRHjZgz/wC7urp9HvvtCoj7tkf93+H/AGa5K1X7OzI6NsWXdFuf7vy1saTqCRK0Pk7W2blb+81cPtoG3LLqejeF79FuETYqy/xL/s112n6xDC0sKfM7SqssK/w15Zp+teZCJp/mf7rf7X/Aq27HxVNH87yfe+Xdu/vVy1ImkfM9IsdYtplCJCvyv87Kn8X+1VptUe8j3wbd6vt+avPbfxE/l/uXjzH/ABN/FV6PxRNI2+bayt8u6P8AhaspS5ZGsY8wa1db/GJu+OLiMnIwOAv+FbGqavDbyPI8myRv9b5b1zF7fmbUX1CQbSHDHK9Me34Vma1r3nyDe8mz73ytX6dx0k8vylt/8uI/+kxPvuOVFZdla/6cr8olnV768upGuUDJEysrybK42+1BLiaazuUb/Y+T+L/ZqbWNQuWk8yGaZArL8slULjUt0MyJDh2bdK0f8S/7NfAUZQjH3j87qcvumVqnnXDHYkizLFtRWrA1KGZyIZuGV/4k+X/9qt268l2fyvMi2/Mvz7ty1k30HlzCHy+PlZGV90daxxUI6lyozkZkMP7tvOeNhsb7v3mrA+Kdul94D1O2S5j3rZSbdvytuVfvV1Mi21ux37cK+3bt+bdXI/FrfD4H1ObfvRrVlfdU4/FQ+qzX901wtGXtoHg3hW48PfFLwmnhjxDeLDqtqvyTTf8ALRa888b/AA18Q+A9X+z39tMsbS/Kyp8rL/erN1TUNT0PVPtmmloju/hrufDX7REOoWv9k/ELR4dRSRFHmMv
[... remainder of base64-encoded JPEG payload ("image/jpeg" cell output, rendered at width 600) omitted ...]
-            "text/plain": [
-              ""
-            ]
-          },
-          "metadata": {
-            "tags": [],
-            "image/jpeg": {
-              "width": 600
-            }
-          },
-          "execution_count": 38
-        }
-      ]
-    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "hkAzDWJ7cWTr"
+      },
+      "source": [
+        "        \n",
+        ""
+      ]
+    },
    {
      "cell_type": "markdown",
      "metadata": {
@@ -665,35 +658,35 @@
        "base_uri": "https://localhost:8080/",
        "height": 66,
        "referenced_widgets": [
-          "cef5e9351ca743bcba5febac0b096a30",
-          "ec326c52378f4410920c328f221e0514",
-          "83000c64a11c4ae8abd6f0ef2f108cef",
-          "0f7899eb719f4a9c9852426551f97be9",
-          "886ac5b18b3c4c82bf15ad5055f1e17e",
-          "4e67b3c3a49849c7a7ba28b7eec96e7a",
-          "62c3682ff1804571a483d46664533969",
-          "599dda3b608b432393760b2ca4ae7c7d"
+          "2e915d9016c846e095e382b6a02ee773",
+          "cb7fc3a5c6cc4fde8d2c83e594a7c86e",
+          "ac3edef4e3434f4587e6cbf8aa048770",
+          "853ac234cc2a4236946fc516871e10eb",
+          "13842ca90c0047e584b8d68d99dad2b1",
+          "f454999c3a924c7bad0746fb453dec36",
+          "f94a7ca8c1f04761bf38fdc5f99664b8",
+          "9da1a23b042c41618dd14b0e30aa7cbe"
        ]
      },
-      "outputId": "56b6402a-81d5-41d0-a3c8-8889db1fca6c"
+      "outputId": "3606f305-aa67-43fd-d5d6-93d1f311768c"
    },
    "source": [
      "# Download COCO val2017\n",
"torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017val.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../datasets && rm tmp.zip" ], - "execution_count": null, + "execution_count": 10, "outputs": [ { "output_type": "display_data", "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "cef5e9351ca743bcba5febac0b096a30", + "model_id": "2e915d9016c846e095e382b6a02ee773", "version_minor": 0, "version_major": 2 }, "text/plain": [ - "HBox(children=(FloatProgress(value=0.0, max=819257867.0), HTML(value='')))" + "HBox(children=(FloatProgress(value=0.0, max=818322941.0), HTML(value='')))" ] }, "metadata": { @@ -716,30 +709,30 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "a5d41761-f1a0-41fe-d0bb-4cceebd7c4a6" + "outputId": "20fbc423-f536-43ff-e70b-3acf6aeade99" }, "source": [ "# Run YOLOv5x on COCO val2017\n", "!python val.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65 --half" ], - "execution_count": null, + "execution_count": 11, "outputs": [ { "output_type": "stream", "text": [ - "Namespace(augment=False, batch_size=32, conf_thres=0.001, data='./data/coco.yaml', device='', exist_ok=False, half=True, img_size=640, iou_thres=0.65, name='exp', project='runs/val', save_conf=False, save_hybrid=False, save_json=True, save_txt=False, single_cls=False, task='val', verbose=False, weights=['yolov5x.pt'])\n", - "YOLOv5 🚀 v5.0-157-gc6b51f4 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", + "\u001b[34m\u001b[1mval: \u001b[0mdata=./data/coco.yaml, weights=['yolov5x.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.65, task=val, device=, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=True, project=runs/val, name=exp, exist_ok=False, half=True\n", + "YOLOv5 🚀 v5.0-330-g18f6ba7 torch 1.9.0+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", "\n", "Downloading https://github.com/ultralytics/yolov5/releases/download/v5.0/yolov5x.pt to yolov5x.pt...\n", - "100% 168M/168M [00:01<00:00, 156MB/s]\n", + "100% 168M/168M [00:05<00:00, 31.9MB/s]\n", "\n", "Fusing layers... \n", "Model Summary: 476 layers, 87730285 parameters, 0 gradients\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:01<00:00, 3008.87it/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: ../coco/val2017.cache\n", - " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:17<00:00, 2.02it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning '../datasets/coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:01<00:00, 2653.03it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: ../datasets/coco/val2017.cache\n", + " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:18<00:00, 2.00it/s]\n", " all 5000 36335 0.746 0.626 0.68 0.49\n", - "Speed: 5.3/1.5/6.8 ms inference/NMS/total per 640x640 image at batch-size 32\n", + "Speed: 0.1ms pre-process, 5.1ms inference, 1.5ms NMS per image at shape (32, 3, 640, 640)\n", "\n", "Evaluating pycocotools mAP... 
saving runs/val/exp/yolov5x_predictions.json...\n", "loading annotations into memory...\n", @@ -747,14 +740,14 @@ "creating index...\n", "index created!\n", "Loading and preparing results...\n", - "DONE (t=4.88s)\n", + "DONE (t=4.82s)\n", "creating index...\n", "index created!\n", "Running per image evaluation...\n", "Evaluate annotation type *bbox*\n", - "DONE (t=83.47s).\n", + "DONE (t=84.52s).\n", "Accumulating evaluation results...\n", - "DONE (t=12.96s).\n", + "DONE (t=13.82s).\n", " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.504\n", " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.688\n", " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.546\n", @@ -829,35 +822,35 @@ "base_uri": "https://localhost:8080/", "height": 66, "referenced_widgets": [ - "217ca488c82a4b7a80318b70887a556e", - "4e63af16f1084ca98a6fa5a282f2a81e", - "49f4b3c7f6ff42b4b9132a8550e12186", - "8ec9e1a4883245daaf029458ee09721f", - "9d3e775ee11e4cf4b587b64fbc3cc6f7", - "70f68a9a51ac46e6ab7e51fb4fc6bda3", - "fdb8ab377c114bc3b862ba76eb93cef7", - "cd267c153c244621a1f50706d2ddc897" + "6ff8a710ded44391a624dec5c460b771", + "3c19729b51cd45d4848035da06e96ff8", + "23b2f0ae3d46438c8de375987c77f580", + "dd9498c321a9422da6faf17a0be026d4", + "d8dda4b2ce864fd682e558b9a48f602e", + "ff8151449e444a14869684212b9ab14e", + "0f84fe609bcf4aa9afdc32a8cf076909", + "8fda673769984e2b928ef820d34c85c3" ] }, - "outputId": "9e4788c2-e1d4-4a13-c3d2-984f5df7ffab" + "outputId": "4510c6b0-8d2a-436c-d3f4-c8f8470d913a" }, "source": [ "# Download COCO128\n", "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../ && rm tmp.zip" ], - "execution_count": null, + "execution_count": 12, "outputs": [ { "output_type": "display_data", "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "217ca488c82a4b7a80318b70887a556e", + "model_id": "6ff8a710ded44391a624dec5c460b771", "version_minor": 0, "version_major": 2 }, "text/plain": [ - "HBox(children=(FloatProgress(value=0.0, max=22091032.0), HTML(value='')))" + "HBox(children=(FloatProgress(value=0.0, max=6984509.0), HTML(value='')))" ] }, "metadata": { @@ -918,25 +911,31 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "c4dfc591-b6f9-4a60-9149-ee7eff970c90" + "outputId": "cd8ac17d-19a8-4e87-ab6a-31af1edac1ef" }, "source": [ "# Train YOLOv5s on COCO128 for 3 epochs\n", "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache" ], - "execution_count": null, + "execution_count": 13, "outputs": [ { "output_type": "stream", "text": [ + "\u001b[34m\u001b[1mtrain: \u001b[0mweights=yolov5s.pt, cfg=, data=coco128.yaml, hyp=data/hyps/hyp.scratch.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, evolve=None, bucket=, cache_images=True, image_weights=False, device=, multi_scale=False, single_cls=False, adam=False, sync_bn=False, workers=8, project=runs/train, entity=None, name=exp, exist_ok=False, quad=False, linear_lr=False, label_smoothing=0.0, upload_dataset=False, bbox_interval=-1, save_period=-1, artifact_alias=latest, local_rank=-1\n", "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 🚀 v5.0-158-g78cf488 torch 1.8.1+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", + "YOLOv5 🚀 v5.0-330-g18f6ba7 torch 1.9.0+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", + "\n", + 
"\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.2, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n", + "\u001b[34m\u001b[1mWeights & Biases: \u001b[0mrun 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs (RECOMMENDED)\n", + "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n", + "2021-07-29 22:56:52.096481: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcudart.so.11.0\n", + "\n", + "WARNING: Dataset not found, nonexistent paths: ['/content/datasets/coco128/images/train2017']\n", + "Downloading https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip ...\n", + "100% 6.66M/6.66M [00:00<00:00, 44.0MB/s]\n", + "Dataset autodownload success\n", "\n", - "Namespace(adam=False, artifact_alias='latest', batch_size=16, bbox_interval=-1, bucket='', cache_images=True, cfg='', data='./data/coco128.yaml', device='', entity=None, epochs=3, evolve=False, exist_ok=False, global_rank=-1, hyp='hyp.scratch.yaml', image_weights=False, img_size=[640, 640], label_smoothing=0.0, linear_lr=False, local_rank=-1, multi_scale=False, name='exp', noautoanchor=False, nosave=False, notest=False, project='runs/train', quad=False, rect=False, resume=False, save_dir='runs/train/exp', save_period=-1, single_cls=False, sync_bn=False, total_batch_size=16, upload_dataset=False, weights='yolov5s.pt', workers=8, world_size=1)\n", - "\u001b[34m\u001b[1mtensorboard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n", - "2021-06-08 17:00:55.016221: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcudart.so.11.0\n", - "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.2, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0\n", - "\u001b[34m\u001b[1mwandb: \u001b[0mInstall Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)\n", "\n", " from n params module arguments \n", " 0 -1 1 3520 models.common.Focus [3, 32, 3] \n", @@ -968,34 +967,38 @@ "\n", "Transferred 362/362 items from yolov5s.pt\n", "Scaled weight_decay = 0.0005\n", - "Optimizer groups: 62 .bias, 62 conv.weight, 59 other\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '../coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 1503840.09it/s]\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 198.74it/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco128/labels/train2017.cache' images and labels... 
128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 475107.00it/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:01<00:00, 98.63it/s]\n", + "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD with parameter groups 59 weight, 62 weight (no decay), 62 bias\n", + "\u001b[34m\u001b[1malbumentations: \u001b[0mversion 1.0.3 required by YOLOv5, but version 0.1.12 is currently installed\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '../datasets/coco128/labels/train2017' images and labels...128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 2021.98it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: ../datasets/coco128/labels/train2017.cache\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 273.58it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning '../datasets/coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 506004.63it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:01<00:00, 121.71it/s]\n", + "[W pthreadpool-cpp.cc:90] Warning: Leaking Caffe2 thread-pool after fork. (function pthreadpool)\n", + "[W pthreadpool-cpp.cc:90] Warning: Leaking Caffe2 thread-pool after fork. (function pthreadpool)\n", "Plotting labels... \n", "\n", - "\u001b[34m\u001b[1mautoanchor: \u001b[0mAnalyzing anchors... anchors/target = 4.26, Best Possible Recall (BPR) = 0.9946\n", + "\u001b[34m\u001b[1mautoanchor: \u001b[0mAnalyzing anchors... anchors/target = 4.27, Best Possible Recall (BPR) = 0.9935\n", "Image sizes 640 train, 640 val\n", "Using 2 dataloader workers\n", "Logging results to runs/train/exp\n", "Starting training for 3 epochs...\n", "\n", - " Epoch gpu_mem box obj cls total labels img_size\n", - " 0/2 10.8G 0.04226 0.06067 0.02005 0.123 158 640: 100% 8/8 [00:05<00:00, 1.45it/s]\n", - " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:04<00:00, 1.17s/it]\n", - " all 128 929 0.633 0.641 0.668 0.438\n", + " Epoch gpu_mem box obj cls labels img_size\n", + " 0/2 3.64G 0.0441 0.06646 0.02229 290 640: 100% 8/8 [00:04<00:00, 1.93it/s]\n", + " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:01<00:00, 3.45it/s]\n", + " all 128 929 0.696 0.562 0.644 0.419\n", "\n", - " Epoch gpu_mem box obj cls total labels img_size\n", - " 1/2 6.66G 0.04571 0.06615 0.01952 0.1314 164 640: 100% 8/8 [00:01<00:00, 5.10it/s]\n", - " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:01<00:00, 3.88it/s]\n", - " all 128 929 0.614 0.661 0.67 0.438\n", + " Epoch gpu_mem box obj cls labels img_size\n", + " 1/2 5.04G 0.04573 0.06289 0.021 226 640: 100% 8/8 [00:01<00:00, 5.46it/s]\n", + " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:01<00:00, 3.16it/s]\n", + " all 128 929 0.71 0.567 0.654 0.424\n", "\n", - " Epoch gpu_mem box obj cls total labels img_size\n", - " 2/2 6.66G 0.04542 0.07179 0.01861 0.1358 191 640: 100% 8/8 [00:01<00:00, 5.40it/s]\n", - " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:02<00:00, 1.43it/s]\n", - " all 128 929 0.636 0.652 0.67 0.439\n", - "3 epochs completed in 0.007 hours.\n", + " Epoch gpu_mem box obj cls labels img_size\n", + " 2/2 5.04G 0.04542 0.0715 0.02028 242 640: 100% 8/8 [00:01<00:00, 5.12it/s]\n", + " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:02<00:00, 1.46it/s]\n", + " all 128 929 0.731 0.563 0.658 0.427\n", + "3 epochs completed in 0.006 hours.\n", "\n", "Optimizer stripped from runs/train/exp/weights/last.pt, 
14.8MB\n", "Optimizer stripped from runs/train/exp/weights/best.pt, 14.8MB\n" From 1a10b0ecd2aa44d95436b1a343b6b2242ba5c9f5 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 30 Jul 2021 01:04:36 +0200 Subject: [PATCH 0450/1976] Created using Colaboratory --- tutorial.ipynb | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 3f3f73ad4443..8d9c3f8b7a15 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -585,7 +585,7 @@ "\n", "`detect.py` runs YOLOv5 inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/detect`. Example inference sources are:\n", "\n", - " " + " " ] }, { @@ -627,7 +627,7 @@ }, "source": [ "        \n", - "" + "" ] }, { @@ -1028,7 +1028,7 @@ "\n", "During training you will see live updates at [https://wandb.ai/home](https://wandb.ai/home?utm_campaign=repo_yolo_notebook), and you can create and share detailed [Reports](https://wandb.ai/glenn-jocher/yolov5_tutorial/reports/YOLOv5-COCO128-Tutorial-Results--VmlldzozMDI5OTY) of your results. For more information see the [YOLOv5 Weights & Biases Tutorial](https://github.com/ultralytics/yolov5/issues/1289). \n", "\n", - "" + "" ] }, { @@ -1057,7 +1057,7 @@ "plot_results('path/to/results.csv') # plot 'results.csv' as 'results.png'\n", "```\n", "\n", - "

\"COCO128

" + "\"COCO128" ] }, { From 8d3c3ef45ce1d530aa3751f6187f18cfd9c40791 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 30 Jul 2021 01:35:39 +0200 Subject: [PATCH 0451/1976] Fix weight decay comment (#4228) --- train.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/train.py b/train.py index 250342acff18..cf50a5d553e3 100644 --- a/train.py +++ b/train.py @@ -128,9 +128,9 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary for v in model.modules(): if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter): # bias g2.append(v.bias) - if isinstance(v, nn.BatchNorm2d): # weight with decay + if isinstance(v, nn.BatchNorm2d): # weight (no decay) g0.append(v.weight) - elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter): # weight without decay + elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter): # weight (with decay) g1.append(v.weight) if opt.adam: From 94686575024f055e603c1b20a36dcbfb1418c3fc Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 30 Jul 2021 16:00:47 +0200 Subject: [PATCH 0452/1976] Update profiler (#4236) --- utils/torch_utils.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index d86267b26356..55a5fd7875bb 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -22,6 +22,8 @@ import thop # for FLOPs computation except ImportError: thop = None + +logging.basicConfig(format="%(message)s", level=logging.INFO) LOGGER = logging.getLogger(__name__) @@ -103,11 +105,10 @@ def profile(x, ops, n=100, device=None): # m2 = nn.SiLU() # profile(x, [m1, m2], n=100) # profile speed over 100 iterations - device = device or torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') + device = device or select_device() x = x.to(device) x.requires_grad = True - print(torch.__version__, device.type, torch.cuda.get_device_properties(0) if device.type == 'cuda' else '') - print(f"\n{'Params':>12s}{'GFLOPs':>12s}{'forward (ms)':>16s}{'backward (ms)':>16s}{'input':>24s}{'output':>24s}") + print(f"{'Params':>12s}{'GFLOPs':>12s}{'forward (ms)':>16s}{'backward (ms)':>16s}{'input':>24s}{'output':>24s}") for m in ops if isinstance(ops, list) else [ops]: m = m.to(device) if hasattr(m, 'to') else m # device m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m # type From bceb57b910cfd3ce6ad77494782f73a380345d43 Mon Sep 17 00:00:00 2001 From: IneovaAI <67843470+IneovaAI@users.noreply.github.com> Date: Fri, 30 Jul 2021 17:39:48 +0200 Subject: [PATCH 0453/1976] Add `python train.py --freeze N` argument (#4238) * Add freeze as an argument I train on different platforms and sometimes I want to freeze some layers. I have to go into the code and change it and also keep track of how many layers I froze on what platform. Please add the number of layers to freeze as an argument in future versions thanks. 
* Update train.py * Update train.py * Cleanup Co-authored-by: Glenn Jocher --- train.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/train.py b/train.py index cf50a5d553e3..1d3404ffc414 100644 --- a/train.py +++ b/train.py @@ -53,9 +53,9 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary opt, device, ): - save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, = \ + save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze, = \ Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \ - opt.resume, opt.noval, opt.nosave, opt.workers + opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze # Directories w = save_dir / 'weights' # weights dir @@ -111,7 +111,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary model = Model(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create # Freeze - freeze = [] # parameter names to freeze (full or partial) + freeze = [f'model.{x}.' for x in range(freeze)] # layers to freeze for k, v in model.named_parameters(): v.requires_grad = True # train all layers if any(x in k for x in freeze): @@ -442,6 +442,7 @@ def parse_opt(known=False): parser.add_argument('--save_period', type=int, default=-1, help='Log model after every "save_period" epoch') parser.add_argument('--artifact_alias', type=str, default="latest", help='version of dataset artifact to be used') parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify') + parser.add_argument('--freeze', type=int, default=0, help='Number of layers to freeze. backbone=10, all=24') opt = parser.parse_known_args()[0] if known else parser.parse_args() return opt From d8f18834a246cfe3589406635c7e990f8043130a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 30 Jul 2021 18:17:19 +0200 Subject: [PATCH 0454/1976] Update `profile()` for CUDA Memory allocation (#4239) * Update profile() * Update profile() * Update profile() * Update profile() * Update profile() * Update profile() * Update profile() * Update profile() * Update profile() * Update profile() * Update profile() * Update profile() * Cleanup --- tutorial.ipynb | 4 +-- utils/torch_utils.py | 76 ++++++++++++++++++++++++++------------------ 2 files changed, 47 insertions(+), 33 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 8d9c3f8b7a15..b16506275288 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -1172,11 +1172,11 @@ }, "source": [ "# Profile\n", - "from utils.torch_utils import profile \n", + "from utils.torch_utils import profile\n", "\n", "m1 = lambda x: x * torch.sigmoid(x)\n", "m2 = torch.nn.SiLU()\n", - "profile(x=torch.randn(16, 3, 640, 640), ops=[m1, m2], n=100)" + "results = profile(input=torch.randn(16, 3, 640, 640), ops=[m1, m2], n=100)" ], "execution_count": null, "outputs": [] diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 55a5fd7875bb..4956cf95d1ca 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -98,42 +98,56 @@ def time_sync(): return time.time() -def profile(x, ops, n=100, device=None): - # profile a pytorch module or list of modules. 
Example usage: - # x = torch.randn(16, 3, 640, 640) # input +def profile(input, ops, n=10, device=None): + # YOLOv5 speed/memory/FLOPs profiler + # + # Usage: + # input = torch.randn(16, 3, 640, 640) # m1 = lambda x: x * torch.sigmoid(x) # m2 = nn.SiLU() - # profile(x, [m1, m2], n=100) # profile speed over 100 iterations + # profile(input, [m1, m2], n=100) # profile over 100 iterations + results = [] device = device or select_device() - x = x.to(device) - x.requires_grad = True - print(f"{'Params':>12s}{'GFLOPs':>12s}{'forward (ms)':>16s}{'backward (ms)':>16s}{'input':>24s}{'output':>24s}") - for m in ops if isinstance(ops, list) else [ops]: - m = m.to(device) if hasattr(m, 'to') else m # device - m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m # type - dtf, dtb, t = 0., 0., [0., 0., 0.] # dt forward, backward - try: - flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # GFLOPs - except: - flops = 0 - - for _ in range(n): - t[0] = time_sync() - y = m(x) - t[1] = time_sync() + print(f"{'Params':>12s}{'GFLOPs':>12s}{'GPU_mem (GB)':>14s}{'forward (ms)':>14s}{'backward (ms)':>14s}" + f"{'input':>24s}{'output':>24s}") + + for x in input if isinstance(input, list) else [input]: + x = x.to(device) + x.requires_grad = True + for m in ops if isinstance(ops, list) else [ops]: + m = m.to(device) if hasattr(m, 'to') else m # device + m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m + tf, tb, t = 0., 0., [0., 0., 0.] # dt forward, backward try: - _ = y.sum().backward() - t[2] = time_sync() - except: # no backward method - t[2] = float('nan') - dtf += (t[1] - t[0]) * 1000 / n # ms per op forward - dtb += (t[2] - t[1]) * 1000 / n # ms per op backward - - s_in = tuple(x.shape) if isinstance(x, torch.Tensor) else 'list' - s_out = tuple(y.shape) if isinstance(y, torch.Tensor) else 'list' - p = sum(list(x.numel() for x in m.parameters())) if isinstance(m, nn.Module) else 0 # parameters - print(f'{p:12}{flops:12.4g}{dtf:16.4g}{dtb:16.4g}{str(s_in):>24s}{str(s_out):>24s}') + flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # GFLOPs + except: + flops = 0 + + try: + for _ in range(n): + t[0] = time_sync() + y = m(x) + t[1] = time_sync() + try: + _ = (sum([yi.sum() for yi in y]) if isinstance(y, list) else y).sum().backward() + t[2] = time_sync() + except Exception as e: # no backward method + print(e) + t[2] = float('nan') + tf += (t[1] - t[0]) * 1000 / n # ms per op forward + tb += (t[2] - t[1]) * 1000 / n # ms per op backward + mem = torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0 # (GB) + s_in = tuple(x.shape) if isinstance(x, torch.Tensor) else 'list' + s_out = tuple(y.shape) if isinstance(y, torch.Tensor) else 'list' + p = sum(list(x.numel() for x in m.parameters())) if isinstance(m, nn.Module) else 0 # parameters + print(f'{p:12}{flops:12.4g}{mem:>14.3f}{tf:14.4g}{tb:14.4g}{str(s_in):>24s}{str(s_out):>24s}') + results.append([p, flops, mem, tf, tb, s_in, s_out]) + except Exception as e: + print(e) + results.append(None) + torch.cuda.empty_cache() + return results def is_parallel(model): From b74929c910f9cd99d2ece587e57bce1ae000d3ba Mon Sep 17 00:00:00 2001 From: Kalen Michael Date: Sun, 1 Aug 2021 00:18:07 +0200 Subject: [PATCH 0455/1976] Add `train.py` and `val.py` callbacks (#4220) * added callbacks * Update callbacks.py * Update train.py * Update val.py * Fix CamlCase add staticmethod * Refactor logger into callbacks * Cleanup * New callback 
on_val_image_end() * Add curves and results images to TensorBoard Co-authored-by: Glenn Jocher --- train.py | 29 ++++--- utils/callbacks.py | 176 ++++++++++++++++++++++++++++++++++++++ utils/general.py | 5 ++ utils/loggers/__init__.py | 45 +++++----- utils/plots.py | 6 +- val.py | 10 +-- 6 files changed, 230 insertions(+), 41 deletions(-) create mode 100644 utils/callbacks.py diff --git a/train.py b/train.py index 1d3404ffc414..d4a5495d3b3b 100644 --- a/train.py +++ b/train.py @@ -34,7 +34,7 @@ from utils.datasets import create_dataloader from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \ strip_optimizer, get_latest_run, check_dataset, check_file, check_git_status, check_img_size, \ - check_requirements, print_mutation, set_logging, one_cycle, colorstr + check_requirements, print_mutation, set_logging, one_cycle, colorstr, methods from utils.downloads import attempt_download from utils.loss import ComputeLoss from utils.plots import plot_labels, plot_evolution @@ -42,6 +42,7 @@ from utils.loggers.wandb.wandb_utils import check_wandb_resume from utils.metrics import fitness from utils.loggers import Loggers +from utils.callbacks import Callbacks LOGGER = logging.getLogger(__name__) LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html @@ -52,6 +53,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary opt, device, + callbacks=Callbacks() ): save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze, = \ Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \ @@ -77,12 +79,16 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Loggers if RANK in [-1, 0]: - loggers = Loggers(save_dir, weights, opt, hyp, LOGGER).start() # loggers dict + loggers = Loggers(save_dir, weights, opt, hyp, LOGGER) # loggers instance if loggers.wandb: data_dict = loggers.wandb.data_dict if resume: weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp + # Register actions + for k in methods(loggers): + callbacks.register_action(k, callback=getattr(loggers, k)) + # Config plots = not evolve # create plots cuda = device.type != 'cpu' @@ -215,13 +221,15 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # cf = torch.bincount(c.long(), minlength=nc) + 1. 
# frequency # model._initialize_biases(cf.to(device)) if plots: - plot_labels(labels, names, save_dir, loggers) + plot_labels(labels, names, save_dir) # Anchors if not opt.noautoanchor: check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz) model.half().float() # pre-reduce anchor precision + callbacks.on_pretrain_routine_end() + # DDP mode if cuda and RANK != -1: model = DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK) @@ -329,8 +337,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G' # (GB) pbar.set_description(('%10s' * 2 + '%10.4g' * 5) % ( f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1])) - loggers.on_train_batch_end(ni, model, imgs, targets, paths, plots) - + callbacks.on_train_batch_end(ni, model, imgs, targets, paths, plots) # end batch ------------------------------------------------------------------------------------------------ # Scheduler @@ -339,7 +346,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary if RANK in [-1, 0]: # mAP - loggers.on_train_epoch_end(epoch) + callbacks.on_train_epoch_end(epoch=epoch) ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride', 'class_weights']) final_epoch = epoch + 1 == epochs if not noval or final_epoch: # Calculate mAP @@ -353,14 +360,14 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary save_json=is_coco and final_epoch, verbose=nc < 50 and final_epoch, plots=plots and final_epoch, - loggers=loggers, + callbacks=callbacks, compute_loss=compute_loss) # Update best mAP fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95] if fi > best_fitness: best_fitness = fi - loggers.on_train_val_end(mloss, results, lr, epoch, best_fitness, fi) + callbacks.on_fit_epoch_end(mloss, results, lr, epoch, best_fitness, fi) # Save model if (not nosave) or (final_epoch and not evolve): # if save @@ -377,7 +384,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary if best_fitness == fi: torch.save(ckpt, best) del ckpt - loggers.on_model_save(last, epoch, final_epoch, best_fitness, fi) + callbacks.on_model_save(last, epoch, final_epoch, best_fitness, fi) # end epoch ---------------------------------------------------------------------------------------------------- # end training ----------------------------------------------------------------------------------------------------- @@ -400,7 +407,8 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary for f in last, best: if f.exists(): strip_optimizer(f) # strip optimizers - loggers.on_train_end(last, best, plots) + callbacks.on_train_end(last, best, plots, epoch) + LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}") torch.cuda.empty_cache() return results @@ -448,6 +456,7 @@ def parse_opt(known=False): def main(opt): + # Checks set_logging(RANK) if RANK in [-1, 0]: print(colorstr('train: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items())) diff --git a/utils/callbacks.py b/utils/callbacks.py new file mode 100644 index 000000000000..f23d57a6c043 --- /dev/null +++ b/utils/callbacks.py @@ -0,0 +1,176 @@ +#!/usr/bin/env python + +class Callbacks: + """" + Handles all registered callbacks for YOLOv5 Hooks + """ + + _callbacks = { + 'on_pretrain_routine_start': [], + 'on_pretrain_routine_end': [], + + 'on_train_start': [], + 'on_train_epoch_start': [], + 'on_train_batch_start': [], + 'optimizer_step': [], + 'on_before_zero_grad': [], + 'on_train_batch_end': [], + 'on_train_epoch_end': [], 
+ + 'on_val_start': [], + 'on_val_batch_start': [], + 'on_val_image_end': [], + 'on_val_batch_end': [], + 'on_val_end': [], + + 'on_fit_epoch_end': [], # fit = train + val + 'on_model_save': [], + 'on_train_end': [], + + 'teardown': [], + } + + def __init__(self): + return + + def register_action(self, hook, name='', callback=None): + """ + Register a new action to a callback hook + + Args: + hook The callback hook name to register the action to + name The name of the action + callback The callback to fire + """ + assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}" + assert callable(callback), f"callback '{callback}' is not callable" + self._callbacks[hook].append({'name': name, 'callback': callback}) + + def get_registered_actions(self, hook=None): + """" + Returns all the registered actions by callback hook + + Args: + hook The name of the hook to check, defaults to all + """ + if hook: + return self._callbacks[hook] + else: + return self._callbacks + + @staticmethod + def run_callbacks(register, *args, **kwargs): + """ + Loop through the registered actions and fire all callbacks + """ + for logger in register: + # print(f"Running callbacks.{logger['callback'].__name__}()") + logger['callback'](*args, **kwargs) + + def on_pretrain_routine_start(self, *args, **kwargs): + """ + Fires all registered callbacks at the start of each pretraining routine + """ + self.run_callbacks(self._callbacks['on_pretrain_routine_start'], *args, **kwargs) + + def on_pretrain_routine_end(self, *args, **kwargs): + """ + Fires all registered callbacks at the end of each pretraining routine + """ + self.run_callbacks(self._callbacks['on_pretrain_routine_end'], *args, **kwargs) + + def on_train_start(self, *args, **kwargs): + """ + Fires all registered callbacks at the start of each training + """ + self.run_callbacks(self._callbacks['on_train_start'], *args, **kwargs) + + def on_train_epoch_start(self, *args, **kwargs): + """ + Fires all registered callbacks at the start of each training epoch + """ + self.run_callbacks(self._callbacks['on_train_epoch_start'], *args, **kwargs) + + def on_train_batch_start(self, *args, **kwargs): + """ + Fires all registered callbacks at the start of each training batch + """ + self.run_callbacks(self._callbacks['on_train_batch_start'], *args, **kwargs) + + def optimizer_step(self, *args, **kwargs): + """ + Fires all registered callbacks on each optimizer step + """ + self.run_callbacks(self._callbacks['optimizer_step'], *args, **kwargs) + + def on_before_zero_grad(self, *args, **kwargs): + """ + Fires all registered callbacks before zero grad + """ + self.run_callbacks(self._callbacks['on_before_zero_grad'], *args, **kwargs) + + def on_train_batch_end(self, *args, **kwargs): + """ + Fires all registered callbacks at the end of each training batch + """ + self.run_callbacks(self._callbacks['on_train_batch_end'], *args, **kwargs) + + def on_train_epoch_end(self, *args, **kwargs): + """ + Fires all registered callbacks at the end of each training epoch + """ + self.run_callbacks(self._callbacks['on_train_epoch_end'], *args, **kwargs) + + def on_val_start(self, *args, **kwargs): + """ + Fires all registered callbacks at the start of the validation + """ + self.run_callbacks(self._callbacks['on_val_start'], *args, **kwargs) + + def on_val_batch_start(self, *args, **kwargs): + """ + Fires all registered callbacks at the start of each validation batch + """ + self.run_callbacks(self._callbacks['on_val_batch_start'], *args, **kwargs) + + def 
on_val_image_end(self, *args, **kwargs): + """ + Fires all registered callbacks at the end of each val image + """ + self.run_callbacks(self._callbacks['on_val_image_end'], *args, **kwargs) + + def on_val_batch_end(self, *args, **kwargs): + """ + Fires all registered callbacks at the end of each validation batch + """ + self.run_callbacks(self._callbacks['on_val_batch_end'], *args, **kwargs) + + def on_val_end(self, *args, **kwargs): + """ + Fires all registered callbacks at the end of the validation + """ + self.run_callbacks(self._callbacks['on_val_end'], *args, **kwargs) + + def on_fit_epoch_end(self, *args, **kwargs): + """ + Fires all registered callbacks at the end of each fit (train+val) epoch + """ + self.run_callbacks(self._callbacks['on_fit_epoch_end'], *args, **kwargs) + + def on_model_save(self, *args, **kwargs): + """ + Fires all registered callbacks after each model save + """ + self.run_callbacks(self._callbacks['on_model_save'], *args, **kwargs) + + def on_train_end(self, *args, **kwargs): + """ + Fires all registered callbacks at the end of training + """ + self.run_callbacks(self._callbacks['on_train_end'], *args, **kwargs) + + def teardown(self, *args, **kwargs): + """ + Fires all registered callbacks before teardown + """ + self.run_callbacks(self._callbacks['teardown'], *args, **kwargs) diff --git a/utils/general.py b/utils/general.py index a414b391d24e..ed028d2b3765 100755 --- a/utils/general.py +++ b/utils/general.py @@ -67,6 +67,11 @@ def handler(*args, **kwargs): return handler +def methods(instance): + # Get class/instance methods + return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith("__")] + + def set_logging(rank=-1, verbose=True): logging.basicConfig( format="%(message)s", diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 06d562d60f99..5d4377d54155 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -29,10 +29,12 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, self.hyp = hyp self.logger = logger # for printing results to console self.include = include + self.keys = ['train/box_loss', 'train/obj_loss', 'train/cls_loss', # train loss + 'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', # metrics + 'val/box_loss', 'val/obj_loss', 'val/cls_loss', # val loss + 'x/lr0', 'x/lr1', 'x/lr2'] # params for k in LOGGERS: setattr(self, k, None) # init empty logger dictionary - - def start(self): self.csv = True # always log to csv # Message @@ -57,7 +59,11 @@ def start(self): else: self.wandb = None - return self + def on_pretrain_routine_end(self): + # Callback runs on pre-train routine end + paths = self.save_dir.glob('*labels*.jpg') # training labels + if self.wandb: + self.wandb.log({"Labels": [wandb.Image(str(x), caption=x.name) for x in paths]}) def on_train_batch_end(self, ni, model, imgs, targets, paths, plots): # Callback runs on train batch end @@ -78,8 +84,8 @@ def on_train_epoch_end(self, epoch): if self.wandb: self.wandb.current_epoch = epoch + 1 - def on_val_batch_end(self, pred, predn, path, names, im): - # Callback runs on train batch end + def on_val_image_end(self, pred, predn, path, names, im): + # Callback runs on val image end if self.wandb: self.wandb.val_one_image(pred, predn, path, names, im) @@ -89,25 +95,20 @@ def on_val_end(self): files = sorted(self.save_dir.glob('val*.jpg')) self.wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in files]}) - def on_train_val_end(self, mloss, 
results, lr, epoch, best_fitness, fi): - # Callback runs on val end during training + def on_fit_epoch_end(self, mloss, results, lr, epoch, best_fitness, fi): + # Callback runs at the end of each fit (train+val) epoch vals = list(mloss) + list(results) + lr - keys = ['train/box_loss', 'train/obj_loss', 'train/cls_loss', # train loss - 'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', # metrics - 'val/box_loss', 'val/obj_loss', 'val/cls_loss', # val loss - 'x/lr0', 'x/lr1', 'x/lr2'] # params - x = {k: v for k, v in zip(keys, vals)} # dict - + x = {k: v for k, v in zip(self.keys, vals)} # dict if self.csv: file = self.save_dir / 'results.csv' n = len(x) + 1 # number of cols - s = '' if file.exists() else (('%20s,' * n % tuple(['epoch'] + keys)).rstrip(',') + '\n') # add header + s = '' if file.exists() else (('%20s,' * n % tuple(['epoch'] + self.keys)).rstrip(',') + '\n') # add header with open(file, 'a') as f: f.write(s + ('%20.5g,' * n % tuple([epoch] + vals)).rstrip(',') + '\n') if self.tb: for k, v in x.items(): - self.tb.add_scalar(k, v, epoch) # TensorBoard + self.tb.add_scalar(k, v, epoch) if self.wandb: self.wandb.log(x) @@ -119,20 +120,22 @@ def on_model_save(self, last, epoch, final_epoch, best_fitness, fi): if ((epoch + 1) % self.opt.save_period == 0 and not final_epoch) and self.opt.save_period != -1: self.wandb.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi) - def on_train_end(self, last, best, plots): + def on_train_end(self, last, best, plots, epoch): # Callback runs on training end if plots: plot_results(dir=self.save_dir) # save results.png files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]] files = [(self.save_dir / f) for f in files if (self.save_dir / f).exists()] # filter + + if self.tb: + from PIL import Image + import numpy as np + for f in files: + self.tb.add_image(f.stem, np.asarray(Image.open(f)), epoch, dataformats='HWC') + if self.wandb: wandb.log({"Results": [wandb.Image(str(f), caption=f.name) for f in files]}) wandb.log_artifact(str(best if best.exists() else last), type='model', name='run_' + self.wandb.wandb_run.id + '_model', aliases=['latest', 'best', 'stripped']) self.wandb.finish_run() - - def log_images(self, paths): - # Log images - if self.wandb: - self.wandb.log({"Labels": [wandb.Image(str(x), caption=x.name) for x in paths]}) diff --git a/utils/plots.py b/utils/plots.py index e13e316314dd..252e128168ee 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -281,7 +281,7 @@ def plot_study_txt(path='', x=None): # from utils.plots import *; plot_study_tx plt.savefig(str(Path(path).name) + '.png', dpi=300) -def plot_labels(labels, names=(), save_dir=Path(''), loggers=None): +def plot_labels(labels, names=(), save_dir=Path('')): # plot dataset labels print('Plotting labels... 
') c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes @@ -324,10 +324,6 @@ def plot_labels(labels, names=(), save_dir=Path(''), loggers=None): matplotlib.use('Agg') plt.close() - # loggers - if loggers: - loggers.log_images(save_dir.glob('*labels*.jpg')) - def plot_evolution(yaml_file='data/hyp.finetune.yaml'): # from utils.plots import *; plot_evolution() # Plot hyperparameter evolution results in evolve.txt diff --git a/val.py b/val.py index 86439b1380dc..58e8170da86c 100644 --- a/val.py +++ b/val.py @@ -25,7 +25,7 @@ from utils.metrics import ap_per_class, ConfusionMatrix from utils.plots import plot_images, output_to_target, plot_study_txt from utils.torch_utils import select_device, time_sync -from utils.loggers import Loggers +from utils.callbacks import Callbacks def save_one_txt(predn, save_conf, shape, file): @@ -97,7 +97,7 @@ def run(data, dataloader=None, save_dir=Path(''), plots=True, - loggers=Loggers(), + callbacks=Callbacks(), compute_loss=None, ): # Initialize/load model and set device @@ -213,7 +213,7 @@ def run(data, save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / (path.stem + '.txt')) if save_json: save_one_json(predn, jdict, path, class_map) # append to COCO-JSON dictionary - loggers.on_val_batch_end(pred, predn, path, names, img[si]) + callbacks.on_val_image_end(pred, predn, path, names, img[si]) # Plot images if plots and batch_i < 3: @@ -250,7 +250,7 @@ def run(data, # Plots if plots: confusion_matrix.plot(save_dir=save_dir, names=list(names.values())) - loggers.on_val_end() + callbacks.on_val_end() # Save JSON if save_json and len(jdict): @@ -282,7 +282,7 @@ def run(data, model.float() # for training if not training: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' - print(f"Results saved to {save_dir}{s}") + print(f"Results saved to {colorstr('bold', save_dir)}{s}") maps = np.zeros(nc) + map for i, c in enumerate(ap_class): maps[c] = ap[i] From cd540d8625bba8a05329ede3522046ee53eb349d Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Sun, 1 Aug 2021 15:36:53 +0530 Subject: [PATCH 0456/1976] W&B: suppress warnings (#4257) * Improve docstrings and run names * default wandb login prompt with timeout * return key * Update api_key check logic * Properly support zipped dataset feature * update docstring * Revert tuorial change * extend changes to log_dataset * add run name * bug fix * bug fix * Update comment * fix import check * remove unused import * Hardcore .yaml file extension * reduce code * Reformat using pycharm * Remove redundant try catch * More refactoring and bug fixes * retry * Reformat using pycharm * respect LOGGERS include list * call wandblogger.log instead of wandb.log Co-authored-by: Glenn Jocher --- utils/loggers/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 5d4377d54155..be76d0c17f1b 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -134,7 +134,8 @@ def on_train_end(self, last, best, plots, epoch): self.tb.add_image(f.stem, np.asarray(Image.open(f)), epoch, dataformats='HWC') if self.wandb: - wandb.log({"Results": [wandb.Image(str(f), caption=f.name) for f in files]}) + self.wandb.log({"Results": [wandb.Image(str(f), caption=f.name) for f in files]}) + # Calling wandb.log. 
TODO: Refactor this into WandbLogger.log_model wandb.log_artifact(str(best if best.exists() else last), type='model', name='run_' + self.wandb.wandb_run.id + '_model', aliases=['latest', 'best', 'stripped']) From 53bfcbe0ae48bb31c80378d8487a2b85c6bcc702 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 1 Aug 2021 20:36:00 +0200 Subject: [PATCH 0457/1976] Update AP calculation (#4260) * Update AP calculation * Cleanup * Remove original --- val.py | 41 +++++++++++++++++++++-------------------- 1 file changed, 21 insertions(+), 20 deletions(-) diff --git a/val.py b/val.py index 58e8170da86c..4c1d7d26b0de 100644 --- a/val.py +++ b/val.py @@ -50,26 +50,27 @@ def save_one_json(predn, jdict, path, class_map): 'score': round(p[4], 5)}) -def process_batch(predictions, labels, iouv): - # Evaluate 1 batch of predictions - correct = torch.zeros(predictions.shape[0], len(iouv), dtype=torch.bool, device=iouv.device) - detected = [] # label indices - tcls, pcls = labels[:, 0], predictions[:, 5] - nl = labels.shape[0] # number of labels - for cls in torch.unique(tcls): - ti = (cls == tcls).nonzero().view(-1) # label indices - pi = (cls == pcls).nonzero().view(-1) # prediction indices - if pi.shape[0]: # find detections - ious, i = box_iou(predictions[pi, 0:4], labels[ti, 1:5]).max(1) # best ious, indices - detected_set = set() - for j in (ious > iouv[0]).nonzero(): - d = ti[i[j]] # detected label - if d.item() not in detected_set: - detected_set.add(d.item()) - detected.append(d) # append detections - correct[pi[j]] = ious[j] > iouv # iou_thres is 1xn - if len(detected) == nl: # all labels already located in image - break +def process_batch(detections, labels, iouv): + """ + Return correct predictions matrix. Both sets of boxes are in (x1, y1, x2, y2) format. + Arguments: + detections (Array[N, 6]), x1, y1, x2, y2, conf, class + labels (Array[M, 5]), class, x1, y1, x2, y2 + Returns: + correct (Array[N, 10]), for 10 IoU levels + """ + correct = torch.zeros(detections.shape[0], iouv.shape[0], dtype=torch.bool, device=iouv.device) + iou = box_iou(labels[:, 1:], detections[:, :4]) + x = torch.where((iou >= iouv[0]) & (labels[:, 0:1] == detections[:, 5])) # IoU above threshold and classes match + if x[0].shape[0]: + matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() # [label, detection, iou] + if x[0].shape[0] > 1: + matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 1], return_index=True)[1]] + # matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 0], return_index=True)[1]] + matches = torch.Tensor(matches).to(iouv.device) + correct[matches[:, 1].long()] = matches[:, 2:3] >= iouv return correct From 306fc0119a94915b91fb6ca6f46f2d50437152e3 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 2 Aug 2021 12:42:23 +0200 Subject: [PATCH 0458/1976] Update Autoshape forward header (#4271) --- models/common.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/models/common.py b/models/common.py index 24f02c2a584c..8319552de5f0 100644 --- a/models/common.py +++ b/models/common.py @@ -232,10 +232,10 @@ def autoshape(self): @torch.no_grad() def forward(self, imgs, size=640, augment=False, profile=False): # Inference from various sources. 
For height=640, width=1280, RGB images example inputs are: - # filename: imgs = 'data/images/zidane.jpg' # str or PosixPath + # file: imgs = 'data/images/zidane.jpg' # str or PosixPath # URI: = 'https://ultralytics.com/images/zidane.jpg' # OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(640,1280,3) - # PIL: = Image.open('image.jpg') # HWC x(640,1280,3) + # PIL: = Image.open('image.jpg') or ImageGrab.grab() # HWC x(640,1280,3) # numpy: = np.zeros((640,1280,3)) # HWC # torch: = torch.zeros(16,3,320,640) # BCHW (scaled to size=640, 0-1 values) # multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] # list of images From 9c6732f61c5dc013114e6797905c5e3410cd8201 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 2 Aug 2021 15:13:55 +0200 Subject: [PATCH 0459/1976] Update variables (#4273) --- models/common.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/models/common.py b/models/common.py index 8319552de5f0..e6b7b5182283 100644 --- a/models/common.py +++ b/models/common.py @@ -30,7 +30,7 @@ def autopad(k, p=None): # kernel, padding def DWConv(c1, c2, k=1, s=1, act=True): - # Depthwise convolution + # Depth-wise convolution return Conv(c1, c2, k, s, g=math.gcd(c1, c2), act=act) @@ -183,11 +183,11 @@ def __init__(self, gain=2): self.gain = gain def forward(self, x): - N, C, H, W = x.size() # assert (H / s == 0) and (W / s == 0), 'Indivisible gain' + b, c, h, w = x.size() # assert (h / s == 0) and (W / s == 0), 'Indivisible gain' s = self.gain - x = x.view(N, C, H // s, s, W // s, s) # x(1,64,40,2,40,2) + x = x.view(b, c, h // s, s, w // s, s) # x(1,64,40,2,40,2) x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # x(1,2,2,64,40,40) - return x.view(N, C * s * s, H // s, W // s) # x(1,256,40,40) + return x.view(b, c * s * s, h // s, w // s) # x(1,256,40,40) class Expand(nn.Module): @@ -197,11 +197,11 @@ def __init__(self, gain=2): self.gain = gain def forward(self, x): - N, C, H, W = x.size() # assert C / s ** 2 == 0, 'Indivisible gain' + b, c, h, w = x.size() # assert C / s ** 2 == 0, 'Indivisible gain' s = self.gain - x = x.view(N, s, s, C // s ** 2, H, W) # x(1,2,2,16,80,80) + x = x.view(b, s, s, c // s ** 2, h, w) # x(1,2,2,16,80,80) x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # x(1,16,80,2,80,2) - return x.view(N, C // s ** 2, H * s, W * s) # x(1,16,160,160) + return x.view(b, c // s ** 2, h * s, w * s) # x(1,16,160,160) class Concat(nn.Module): From 587c4b4b81fb4e9423e33a2a235731742386d03c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 2 Aug 2021 15:36:30 +0200 Subject: [PATCH 0460/1976] Add `DWConvClass()` (#4274) * Add `DWConvClass()` * Cleanup * Cleanup2 --- models/common.py | 11 +++++++++-- models/experimental.py | 2 +- models/yolo.py | 4 ++-- 3 files changed, 12 insertions(+), 5 deletions(-) diff --git a/models/common.py b/models/common.py index e6b7b5182283..30e7319f98a0 100644 --- a/models/common.py +++ b/models/common.py @@ -30,7 +30,7 @@ def autopad(k, p=None): # kernel, padding def DWConv(c1, c2, k=1, s=1, act=True): - # Depth-wise convolution + # Depth-wise convolution function return Conv(c1, c2, k, s, g=math.gcd(c1, c2), act=act) @@ -45,10 +45,17 @@ def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, k def forward(self, x): return self.act(self.bn(self.conv(x))) - def fuseforward(self, x): + def forward_fuse(self, x): return self.act(self.conv(x)) +class DWConvClass(Conv): + # Depth-wise convolution class + def __init__(self, c1, c2, k=1, s=1, act=True): # ch_in, ch_out, kernel, 
stride, padding, groups + super().__init__(c1, c2, k, s, act) + self.conv = nn.Conv2d(c1, c2, k, s, autopad(k), groups=math.gcd(c1, c2), bias=False) + + class TransformerLayer(nn.Module): # Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance) def __init__(self, c, num_heads): diff --git a/models/experimental.py b/models/experimental.py index 276ca954b173..581c7b14b61e 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -72,7 +72,7 @@ def forward(self, x): class MixConv2d(nn.Module): - # Mixed Depthwise Conv https://arxiv.org/abs/1907.09595 + # Mixed Depth-wise Conv https://arxiv.org/abs/1907.09595 def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True): super().__init__() groups = len(k) diff --git a/models/yolo.py b/models/yolo.py index 2e7a20f813e2..9f05c8329f38 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -202,10 +202,10 @@ def _print_biases(self): def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers LOGGER.info('Fusing layers... ') for m in self.model.modules(): - if type(m) is Conv and hasattr(m, 'bn'): + if isinstance(m, (Conv, DWConvClass)) and hasattr(m, 'bn'): m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv delattr(m, 'bn') # remove batchnorm - m.forward = m.fuseforward # update forward + m.forward = m.forward_fuse # update forward self.info() return self From 388016e9e3fd84255444356b509862b935105d97 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 2 Aug 2021 15:48:53 +0200 Subject: [PATCH 0461/1976] Update 'results saved to' string (#4275) --- detect.py | 2 +- export.py | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/detect.py b/detect.py index 80517f342a41..88d1d9ca3800 100644 --- a/detect.py +++ b/detect.py @@ -189,7 +189,7 @@ def run(weights='yolov5s.pt', # model.pt path(s) if save_txt or save_img: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' - print(f"Results saved to {save_dir}{s}") + print(f"Results saved to {colorstr('bold', save_dir)}{s}") if update: strip_optimizer(weights) # update model (to fix SourceChangeWarning) diff --git a/export.py b/export.py index c98e92d972c6..83e293b72e73 100644 --- a/export.py +++ b/export.py @@ -155,7 +155,9 @@ def run(weights='./yolov5s.pt', # weights path export_coreml(model, img, file) # Finish - print(f'\nExport complete ({time.time() - t:.2f}s). 
Visualize with https://github.com/lutzroeder/netron.') + print(f'\nExport complete ({time.time() - t:.2f}s)' + f"Results saved to {colorstr('bold', file.parent.resolve())}\n" + f'Visualize with https://netron.app') def parse_opt(): From 621caea53c393ca8b46261d369a6314f7d2736d7 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Mon, 2 Aug 2021 22:11:52 +0530 Subject: [PATCH 0462/1976] W&B: Fix sweep bug (#4276) * Improve docstrings and run names * default wandb login prompt with timeout * return key * Update api_key check logic * Properly support zipped dataset feature * update docstring * Revert tuorial change * extend changes to log_dataset * add run name * bug fix * bug fix * Update comment * fix import check * remove unused import * Hardcore .yaml file extension * reduce code * Reformat using pycharm * Remove redundant try catch * More refactoring and bug fixes * retry * Reformat using pycharm * respect LOGGERS include list * call wandblogger.log instead of wandb.log * Fix Sweep bug Co-authored-by: Glenn Jocher --- utils/loggers/wandb/sweep.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/loggers/wandb/sweep.py b/utils/loggers/wandb/sweep.py index 8e952d03c085..2dcda508eb50 100644 --- a/utils/loggers/wandb/sweep.py +++ b/utils/loggers/wandb/sweep.py @@ -4,7 +4,7 @@ import wandb FILE = Path(__file__).absolute() -sys.path.append(FILE.parents[2].as_posix()) # add utils/ to path +sys.path.append(FILE.parents[3].as_posix()) # add utils/ to path from train import train, parse_opt from utils.general import increment_path From 2d99063201105d992f8b0dada3c9c7a206e582e7 Mon Sep 17 00:00:00 2001 From: junji hashimoto Date: Tue, 3 Aug 2021 01:47:24 +0900 Subject: [PATCH 0463/1976] Feature `python train.py --cache disk` (#4049) * Add cache-on-disk and cache-directory to cache images on disk * Fix load_image with cache_on_disk * Add no_cache flag for load_image * Revert the parts('logging' and a new line) that do not need to be modified * Add the assertion for shapes of cached images * Add a suffix string for cached images * Fix boundary-error of letterbox for load_mosaic * Add prefix as cache-key of cache-on-disk * Update cache-function on disk * Add psutil in requirements.txt * Update train.py * Cleanup1 * Cleanup2 * Skip existing npy * Include re-space * Export return character fix Co-authored-by: Glenn Jocher --- export.py | 4 ++-- train.py | 8 ++++---- utils/datasets.py | 45 +++++++++++++++++++++++++++++---------------- 3 files changed, 35 insertions(+), 22 deletions(-) diff --git a/export.py b/export.py index 83e293b72e73..cec85958b4a9 100644 --- a/export.py +++ b/export.py @@ -156,8 +156,8 @@ def run(weights='./yolov5s.pt', # weights path # Finish print(f'\nExport complete ({time.time() - t:.2f}s)' - f"Results saved to {colorstr('bold', file.parent.resolve())}\n" - f'Visualize with https://netron.app') + f"\nResults saved to {colorstr('bold', file.parent.resolve())}" + f'\nVisualize with https://netron.app') def parse_opt(): diff --git a/train.py b/train.py index d4a5495d3b3b..34bd8e73c290 100644 --- a/train.py +++ b/train.py @@ -201,7 +201,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Trainloader train_loader, dataset = create_dataloader(train_path, imgsz, batch_size // WORLD_SIZE, gs, single_cls, - hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=RANK, + hyp=hyp, augment=True, cache=opt.cache, rect=opt.rect, rank=RANK, workers=workers, image_weights=opt.image_weights, quad=opt.quad, prefix=colorstr('train: ')) mlc = 
np.concatenate(dataset.labels, 0)[:, 0].max() # max label class @@ -211,7 +211,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Process 0 if RANK in [-1, 0]: val_loader = create_dataloader(val_path, imgsz, batch_size // WORLD_SIZE * 2, gs, single_cls, - hyp=hyp, cache=opt.cache_images and not noval, rect=True, rank=-1, + hyp=hyp, cache=None if noval else opt.cache, rect=True, rank=-1, workers=workers, pad=0.5, prefix=colorstr('val: '))[0] @@ -389,7 +389,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # end epoch ---------------------------------------------------------------------------------------------------- # end training ----------------------------------------------------------------------------------------------------- if RANK in [-1, 0]: - LOGGER.info(f'{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.\n') + LOGGER.info(f'\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.') if not evolve: if is_coco: # COCO dataset for m in [last, best] if best.exists() else [last]: # speed, mAP tests @@ -430,7 +430,7 @@ def parse_opt(known=False): parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check') parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations') parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') - parser.add_argument('--cache-images', action='store_true', help='cache images for faster training') + parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"') parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training') parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%') diff --git a/utils/datasets.py b/utils/datasets.py index fffe39a61459..1c780cdbac4b 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -455,16 +455,25 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM) - self.imgs = [None] * n + self.imgs, self.img_npy = [None] * n, [None] * n if cache_images: + if cache_images == 'disk': + self.im_cache_dir = Path(Path(self.img_files[0]).parent.as_posix() + '_npy') + self.img_npy = [self.im_cache_dir / Path(f).with_suffix('.npy').name for f in self.img_files] + self.im_cache_dir.mkdir(parents=True, exist_ok=True) gb = 0 # Gigabytes of cached images self.img_hw0, self.img_hw = [None] * n, [None] * n results = ThreadPool(NUM_THREADS).imap(lambda x: load_image(*x), zip(repeat(self), range(n))) pbar = tqdm(enumerate(results), total=n) for i, x in pbar: - self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # img, hw_original, hw_resized = load_image(self, i) - gb += self.imgs[i].nbytes - pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB)' + if cache_images == 'disk': + if not self.img_npy[i].exists(): + np.save(self.img_npy[i].as_posix(), x[0]) + gb += self.img_npy[i].stat().st_size + else: + self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # im, hw_orig, hw_resized = load_image(self, i) + gb += self.imgs[i].nbytes + pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB {cache_images})' pbar.close() def cache_labels(self, path=Path('./labels.cache'), prefix=''): @@ -618,21 +627,25 @@ def collate_fn4(batch): # Ancillary functions -------------------------------------------------------------------------------------------------- -def load_image(self, index): - # loads 1 image from dataset, returns img, original hw, resized hw - img = self.imgs[index] - if img is None: # not cached - path = self.img_files[index] - img = cv2.imread(path) # BGR - assert img is not None, 'Image Not Found ' + path - h0, w0 = img.shape[:2] # orig hw +def load_image(self, i): + # loads 1 image from dataset index 'i', returns im, original hw, resized hw + im = self.imgs[i] + if im is None: # not cached in ram + npy = self.img_npy[i] + if npy and npy.exists(): # load npy + im = np.load(npy) + else: # read image + path = self.img_files[i] + im = cv2.imread(path) # BGR + assert im is not None, 'Image Not Found ' + path + h0, w0 = im.shape[:2] # orig hw r = self.img_size / max(h0, w0) # ratio if r != 1: # if sizes are not equal - img = cv2.resize(img, (int(w0 * r), int(h0 * r)), - interpolation=cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR) - return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized + im = cv2.resize(im, (int(w0 * r), int(h0 * r)), + interpolation=cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR) + return im, (h0, w0), im.shape[:2] # im, hw_original, hw_resized else: - return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized + return self.imgs[i], self.img_hw0[i], self.img_hw[i] # im, hw_original, hw_resized def load_mosaic(self, index): From 771ac6c53ded79c408ed8bd99f7604b7077b7d77 Mon Sep 17 00:00:00 2001 From: imyhxy Date: Tue, 3 Aug 2021 19:11:42 +0800 Subject: [PATCH 0464/1976] Fixed logging level in distributed mode (#4284) Co-authored-by: 
fkwong --- utils/torch_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 4956cf95d1ca..628f672a010d 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -23,7 +23,6 @@ except ImportError: thop = None -logging.basicConfig(format="%(message)s", level=logging.INFO) LOGGER = logging.getLogger(__name__) @@ -108,6 +107,7 @@ def profile(input, ops, n=10, device=None): # profile(input, [m1, m2], n=100) # profile over 100 iterations results = [] + logging.basicConfig(format="%(message)s", level=logging.INFO) device = device or select_device() print(f"{'Params':>12s}{'GFLOPs':>12s}{'GPU_mem (GB)':>14s}{'forward (ms)':>14s}{'backward (ms)':>14s}" f"{'input':>24s}{'output':>24s}") From 4103ce9ad0393cc27f6c80457894ad7be0cb1f0d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 4 Aug 2021 13:17:35 +0200 Subject: [PATCH 0465/1976] Simplify callbacks (#4289) --- utils/callbacks.py | 41 ++++++++++++++++++++--------------------- 1 file changed, 20 insertions(+), 21 deletions(-) diff --git a/utils/callbacks.py b/utils/callbacks.py index f23d57a6c043..a204ec1ceaaf 100644 --- a/utils/callbacks.py +++ b/utils/callbacks.py @@ -58,12 +58,11 @@ def get_registered_actions(self, hook=None): else: return self._callbacks - @staticmethod - def run_callbacks(register, *args, **kwargs): + def run_callbacks(self, hook, *args, **kwargs): """ Loop through the registered actions and fire all callbacks """ - for logger in register: + for logger in self._callbacks[hook]: # print(f"Running callbacks.{logger['callback'].__name__}()") logger['callback'](*args, **kwargs) @@ -71,106 +70,106 @@ def on_pretrain_routine_start(self, *args, **kwargs): """ Fires all registered callbacks at the start of each pretraining routine """ - self.run_callbacks(self._callbacks['on_pretrain_routine_start'], *args, **kwargs) + self.run_callbacks('on_pretrain_routine_start', *args, **kwargs) def on_pretrain_routine_end(self, *args, **kwargs): """ Fires all registered callbacks at the end of each pretraining routine """ - self.run_callbacks(self._callbacks['on_pretrain_routine_end'], *args, **kwargs) + self.run_callbacks('on_pretrain_routine_end', *args, **kwargs) def on_train_start(self, *args, **kwargs): """ Fires all registered callbacks at the start of each training """ - self.run_callbacks(self._callbacks['on_train_start'], *args, **kwargs) + self.run_callbacks('on_train_start', *args, **kwargs) def on_train_epoch_start(self, *args, **kwargs): """ Fires all registered callbacks at the start of each training epoch """ - self.run_callbacks(self._callbacks['on_train_epoch_start'], *args, **kwargs) + self.run_callbacks('on_train_epoch_start', *args, **kwargs) def on_train_batch_start(self, *args, **kwargs): """ Fires all registered callbacks at the start of each training batch """ - self.run_callbacks(self._callbacks['on_train_batch_start'], *args, **kwargs) + self.run_callbacks('on_train_batch_start', *args, **kwargs) def optimizer_step(self, *args, **kwargs): """ Fires all registered callbacks on each optimizer step """ - self.run_callbacks(self._callbacks['optimizer_step'], *args, **kwargs) + self.run_callbacks('optimizer_step', *args, **kwargs) def on_before_zero_grad(self, *args, **kwargs): """ Fires all registered callbacks before zero grad """ - self.run_callbacks(self._callbacks['on_before_zero_grad'], *args, **kwargs) + self.run_callbacks('on_before_zero_grad', *args, **kwargs) def on_train_batch_end(self, *args, **kwargs): """ Fires all registered 
callbacks at the end of each training batch """ - self.run_callbacks(self._callbacks['on_train_batch_end'], *args, **kwargs) + self.run_callbacks('on_train_batch_end', *args, **kwargs) def on_train_epoch_end(self, *args, **kwargs): """ Fires all registered callbacks at the end of each training epoch """ - self.run_callbacks(self._callbacks['on_train_epoch_end'], *args, **kwargs) + self.run_callbacks('on_train_epoch_end', *args, **kwargs) def on_val_start(self, *args, **kwargs): """ Fires all registered callbacks at the start of the validation """ - self.run_callbacks(self._callbacks['on_val_start'], *args, **kwargs) + self.run_callbacks('on_val_start', *args, **kwargs) def on_val_batch_start(self, *args, **kwargs): """ Fires all registered callbacks at the start of each validation batch """ - self.run_callbacks(self._callbacks['on_val_batch_start'], *args, **kwargs) + self.run_callbacks('on_val_batch_start', *args, **kwargs) def on_val_image_end(self, *args, **kwargs): """ Fires all registered callbacks at the end of each val image """ - self.run_callbacks(self._callbacks['on_val_image_end'], *args, **kwargs) + self.run_callbacks('on_val_image_end', *args, **kwargs) def on_val_batch_end(self, *args, **kwargs): """ Fires all registered callbacks at the end of each validation batch """ - self.run_callbacks(self._callbacks['on_val_batch_end'], *args, **kwargs) + self.run_callbacks('on_val_batch_end', *args, **kwargs) def on_val_end(self, *args, **kwargs): """ Fires all registered callbacks at the end of the validation """ - self.run_callbacks(self._callbacks['on_val_end'], *args, **kwargs) + self.run_callbacks('on_val_end', *args, **kwargs) def on_fit_epoch_end(self, *args, **kwargs): """ Fires all registered callbacks at the end of each fit (train+val) epoch """ - self.run_callbacks(self._callbacks['on_fit_epoch_end'], *args, **kwargs) + self.run_callbacks('on_fit_epoch_end', *args, **kwargs) def on_model_save(self, *args, **kwargs): """ Fires all registered callbacks after each model save """ - self.run_callbacks(self._callbacks['on_model_save'], *args, **kwargs) + self.run_callbacks('on_model_save', *args, **kwargs) def on_train_end(self, *args, **kwargs): """ Fires all registered callbacks at the end of training """ - self.run_callbacks(self._callbacks['on_train_end'], *args, **kwargs) + self.run_callbacks('on_train_end', *args, **kwargs) def teardown(self, *args, **kwargs): """ Fires all registered callbacks before teardown """ - self.run_callbacks(self._callbacks['teardown'], *args, **kwargs) + self.run_callbacks('teardown', *args, **kwargs) From e78aeac973ea3a2c58d7577453473e48f4e7a0f6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 4 Aug 2021 17:13:38 +0200 Subject: [PATCH 0466/1976] Evolve in CSV format (#4307) * Update evolution to CSV format * Update * Update * Update * Update * Update * reset args * reset args * reset args * plot_results() fix * Cleanup * Cleanup2 --- .dockerignore | 2 +- .gitignore | 1 - train.py | 32 ++++++++++++++----------- utils/general.py | 50 +++++++++++++++++++++++---------------- utils/loggers/__init__.py | 5 ++-- utils/plots.py | 50 +++++++++++++++++++-------------------- 6 files changed, 75 insertions(+), 65 deletions(-) diff --git a/.dockerignore b/.dockerignore index 9c9663f006ca..4248cb098cf4 100644 --- a/.dockerignore +++ b/.dockerignore @@ -8,7 +8,7 @@ coco storage.googleapis.com data/samples/* -**/results*.txt +**/results*.csv *.jpg # Neural Network weights 
----------------------------------------------------------------------------------------------- diff --git a/.gitignore b/.gitignore index b07134d097dd..e5d02af960af 100755 --- a/.gitignore +++ b/.gitignore @@ -30,7 +30,6 @@ data/* !data/images/bus.jpg !data/*.sh -results*.txt results*.csv # Datasets ------------------------------------------------------------------------------------------------------------- diff --git a/train.py b/train.py index 34bd8e73c290..a7d61c8c5411 100644 --- a/train.py +++ b/train.py @@ -37,7 +37,7 @@ check_requirements, print_mutation, set_logging, one_cycle, colorstr, methods from utils.downloads import attempt_download from utils.loss import ComputeLoss -from utils.plots import plot_labels, plot_evolution +from utils.plots import plot_labels, plot_evolve from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, de_parallel from utils.loggers.wandb.wandb_utils import check_wandb_resume from utils.metrics import fitness @@ -367,7 +367,8 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95] if fi > best_fitness: best_fitness = fi - callbacks.on_fit_epoch_end(mloss, results, lr, epoch, best_fitness, fi) + log_vals = list(mloss) + list(results) + lr + callbacks.on_fit_epoch_end(log_vals, epoch, best_fitness, fi) # Save model if (not nosave) or (final_epoch and not evolve): # if save @@ -464,7 +465,7 @@ def main(opt): check_requirements(requirements=FILE.parent / 'requirements.txt', exclude=['thop']) # Resume - if opt.resume and not check_wandb_resume(opt): # resume an interrupted run + if opt.resume and not check_wandb_resume(opt) and not opt.evolve: # resume an interrupted run ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run() # specified or most recent path assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist' with open(Path(ckpt).parent.parent / 'opt.yaml') as f: @@ -474,8 +475,10 @@ def main(opt): else: opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp) # check files assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified' - opt.name = 'evolve' if opt.evolve else opt.name - opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve)) + if opt.evolve: + opt.project = 'runs/evolve' + opt.exist_ok = opt.resume + opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # DDP mode device = select_device(opt.device, batch_size=opt.batch_size) @@ -533,17 +536,17 @@ def main(opt): hyp = yaml.safe_load(f) # load hyps dict if 'anchors' not in hyp: # anchors commented in hyp.yaml hyp['anchors'] = 3 - opt.noval, opt.nosave = True, True # only val/save final epoch + opt.noval, opt.nosave, save_dir = True, True, Path(opt.save_dir) # only val/save final epoch # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices - yaml_file = Path(opt.save_dir) / 'hyp_evolved.yaml' # save best result here + evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv' if opt.bucket: - os.system(f'gsutil cp gs://{opt.bucket}/evolve.txt .') # download evolve.txt if exists + os.system(f'gsutil cp gs://{opt.bucket}/evolve.csv {save_dir}') # download evolve.csv if exists for _ in range(opt.evolve): # generations to evolve - if Path('evolve.txt').exists(): # if evolve.txt exists: select best hyps and mutate + if evolve_csv.exists(): # 
if evolve.csv exists: select best hyps and mutate # Select parent(s) parent = 'single' # parent selection method: 'single' or 'weighted' - x = np.loadtxt('evolve.txt', ndmin=2) + x = np.loadtxt(evolve_csv, ndmin=2, delimiter=',', skiprows=1) n = min(5, len(x)) # number of previous results to consider x = x[np.argsort(-fitness(x))][:n] # top n mutations w = fitness(x) - fitness(x).min() + 1E-6 # weights (sum > 0) @@ -575,12 +578,13 @@ def main(opt): results = train(hyp.copy(), opt, device) # Write mutation results - print_mutation(hyp.copy(), results, yaml_file, opt.bucket) + print_mutation(results, hyp.copy(), save_dir, opt.bucket) # Plot results - plot_evolution(yaml_file) - print(f'Hyperparameter evolution complete. Best results saved as: {yaml_file}\n' - f'Command to train a new model with these hyperparameters: $ python train.py --hyp {yaml_file}') + plot_evolve(evolve_csv) + print(f'Hyperparameter evolution finished\n' + f"Results saved to {colorstr('bold', save_dir)}" + f'Use best hyperparameters example: $ python train.py --hyp {evolve_yaml}') def run(**kwargs): diff --git a/utils/general.py b/utils/general.py index ed028d2b3765..15111b727f33 100755 --- a/utils/general.py +++ b/utils/general.py @@ -615,35 +615,43 @@ def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_op print(f"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB") -def print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''): - # Print mutation results to evolve.txt (for use with train.py --evolve) - a = '%10s' * len(hyp) % tuple(hyp.keys()) # hyperparam keys - b = '%10.3g' * len(hyp) % tuple(hyp.values()) # hyperparam values - c = '%10.4g' * len(results) % results # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3) - print('\n%s\n%s\nEvolved fitness: %s\n' % (a, b, c)) +def print_mutation(results, hyp, save_dir, bucket): + evolve_csv, results_csv, evolve_yaml = save_dir / 'evolve.csv', save_dir / 'results.csv', save_dir / 'hyp_evolve.yaml' + keys = ('metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', + 'val/box_loss', 'val/obj_loss', 'val/cls_loss') + tuple(hyp.keys()) # [results + hyps] + keys = tuple(x.strip() for x in keys) + vals = results + tuple(hyp.values()) + n = len(keys) + # Download (optional) if bucket: - url = 'gs://%s/evolve.txt' % bucket - if gsutil_getsize(url) > (os.path.getsize('evolve.txt') if os.path.exists('evolve.txt') else 0): - os.system('gsutil cp %s .' 
% url) # download evolve.txt if larger than local + url = f'gs://{bucket}/evolve.csv' + if gsutil_getsize(url) > (os.path.getsize(evolve_csv) if os.path.exists(evolve_csv) else 0): + os.system(f'gsutil cp {url} {save_dir}') # download evolve.csv if larger than local + + # Log to evolve.csv + s = '' if evolve_csv.exists() else (('%20s,' * n % keys).rstrip(',') + '\n') # add header + with open(evolve_csv, 'a') as f: + f.write(s + ('%20.5g,' * n % vals).rstrip(',') + '\n') - with open('evolve.txt', 'a') as f: # append result - f.write(c + b + '\n') - x = np.unique(np.loadtxt('evolve.txt', ndmin=2), axis=0) # load unique rows - x = x[np.argsort(-fitness(x))] # sort - np.savetxt('evolve.txt', x, '%10.3g') # save sort by fitness + # Print to screen + print(colorstr('evolve: ') + ', '.join(f'{x.strip():>20s}' for x in keys)) + print(colorstr('evolve: ') + ', '.join(f'{x:20.5g}' for x in vals), end='\n\n\n') # Save yaml - for i, k in enumerate(hyp.keys()): - hyp[k] = float(x[0, i + 7]) - with open(yaml_file, 'w') as f: - results = tuple(x[0, :7]) - c = '%10.4g' * len(results) % results # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3) - f.write('# Hyperparameter Evolution Results\n# Generations: %g\n# Metrics: ' % len(x) + c + '\n\n') + with open(evolve_yaml, 'w') as f: + data = pd.read_csv(evolve_csv) + data = data.rename(columns=lambda x: x.strip()) # strip keys + i = np.argmax(fitness(data.values[:, :7])) # + f.write(f'# YOLOv5 Hyperparameter Evolution Results\n' + + f'# Best generation: {i}\n' + + f'# Last generation: {len(data)}\n' + + f'# ' + ', '.join(f'{x.strip():>20s}' for x in keys[:7]) + '\n' + + f'# ' + ', '.join(f'{x:>20.5g}' for x in data.values[i, :7]) + '\n\n') yaml.safe_dump(hyp, f, sort_keys=False) if bucket: - os.system('gsutil cp evolve.txt %s gs://%s' % (yaml_file, bucket)) # upload + os.system(f'gsutil cp {evolve_csv} {evolve_yaml} gs://{bucket}') # upload def apply_classifier(x, model, img, im0): diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index be76d0c17f1b..d40c0c350fde 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -95,9 +95,8 @@ def on_val_end(self): files = sorted(self.save_dir.glob('val*.jpg')) self.wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in files]}) - def on_fit_epoch_end(self, mloss, results, lr, epoch, best_fitness, fi): + def on_fit_epoch_end(self, vals, epoch, best_fitness, fi): # Callback runs at the end of each fit (train+val) epoch - vals = list(mloss) + list(results) + lr x = {k: v for k, v in zip(self.keys, vals)} # dict if self.csv: file = self.save_dir / 'results.csv' @@ -123,7 +122,7 @@ def on_model_save(self, last, epoch, final_epoch, best_fitness, fi): def on_train_end(self, last, best, plots, epoch): # Callback runs on training end if plots: - plot_results(dir=self.save_dir) # save results.png + plot_results(file=self.save_dir / 'results.csv') # save results.png files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]] files = [(self.save_dir / f) for f in files if (self.save_dir / f).exists()] # filter diff --git a/utils/plots.py b/utils/plots.py index 252e128168ee..ef850ee2f26d 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -325,30 +325,6 @@ def plot_labels(labels, names=(), save_dir=Path('')): plt.close() -def plot_evolution(yaml_file='data/hyp.finetune.yaml'): # from utils.plots import *; plot_evolution() - # Plot hyperparameter evolution results in evolve.txt - with open(yaml_file) as f: - hyp = yaml.safe_load(f) - x = 
np.loadtxt('evolve.txt', ndmin=2) - f = fitness(x) - # weights = (f - f.min()) ** 2 # for weighted results - plt.figure(figsize=(10, 12), tight_layout=True) - matplotlib.rc('font', **{'size': 8}) - for i, (k, v) in enumerate(hyp.items()): - y = x[:, i + 7] - # mu = (y * weights).sum() / weights.sum() # best weighted result - mu = y[f.argmax()] # best single result - plt.subplot(6, 5, i + 1) - plt.scatter(y, f, c=hist2d(y, f, 20), cmap='viridis', alpha=.8, edgecolors='none') - plt.plot(mu, f.max(), 'k+', markersize=15) - plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9}) # limit to 40 characters - if i % 5 != 0: - plt.yticks([]) - print('%15s: %.3g' % (k, mu)) - plt.savefig('evolve.png', dpi=200) - print('\nPlot saved as evolve.png') - - def profile_idetection(start=0, stop=0, labels=(), save_dir=''): # Plot iDetection '*.txt' per-image logs. from utils.plots import *; profile_idetection() ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel() @@ -381,7 +357,31 @@ def profile_idetection(start=0, stop=0, labels=(), save_dir=''): plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200) -def plot_results(file='', dir=''): +def plot_evolve(evolve_csv=Path('path/to/evolve.csv')): # from utils.plots import *; plot_evolve() + # Plot evolve.csv hyp evolution results + data = pd.read_csv(evolve_csv) + keys = [x.strip() for x in data.columns] + x = data.values + f = fitness(x) + j = np.argmax(f) # max fitness index + plt.figure(figsize=(10, 12), tight_layout=True) + matplotlib.rc('font', **{'size': 8}) + for i, k in enumerate(keys[7:]): + v = x[:, 7 + i] + mu = v[j] # best single result + plt.subplot(6, 5, i + 1) + plt.scatter(v, f, c=hist2d(v, f, 20), cmap='viridis', alpha=.8, edgecolors='none') + plt.plot(mu, f.max(), 'k+', markersize=15) + plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9}) # limit to 40 characters + if i % 5 != 0: + plt.yticks([]) + print('%15s: %.3g' % (k, mu)) + f = evolve_csv.with_suffix('.png') # filename + plt.savefig(f, dpi=200) + print(f'Saved {f}') + + +def plot_results(file='path/to/results.csv', dir=''): # Plot training results.csv. 
Usage: from utils.plots import *; plot_results('path/to/results.csv') save_dir = Path(file).parent if file else Path(dir) fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True) From 86c7150cfd3ac926985ed8b0aa6550820c0d3ab9 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 4 Aug 2021 17:41:38 +0200 Subject: [PATCH 0467/1976] Update newline (#4308) --- train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/train.py b/train.py index a7d61c8c5411..23f4971b1758 100644 --- a/train.py +++ b/train.py @@ -583,7 +583,7 @@ def main(opt): # Plot results plot_evolve(evolve_csv) print(f'Hyperparameter evolution finished\n' - f"Results saved to {colorstr('bold', save_dir)}" + f"Results saved to {colorstr('bold', save_dir)}\n" f'Use best hyperparameters example: $ python train.py --hyp {evolve_yaml}') From f409d8e54f9391ce21436d33334beff3a2fd4042 Mon Sep 17 00:00:00 2001 From: Sudhanshu Singh Date: Thu, 5 Aug 2021 01:41:48 +0530 Subject: [PATCH 0468/1976] Update README.md (#4309) remove unnecessary "`" --- utils/flask_rest_api/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/flask_rest_api/README.md b/utils/flask_rest_api/README.md index 324c2416dcd9..6c835936dde6 100644 --- a/utils/flask_rest_api/README.md +++ b/utils/flask_rest_api/README.md @@ -19,7 +19,7 @@ $ python3 restapi.py --port 5000 Then use [curl](https://curl.se/) to perform a request: ```shell -$ curl -X POST -F image=@zidane.jpg 'http://localhost:5000/v1/object-detection/yolov5s'` +$ curl -X POST -F image=@zidane.jpg 'http://localhost:5000/v1/object-detection/yolov5s' ``` The model inference results are returned as a JSON response: From e96c74b5a1c4a27934c5d8ad52cde778af248ed8 Mon Sep 17 00:00:00 2001 From: Yonghye Kwon Date: Thu, 5 Aug 2021 17:54:16 +0900 Subject: [PATCH 0469/1976] Simpler code for DWConvClass (#4310) * more simpler code for DWConvClass more simpler code for DWConvClass * remove DWConv function * Replace DWConvClass with DWConv --- models/common.py | 10 ++-------- models/yolo.py | 2 +- 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/models/common.py b/models/common.py index 30e7319f98a0..2d24672a6b44 100644 --- a/models/common.py +++ b/models/common.py @@ -29,11 +29,6 @@ def autopad(k, p=None): # kernel, padding return p -def DWConv(c1, c2, k=1, s=1, act=True): - # Depth-wise convolution function - return Conv(c1, c2, k, s, g=math.gcd(c1, c2), act=act) - - class Conv(nn.Module): # Standard convolution def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups @@ -49,11 +44,10 @@ def forward_fuse(self, x): return self.act(self.conv(x)) -class DWConvClass(Conv): +class DWConv(Conv): # Depth-wise convolution class def __init__(self, c1, c2, k=1, s=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups - super().__init__(c1, c2, k, s, act) - self.conv = nn.Conv2d(c1, c2, k, s, autopad(k), groups=math.gcd(c1, c2), bias=False) + super().__init__(c1, c2, k, s, g=math.gcd(c1, c2), act=act) class TransformerLayer(nn.Module): diff --git a/models/yolo.py b/models/yolo.py index 9f05c8329f38..380f3401e5b9 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -202,7 +202,7 @@ def _print_biases(self): def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers LOGGER.info('Fusing layers... 
') for m in self.model.modules(): - if isinstance(m, (Conv, DWConvClass)) and hasattr(m, 'bn'): + if isinstance(m, (Conv, DWConv)) and hasattr(m, 'bn'): m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv delattr(m, 'bn') # remove batchnorm m.forward = m.forward_fuse # update forward From ce7deec440404d17b315768d955313404d70e776 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 11 Aug 2021 17:32:13 +0200 Subject: [PATCH 0470/1976] `int(mlc)` (#4385) --- train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/train.py b/train.py index 23f4971b1758..24152f1a1198 100644 --- a/train.py +++ b/train.py @@ -204,7 +204,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary hyp=hyp, augment=True, cache=opt.cache, rect=opt.rect, rank=RANK, workers=workers, image_weights=opt.image_weights, quad=opt.quad, prefix=colorstr('train: ')) - mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class + mlc = int(np.concatenate(dataset.labels, 0)[:, 0].max()) # max label class nb = len(train_loader) # number of batches assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. Possible class labels are 0-{nc - 1}' From e7fc27406ab32b9860c9b351b30ca1c47f543433 Mon Sep 17 00:00:00 2001 From: Yuantao Yang <31794133+orangeccc@users.noreply.github.com> Date: Wed, 11 Aug 2021 23:38:34 +0800 Subject: [PATCH 0471/1976] Fix module count in parse_model (#4379) Co-authored-by: yangyuantao --- models/yolo.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/models/yolo.py b/models/yolo.py index 380f3401e5b9..98e578d20384 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -234,7 +234,7 @@ def parse_model(d, ch): # model_dict, input_channels(3) except: pass - n = max(round(n * gd), 1) if n > 1 else n # depth gain + n = n_ = max(round(n * gd), 1) if n > 1 else n # depth gain if m in [Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP, C3, C3TR, C3SPP]: c1, c2 = ch[f], args[0] @@ -264,7 +264,7 @@ def parse_model(d, ch): # model_dict, input_channels(3) t = str(m)[8:-2].replace('__main__.', '') # module type np = sum([x.numel() for x in m_.parameters()]) # number params m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params - LOGGER.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args)) # print + LOGGER.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n_, np, t, args)) # print save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist layers.append(m_) if i == 0: From b27f69f108a9b92f20bfd2725350bd86c313a177 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 11 Aug 2021 18:32:06 +0200 Subject: [PATCH 0472/1976] Created using Colaboratory --- tutorial.ipynb | 38 +++++++++++++++++++++++++++++++------- 1 file changed, 31 insertions(+), 7 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index b16506275288..eaa886509a66 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -19,6 +19,7 @@ "2e915d9016c846e095e382b6a02ee773": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", + "model_module_version": "1.5.0", "state": { "_view_name": "HBoxView", "_dom_classes": [], @@ -39,6 +40,7 @@ "cb7fc3a5c6cc4fde8d2c83e594a7c86e": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", + "model_module_version": "1.2.0", "state": { "_view_name": "LayoutView", "grid_template_rows": null, @@ -90,6 +92,7 @@ "ac3edef4e3434f4587e6cbf8aa048770": { "model_module": "@jupyter-widgets/controls", "model_name": 
"FloatProgressModel", + "model_module_version": "1.5.0", "state": { "_view_name": "ProgressView", "style": "IPY_MODEL_13842ca90c0047e584b8d68d99dad2b1", @@ -113,6 +116,7 @@ "853ac234cc2a4236946fc516871e10eb": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", + "model_module_version": "1.5.0", "state": { "_view_name": "HTMLView", "style": "IPY_MODEL_f94a7ca8c1f04761bf38fdc5f99664b8", @@ -133,6 +137,7 @@ "13842ca90c0047e584b8d68d99dad2b1": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", + "model_module_version": "1.5.0", "state": { "_view_name": "StyleView", "_model_name": "ProgressStyleModel", @@ -148,6 +153,7 @@ "f454999c3a924c7bad0746fb453dec36": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", + "model_module_version": "1.2.0", "state": { "_view_name": "LayoutView", "grid_template_rows": null, @@ -199,6 +205,7 @@ "f94a7ca8c1f04761bf38fdc5f99664b8": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", "state": { "_view_name": "StyleView", "_model_name": "DescriptionStyleModel", @@ -213,6 +220,7 @@ "9da1a23b042c41618dd14b0e30aa7cbe": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", + "model_module_version": "1.2.0", "state": { "_view_name": "LayoutView", "grid_template_rows": null, @@ -264,6 +272,7 @@ "6ff8a710ded44391a624dec5c460b771": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", + "model_module_version": "1.5.0", "state": { "_view_name": "HBoxView", "_dom_classes": [], @@ -284,6 +293,7 @@ "3c19729b51cd45d4848035da06e96ff8": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", + "model_module_version": "1.2.0", "state": { "_view_name": "LayoutView", "grid_template_rows": null, @@ -335,6 +345,7 @@ "23b2f0ae3d46438c8de375987c77f580": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", + "model_module_version": "1.5.0", "state": { "_view_name": "ProgressView", "style": "IPY_MODEL_d8dda4b2ce864fd682e558b9a48f602e", @@ -358,6 +369,7 @@ "dd9498c321a9422da6faf17a0be026d4": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", + "model_module_version": "1.5.0", "state": { "_view_name": "HTMLView", "style": "IPY_MODEL_0f84fe609bcf4aa9afdc32a8cf076909", @@ -378,6 +390,7 @@ "d8dda4b2ce864fd682e558b9a48f602e": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", + "model_module_version": "1.5.0", "state": { "_view_name": "StyleView", "_model_name": "ProgressStyleModel", @@ -393,6 +406,7 @@ "ff8151449e444a14869684212b9ab14e": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", + "model_module_version": "1.2.0", "state": { "_view_name": "LayoutView", "grid_template_rows": null, @@ -444,6 +458,7 @@ "0f84fe609bcf4aa9afdc32a8cf076909": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", "state": { "_view_name": "StyleView", "_model_name": "DescriptionStyleModel", @@ -458,6 +473,7 @@ "8fda673769984e2b928ef820d34c85c3": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", + "model_module_version": "1.2.0", "state": { "_view_name": "LayoutView", "grid_template_rows": null, @@ -564,7 +580,7 @@ "clear_output()\n", "print(f\"Setup complete. 
Using torch {torch.__version__} ({torch.cuda.get_device_properties(0).name if torch.cuda.is_available() else 'CPU'})\")" ], - "execution_count": 1, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -585,7 +601,15 @@ "\n", "`detect.py` runs YOLOv5 inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/detect`. Example inference sources are:\n", "\n", - " " + "```shell\n", + "python detect.py --source 0 # webcam\n", + " file.jpg # image \n", + " file.mp4 # video\n", + " path/ # directory\n", + " path/*.jpg # glob\n", + " 'https://youtu.be/NUsoVlDFqZg' # YouTube\n", + " 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream\n", + "```" ] }, { @@ -601,7 +625,7 @@ "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images/\n", "Image(filename='runs/detect/exp/zidane.jpg', width=600)" ], - "execution_count": 9, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -675,7 +699,7 @@ "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017val.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../datasets && rm tmp.zip" ], - "execution_count": 10, + "execution_count": null, "outputs": [ { "output_type": "display_data", @@ -715,7 +739,7 @@ "# Run YOLOv5x on COCO val2017\n", "!python val.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65 --half" ], - "execution_count": 11, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -839,7 +863,7 @@ "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../ && rm tmp.zip" ], - "execution_count": 12, + "execution_count": null, "outputs": [ { "output_type": "display_data", @@ -917,7 +941,7 @@ "# Train YOLOv5s on COCO128 for 3 epochs\n", "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache" ], - "execution_count": 13, + "execution_count": null, "outputs": [ { "output_type": "stream", From 11e4aebfefb0ce77972c0b083fac03f2da650b76 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 11 Aug 2021 18:33:34 +0200 Subject: [PATCH 0473/1976] Update README.md (#4387) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index df4e9add519d..febf4bff9b40 100644 --- a/README.md +++ b/README.md @@ -112,7 +112,7 @@ $ python detect.py --source 0 # webcam file.mp4 # video path/ # directory path/*.jpg # glob - 'https://youtu.be/NUsoVlDFqZg' # YouTube video + 'https://youtu.be/NUsoVlDFqZg' # YouTube 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream ``` From 75d323dd8b13dfa16707a5174960844d99f4d708 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Wed, 11 Aug 2021 22:05:43 +0530 Subject: [PATCH 0474/1976] W&B: Add advanced features tutorial (#4384) * Improve docstrings and run names * default wandb login prompt with timeout * return key * Update api_key check logic * Properly support zipped dataset feature * update docstring * Revert tuorial change * extend changes to log_dataset * add run name * bug fix * bug fix * Update comment * fix import check * remove unused import * Hardcore .yaml file extension * reduce code * Reformat using pycharm * Remove redundant try catch * More refactoring and bug fixes * retry * Reformat using pycharm * respect LOGGERS include list * Initial readme update * Update README.md * Update README.md * Update 
README.md * Update README.md * Update README.md Co-authored-by: Glenn Jocher --- utils/loggers/wandb/README.md | 140 ++++++++++++++++++++++++++++++++++ 1 file changed, 140 insertions(+) create mode 100644 utils/loggers/wandb/README.md diff --git a/utils/loggers/wandb/README.md b/utils/loggers/wandb/README.md new file mode 100644 index 000000000000..8616ea2b6945 --- /dev/null +++ b/utils/loggers/wandb/README.md @@ -0,0 +1,140 @@ +📚 This guide explains how to use **Weights & Biases** (W&B) with YOLOv5 🚀. + * [About Weights & Biases](#about-weights-&-biases) + * [First-Time Setup](#first-time-setup) + * [Viewing runs](#viewing-runs) + * [Advanced Usage: Dataset Versioning and Evaluation](#advanced-usage) + * [Reports: Share your work with the world!](#reports) + +## About Weights & Biases +Think of [W&B](https://wandb.ai/site?utm_campaign=repo_yolo_wandbtutorial) like GitHub for machine learning models. With a few lines of code, save everything you need to debug, compare and reproduce your models — architecture, hyperparameters, git commits, model weights, GPU usage, and even datasets and predictions. + + Used by top researchers including teams at OpenAI, Lyft, Github, and MILA, W&B is part of the new standard of best practices for machine learning. How W&B can help you optimize your machine learning workflows: + + * [Debug](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#Free-2) model performance in real time + * [GPU usage](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#System-4), visualized automatically + * [Custom charts](https://wandb.ai/wandb/customizable-charts/reports/Powerful-Custom-Charts-To-Debug-Model-Peformance--VmlldzoyNzY4ODI) for powerful, extensible visualization + * [Share insights](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#Share-8) interactively with collaborators + * [Optimize hyperparameters](https://docs.wandb.com/sweeps) efficiently + * [Track](https://docs.wandb.com/artifacts) datasets, pipelines, and production models + + ## First-Time Setup +
+ When you first train, W&B will prompt you to create a new account and will generate an **API key** for you. If you are an existing user you can retrieve your key from https://wandb.ai/authorize. This key is used to tell W&B where to log your data. You only need to supply your key once, and then it is remembered on the same device.
+
+ W&B will create a cloud **project** (default is 'YOLOv5') for your training runs, and each new training run will be provided a unique run **name** within that project as project/name. You can also manually set your project and run name as:
+
+ ```shell
+ $ python train.py --project ... --name ...
+ ```
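The same first-run flow can also be driven from Python. The following is a minimal sketch against the public `wandb` API; the project and run names are placeholders, and the snippet is illustrative rather than part of YOLOv5:

```python
import wandb

wandb.login()  # prompts for (or reuses) the API key described above
run = wandb.init(project='YOLOv5', name='exp')  # placeholder project and run name
run.log({'train/box_loss': 0.05})               # scalars stream to the W&B console
run.finish()
```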
+
+ ## Viewing Runs
+ Run information streams from your environment to the W&B cloud console as you train. This allows you to monitor and even cancel runs in realtime. All important information is logged:
+
+ * Training & Validation losses
+ * Metrics: Precision, Recall, mAP@0.5, mAP@0.5:0.95
+ * Learning Rate over time
+ * A bounding box debugging panel, showing the training progress over time
+ * GPU: Type, **GPU Utilization**, power, temperature, **CUDA memory usage**
+ * System: Disk I/O, CPU utilization, RAM memory usage
+ * Your trained model as W&B Artifact
+ * Environment: OS and Python types, Git repository and state, **training command**
+
+ ## Advanced Usage
+ You can leverage W&B artifacts and Tables integration to easily visualize and manage your datasets, models and training evaluations. Here are some quick examples to get you started.
+ ### 1: Visualize and Version Datasets
+ Log, visualize, dynamically query, and understand your data with W&B Tables. You can use the following command to log your dataset as a W&B Table. This will generate a `{dataset}_wandb.yaml` file which can be used to train from the dataset artifact.
+
+ **Usage:** `$ python utils/loggers/wandb/log_dataset.py --project ... --name ... --data ..`
+
+ ![Screenshot (64)](https://user-images.githubusercontent.com/15766192/128486078-d8433890-98a3-4d12-8986-b6c0e3fc64b9.png)
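Under the hood, dataset versioning rests on W&B Artifacts. A minimal sketch of that idea with the public API follows; the artifact name and directory path are illustrative placeholders, not the script's actual internals:

```python
import wandb

run = wandb.init(project='YOLOv5', job_type='dataset')
artifact = wandb.Artifact('coco128', type='dataset')  # placeholder dataset name
artifact.add_dir('../datasets/coco128')               # directory of images and labels
run.log_artifact(artifact)                            # uploads and versions the dataset
run.finish()
```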

+ ### 2: Train and Log Evaluation simultaneously
+ This is an extension of the previous section, but it'll also start training after uploading the dataset. It also logs an Evaluation Table, which compares your predictions and ground truths across the validation set for each epoch. It uses references to the already-uploaded datasets, so no images will be uploaded from your system more than once.
+
+ **Usage:** `$ python utils/loggers/wandb/log_dataset.py --data .. --upload_data`
+
+ ![Screenshot (72)](https://user-images.githubusercontent.com/15766192/128979739-4cf63aeb-a76f-483f-8861-1c0100b938a5.png)
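The Evaluation Table mentioned above is an ordinary W&B Table. As a hedged sketch of the concept (the columns and values below are illustrative only, not the logger's exact schema):

```python
import wandb

run = wandb.init(project='YOLOv5', job_type='evaluation')
table = wandb.Table(columns=['image_id', 'prediction', 'ground_truth'])  # illustrative columns
table.add_data('000000000139', 'person 0.92', 'person')                  # illustrative row
run.log({'Evaluation': table})  # renders as an interactive table on the run page
run.finish()
```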

+ ### 3: Train using dataset artifact
+ When you upload a dataset as described in the first section, you get a new config file with `_wandb` appended to its name. This file contains the information that can be used to train a model directly from the dataset artifact. This also logs evaluation.
+
+ **Usage:** `$ python utils/loggers/wandb/log_dataset.py --data {data}_wandb.yaml`
+
+ ![Screenshot (72)](https://user-images.githubusercontent.com/15766192/128979739-4cf63aeb-a76f-483f-8861-1c0100b938a5.png)
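Training from a dataset artifact amounts to downloading a named, versioned artifact before the dataloader starts. A minimal sketch, where the artifact reference is a placeholder:

```python
import wandb

run = wandb.init(project='YOLOv5', job_type='training')
artifact = run.use_artifact('coco128:latest')  # placeholder artifact reference
data_dir = artifact.download()                 # materializes the dataset locally, cached across runs
print('training data at:', data_dir)
```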

+ ### 4: Save model checkpoints as artifacts
+ To enable saving and versioning checkpoints of your experiment, pass `--save_period n` with the base command, where `n` represents the checkpoint interval. You can also log both the dataset and model checkpoints simultaneously. If not passed, only the final model will be logged.
+
+ **Usage:** `$ python train.py --save_period 1`
+
+ ![Screenshot (68)](https://user-images.githubusercontent.com/15766192/128726138-ec6c1f60-639d-437d-b4ee-3acd9de47ef3.png)
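Conceptually, each saved checkpoint becomes a versioned model Artifact. A hedged sketch with the public API; the artifact name pattern and checkpoint path are illustrative, not the trainer's exact internals:

```python
import wandb

run = wandb.init(project='YOLOv5')
ckpt = wandb.Artifact(f'run_{run.id}_model', type='model')  # illustrative name pattern
ckpt.add_file('runs/train/exp/weights/last.pt')             # hypothetical checkpoint path
run.log_artifact(ckpt, aliases=['latest'])                  # each save adds a new version
run.finish()
```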
+ ### 5: Resume runs from checkpoint artifacts
+ Any run can be resumed using artifacts if the `--resume` argument starts with the `wandb-artifact://` prefix followed by the run path, i.e. `wandb-artifact://username/project/runid`. This doesn't require the model checkpoint to be present on the local system.
+
+ **Usage:** `$ python train.py --resume wandb-artifact://{run_path}`
+
+ ![Screenshot (70)](https://user-images.githubusercontent.com/15766192/128728988-4e84b355-6c87-41ae-a591-14aecf45343e.png)
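Resuming from a checkpoint artifact maps onto the ordinary artifact-download flow. A minimal sketch, where the artifact reference below is a placeholder run path:

```python
import wandb

run = wandb.init(project='YOLOv5', resume='allow')
# placeholder model-artifact reference; no local checkpoint is required
artifact = run.use_artifact('username/YOLOv5/run_abc123_model:latest')
ckpt_dir = artifact.download()  # fetches the checkpoint before training resumes
print('checkpoint downloaded to:', ckpt_dir)
```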

+ ### 6: Resume runs from dataset artifact & checkpoint artifacts
+ Local dataset or model checkpoints are not required. This can be used to resume runs directly on a different device. The syntax is the same as the previous section, but you'll need to log both the dataset and model checkpoints as artifacts, i.e. either set `--upload_dataset` or train from a `_wandb.yaml` file, and set `--save_period`.
+
+ **Usage:** `$ python train.py --resume wandb-artifact://{run_path}`
+
+ ![Screenshot (70)](https://user-images.githubusercontent.com/15766192/128728988-4e84b355-6c87-41ae-a591-14aecf45343e.png)
+
+ ## Reports
+ W&B Reports can be created from your saved runs for sharing online. Once a report is created you will receive a link you can use to publicly share your results. Here is an example report created from the COCO128 tutorial trainings of all four YOLOv5 models ([link](https://wandb.ai/glenn-jocher/yolov5_tutorial/reports/YOLOv5-COCO128-Tutorial-Results--VmlldzozMDI5OTY)).
+
+ ## Environments
+ YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):
+
+ * **Google Colab and Kaggle** notebooks with free GPU: [![Open In Colab](https://camo.githubusercontent.com/84f0493939e0c4de4e6dbe113251b4bfb5353e57134ffd9fcab6b8714514d4d1/68747470733a2f2f636f6c61622e72657365617263682e676f6f676c652e636f6d2f6173736574732f636f6c61622d62616467652e737667)](https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb) [![Open In Kaggle](https://camo.githubusercontent.com/a08ca511178e691ace596a95d334f73cf4ce06e83a5c4a5169b8bb68cac27bef/68747470733a2f2f6b6167676c652e636f6d2f7374617469632f696d616765732f6f70656e2d696e2d6b6167676c652e737667)](https://www.kaggle.com/ultralytics/yolov5)
+ * **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)
+ * **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)
+ * **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) [![Docker Pulls](https://camo.githubusercontent.com/280faedaf431e4c0c24fdb30ec00a66d627404e5c4c498210d3f014dd58c2c7e/68747470733a2f2f696d672e736869656c64732e696f2f646f636b65722f70756c6c732f756c7472616c79746963732f796f6c6f76353f6c6f676f3d646f636b6572)](https://hub.docker.com/r/ultralytics/yolov5)
+
+ ## Status
+ ![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg)
+
+ If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), validation ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on MacOS, Windows, and Ubuntu every 24 hours and on every commit.
+ From a4e4553040503b11df9283fda666736e9c57dd87 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Thu, 12 Aug 2021 01:26:25 +0530 Subject: [PATCH 0475/1976] W&B: Fix for 4360 (#4388) * Improve docstrings and run names * default wandb login prompt with timeout * return key * Update api_key check logic * Properly support zipped dataset feature * update docstring * Revert tuorial change * extend changes to log_dataset * add run name * bug fix * bug fix * Update comment * fix import check * remove unused import * Hardcore .yaml file extension * reduce code * Reformat using pycharm * Remove redundant try catch * More refactoring and bug fixes * retry * Reformat using pycharm * respect LOGGERS include list * Fix * fix Co-authored-by: Glenn Jocher --- utils/loggers/wandb/wandb_utils.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index 66fa8f85ec4e..3f2684a7f3e3 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -157,6 +157,8 @@ def __init__(self, opt, run_id, job_type='Training'): self.data_dict = data_dict else: # Local .yaml dataset file or .zip file self.data_dict = check_dataset(opt.data) + else: + self.data_dict = check_dataset(opt.data) self.setup_training(opt) if not self.wandb_artifact_data_dict: @@ -505,4 +507,4 @@ def all_logging_disabled(highest_level=logging.CRITICAL): try: yield finally: - logging.disable(previous_level) + logging.disable(previous_level) \ No newline at end of file From 3e7c59ad3bf5414d4b2a26e018f397e27a51c6f1 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 11 Aug 2021 23:40:47 +0200 Subject: [PATCH 0476/1976] Fix rename `utils.google_utils` to `utils.downloads` (#4393) --- data/scripts/download_weights.sh | 2 +- utils/downloads.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/data/scripts/download_weights.sh b/data/scripts/download_weights.sh index 013036978c07..a576c956d008 100755 --- a/data/scripts/download_weights.sh +++ b/data/scripts/download_weights.sh @@ -9,7 +9,7 @@ # └── ... 
python - < Date: Fri, 13 Aug 2021 13:22:13 +0200 Subject: [PATCH 0477/1976] Simplify ONNX inference command (#4405) --- export.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/export.py b/export.py index cec85958b4a9..db5a6918845c 100644 --- a/export.py +++ b/export.py @@ -76,7 +76,7 @@ def export_onnx(model, img, file, opset, train, dynamic, simplify): except Exception as e: print(f'{prefix} simplifier failure: {e}') print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - print(f"{prefix} run --dynamic ONNX model inference with detect.py: 'python detect.py --weights {f}'") + print(f"{prefix} run --dynamic ONNX model inference with: 'python detect.py --weights {f}'") except Exception as e: print(f'{prefix} export failure: {e}') From d9f23ed6d65e985c07e9ef0ec77d476dd14e2b26 Mon Sep 17 00:00:00 2001 From: Ahmad Mustafa Anis <47111429+ahmadmustafaanis@users.noreply.github.com> Date: Fri, 13 Aug 2021 16:25:00 +0500 Subject: [PATCH 0478/1976] No cache option for reading datasets (#4376) * no cache option * no cache option * bit change * changed to 0,1 instead of True False * Update train.py * Update datasets.py Co-authored-by: Glenn Jocher From e086347377923076fb469a401b65980b98cd871b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 14 Aug 2021 12:50:27 +0200 Subject: [PATCH 0479/1976] Update plots.py (#4407) --- utils/plots.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/plots.py b/utils/plots.py index ef850ee2f26d..7db527e14924 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -79,7 +79,7 @@ def plot_one_box(x, im, color=(128, 128, 128), label=None, line_thickness=3): cv2.putText(im, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA) -def plot_one_box_PIL(box, im, color=(128, 128, 128), label=None, line_thickness=None): +def plot_one_box_PIL(box, im, color=(128, 128, 128), label=None, line_thickness=3): # Plots one bounding box on image 'im' using PIL im = Image.fromarray(im) draw = ImageDraw.Draw(im) From 4e8c81a368d154fed3f27b16a728b6467ff60c6a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 14 Aug 2021 12:55:22 +0200 Subject: [PATCH 0480/1976] Add `yolov5s-ghost.yaml` (#4412) * Add yolov5s-ghost.yaml * Finish C3Ghost * Add C3Ghost to list * Add C3Ghost to number of repeats if statement * Fixes * Cleanup --- models/common.py | 36 +++++++++++++++++++++++++++ models/experimental.py | 28 --------------------- models/hub/yolov5s-ghost.yaml | 46 +++++++++++++++++++++++++++++++++++ models/yolo.py | 4 +-- 4 files changed, 84 insertions(+), 30 deletions(-) create mode 100644 models/hub/yolov5s-ghost.yaml diff --git a/models/common.py b/models/common.py index 2d24672a6b44..5ef3996007a2 100644 --- a/models/common.py +++ b/models/common.py @@ -149,6 +149,14 @@ def __init__(self, c1, c2, k=(5, 9, 13), n=1, shortcut=True, g=1, e=0.5): self.m = SPP(c_, c_, k) +class C3Ghost(C3): + # C3 module with GhostBottleneck() + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2 * e) # hidden channels + self.m = nn.Sequential(*[GhostBottleneck(c_, c_) for _ in range(n)]) + + class SPP(nn.Module): # Spatial pyramid pooling layer used in YOLOv3-SPP def __init__(self, c1, c2, k=(5, 9, 13)): @@ -177,6 +185,34 @@ def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2) # return self.conv(self.contract(x)) +class GhostConv(nn.Module): + # Ghost Convolution https://github.com/huawei-noah/ghostnet + def __init__(self, c1, c2, k=1, s=1, g=1, 
act=True): # ch_in, ch_out, kernel, stride, groups + super().__init__() + c_ = c2 // 2 # hidden channels + self.cv1 = Conv(c1, c_, k, s, None, g, act) + self.cv2 = Conv(c_, c_, 5, 1, None, c_, act) + + def forward(self, x): + y = self.cv1(x) + return torch.cat([y, self.cv2(y)], 1) + + +class GhostBottleneck(nn.Module): + # Ghost Bottleneck https://github.com/huawei-noah/ghostnet + def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride + super().__init__() + c_ = c2 // 2 + self.conv = nn.Sequential(GhostConv(c1, c_, 1, 1), # pw + DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw + GhostConv(c_, c2, 1, 1, act=False)) # pw-linear + self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False), + Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity() + + def forward(self, x): + return self.conv(x) + self.shortcut(x) + + class Contract(nn.Module): # Contract width-height into channels, i.e. x(1,64,80,80) to x(1,256,40,40) def __init__(self, gain=2): diff --git a/models/experimental.py b/models/experimental.py index 581c7b14b61e..5c690cce3d99 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -43,34 +43,6 @@ def forward(self, x): return y -class GhostConv(nn.Module): - # Ghost Convolution https://github.com/huawei-noah/ghostnet - def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups - super().__init__() - c_ = c2 // 2 # hidden channels - self.cv1 = Conv(c1, c_, k, s, None, g, act) - self.cv2 = Conv(c_, c_, 5, 1, None, c_, act) - - def forward(self, x): - y = self.cv1(x) - return torch.cat([y, self.cv2(y)], 1) - - -class GhostBottleneck(nn.Module): - # Ghost Bottleneck https://github.com/huawei-noah/ghostnet - def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride - super().__init__() - c_ = c2 // 2 - self.conv = nn.Sequential(GhostConv(c1, c_, 1, 1), # pw - DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw - GhostConv(c_, c2, 1, 1, act=False)) # pw-linear - self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False), - Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity() - - def forward(self, x): - return self.conv(x) + self.shortcut(x) - - class MixConv2d(nn.Module): # Mixed Depth-wise Conv https://arxiv.org/abs/1907.09595 def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True): diff --git a/models/hub/yolov5s-ghost.yaml b/models/hub/yolov5s-ghost.yaml new file mode 100644 index 000000000000..d99d56d26e85 --- /dev/null +++ b/models/hub/yolov5s-ghost.yaml @@ -0,0 +1,46 @@ +# Parameters +nc: 80 # number of classes +depth_multiple: 0.33 # model depth multiple +width_multiple: 0.50 # layer channel multiple +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [-1, 1, GhostConv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3Ghost, [128]], + [-1, 1, GhostConv, [256, 3, 2]], # 3-P3/8 + [-1, 9, C3Ghost, [256]], + [-1, 1, GhostConv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3Ghost, [512]], + [-1, 1, GhostConv, [1024, 3, 2]], # 7-P5/32 + [-1, 1, SPP, [1024, [5, 9, 13]]], + [-1, 3, C3Ghost, [1024, False]], # 9 + ] + +# YOLOv5 head +head: + [[-1, 1, GhostConv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3Ghost, [512, False]], # 13 + + [-1, 1, GhostConv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone 
P3 + [-1, 3, C3Ghost, [256, False]], # 17 (P3/8-small) + + [-1, 1, GhostConv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3Ghost, [512, False]], # 20 (P4/16-medium) + + [-1, 1, GhostConv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3Ghost, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/models/yolo.py b/models/yolo.py index 98e578d20384..88adb71f8fea 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -236,13 +236,13 @@ def parse_model(d, ch): # model_dict, input_channels(3) n = n_ = max(round(n * gd), 1) if n > 1 else n # depth gain if m in [Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP, - C3, C3TR, C3SPP]: + C3, C3TR, C3SPP, C3Ghost]: c1, c2 = ch[f], args[0] if c2 != no: # if not output c2 = make_divisible(c2 * gw, 8) args = [c1, c2, *args[1:]] - if m in [BottleneckCSP, C3, C3TR]: + if m in [BottleneckCSP, C3, C3TR, C3Ghost]: args.insert(2, n) # number of repeats n = 1 elif m is nn.BatchNorm2d: From 63e09fdc480c9398e7b7acb27083907ed29809de Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 14 Aug 2021 13:47:20 +0200 Subject: [PATCH 0481/1976] Remove `encoding='ascii'` (#4413) * Remove `encoding='ascii'` * Reinstate `encoding='ascii'` in emojis() --- utils/autoanchor.py | 2 +- utils/datasets.py | 2 +- utils/general.py | 4 ++-- utils/loggers/wandb/wandb_utils.py | 6 +++--- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/utils/autoanchor.py b/utils/autoanchor.py index 2571fc99ac89..eca1d5be8ebe 100644 --- a/utils/autoanchor.py +++ b/utils/autoanchor.py @@ -104,7 +104,7 @@ def print_results(k): return k if isinstance(dataset, str): # *.yaml file - with open(dataset, encoding='ascii', errors='ignore') as f: + with open(dataset, errors='ignore') as f: data_dict = yaml.safe_load(f) # model dict from utils.datasets import LoadImagesAndLabels dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True) diff --git a/utils/datasets.py b/utils/datasets.py index 1c780cdbac4b..b402723f9c49 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -931,7 +931,7 @@ def hub_ops(f, max_dim=1920): im.save(im_dir / Path(f).name, quality=75) # save zipped, data_dir, yaml_path = unzip(Path(path)) - with open(check_file(yaml_path), encoding='ascii', errors='ignore') as f: + with open(check_file(yaml_path), errors='ignore') as f: data = yaml.safe_load(f) # data dict if zipped: data['path'] = data_dir # TODO: should this be dir.resolve()? 
diff --git a/utils/general.py b/utils/general.py index 15111b727f33..c70c21f47636 100755 --- a/utils/general.py +++ b/utils/general.py @@ -112,7 +112,7 @@ def is_pip(): def emojis(str=''): # Return platform-dependent emoji-safe version of string - return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str + return str.encode().decode(encoding='ascii', errors='ignore') if platform.system() == 'Windows' else str def file_size(file): @@ -250,7 +250,7 @@ def check_dataset(data, autodownload=True): # Read yaml (optional) if isinstance(data, (str, Path)): - with open(data, encoding='ascii', errors='ignore') as f: + with open(data, errors='ignore') as f: data = yaml.safe_load(f) # dictionary # Parse yaml diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index 3f2684a7f3e3..019aebf094e1 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -62,7 +62,7 @@ def check_wandb_resume(opt): def process_wandb_config_ddp_mode(opt): - with open(check_file(opt.data), encoding='ascii', errors='ignore') as f: + with open(check_file(opt.data), errors='ignore') as f: data_dict = yaml.safe_load(f) # data dict train_dir, val_dir = None, None if isinstance(data_dict['train'], str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX): @@ -152,7 +152,7 @@ def __init__(self, opt, run_id, job_type='Training'): self.wandb_artifact_data_dict = self.check_and_upload_dataset(opt) elif opt.data.endswith('_wandb.yaml'): # When dataset is W&B artifact - with open(opt.data, encoding='ascii', errors='ignore') as f: + with open(opt.data, errors='ignore') as f: data_dict = yaml.safe_load(f) self.data_dict = data_dict else: # Local .yaml dataset file or .zip file @@ -186,7 +186,7 @@ def check_and_upload_dataset(self, opt): opt.single_cls, 'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem) print("Created dataset config file ", config_path) - with open(config_path, encoding='ascii', errors='ignore') as f: + with open(config_path, errors='ignore') as f: wandb_data_dict = yaml.safe_load(f) return wandb_data_dict From 2da4e7acf7510dd2a249120c484c6e5157459b3e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 14 Aug 2021 17:44:15 +0200 Subject: [PATCH 0482/1976] Merge PIL and OpenCV in `plot_one_box(use_pil=False)` (#4416) * Merge PIL and OpenCV box plotting functions * Add ASCII check to plot_one_box * Cleanup * Cleanup2 --- detect.py | 2 +- models/common.py | 2 +- utils/general.py | 7 +++++- utils/plots.py | 56 +++++++++++++++++++++++------------------------- 4 files changed, 35 insertions(+), 32 deletions(-) diff --git a/detect.py b/detect.py index 88d1d9ca3800..db0c545b0635 100644 --- a/detect.py +++ b/detect.py @@ -156,7 +156,7 @@ def run(weights='yolov5s.pt', # model.pt path(s) if save_img or save_crop or view_img: # Add bbox to image c = int(cls) # integer class label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') - plot_one_box(xyxy, im0, label=label, color=colors(c, True), line_thickness=line_thickness) + im0 = plot_one_box(xyxy, im0, label=label, color=colors(c, True), line_width=line_thickness) if save_crop: save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) diff --git a/models/common.py b/models/common.py index 5ef3996007a2..35790804ca52 100644 --- a/models/common.py +++ b/models/common.py @@ -354,7 +354,7 @@ def display(self, pprint=False, show=False, save=False, crop=False, render=False if crop: save_one_box(box, im, 
file=save_dir / 'crops' / self.names[int(cls)] / self.files[i]) else: # all others - plot_one_box(box, im, label=label, color=colors(cls)) + im = plot_one_box(box, im, label=label, color=colors(cls)) else: str += '(no detections)' diff --git a/utils/general.py b/utils/general.py index c70c21f47636..4fc32f5691fc 100755 --- a/utils/general.py +++ b/utils/general.py @@ -110,9 +110,14 @@ def is_pip(): return 'site-packages' in Path(__file__).absolute().parts +def is_ascii(str=''): + # Is string composed of all ASCII (no UTF) characters? + return len(str.encode().decode('ascii', 'ignore')) == len(str) + + def emojis(str=''): # Return platform-dependent emoji-safe version of string - return str.encode().decode(encoding='ascii', errors='ignore') if platform.system() == 'Windows' else str + return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str def file_size(file): diff --git a/utils/plots.py b/utils/plots.py index 7db527e14924..71e90b00241d 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -1,20 +1,19 @@ # Plotting utils +import math from copy import copy from pathlib import Path import cv2 -import math import matplotlib import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sn import torch -import yaml from PIL import Image, ImageDraw, ImageFont -from utils.general import xywh2xyxy, xyxy2xywh +from utils.general import is_ascii, xyxy2xywh, xywh2xyxy from utils.metrics import fitness # Settings @@ -65,32 +64,31 @@ def butter_lowpass(cutoff, fs, order): return filtfilt(b, a, data) # forward-backward filter -def plot_one_box(x, im, color=(128, 128, 128), label=None, line_thickness=3): - # Plots one bounding box on image 'im' using OpenCV +def plot_one_box(box, im, color=(128, 128, 128), txt_color=(255, 255, 255), label=None, line_width=3, use_pil=False): + # Plots one xyxy box on image im with label assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to plot_on_box() input image.' 
- tl = line_thickness or round(0.002 * (im.shape[0] + im.shape[1]) / 2) + 1 # line/font thickness - c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3])) - cv2.rectangle(im, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA) - if label: - tf = max(tl - 1, 1) # font thickness - t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0] - c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3 - cv2.rectangle(im, c1, c2, color, -1, cv2.LINE_AA) # filled - cv2.putText(im, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA) - - -def plot_one_box_PIL(box, im, color=(128, 128, 128), label=None, line_thickness=3): - # Plots one bounding box on image 'im' using PIL - im = Image.fromarray(im) - draw = ImageDraw.Draw(im) - line_thickness = line_thickness or max(int(min(im.size) / 200), 2) - draw.rectangle(box, width=line_thickness, outline=color) # plot - if label: - font = ImageFont.truetype("Arial.ttf", size=max(round(max(im.size) / 40), 12)) - txt_width, txt_height = font.getsize(label) - draw.rectangle([box[0], box[1] - txt_height + 4, box[0] + txt_width, box[1]], fill=color) - draw.text((box[0], box[1] - txt_height + 1), label, fill=(255, 255, 255), font=font) - return np.asarray(im) + lw = line_width or max(int(min(im.size) / 200), 2) # line width + + if use_pil or not is_ascii(label): # use PIL + im = Image.fromarray(im) + draw = ImageDraw.Draw(im) + draw.rectangle(box, width=lw + 1, outline=color) # plot + if label: + font = ImageFont.truetype("Arial.ttf", size=max(round(max(im.size) / 40), 12)) + txt_width, txt_height = font.getsize(label) + draw.rectangle([box[0], box[1] - txt_height + 4, box[0] + txt_width, box[1]], fill=color) + draw.text((box[0], box[1] - txt_height + 1), label, fill=txt_color, font=font) + return np.asarray(im) + else: # use OpenCV + c1, c2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3])) + cv2.rectangle(im, c1, c2, color, thickness=lw, lineType=cv2.LINE_AA) + if label: + tf = max(lw - 1, 1) # font thickness + txt_width, txt_height = cv2.getTextSize(label, 0, fontScale=lw / 3, thickness=tf)[0] + c2 = c1[0] + txt_width, c1[1] - txt_height - 3 + cv2.rectangle(im, c1, c2, color, -1, cv2.LINE_AA) # filled + cv2.putText(im, label, (c1[0], c1[1] - 2), 0, lw / 3, txt_color, thickness=tf, lineType=cv2.LINE_AA) + return im def plot_wh_methods(): # from utils.plots import *; plot_wh_methods() @@ -180,7 +178,7 @@ def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max cls = names[cls] if names else cls if labels or conf[j] > 0.25: # 0.25 conf thresh label = '%s' % cls if labels else '%s %.1f' % (cls, conf[j]) - plot_one_box(box, mosaic, label=label, color=color, line_thickness=tl) + mosaic = plot_one_box(box, mosaic, label=label, color=color, line_width=tl) # Draw image filename labels if paths: From bb0aed1ba6a62b940df902de7cc6741603bbe82d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 14 Aug 2021 21:12:05 +0200 Subject: [PATCH 0483/1976] Created using Colaboratory --- tutorial.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index eaa886509a66..7cef01b6b651 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -545,7 +545,7 @@ "\n", "\n", "\n", - "This is the **official YOLOv5 🚀 notebook** authored by **Ultralytics**, and is freely available for redistribution under the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/). 
\n", + "This is the **official YOLOv5 🚀 notebook** by **Ultralytics**, and is freely available for redistribution under the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/). \n", "For more information please visit https://github.com/ultralytics/yolov5 and https://ultralytics.com. Thank you!" ] }, From 24bea5e4b7ad369753f45e93b736d9205c37d20a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 14 Aug 2021 21:17:51 +0200 Subject: [PATCH 0484/1976] Standardize headers and docstrings (#4417) * Implement new headers * Reformat 1 * Reformat 2 * Reformat 3 - math * Reformat 4 - yaml --- .github/workflows/ci-testing.yml | 12 +++++++----- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/greetings.yml | 4 +++- .github/workflows/rebase.yml | 2 +- .github/workflows/stale.yml | 2 ++ Dockerfile | 2 ++ data/Argoverse.yaml | 2 +- data/GlobalWheat2020.yaml | 2 +- data/Objects365.yaml | 2 +- data/SKU-110K.yaml | 2 +- data/VOC.yaml | 2 +- data/VisDrone.yaml | 2 +- data/coco.yaml | 2 +- data/coco128.yaml | 2 +- data/hyps/hyp.finetune.yaml | 2 +- data/hyps/hyp.finetune_objects365.yaml | 2 ++ data/hyps/hyp.scratch-p6.yaml | 2 +- data/hyps/hyp.scratch.yaml | 2 +- data/scripts/download_weights.sh | 2 +- data/scripts/get_coco.sh | 2 +- data/scripts/get_coco128.sh | 2 +- data/xView.yaml | 2 +- detect.py | 4 +++- export.py | 4 +++- hubconf.py | 4 +++- models/common.py | 7 +++++-- models/experimental.py | 7 +++++-- models/hub/anchors.yaml | 3 ++- models/hub/yolov3-spp.yaml | 2 ++ models/hub/yolov3-tiny.yaml | 2 ++ models/hub/yolov3.yaml | 2 ++ models/hub/yolov5-bifpn.yaml | 2 ++ models/hub/yolov5-fpn.yaml | 2 ++ models/hub/yolov5-p2.yaml | 2 ++ models/hub/yolov5-p6.yaml | 2 ++ models/hub/yolov5-p7.yaml | 2 ++ models/hub/yolov5-panet.yaml | 2 ++ models/hub/yolov5l6.yaml | 2 ++ models/hub/yolov5m6.yaml | 2 ++ models/hub/yolov5s-ghost.yaml | 2 ++ models/hub/yolov5s-transformer.yaml | 2 ++ models/hub/yolov5s6.yaml | 2 ++ models/hub/yolov5x6.yaml | 2 ++ models/yolo.py | 4 +++- models/yolov5l.yaml | 2 ++ models/yolov5m.yaml | 2 ++ models/yolov5s.yaml | 2 ++ models/yolov5x.yaml | 2 ++ train.py | 6 ++++-- utils/activations.py | 5 ++++- utils/augmentations.py | 7 +++++-- utils/autoanchor.py | 5 ++++- utils/callbacks.py | 6 +++++- utils/datasets.py | 5 ++++- utils/downloads.py | 5 ++++- utils/flask_rest_api/README.md | 9 +++++++-- utils/general.py | 7 +++++-- utils/loggers/__init__.py | 6 +++++- utils/loggers/wandb/wandb_utils.py | 2 +- utils/loss.py | 5 ++++- utils/metrics.py | 7 +++++-- utils/plots.py | 5 ++++- utils/torch_utils.py | 7 +++++-- val.py | 4 +++- 64 files changed, 164 insertions(+), 54 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index a7964ea01d5d..02e8f74bf56c 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -1,11 +1,13 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + name: CI CPU testing on: # https://help.github.com/en/actions/reference/events-that-trigger-workflows push: - branches: [ master, develop ] + branches: [master, develop] pull_request: # The branches below must be a subset of the branches above - branches: [ master, develop ] + branches: [master, develop] jobs: cpu-tests: @@ -14,9 +16,9 @@ jobs: strategy: fail-fast: false matrix: - os: [ ubuntu-latest, macos-latest, windows-latest ] - python-version: [ 3.8 ] - model: [ 'yolov5s' ] # models to test + os: [ubuntu-latest, macos-latest, windows-latest] + python-version: [3.8] + model: ['yolov5s'] # models to test # Timeout: 
https://stackoverflow.com/a/59076067/4521646 timeout-minutes: 50 diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 458465d90eef..2305ea07e902 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -15,7 +15,7 @@ jobs: strategy: fail-fast: false matrix: - language: [ 'python' ] + language: ['python'] # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ] # Learn more: # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index ddd739ea5769..c557e77f3b70 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -1,6 +1,8 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + name: Greetings -on: [ pull_request_target, issues ] +on: [pull_request_target, issues] jobs: greeting: diff --git a/.github/workflows/rebase.yml b/.github/workflows/rebase.yml index 38e14578216c..e86c57744b84 100644 --- a/.github/workflows/rebase.yml +++ b/.github/workflows/rebase.yml @@ -3,7 +3,7 @@ name: Automatic Rebase on: issue_comment: - types: [ created ] + types: [created] jobs: rebase: diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index d620e540706a..c81c0ca18c2f 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -1,3 +1,5 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + name: Close stale issues on: schedule: diff --git a/Dockerfile b/Dockerfile index e22c1106f23d..858b22bc6383 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,3 +1,5 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + # Start FROM Nvidia PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch FROM nvcr.io/nvidia/pytorch:21.05-py3 diff --git a/data/Argoverse.yaml b/data/Argoverse.yaml index 3bf91ce7d504..1625dd1b9d2b 100644 --- a/data/Argoverse.yaml +++ b/data/Argoverse.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license # Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ # Example usage: python train.py --data Argoverse.yaml # parent diff --git a/data/GlobalWheat2020.yaml b/data/GlobalWheat2020.yaml index de9c7837cf57..75b3bfdff43e 100644 --- a/data/GlobalWheat2020.yaml +++ b/data/GlobalWheat2020.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license # Global Wheat 2020 dataset http://www.global-wheat.com/ # Example usage: python train.py --data GlobalWheat2020.yaml # parent diff --git a/data/Objects365.yaml b/data/Objects365.yaml index 457b9fd9bf69..dc5bfbc7faa4 100644 --- a/data/Objects365.yaml +++ b/data/Objects365.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license # Objects365 dataset https://www.objects365.org/ # Example usage: python train.py --data Objects365.yaml # parent diff --git a/data/SKU-110K.yaml b/data/SKU-110K.yaml index c85fa81d2e03..653485e2079a 100644 --- a/data/SKU-110K.yaml +++ b/data/SKU-110K.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license # SKU-110K retail items dataset https://github.com/eg4000/SKU110K_CVPR19 # 
Example usage: python train.py --data SKU-110K.yaml # parent diff --git a/data/VOC.yaml b/data/VOC.yaml index e59fb6afd2fd..8dbaacf9c290 100644 --- a/data/VOC.yaml +++ b/data/VOC.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license # PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC # Example usage: python train.py --data VOC.yaml # parent diff --git a/data/VisDrone.yaml b/data/VisDrone.yaml index fe6cb9199ce1..7753da98269e 100644 --- a/data/VisDrone.yaml +++ b/data/VisDrone.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license # VisDrone2019-DET dataset https://github.com/VisDrone/VisDrone-Dataset # Example usage: python train.py --data VisDrone.yaml # parent diff --git a/data/coco.yaml b/data/coco.yaml index acf8e84f3e21..2ccc6478b620 100644 --- a/data/coco.yaml +++ b/data/coco.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license # COCO 2017 dataset http://cocodataset.org # Example usage: python train.py --data coco.yaml # parent diff --git a/data/coco128.yaml b/data/coco128.yaml index eda39dcdaa8d..70cf52c397af 100644 --- a/data/coco128.yaml +++ b/data/coco128.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license # COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) # Example usage: python train.py --data coco128.yaml # parent diff --git a/data/hyps/hyp.finetune.yaml b/data/hyps/hyp.finetune.yaml index 237cd5bc19a1..b89d66ff8dee 100644 --- a/data/hyps/hyp.finetune.yaml +++ b/data/hyps/hyp.finetune.yaml @@ -1,8 +1,8 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license # Hyperparameters for VOC finetuning # python train.py --batch 64 --weights yolov5m.pt --data VOC.yaml --img 512 --epochs 50 # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials - # Hyperparameter Evolution Results # Generations: 306 # P R mAP.5 mAP.5:.95 box obj cls diff --git a/data/hyps/hyp.finetune_objects365.yaml b/data/hyps/hyp.finetune_objects365.yaml index 435fa7a45119..073720a65be5 100644 --- a/data/hyps/hyp.finetune_objects365.yaml +++ b/data/hyps/hyp.finetune_objects365.yaml @@ -1,3 +1,5 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + lr0: 0.00258 lrf: 0.17 momentum: 0.779 diff --git a/data/hyps/hyp.scratch-p6.yaml b/data/hyps/hyp.scratch-p6.yaml index fc1d8ebe0876..7aad818e5b16 100644 --- a/data/hyps/hyp.scratch-p6.yaml +++ b/data/hyps/hyp.scratch-p6.yaml @@ -1,8 +1,8 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license # Hyperparameters for COCO training from scratch # python train.py --batch 32 --cfg yolov5m6.yaml --weights '' --data coco.yaml --img 1280 --epochs 300 # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials - lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3) lrf: 0.2 # final OneCycleLR learning rate (lr0 * lrf) momentum: 0.937 # SGD momentum/Adam beta1 diff --git a/data/hyps/hyp.scratch.yaml b/data/hyps/hyp.scratch.yaml index b2cf2e32c638..77405a537067 100644 --- a/data/hyps/hyp.scratch.yaml +++ b/data/hyps/hyp.scratch.yaml @@ -1,8 +1,8 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license # Hyperparameters for COCO training from scratch # python train.py --batch 40 --cfg yolov5m.yaml --weights '' --data coco.yaml --img 
640 --epochs 300 # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials - lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3) lrf: 0.2 # final OneCycleLR learning rate (lr0 * lrf) momentum: 0.937 # SGD momentum/Adam beta1 diff --git a/data/scripts/download_weights.sh b/data/scripts/download_weights.sh index a576c956d008..b4b0ccd7857e 100755 --- a/data/scripts/download_weights.sh +++ b/data/scripts/download_weights.sh @@ -1,5 +1,5 @@ #!/bin/bash -# YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license # Download latest models from https://github.com/ultralytics/yolov5/releases # Example usage: bash path/to/download_weights.sh # parent diff --git a/data/scripts/get_coco.sh b/data/scripts/get_coco.sh index f6c075689709..0210c8ebbda4 100755 --- a/data/scripts/get_coco.sh +++ b/data/scripts/get_coco.sh @@ -1,5 +1,5 @@ #!/bin/bash -# YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license # Download COCO 2017 dataset http://cocodataset.org # Example usage: bash data/scripts/get_coco.sh # parent diff --git a/data/scripts/get_coco128.sh b/data/scripts/get_coco128.sh index 6eb47bfe5595..4238e3634dbb 100644 --- a/data/scripts/get_coco128.sh +++ b/data/scripts/get_coco128.sh @@ -1,5 +1,5 @@ #!/bin/bash -# YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license # Download COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) # Example usage: bash data/scripts/get_coco128.sh # parent diff --git a/data/xView.yaml b/data/xView.yaml index e191188da0f0..fabcdb0bdd13 100644 --- a/data/xView.yaml +++ b/data/xView.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0 +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license # xView 2018 dataset https://challenge.xviewdataset.org # -------- DOWNLOAD DATA MANUALLY from URL above and unzip to 'datasets/xView' before running train command! -------- # Example usage: python train.py --data xView.yaml diff --git a/detect.py b/detect.py index db0c545b0635..49ebbe96c068 100644 --- a/detect.py +++ b/detect.py @@ -1,4 +1,6 @@ -"""Run inference with a YOLOv5 model on images, videos, directories, streams +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Run inference on images, videos, directories, streams, etc. 
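(Editor's sketch of the source types referred to above, inferred from the numeric/txt/stream-prefix checks in detect.py's dataloader setup; not part of the patch:)

$ python path/to/detect.py --source 0                # webcam
$ python path/to/detect.py --source img.jpg          # image
$ python path/to/detect.py --source vid.mp4          # video
$ python path/to/detect.py --source path/            # directory
$ python path/to/detect.py --source 'path/*.jpg'     # glob pattern
$ python path/to/detect.py --source 'rtsp://example.com/stream'  # RTSP/HTTP stream (hypothetical URL)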
Usage: $ python path/to/detect.py --source path/to/img.jpg --weights yolov5s.pt --img 640 diff --git a/export.py b/export.py index db5a6918845c..db805cb45e6e 100644 --- a/export.py +++ b/export.py @@ -1,4 +1,6 @@ -"""Export a YOLOv5 *.pt model to TorchScript, ONNX, CoreML formats +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Export a PyTorch model to TorchScript, ONNX, CoreML formats Usage: $ python path/to/export.py --weights yolov5s.pt --img 640 --batch 1 diff --git a/hubconf.py b/hubconf.py index 93ea84d69dd3..36f3bd86bc11 100644 --- a/hubconf.py +++ b/hubconf.py @@ -1,4 +1,6 @@ -"""YOLOv5 PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5/ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5/ Usage: import torch diff --git a/models/common.py b/models/common.py index 35790804ca52..fe4319b0f370 100644 --- a/models/common.py +++ b/models/common.py @@ -1,11 +1,14 @@ -# YOLOv5 common modules +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Common modules +""" import logging +import math import warnings from copy import copy from pathlib import Path -import math import numpy as np import pandas as pd import requests diff --git a/models/experimental.py b/models/experimental.py index 5c690cce3d99..7dfaf9611bec 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -1,10 +1,13 @@ -# YOLOv5 experimental modules +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Experimental modules +""" import numpy as np import torch import torch.nn as nn -from models.common import Conv, DWConv +from models.common import Conv from utils.downloads import attempt_download diff --git a/models/hub/anchors.yaml b/models/hub/anchors.yaml index 57512955ac1f..e4d7beb06e07 100644 --- a/models/hub/anchors.yaml +++ b/models/hub/anchors.yaml @@ -1,4 +1,5 @@ -# Default YOLOv5 anchors for COCO data +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# Default anchors for COCO data # P5 ------------------------------------------------------------------------------------------------------------------- diff --git a/models/hub/yolov3-spp.yaml b/models/hub/yolov3-spp.yaml index ddc0549f50d6..c66982158ce8 100644 --- a/models/hub/yolov3-spp.yaml +++ b/models/hub/yolov3-spp.yaml @@ -1,3 +1,5 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + # Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple diff --git a/models/hub/yolov3-tiny.yaml b/models/hub/yolov3-tiny.yaml index 537ad755b166..b28b44315248 100644 --- a/models/hub/yolov3-tiny.yaml +++ b/models/hub/yolov3-tiny.yaml @@ -1,3 +1,5 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + # Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple diff --git a/models/hub/yolov3.yaml b/models/hub/yolov3.yaml index 3adfc2c6d2f9..4f4b240e6c36 100644 --- a/models/hub/yolov3.yaml +++ b/models/hub/yolov3.yaml @@ -1,3 +1,5 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + # Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple diff --git a/models/hub/yolov5-bifpn.yaml b/models/hub/yolov5-bifpn.yaml index 69f7b5938c58..119aebb1523a 100644 --- a/models/hub/yolov5-bifpn.yaml +++ b/models/hub/yolov5-bifpn.yaml @@ -1,3 +1,5 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + # Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple diff --git a/models/hub/yolov5-fpn.yaml b/models/hub/yolov5-fpn.yaml index 217e4ca6ac96..707b2136cee1 100644 --- a/models/hub/yolov5-fpn.yaml +++ b/models/hub/yolov5-fpn.yaml @@ -1,3 +1,5 @@ +# 
YOLOv5 🚀 by Ultralytics, GPL-3.0 license + # Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple diff --git a/models/hub/yolov5-p2.yaml b/models/hub/yolov5-p2.yaml index 6a932a868229..44d8da55dafb 100644 --- a/models/hub/yolov5-p2.yaml +++ b/models/hub/yolov5-p2.yaml @@ -1,3 +1,5 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + # Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple diff --git a/models/hub/yolov5-p6.yaml b/models/hub/yolov5-p6.yaml index 58b86b0ca892..85e142539ce3 100644 --- a/models/hub/yolov5-p6.yaml +++ b/models/hub/yolov5-p6.yaml @@ -1,3 +1,5 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + # Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple diff --git a/models/hub/yolov5-p7.yaml b/models/hub/yolov5-p7.yaml index f6e8fc7928cc..88a7a95cbbd1 100644 --- a/models/hub/yolov5-p7.yaml +++ b/models/hub/yolov5-p7.yaml @@ -1,3 +1,5 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + # Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple diff --git a/models/hub/yolov5-panet.yaml b/models/hub/yolov5-panet.yaml index c5f3b4817102..76b9b7e74e33 100644 --- a/models/hub/yolov5-panet.yaml +++ b/models/hub/yolov5-panet.yaml @@ -1,3 +1,5 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + # Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple diff --git a/models/hub/yolov5l6.yaml b/models/hub/yolov5l6.yaml index d5afd7d84100..1288f15f940b 100644 --- a/models/hub/yolov5l6.yaml +++ b/models/hub/yolov5l6.yaml @@ -1,3 +1,5 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + # Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple diff --git a/models/hub/yolov5m6.yaml b/models/hub/yolov5m6.yaml index 16a841a0b4b0..f14f0b0ebcce 100644 --- a/models/hub/yolov5m6.yaml +++ b/models/hub/yolov5m6.yaml @@ -1,3 +1,5 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + # Parameters nc: 80 # number of classes depth_multiple: 0.67 # model depth multiple diff --git a/models/hub/yolov5s-ghost.yaml b/models/hub/yolov5s-ghost.yaml index d99d56d26e85..dbf2c8e03489 100644 --- a/models/hub/yolov5s-ghost.yaml +++ b/models/hub/yolov5s-ghost.yaml @@ -1,3 +1,5 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + # Parameters nc: 80 # number of classes depth_multiple: 0.33 # model depth multiple diff --git a/models/hub/yolov5s-transformer.yaml b/models/hub/yolov5s-transformer.yaml index b999ebb7583d..aeac1acb0582 100644 --- a/models/hub/yolov5s-transformer.yaml +++ b/models/hub/yolov5s-transformer.yaml @@ -1,3 +1,5 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + # Parameters nc: 80 # number of classes depth_multiple: 0.33 # model depth multiple diff --git a/models/hub/yolov5s6.yaml b/models/hub/yolov5s6.yaml index 2fb245050053..2baee5af9e05 100644 --- a/models/hub/yolov5s6.yaml +++ b/models/hub/yolov5s6.yaml @@ -1,3 +1,5 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + # Parameters nc: 80 # number of classes depth_multiple: 0.33 # model depth multiple diff --git a/models/hub/yolov5x6.yaml b/models/hub/yolov5x6.yaml index c5187101072b..e94f592fc19a 100644 --- a/models/hub/yolov5x6.yaml +++ b/models/hub/yolov5x6.yaml @@ -1,3 +1,5 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + # Parameters nc: 80 # number of classes depth_multiple: 1.33 # model depth multiple diff --git a/models/yolo.py b/models/yolo.py index 88adb71f8fea..f3c1516f49f7 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -1,4 +1,6 @@ -"""YOLOv5-specific modules +# YOLOv5 🚀 by Ultralytics, GPL-3.0 
license +""" +YOLO-specific modules Usage: $ python path/to/models/yolo.py --cfg yolov5s.yaml diff --git a/models/yolov5l.yaml b/models/yolov5l.yaml index 0c130c1514af..30b22a25a483 100644 --- a/models/yolov5l.yaml +++ b/models/yolov5l.yaml @@ -1,3 +1,5 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + # Parameters nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple diff --git a/models/yolov5m.yaml b/models/yolov5m.yaml index e477b3433d39..f5f518ad8ab3 100644 --- a/models/yolov5m.yaml +++ b/models/yolov5m.yaml @@ -1,3 +1,5 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + # Parameters nc: 80 # number of classes depth_multiple: 0.67 # model depth multiple diff --git a/models/yolov5s.yaml b/models/yolov5s.yaml index e85442dc9188..b311ab7fd50a 100644 --- a/models/yolov5s.yaml +++ b/models/yolov5s.yaml @@ -1,3 +1,5 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + # Parameters nc: 80 # number of classes depth_multiple: 0.33 # model depth multiple diff --git a/models/yolov5x.yaml b/models/yolov5x.yaml index c7ca03589ab8..7dcb822b8b84 100644 --- a/models/yolov5x.yaml +++ b/models/yolov5x.yaml @@ -1,3 +1,5 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + # Parameters nc: 80 # number of classes depth_multiple: 1.33 # model depth multiple diff --git a/train.py b/train.py index 24152f1a1198..0aa7a13628dc 100644 --- a/train.py +++ b/train.py @@ -1,4 +1,6 @@ -"""Train a YOLOv5 model on a custom dataset +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Train a YOLOv5 model on a custom dataset Usage: $ python path/to/train.py --data coco128.yaml --weights yolov5s.pt --img 640 @@ -6,6 +8,7 @@ import argparse import logging +import math import os import random import sys @@ -13,7 +16,6 @@ from copy import deepcopy from pathlib import Path -import math import numpy as np import torch import torch.distributed as dist diff --git a/utils/activations.py b/utils/activations.py index 92a3b5eaa54b..62eb532b3f95 100644 --- a/utils/activations.py +++ b/utils/activations.py @@ -1,4 +1,7 @@ -# Activation functions +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Activation functions +""" import torch import torch.nn as nn diff --git a/utils/augmentations.py b/utils/augmentations.py index cf64f2f9db1f..49f957e6fd62 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -1,10 +1,13 @@ -# YOLOv5 image augmentation functions +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Image augmentation functions +""" import logging +import math import random import cv2 -import math import numpy as np from utils.general import colorstr, segment2box, resample_segments, check_version diff --git a/utils/autoanchor.py b/utils/autoanchor.py index eca1d5be8ebe..66a2712dfd5d 100644 --- a/utils/autoanchor.py +++ b/utils/autoanchor.py @@ -1,4 +1,7 @@ -# Auto-anchor utils +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Auto-anchor utils +""" import random diff --git a/utils/callbacks.py b/utils/callbacks.py index a204ec1ceaaf..19c334430b5d 100644 --- a/utils/callbacks.py +++ b/utils/callbacks.py @@ -1,4 +1,8 @@ -#!/usr/bin/env python +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Callback utils +""" + class Callbacks: """" diff --git a/utils/datasets.py b/utils/datasets.py index b402723f9c49..7d831cd63230 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -1,4 +1,7 @@ -# YOLOv5 dataset utils and dataloaders +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Dataloaders and dataset utils +""" import glob import hashlib diff --git a/utils/downloads.py b/utils/downloads.py index 6b2c37433b5b..27cb899cd606 
100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -1,4 +1,7 @@ -# Download utils +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Download utils +""" import os import platform diff --git a/utils/flask_rest_api/README.md b/utils/flask_rest_api/README.md index 6c835936dde6..a726acbd9204 100644 --- a/utils/flask_rest_api/README.md +++ b/utils/flask_rest_api/README.md @@ -1,9 +1,13 @@ # Flask REST API -[REST](https://en.wikipedia.org/wiki/Representational_state_transfer) [API](https://en.wikipedia.org/wiki/API)s are commonly used to expose Machine Learning (ML) models to other services. This folder contains an example REST API created using Flask to expose the YOLOv5s model from [PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5/). + +[REST](https://en.wikipedia.org/wiki/Representational_state_transfer) [API](https://en.wikipedia.org/wiki/API)s are +commonly used to expose Machine Learning (ML) models to other services. This folder contains an example REST API +created using Flask to expose the YOLOv5s model from [PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5/). ## Requirements [Flask](https://palletsprojects.com/p/flask/) is required. Install with: + ```shell $ pip install Flask ``` @@ -65,4 +69,5 @@ The model inference results are returned as a JSON response: ] ``` -An example python script to perform inference using [requests](https://docs.python-requests.org/en/master/) is given in `example_request.py` +An example python script to perform inference using [requests](https://docs.python-requests.org/en/master/) is given +in `example_request.py` diff --git a/utils/general.py b/utils/general.py index 4fc32f5691fc..850ca6ba0b1f 100755 --- a/utils/general.py +++ b/utils/general.py @@ -1,8 +1,12 @@ -# YOLOv5 general utils +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +General utils +""" import contextlib import glob import logging +import math import os import platform import random @@ -16,7 +20,6 @@ from subprocess import check_output import cv2 -import math import numpy as np import pandas as pd import pkg_resources as pkg diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index d40c0c350fde..3d67e9307b4c 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -1,4 +1,8 @@ -# YOLOv5 experiment logging utils +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Logging utils +""" + import warnings from threading import Thread diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index 019aebf094e1..4631e8a1f8fd 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -507,4 +507,4 @@ def all_logging_disabled(highest_level=logging.CRITICAL): try: yield finally: - logging.disable(previous_level) \ No newline at end of file + logging.disable(previous_level) diff --git a/utils/loss.py b/utils/loss.py index 79e8f24359c1..29aac3191c10 100644 --- a/utils/loss.py +++ b/utils/loss.py @@ -1,4 +1,7 @@ -# Loss functions +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Loss functions +""" import torch import torch.nn as nn diff --git a/utils/metrics.py b/utils/metrics.py index c94c4a76a964..ddc425910a75 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -1,9 +1,12 @@ -# Model validation metrics +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Model validation metrics +""" +import math import warnings from pathlib import Path -import math import matplotlib.pyplot as plt import numpy as np import torch diff --git a/utils/plots.py b/utils/plots.py index 71e90b00241d..76c161a13d1a 
100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -1,4 +1,7 @@ -# Plotting utils +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Plotting utils +""" import math from copy import copy diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 628f672a010d..dff0617e87c9 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -1,7 +1,11 @@ -# YOLOv5 PyTorch utils +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +PyTorch utils +""" import datetime import logging +import math import os import platform import subprocess @@ -10,7 +14,6 @@ from copy import deepcopy from pathlib import Path -import math import torch import torch.backends.cudnn as cudnn import torch.distributed as dist diff --git a/val.py b/val.py index 4c1d7d26b0de..cbee8cf1c026 100644 --- a/val.py +++ b/val.py @@ -1,4 +1,6 @@ -"""Validate a trained YOLOv5 model accuracy on a custom dataset +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Validate a trained YOLOv5 model accuracy on a custom dataset Usage: $ python path/to/val.py --data coco128.yaml --weights yolov5s.pt --img 640 From 01cdb7671b82be8dfa9e0bf47af2ab7554825bb0 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 15 Aug 2021 14:28:35 +0200 Subject: [PATCH 0485/1976] Add `SPPF()` layer (#4420) * Add `SPPF()` layer * Cleanup * Add credit --- models/common.py | 20 +++++++++++++++++++- models/yolo.py | 10 ++++++---- 2 files changed, 25 insertions(+), 5 deletions(-) diff --git a/models/common.py b/models/common.py index fe4319b0f370..e1f5aea3abed 100644 --- a/models/common.py +++ b/models/common.py @@ -161,7 +161,7 @@ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): class SPP(nn.Module): - # Spatial pyramid pooling layer used in YOLOv3-SPP + # Spatial Pyramid Pooling (SPP) layer https://arxiv.org/abs/1406.4729 def __init__(self, c1, c2, k=(5, 9, 13)): super().__init__() c_ = c1 // 2 # hidden channels @@ -176,6 +176,24 @@ def forward(self, x): return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1)) +class SPPF(nn.Module): + # Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher + def __init__(self, c1, c2, k=5): # equivalent to SPP(k=(5, 9, 13)) + super().__init__() + c_ = c1 // 2 # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c_ * 4, c2, 1, 1) + self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2) + + def forward(self, x): + x = self.cv1(x) + with warnings.catch_warnings(): + warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning + y1 = self.m(x) + y2 = self.m(y1) + return self.cv2(torch.cat([x, y1, y2, self.m(y2)], 1)) + + class Focus(nn.Module): # Focus wh information into c-space def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups diff --git a/models/yolo.py b/models/yolo.py index f3c1516f49f7..dee6032d069d 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -237,8 +237,8 @@ def parse_model(d, ch): # model_dict, input_channels(3) pass n = n_ = max(round(n * gd), 1) if n > 1 else n # depth gain - if m in [Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP, - C3, C3TR, C3SPP, C3Ghost]: + if m in [Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv, MixConv2d, Focus, CrossConv, + BottleneckCSP, C3, C3TR, C3SPP, C3Ghost]: c1, c2 = ch[f], args[0] if c2 != no: # if not output c2 = make_divisible(c2 * gw, 8) @@ -279,6 +279,7 @@ def parse_model(d, ch): # model_dict, input_channels(3) parser = argparse.ArgumentParser() parser.add_argument('--cfg', type=str, 
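# Editor's note (illustrative sketch, not part of the patch): the SPPF() layer
# above is equivalent to SPP(k=(5, 9, 13)) because stride-1 max-pools compose,
# so its chained 5x5 poolings reproduce the 9x9 and 13x13 receptive fields
# while reusing intermediate results:
#
#     import torch, torch.nn as nn
#     x = torch.rand(1, 32, 16, 16)
#     p5 = nn.MaxPool2d(5, 1, 2)
#     y1, y2, y3 = p5(x), p5(p5(x)), p5(p5(p5(x)))
#     assert torch.equal(y2, nn.MaxPool2d(9, 1, 4)(x))   # two 5x5 pools == one 9x9
#     assert torch.equal(y3, nn.MaxPool2d(13, 1, 6)(x))  # three 5x5 pools == one 13x13
#
# Given identical cv1/cv2 weights, the concatenated pooling maps therefore match
# SPP's output, at lower cost.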
default='yolov5s.yaml', help='model.yaml') parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + parser.add_argument('--profile', action='store_true', help='profile model speed') opt = parser.parse_args() opt.cfg = check_file(opt.cfg) # check file set_logging() @@ -289,8 +290,9 @@ def parse_model(d, ch): # model_dict, input_channels(3) model.train() # Profile - # img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 320, 320).to(device) - # y = model(img, profile=True) + if opt.profile: + img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 640, 640).to(device) + y = model(img, profile=True) # Tensorboard (not working https://github.com/ultralytics/yolov5/issues/2898) # from torch.utils.tensorboard import SummaryWriter From 4e65052f28b1184b9d463c1e44b3a79b95113904 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 15 Aug 2021 16:41:57 +0200 Subject: [PATCH 0486/1976] Created using Colaboratory --- tutorial.ipynb | 424 +++++++++++++------------------------------------ 1 file changed, 106 insertions(+), 318 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 7cef01b6b651..ba6d19113a93 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -6,7 +6,6 @@ "name": "YOLOv5 Tutorial", "provenance": [], "collapsed_sections": [], - "toc_visible": true, "include_colab_link": true }, "kernelspec": { @@ -16,7 +15,7 @@ "accelerator": "GPU", "widgets": { "application/vnd.jupyter.widget-state+json": { - "2e915d9016c846e095e382b6a02ee773": { + "484511f272e64eab8b42e68dac5f7a66": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "model_module_version": "1.5.0", @@ -29,15 +28,16 @@ "_view_count": null, "_view_module_version": "1.5.0", "box_style": "", - "layout": "IPY_MODEL_cb7fc3a5c6cc4fde8d2c83e594a7c86e", + "layout": "IPY_MODEL_78cceec059784f2bb36988d3336e4d56", "_model_module": "@jupyter-widgets/controls", "children": [ - "IPY_MODEL_ac3edef4e3434f4587e6cbf8aa048770", - "IPY_MODEL_853ac234cc2a4236946fc516871e10eb" + "IPY_MODEL_ab93d8b65c134605934ff9ec5efb1bb6", + "IPY_MODEL_30df865ded4c434191bce772c9a82f3a", + "IPY_MODEL_20cdc61eb3404f42a12b37901b0d85fb" ] } }, - "cb7fc3a5c6cc4fde8d2c83e594a7c86e": { + "78cceec059784f2bb36988d3336e4d56": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -89,15 +89,36 @@ "left": null } }, - "ac3edef4e3434f4587e6cbf8aa048770": { + "ab93d8b65c134605934ff9ec5efb1bb6": { + "model_module": "@jupyter-widgets/controls", + "model_name": "HTMLModel", + "model_module_version": "1.5.0", + "state": { + "_view_name": "HTMLView", + "style": "IPY_MODEL_2d7239993a9645b09b221405ac682743", + "_dom_classes": [], + "description": "", + "_model_name": "HTMLModel", + "placeholder": "​", + "_view_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "value": "100%", + "_view_count": null, + "_view_module_version": "1.5.0", + "description_tooltip": null, + "_model_module": "@jupyter-widgets/controls", + "layout": "IPY_MODEL_17b5a87f92104ec7ab96bf507637d0d2" + } + }, + "30df865ded4c434191bce772c9a82f3a": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "model_module_version": "1.5.0", "state": { "_view_name": "ProgressView", - "style": "IPY_MODEL_13842ca90c0047e584b8d68d99dad2b1", + "style": "IPY_MODEL_2358bfb2270247359e94b066b3cc3d1f", "_dom_classes": [], - "description": "100%", + "description": "", "_model_name": "FloatProgressModel", "bar_style": "success", "max": 818322941, @@ -110,99 +131,31 @@ 
"min": 0, "description_tooltip": null, "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_f454999c3a924c7bad0746fb453dec36" + "layout": "IPY_MODEL_3e984405db654b0b83b88b2db08baffd" } }, - "853ac234cc2a4236946fc516871e10eb": { + "20cdc61eb3404f42a12b37901b0d85fb": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", "state": { "_view_name": "HTMLView", - "style": "IPY_MODEL_f94a7ca8c1f04761bf38fdc5f99664b8", + "style": "IPY_MODEL_654d8a19b9f949c6bbdaf8b0875c931e", "_dom_classes": [], "description": "", "_model_name": "HTMLModel", "placeholder": "​", "_view_module": "@jupyter-widgets/controls", "_model_module_version": "1.5.0", - "value": " 780M/780M [03:59<00:00, 3.42MB/s]", + "value": " 780M/780M [00:33<00:00, 24.4MB/s]", "_view_count": null, "_view_module_version": "1.5.0", "description_tooltip": null, "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_9da1a23b042c41618dd14b0e30aa7cbe" + "layout": "IPY_MODEL_896030c5d13b415aaa05032818d81a6e" } }, - "13842ca90c0047e584b8d68d99dad2b1": { - "model_module": "@jupyter-widgets/controls", - "model_name": "ProgressStyleModel", - "model_module_version": "1.5.0", - "state": { - "_view_name": "StyleView", - "_model_name": "ProgressStyleModel", - "description_width": "initial", - "_view_module": "@jupyter-widgets/base", - "_model_module_version": "1.5.0", - "_view_count": null, - "_view_module_version": "1.2.0", - "bar_color": null, - "_model_module": "@jupyter-widgets/controls" - } - }, - "f454999c3a924c7bad0746fb453dec36": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "model_module_version": "1.2.0", - "state": { - "_view_name": "LayoutView", - "grid_template_rows": null, - "right": null, - "justify_content": null, - "_view_module": "@jupyter-widgets/base", - "overflow": null, - "_model_module_version": "1.2.0", - "_view_count": null, - "flex_flow": null, - "width": null, - "min_width": null, - "border": null, - "align_items": null, - "bottom": null, - "_model_module": "@jupyter-widgets/base", - "top": null, - "grid_column": null, - "overflow_y": null, - "overflow_x": null, - "grid_auto_flow": null, - "grid_area": null, - "grid_template_columns": null, - "flex": null, - "_model_name": "LayoutModel", - "justify_items": null, - "grid_row": null, - "max_height": null, - "align_content": null, - "visibility": null, - "align_self": null, - "height": null, - "min_height": null, - "padding": null, - "grid_auto_rows": null, - "grid_gap": null, - "max_width": null, - "order": null, - "_view_module_version": "1.2.0", - "grid_template_areas": null, - "object_position": null, - "object_fit": null, - "grid_auto_columns": null, - "margin": null, - "display": null, - "left": null - } - }, - "f94a7ca8c1f04761bf38fdc5f99664b8": { + "2d7239993a9645b09b221405ac682743": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", @@ -217,7 +170,7 @@ "_model_module": "@jupyter-widgets/controls" } }, - "9da1a23b042c41618dd14b0e30aa7cbe": { + "17b5a87f92104ec7ab96bf507637d0d2": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -269,132 +222,14 @@ "left": null } }, - "6ff8a710ded44391a624dec5c460b771": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HBoxModel", - "model_module_version": "1.5.0", - "state": { - "_view_name": "HBoxView", - "_dom_classes": [], - "_model_name": "HBoxModel", - "_view_module": 
"@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_view_count": null, - "_view_module_version": "1.5.0", - "box_style": "", - "layout": "IPY_MODEL_3c19729b51cd45d4848035da06e96ff8", - "_model_module": "@jupyter-widgets/controls", - "children": [ - "IPY_MODEL_23b2f0ae3d46438c8de375987c77f580", - "IPY_MODEL_dd9498c321a9422da6faf17a0be026d4" - ] - } - }, - "3c19729b51cd45d4848035da06e96ff8": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "model_module_version": "1.2.0", - "state": { - "_view_name": "LayoutView", - "grid_template_rows": null, - "right": null, - "justify_content": null, - "_view_module": "@jupyter-widgets/base", - "overflow": null, - "_model_module_version": "1.2.0", - "_view_count": null, - "flex_flow": null, - "width": null, - "min_width": null, - "border": null, - "align_items": null, - "bottom": null, - "_model_module": "@jupyter-widgets/base", - "top": null, - "grid_column": null, - "overflow_y": null, - "overflow_x": null, - "grid_auto_flow": null, - "grid_area": null, - "grid_template_columns": null, - "flex": null, - "_model_name": "LayoutModel", - "justify_items": null, - "grid_row": null, - "max_height": null, - "align_content": null, - "visibility": null, - "align_self": null, - "height": null, - "min_height": null, - "padding": null, - "grid_auto_rows": null, - "grid_gap": null, - "max_width": null, - "order": null, - "_view_module_version": "1.2.0", - "grid_template_areas": null, - "object_position": null, - "object_fit": null, - "grid_auto_columns": null, - "margin": null, - "display": null, - "left": null - } - }, - "23b2f0ae3d46438c8de375987c77f580": { - "model_module": "@jupyter-widgets/controls", - "model_name": "FloatProgressModel", - "model_module_version": "1.5.0", - "state": { - "_view_name": "ProgressView", - "style": "IPY_MODEL_d8dda4b2ce864fd682e558b9a48f602e", - "_dom_classes": [], - "description": "100%", - "_model_name": "FloatProgressModel", - "bar_style": "success", - "max": 6984509, - "_view_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "value": 6984509, - "_view_count": null, - "_view_module_version": "1.5.0", - "orientation": "horizontal", - "min": 0, - "description_tooltip": null, - "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_ff8151449e444a14869684212b9ab14e" - } - }, - "dd9498c321a9422da6faf17a0be026d4": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HTMLModel", - "model_module_version": "1.5.0", - "state": { - "_view_name": "HTMLView", - "style": "IPY_MODEL_0f84fe609bcf4aa9afdc32a8cf076909", - "_dom_classes": [], - "description": "", - "_model_name": "HTMLModel", - "placeholder": "​", - "_view_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "value": " 6.66M/6.66M [00:01<00:00, 6.08MB/s]", - "_view_count": null, - "_view_module_version": "1.5.0", - "description_tooltip": null, - "_model_module": "@jupyter-widgets/controls", - "layout": "IPY_MODEL_8fda673769984e2b928ef820d34c85c3" - } - }, - "d8dda4b2ce864fd682e558b9a48f602e": { + "2358bfb2270247359e94b066b3cc3d1f": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "model_module_version": "1.5.0", "state": { "_view_name": "StyleView", "_model_name": "ProgressStyleModel", - "description_width": "initial", + "description_width": "", "_view_module": "@jupyter-widgets/base", "_model_module_version": "1.5.0", "_view_count": null, @@ -403,7 +238,7 @@ "_model_module": "@jupyter-widgets/controls" } }, - 
"ff8151449e444a14869684212b9ab14e": { + "3e984405db654b0b83b88b2db08baffd": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -455,7 +290,7 @@ "left": null } }, - "0f84fe609bcf4aa9afdc32a8cf076909": { + "654d8a19b9f949c6bbdaf8b0875c931e": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", @@ -470,7 +305,7 @@ "_model_module": "@jupyter-widgets/controls" } }, - "8fda673769984e2b928ef820d34c85c3": { + "896030c5d13b415aaa05032818d81a6e": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -567,7 +402,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "ada1dd8d-e0aa-4858-e893-dc320319ca30" + "outputId": "4d67116a-43e9-4d84-d19e-1edd83f23a04" }, "source": [ "!git clone https://github.com/ultralytics/yolov5 # clone repo\n", @@ -580,7 +415,7 @@ "clear_output()\n", "print(f\"Setup complete. Using torch {torch.__version__} ({torch.cuda.get_device_properties(0).name if torch.cuda.is_available() else 'CPU'})\")" ], - "execution_count": null, + "execution_count": 1, "outputs": [ { "output_type": "stream", @@ -619,25 +454,26 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "a7a37616-a82b-4bdb-a463-6ead850b5615" + "outputId": "8b728908-81ab-4861-edb0-4d0c46c439fb" }, "source": [ + "%rm -rf runs\n", "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images/\n", - "Image(filename='runs/detect/exp/zidane.jpg', width=600)" + "#Image(filename='runs/detect/exp/zidane.jpg', width=600)" ], - "execution_count": null, + "execution_count": 4, "outputs": [ { "output_type": "stream", "text": [ "\u001b[34m\u001b[1mdetect: \u001b[0mweights=['yolov5s.pt'], source=data/images/, imgsz=640, conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/detect, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False\n", - "YOLOv5 🚀 v5.0-330-g18f6ba7 torch 1.9.0+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", + "YOLOv5 🚀 v5.0-367-g01cdb76 torch 1.9.0+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n", "\n", "Fusing layers... \n", "Model Summary: 224 layers, 7266973 parameters, 0 gradients\n", - "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 1 fire hydrant, Done. (0.008s)\n", - "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, Done. (0.008s)\n", - "Results saved to runs/detect/exp\n", + "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 1 fire hydrant, Done. (0.007s)\n", + "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, Done. (0.007s)\n", + "Results saved to \u001b[1mruns/detect/exp\u001b[0m\n", "Done. 
(0.091s)\n" ], "name": "stdout" @@ -680,49 +516,45 @@ "id": "WQPtK1QYVaD_", "colab": { "base_uri": "https://localhost:8080/", - "height": 66, + "height": 48, "referenced_widgets": [ - "2e915d9016c846e095e382b6a02ee773", - "cb7fc3a5c6cc4fde8d2c83e594a7c86e", - "ac3edef4e3434f4587e6cbf8aa048770", - "853ac234cc2a4236946fc516871e10eb", - "13842ca90c0047e584b8d68d99dad2b1", - "f454999c3a924c7bad0746fb453dec36", - "f94a7ca8c1f04761bf38fdc5f99664b8", - "9da1a23b042c41618dd14b0e30aa7cbe" + "484511f272e64eab8b42e68dac5f7a66", + "78cceec059784f2bb36988d3336e4d56", + "ab93d8b65c134605934ff9ec5efb1bb6", + "30df865ded4c434191bce772c9a82f3a", + "20cdc61eb3404f42a12b37901b0d85fb", + "2d7239993a9645b09b221405ac682743", + "17b5a87f92104ec7ab96bf507637d0d2", + "2358bfb2270247359e94b066b3cc3d1f", + "3e984405db654b0b83b88b2db08baffd", + "654d8a19b9f949c6bbdaf8b0875c931e", + "896030c5d13b415aaa05032818d81a6e" ] }, - "outputId": "3606f305-aa67-43fd-d5d6-93d1f311768c" + "outputId": "7e6f5c96-c819-43e1-cd03-d3b9878cf8de" }, "source": [ "# Download COCO val2017\n", "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017val.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../datasets && rm tmp.zip" ], - "execution_count": null, + "execution_count": 5, "outputs": [ { "output_type": "display_data", "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "2e915d9016c846e095e382b6a02ee773", + "model_id": "484511f272e64eab8b42e68dac5f7a66", "version_minor": 0, "version_major": 2 }, "text/plain": [ - "HBox(children=(FloatProgress(value=0.0, max=818322941.0), HTML(value='')))" + " 0%| | 0.00/780M [00:00 Date: Sun, 15 Aug 2021 18:32:41 +0200 Subject: [PATCH 0487/1976] Remove DDP process group timeout (#4422) --- train.py | 2 +- utils/torch_utils.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/train.py b/train.py index 0aa7a13628dc..275e0a4b1a8e 100644 --- a/train.py +++ b/train.py @@ -493,7 +493,7 @@ def main(opt): assert not opt.sync_bn, '--sync-bn known training issue, see https://github.com/ultralytics/yolov5/issues/3998' torch.cuda.set_device(LOCAL_RANK) device = torch.device('cuda', LOCAL_RANK) - dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo", timeout=timedelta(seconds=60)) + dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") # Train if not opt.evolve: diff --git a/utils/torch_utils.py b/utils/torch_utils.py index dff0617e87c9..2eb51d80f34e 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -35,10 +35,10 @@ def torch_distributed_zero_first(local_rank: int): Decorator to make all processes in distributed training wait for each local_master to do something. 
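    A typical call site (editor's sketch mirroring how train.py uses this
    context manager; create_dataset is a hypothetical placeholder): rank 0
    runs the block first, e.g. to download or cache a dataset, then the
    barrier flips and the remaining ranks proceed with the cached result:

        with torch_distributed_zero_first(LOCAL_RANK):
            dataset = create_dataset(...)  # cached setup work goes here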
""" if local_rank not in [-1, 0]: - dist.barrier() + dist.barrier(device_ids=[local_rank]) yield if local_rank == 0: - dist.barrier() + dist.barrier(device_ids=[0]) def init_torch_seeds(seed=0): From dbc06ce29298de4a55d73fa37362dd51a03035ed Mon Sep 17 00:00:00 2001 From: Omid Sadeghnezhad <58780720+OmidSa75@users.noreply.github.com> Date: Mon, 16 Aug 2021 13:28:02 +0430 Subject: [PATCH 0488/1976] Update hubconf.py attempt_load import (#4428) --- hubconf.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hubconf.py b/hubconf.py index 36f3bd86bc11..799c83ec8400 100644 --- a/hubconf.py +++ b/hubconf.py @@ -27,7 +27,8 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo """ from pathlib import Path - from models.yolo import Model, attempt_load + from models.yolo import Model + from models.experimental import attempt_load from utils.general import check_requirements, set_logging from utils.downloads import attempt_download from utils.torch_utils import select_device From f3e3f7603fca56e52f3f055d8bbb5847a73e3e78 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 16 Aug 2021 17:25:06 +0200 Subject: [PATCH 0489/1976] TFLite prep (#4436) --- detect.py | 3 ++- utils/general.py | 13 ++++++++----- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/detect.py b/detect.py index 49ebbe96c068..cdac4f213790 100644 --- a/detect.py +++ b/detect.py @@ -67,7 +67,8 @@ def run(weights='yolov5s.pt', # model.pt path(s) # Load model w = weights[0] if isinstance(weights, list) else weights - classify, pt, onnx = False, w.endswith('.pt'), w.endswith('.onnx') # inference type + classify, suffix = False, Path(w).suffix.lower() + pt, onnx, tflite, pb, graph_def = (suffix == x for x in ['.pt', '.onnx', '.tflite', '.pb', '']) # backend stride, names = 64, [f'class{i}' for i in range(1000)] # assign defaults if pt: model = attempt_load(weights, map_location=device) # load FP32 model diff --git a/utils/general.py b/utils/general.py index 850ca6ba0b1f..0b6e8fc7fb9a 100755 --- a/utils/general.py +++ b/utils/general.py @@ -203,11 +203,14 @@ def check_requirements(requirements='requirements.txt', exclude=()): print(emojis(s)) -def check_img_size(img_size, s=32, floor=0): - # Verify img_size is a multiple of stride s - new_size = max(make_divisible(img_size, int(s)), floor) # ceil gs-multiple - if new_size != img_size: - print(f'WARNING: --img-size {img_size} must be multiple of max stride {s}, updating to {new_size}') +def check_img_size(imgsz, s=32, floor=0): + # Verify image size is a multiple of stride s in each dimension + if isinstance(imgsz, int): # integer i.e. img_size=640 + new_size = max(make_divisible(imgsz, int(s)), floor) + else: # list i.e. 
img_size=[640, 480] + new_size = [max(make_divisible(x, int(s)), floor) for x in imgsz] + if new_size != imgsz: + print(f'WARNING: --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}') return new_size From 808bcad3bb952f4976aca63f95af8855bc227090 Mon Sep 17 00:00:00 2001 From: Jiacong Fang Date: Tue, 17 Aug 2021 19:18:16 +0800 Subject: [PATCH 0490/1976] Add TensorFlow and TFLite export (#1127) * Add models/tf.py for TensorFlow and TFLite export * Set auto=False for int8 calibration * Update requirements.txt for TensorFlow and TFLite export * Read anchors directly from PyTorch weights * Add --tf-nms to append NMS in TensorFlow SavedModel and GraphDef export * Remove check_anchor_order, check_file, set_logging from import * Reformat code and optimize imports * Autodownload model and check cfg * update --source path, img-size to 320, single output * Adjust representative_dataset * Put representative dataset in tfl_int8 block * detect.py TF inference * weights to string * weights to string * cleanup tf.py * Add --dynamic-batch-size * Add xywh normalization to reduce calibration error * Update requirements.txt TensorFlow 2.3.1 -> 2.4.0 to avoid int8 quantization error * Fix imports Move C3 from models.experimental to models.common * Add models/tf.py for TensorFlow and TFLite export * Set auto=False for int8 calibration * Update requirements.txt for TensorFlow and TFLite export * Read anchors directly from PyTorch weights * Add --tf-nms to append NMS in TensorFlow SavedModel and GraphDef export * Remove check_anchor_order, check_file, set_logging from import * Reformat code and optimize imports * Autodownload model and check cfg * update --source path, img-size to 320, single output * Adjust representative_dataset * detect.py TF inference * Put representative dataset in tfl_int8 block * weights to string * weights to string * cleanup tf.py * Add --dynamic-batch-size * Add xywh normalization to reduce calibration error * Update requirements.txt TensorFlow 2.3.1 -> 2.4.0 to avoid int8 quantization error * Fix imports Move C3 from models.experimental to models.common * implement C3() and SiLU() * Fix reshape dim to support dynamic batching * Add epsilon argument in tf_BN, which is different between TF and PT * Set stride to None if not using PyTorch, and do not warmup without PyTorch * Add list support in check_img_size() * Add list input support in detect.py * sys.path.append('./') to run from yolov5/ * Add int8 quantization support for TensorFlow 2.5 * Add get_coco128.sh * Remove --no-tfl-detect in models/tf.py (Use tf-android-tfl-detect branch for EdgeTPU) * Update requirements.txt * Replace torch.load() with attempt_load() * Update requirements.txt * Add --tf-raw-resize to set half_pixel_centers=False * Add --agnostic-nms for TF class-agnostic NMS * Cleanup after merge * Cleanup2 after merge * Cleanup3 after merge * Add tf.py docstring with credit and usage * pb saved_model and tflite use only one model in detect.py * Add use cases in docstring of tf.py * Remove redundant `stride` definition * Remove keras direct import * Fix `check_requirements(('tensorflow>=2.4.1',))` Co-authored-by: Glenn Jocher --- detect.py | 64 ++++- models/experimental.py | 8 +- models/tf.py | 558 +++++++++++++++++++++++++++++++++++++++++ requirements.txt | 1 + utils/datasets.py | 12 +- 5 files changed, 626 insertions(+), 17 deletions(-) create mode 100644 models/tf.py diff --git a/detect.py b/detect.py index cdac4f213790..a2331e23b43e 100644 --- a/detect.py +++ b/detect.py @@ -12,6 +12,7 @@ 
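A minimal sketch (editor's note, not part of the patch) of the list-aware check_img_size() updated above, assuming the repo's make_divisible(x, s) == math.ceil(x / s) * s:

    check_img_size(640, s=32)          # -> 640
    check_img_size(638, s=32)          # -> 640, after a stride warning
    check_img_size([640, 480], s=32)   # -> [640, 480]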
from pathlib import Path import cv2 +import numpy as np import torch import torch.backends.cudnn as cudnn @@ -51,6 +52,7 @@ def run(weights='yolov5s.pt', # model.pt path(s) hide_labels=False, # hide labels hide_conf=False, # hide confidences half=False, # use FP16 half-precision inference + tfl_int8=False, # INT8 quantized TFLite model ): save_img = not nosave and not source.endswith('.txt') # save inference images webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith( @@ -68,7 +70,7 @@ def run(weights='yolov5s.pt', # model.pt path(s) # Load model w = weights[0] if isinstance(weights, list) else weights classify, suffix = False, Path(w).suffix.lower() - pt, onnx, tflite, pb, graph_def = (suffix == x for x in ['.pt', '.onnx', '.tflite', '.pb', '']) # backend + pt, onnx, tflite, pb, saved_model = (suffix == x for x in ['.pt', '.onnx', '.tflite', '.pb', '']) # backend stride, names = 64, [f'class{i}' for i in range(1000)] # assign defaults if pt: model = attempt_load(weights, map_location=device) # load FP32 model @@ -83,30 +85,49 @@ def run(weights='yolov5s.pt', # model.pt path(s) check_requirements(('onnx', 'onnxruntime')) import onnxruntime session = onnxruntime.InferenceSession(w, None) + else: # TensorFlow models + check_requirements(('tensorflow>=2.4.1',)) + import tensorflow as tf + if pb: # https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt + def wrap_frozen_graph(gd, inputs, outputs): + x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=""), []) # wrapped import + return x.prune(tf.nest.map_structure(x.graph.as_graph_element, inputs), + tf.nest.map_structure(x.graph.as_graph_element, outputs)) + + graph_def = tf.Graph().as_graph_def() + graph_def.ParseFromString(open(w, 'rb').read()) + frozen_func = wrap_frozen_graph(gd=graph_def, inputs="x:0", outputs="Identity:0") + elif saved_model: + model = tf.keras.models.load_model(w) + elif tflite: + interpreter = tf.lite.Interpreter(model_path=w) # load TFLite model + interpreter.allocate_tensors() # allocate + input_details = interpreter.get_input_details() # inputs + output_details = interpreter.get_output_details() # outputs imgsz = check_img_size(imgsz, s=stride) # check image size # Dataloader if webcam: view_img = check_imshow() cudnn.benchmark = True # set True to speed up constant image size inference - dataset = LoadStreams(source, img_size=imgsz, stride=stride) + dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt) bs = len(dataset) # batch_size else: - dataset = LoadImages(source, img_size=imgsz, stride=stride) + dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt) bs = 1 # batch_size vid_path, vid_writer = [None] * bs, [None] * bs # Run inference if pt and device.type != 'cpu': - model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once + model(torch.zeros(1, 3, *imgsz).to(device).type_as(next(model.parameters()))) # run once t0 = time.time() for path, img, im0s, vid_cap in dataset: - if pt: + if onnx: + img = img.astype('float32') + else: img = torch.from_numpy(img).to(device) img = img.half() if half else img.float() # uint8 to fp16/32 - elif onnx: - img = img.astype('float32') - img /= 255.0 # 0 - 255 to 0.0 - 1.0 + img = img / 255.0 # 0 - 255 to 0.0 - 1.0 if len(img.shape) == 3: img = img[None] # expand for batch dim @@ -117,6 +138,27 @@ def run(weights='yolov5s.pt', # model.pt path(s) pred = model(img, augment=augment, visualize=visualize)[0] elif onnx: pred = 
torch.tensor(session.run([session.get_outputs()[0].name], {session.get_inputs()[0].name: img})) + else: # tensorflow model (tflite, pb, saved_model) + imn = img.permute(0, 2, 3, 1).cpu().numpy() # image in numpy + if pb: + pred = frozen_func(x=tf.constant(imn)).numpy() + elif saved_model: + pred = model(imn, training=False).numpy() + elif tflite: + if tfl_int8: + scale, zero_point = input_details[0]['quantization'] + imn = (imn / scale + zero_point).astype(np.uint8) + interpreter.set_tensor(input_details[0]['index'], imn) + interpreter.invoke() + pred = interpreter.get_tensor(output_details[0]['index']) + if tfl_int8: + scale, zero_point = output_details[0]['quantization'] + pred = (pred.astype(np.float32) - zero_point) * scale + pred[..., 0] *= imgsz[1] # x + pred[..., 1] *= imgsz[0] # y + pred[..., 2] *= imgsz[1] # w + pred[..., 3] *= imgsz[0] # h + pred = torch.tensor(pred) # NMS pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det) @@ -202,9 +244,9 @@ def run(weights='yolov5s.pt', # model.pt path(s) def parse_opt(): parser = argparse.ArgumentParser() - parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)') + parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pb', help='model.pt path(s)') parser.add_argument('--source', type=str, default='data/images', help='file/dir/URL/glob, 0 for webcam') - parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)') + parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w') parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold') parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold') parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image') @@ -226,7 +268,9 @@ def parse_opt(): parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels') parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences') parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') + parser.add_argument('--tfl-int8', action='store_true', help='INT8 quantized TFLite model') opt = parser.parse_args() + opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand return opt diff --git a/models/experimental.py b/models/experimental.py index 7dfaf9611bec..e25a4e1779fa 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -85,14 +85,18 @@ def forward(self, x, augment=False, profile=False, visualize=False): return y, None # inference, train output -def attempt_load(weights, map_location=None, inplace=True): +def attempt_load(weights, map_location=None, inplace=True, fuse=True): from models.yolo import Detect, Model # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a model = Ensemble() for w in weights if isinstance(weights, list) else [weights]: ckpt = torch.load(attempt_download(w), map_location=map_location) # load - model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval()) # FP32 model + if fuse: + model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval()) # FP32 model + else: + model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().eval()) # without layer fuse + # Compatibility updates for m in model.modules(): diff --git a/models/tf.py b/models/tf.py new file mode 100644 
index 000000000000..40e7d20a9d84 --- /dev/null +++ b/models/tf.py @@ -0,0 +1,558 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +TensorFlow/Keras and TFLite versions of YOLOv5 +Authored by https://github.com/zldrobit in PR https://github.com/ultralytics/yolov5/pull/1127 + +Usage: + $ python models/tf.py --weights yolov5s.pt --cfg yolov5s.yaml + +Export int8 TFLite models: + $ python models/tf.py --weights yolov5s.pt --cfg models/yolov5s.yaml --tfl-int8 \ + --source path/to/images/ --ncalib 100 + +Detection: + $ python detect.py --weights yolov5s.pb --img 320 + $ python detect.py --weights yolov5s_saved_model --img 320 + $ python detect.py --weights yolov5s-fp16.tflite --img 320 + $ python detect.py --weights yolov5s-int8.tflite --img 320 --tfl-int8 + +For TensorFlow.js: + $ python models/tf.py --weights yolov5s.pt --cfg models/yolov5s.yaml --img 320 --tf-nms --agnostic-nms + $ pip install tensorflowjs + $ tensorflowjs_converter \ + --input_format=tf_frozen_model \ + --output_node_names='Identity,Identity_1,Identity_2,Identity_3' \ + yolov5s.pb \ + web_model + $ # Edit web_model/model.json to sort Identity* in ascending order + $ cd .. && git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example + $ npm install + $ ln -s ../../yolov5/web_model public/web_model + $ npm start +""" + +import argparse +import logging +import os +import sys +import traceback +from copy import deepcopy +from pathlib import Path + +sys.path.append('./') # to run '$ python *.py' files in subdirectories + +import numpy as np +import tensorflow as tf +import torch +import torch.nn as nn +import yaml +from tensorflow import keras +from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 + +from models.common import Conv, Bottleneck, SPP, DWConv, Focus, BottleneckCSP, Concat, autopad, C3 +from models.experimental import MixConv2d, CrossConv, attempt_load +from models.yolo import Detect +from utils.datasets import LoadImages +from utils.general import make_divisible, check_file, check_dataset + +logger = logging.getLogger(__name__) + + +class tf_BN(keras.layers.Layer): + # TensorFlow BatchNormalization wrapper + def __init__(self, w=None): + super(tf_BN, self).__init__() + self.bn = keras.layers.BatchNormalization( + beta_initializer=keras.initializers.Constant(w.bias.numpy()), + gamma_initializer=keras.initializers.Constant(w.weight.numpy()), + moving_mean_initializer=keras.initializers.Constant(w.running_mean.numpy()), + moving_variance_initializer=keras.initializers.Constant(w.running_var.numpy()), + epsilon=w.eps) + + def call(self, inputs): + return self.bn(inputs) + + +class tf_Pad(keras.layers.Layer): + def __init__(self, pad): + super(tf_Pad, self).__init__() + self.pad = tf.constant([[0, 0], [pad, pad], [pad, pad], [0, 0]]) + + def call(self, inputs): + return tf.pad(inputs, self.pad, mode='constant', constant_values=0) + + +class tf_Conv(keras.layers.Layer): + # Standard convolution + def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None): + # ch_in, ch_out, weights, kernel, stride, padding, groups + super(tf_Conv, self).__init__() + assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument" + assert isinstance(k, int), "Convolution with multiple kernels are not allowed." + # TensorFlow convolution padding is inconsistent with PyTorch (e.g. 
k=3 s=2 'SAME' padding) + # see https://stackoverflow.com/questions/52975843/comparing-conv2d-with-padding-between-tensorflow-and-pytorch + + conv = keras.layers.Conv2D( + c2, k, s, 'SAME' if s == 1 else 'VALID', use_bias=False, + kernel_initializer=keras.initializers.Constant(w.conv.weight.permute(2, 3, 1, 0).numpy())) + self.conv = conv if s == 1 else keras.Sequential([tf_Pad(autopad(k, p)), conv]) + self.bn = tf_BN(w.bn) if hasattr(w, 'bn') else tf.identity + + # YOLOv5 activations + if isinstance(w.act, nn.LeakyReLU): + self.act = (lambda x: keras.activations.relu(x, alpha=0.1)) if act else tf.identity + elif isinstance(w.act, nn.Hardswish): + self.act = (lambda x: x * tf.nn.relu6(x + 3) * 0.166666667) if act else tf.identity + elif isinstance(w.act, nn.SiLU): + self.act = (lambda x: keras.activations.swish(x)) if act else tf.identity + + def call(self, inputs): + return self.act(self.bn(self.conv(inputs))) + + +class tf_Focus(keras.layers.Layer): + # Focus wh information into c-space + def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None): + # ch_in, ch_out, kernel, stride, padding, groups + super(tf_Focus, self).__init__() + self.conv = tf_Conv(c1 * 4, c2, k, s, p, g, act, w.conv) + + def call(self, inputs): # x(b,w,h,c) -> y(b,w/2,h/2,4c) + # inputs = inputs / 255. # normalize 0-255 to 0-1 + return self.conv(tf.concat([inputs[:, ::2, ::2, :], + inputs[:, 1::2, ::2, :], + inputs[:, ::2, 1::2, :], + inputs[:, 1::2, 1::2, :]], 3)) + + +class tf_Bottleneck(keras.layers.Layer): + # Standard bottleneck + def __init__(self, c1, c2, shortcut=True, g=1, e=0.5, w=None): # ch_in, ch_out, shortcut, groups, expansion + super(tf_Bottleneck, self).__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = tf_Conv(c1, c_, 1, 1, w=w.cv1) + self.cv2 = tf_Conv(c_, c2, 3, 1, g=g, w=w.cv2) + self.add = shortcut and c1 == c2 + + def call(self, inputs): + return inputs + self.cv2(self.cv1(inputs)) if self.add else self.cv2(self.cv1(inputs)) + + +class tf_Conv2d(keras.layers.Layer): + # Substitution for PyTorch nn.Conv2D + def __init__(self, c1, c2, k, s=1, g=1, bias=True, w=None): + super(tf_Conv2d, self).__init__() + assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument" + self.conv = keras.layers.Conv2D( + c2, k, s, 'VALID', use_bias=bias, + kernel_initializer=keras.initializers.Constant(w.weight.permute(2, 3, 1, 0).numpy()), + bias_initializer=keras.initializers.Constant(w.bias.numpy()) if bias else None, ) + + def call(self, inputs): + return self.conv(inputs) + + +class tf_BottleneckCSP(keras.layers.Layer): + # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None): + # ch_in, ch_out, number, shortcut, groups, expansion + super(tf_BottleneckCSP, self).__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = tf_Conv(c1, c_, 1, 1, w=w.cv1) + self.cv2 = tf_Conv2d(c1, c_, 1, 1, bias=False, w=w.cv2) + self.cv3 = tf_Conv2d(c_, c_, 1, 1, bias=False, w=w.cv3) + self.cv4 = tf_Conv(2 * c_, c2, 1, 1, w=w.cv4) + self.bn = tf_BN(w.bn) + self.act = lambda x: keras.activations.relu(x, alpha=0.1) + self.m = keras.Sequential([tf_Bottleneck(c_, c_, shortcut, g, e=1.0, w=w.m[j]) for j in range(n)]) + + def call(self, inputs): + y1 = self.cv3(self.m(self.cv1(inputs))) + y2 = self.cv2(inputs) + return self.cv4(self.act(self.bn(tf.concat((y1, y2), axis=3)))) + + +class tf_C3(keras.layers.Layer): + # CSP Bottleneck with 3 convolutions + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None): + # 
ch_in, ch_out, number, shortcut, groups, expansion + super(tf_C3, self).__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = tf_Conv(c1, c_, 1, 1, w=w.cv1) + self.cv2 = tf_Conv(c1, c_, 1, 1, w=w.cv2) + self.cv3 = tf_Conv(2 * c_, c2, 1, 1, w=w.cv3) + self.m = keras.Sequential([tf_Bottleneck(c_, c_, shortcut, g, e=1.0, w=w.m[j]) for j in range(n)]) + + def call(self, inputs): + return self.cv3(tf.concat((self.m(self.cv1(inputs)), self.cv2(inputs)), axis=3)) + + +class tf_SPP(keras.layers.Layer): + # Spatial pyramid pooling layer used in YOLOv3-SPP + def __init__(self, c1, c2, k=(5, 9, 13), w=None): + super(tf_SPP, self).__init__() + c_ = c1 // 2 # hidden channels + self.cv1 = tf_Conv(c1, c_, 1, 1, w=w.cv1) + self.cv2 = tf_Conv(c_ * (len(k) + 1), c2, 1, 1, w=w.cv2) + self.m = [keras.layers.MaxPool2D(pool_size=x, strides=1, padding='SAME') for x in k] + + def call(self, inputs): + x = self.cv1(inputs) + return self.cv2(tf.concat([x] + [m(x) for m in self.m], 3)) + + +class tf_Detect(keras.layers.Layer): + def __init__(self, nc=80, anchors=(), ch=(), w=None): # detection layer + super(tf_Detect, self).__init__() + self.stride = tf.convert_to_tensor(w.stride.numpy(), dtype=tf.float32) + self.nc = nc # number of classes + self.no = nc + 5 # number of outputs per anchor + self.nl = len(anchors) # number of detection layers + self.na = len(anchors[0]) // 2 # number of anchors + self.grid = [tf.zeros(1)] * self.nl # init grid + self.anchors = tf.convert_to_tensor(w.anchors.numpy(), dtype=tf.float32) + self.anchor_grid = tf.reshape(tf.convert_to_tensor(w.anchor_grid.numpy(), dtype=tf.float32), + [self.nl, 1, -1, 1, 2]) + self.m = [tf_Conv2d(x, self.no * self.na, 1, w=w.m[i]) for i, x in enumerate(ch)] + self.export = False # onnx export + self.training = True # set to False after building model + for i in range(self.nl): + ny, nx = opt.img_size[0] // self.stride[i], opt.img_size[1] // self.stride[i] + self.grid[i] = self._make_grid(nx, ny) + + def call(self, inputs): + # x = x.copy() # for profiling + z = [] # inference output + self.training |= self.export + x = [] + for i in range(self.nl): + x.append(self.m[i](inputs[i])) + # x(bs,20,20,255) to x(bs,3,20,20,85) + ny, nx = opt.img_size[0] // self.stride[i], opt.img_size[1] // self.stride[i] + x[i] = tf.transpose(tf.reshape(x[i], [-1, ny * nx, self.na, self.no]), [0, 2, 1, 3]) + + if not self.training: # inference + y = tf.sigmoid(x[i]) + xy = (y[..., 0:2] * 2. 
- 0.5 + self.grid[i]) * self.stride[i] # xy + wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] + # Normalize xywh to 0-1 to reduce calibration error + xy /= tf.constant([[opt.img_size[1], opt.img_size[0]]], dtype=tf.float32) + wh /= tf.constant([[opt.img_size[1], opt.img_size[0]]], dtype=tf.float32) + y = tf.concat([xy, wh, y[..., 4:]], -1) + z.append(tf.reshape(y, [-1, 3 * ny * nx, self.no])) + + return x if self.training else (tf.concat(z, 1), x) + + @staticmethod + def _make_grid(nx=20, ny=20): + # yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)]) + # return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float() + xv, yv = tf.meshgrid(tf.range(nx), tf.range(ny)) + return tf.cast(tf.reshape(tf.stack([xv, yv], 2), [1, 1, ny * nx, 2]), dtype=tf.float32) + + +class tf_Upsample(keras.layers.Layer): + def __init__(self, size, scale_factor, mode, w=None): + super(tf_Upsample, self).__init__() + assert scale_factor == 2, "scale_factor must be 2" + # self.upsample = keras.layers.UpSampling2D(size=scale_factor, interpolation=mode) + if opt.tf_raw_resize: + # with default arguments: align_corners=False, half_pixel_centers=False + self.upsample = lambda x: tf.raw_ops.ResizeNearestNeighbor(images=x, + size=(x.shape[1] * 2, x.shape[2] * 2)) + else: + self.upsample = lambda x: tf.image.resize(x, (x.shape[1] * 2, x.shape[2] * 2), method=mode) + + def call(self, inputs): + return self.upsample(inputs) + + +class tf_Concat(keras.layers.Layer): + def __init__(self, dimension=1, w=None): + super(tf_Concat, self).__init__() + assert dimension == 1, "convert only NCHW to NHWC concat" + self.d = 3 + + def call(self, inputs): + return tf.concat(inputs, self.d) + + +def parse_model(d, ch, model): # model_dict, input_channels(3) + logger.info('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments')) + anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'] + na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors + no = na * (nc + 5) # number of outputs = anchors * (classes + 5) + + layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out + for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args + m_str = m + m = eval(m) if isinstance(m, str) else m # eval strings + for j, a in enumerate(args): + try: + args[j] = eval(a) if isinstance(a, str) else a # eval strings + except: + pass + + n = max(round(n * gd), 1) if n > 1 else n # depth gain + if m in [nn.Conv2d, Conv, Bottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP, C3]: + c1, c2 = ch[f], args[0] + c2 = make_divisible(c2 * gw, 8) if c2 != no else c2 + + args = [c1, c2, *args[1:]] + if m in [BottleneckCSP, C3]: + args.insert(2, n) + n = 1 + elif m is nn.BatchNorm2d: + args = [ch[f]] + elif m is Concat: + c2 = sum([ch[-1 if x == -1 else x + 1] for x in f]) + elif m is Detect: + args.append([ch[x + 1] for x in f]) + if isinstance(args[1], int): # number of anchors + args[1] = [list(range(args[1] * 2))] * len(f) + else: + c2 = ch[f] + + tf_m = eval('tf_' + m_str.replace('nn.', '')) + m_ = keras.Sequential([tf_m(*args, w=model.model[i][j]) for j in range(n)]) if n > 1 \ + else tf_m(*args, w=model.model[i]) # module + + torch_m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args) # module + t = str(m)[8:-2].replace('__main__.', '') # module type + np = sum([x.numel() for x in torch_m_.parameters()]) # number params + m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, 
type, number params + logger.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args)) # print + save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist + layers.append(m_) + ch.append(c2) + return keras.Sequential(layers), sorted(save) + + +class tf_Model(): + def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, model=None): # model, input channels, number of classes + super(tf_Model, self).__init__() + if isinstance(cfg, dict): + self.yaml = cfg # model dict + else: # is *.yaml + import yaml # for torch hub + self.yaml_file = Path(cfg).name + with open(cfg) as f: + self.yaml = yaml.load(f, Loader=yaml.FullLoader) # model dict + + # Define model + if nc and nc != self.yaml['nc']: + print('Overriding %s nc=%g with nc=%g' % (cfg, self.yaml['nc'], nc)) + self.yaml['nc'] = nc # override yaml value + self.model, self.savelist = parse_model(deepcopy(self.yaml), ch=[ch], model=model) # model, savelist, ch_out + + def predict(self, inputs, profile=False): + y = [] # outputs + x = inputs + for i, m in enumerate(self.model.layers): + if m.f != -1: # if not from previous layer + x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers + + x = m(x) # run + y.append(x if m.i in self.savelist else None) # save output + + # Add TensorFlow NMS + if opt.tf_nms: + boxes = xywh2xyxy(x[0][..., :4]) + probs = x[0][:, :, 4:5] + classes = x[0][:, :, 5:] + scores = probs * classes + if opt.agnostic_nms: + nms = agnostic_nms_layer()((boxes, classes, scores)) + return nms, x[1] + else: + boxes = tf.expand_dims(boxes, 2) + nms = tf.image.combined_non_max_suppression( + boxes, scores, opt.topk_per_class, opt.topk_all, opt.iou_thres, opt.score_thres, clip_boxes=False) + return nms, x[1] + + return x[0] # output only first tensor [1,6300,85] = [xywh, conf, class0, class1, ...] + # x = x[0][0] # [x(1,6300,85), ...] 
to x(6300,85) + # xywh = x[..., :4] # x(6300,4) boxes + # conf = x[..., 4:5] # x(6300,1) confidences + # cls = tf.reshape(tf.cast(tf.argmax(x[..., 5:], axis=1), tf.float32), (-1, 1)) # x(6300,1) classes + # return tf.concat([conf, cls, xywh], 1) + + +class agnostic_nms_layer(keras.layers.Layer): + # wrap map_fn to avoid TypeSpec related error https://stackoverflow.com/a/65809989/3036450 + def call(self, input): + return tf.map_fn(agnostic_nms, input, + fn_output_signature=(tf.float32, tf.float32, tf.float32, tf.int32), + name='agnostic_nms') + + +def agnostic_nms(x): + boxes, classes, scores = x + class_inds = tf.cast(tf.argmax(classes, axis=-1), tf.float32) + scores_inp = tf.reduce_max(scores, -1) + selected_inds = tf.image.non_max_suppression( + boxes, scores_inp, max_output_size=opt.topk_all, iou_threshold=opt.iou_thres, score_threshold=opt.score_thres) + selected_boxes = tf.gather(boxes, selected_inds) + padded_boxes = tf.pad(selected_boxes, + paddings=[[0, opt.topk_all - tf.shape(selected_boxes)[0]], [0, 0]], + mode="CONSTANT", constant_values=0.0) + selected_scores = tf.gather(scores_inp, selected_inds) + padded_scores = tf.pad(selected_scores, + paddings=[[0, opt.topk_all - tf.shape(selected_boxes)[0]]], + mode="CONSTANT", constant_values=-1.0) + selected_classes = tf.gather(class_inds, selected_inds) + padded_classes = tf.pad(selected_classes, + paddings=[[0, opt.topk_all - tf.shape(selected_boxes)[0]]], + mode="CONSTANT", constant_values=-1.0) + valid_detections = tf.shape(selected_inds)[0] + return padded_boxes, padded_scores, padded_classes, valid_detections + + +def xywh2xyxy(xywh): + # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right + x, y, w, h = tf.split(xywh, num_or_size_splits=4, axis=-1) + return tf.concat([x - w / 2, y - h / 2, x + w / 2, y + h / 2], axis=-1) + + +def representative_dataset_gen(): + # Representative dataset for use with converter.representative_dataset + n = 0 + for path, img, im0s, vid_cap in dataset: + # Get sample input data as a numpy array in a method of your choosing. 
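+ # (The converter runs each yielded [input] through the float model to record activation ranges for int8 calibration, so the preprocessing below must mirror inference preprocessing: CHW-to-HWC transpose, added batch dimension, float32, 0-1 scaling.)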
+ n += 1 + input = np.transpose(img, [1, 2, 0]) + input = np.expand_dims(input, axis=0).astype(np.float32) + input /= 255.0 + yield [input] + if n >= opt.ncalib: + break + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='cfg path') + parser.add_argument('--weights', type=str, default='yolov5s.pt', help='weights path') + parser.add_argument('--img-size', nargs='+', type=int, default=[320, 320], help='image size') # height, width + parser.add_argument('--batch-size', type=int, default=1, help='batch size') + parser.add_argument('--dynamic-batch-size', action='store_true', help='dynamic batch size') + parser.add_argument('--source', type=str, default='../data/coco128.yaml', help='dir of images or data.yaml file') + parser.add_argument('--ncalib', type=int, default=100, help='number of calibration images') + parser.add_argument('--tfl-int8', action='store_true', dest='tfl_int8', help='export TFLite int8 model') + parser.add_argument('--tf-nms', action='store_true', dest='tf_nms', help='TF NMS (without TFLite export)') + parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') + parser.add_argument('--tf-raw-resize', action='store_true', dest='tf_raw_resize', + help='use tf.raw_ops.ResizeNearestNeighbor for resize') + parser.add_argument('--topk-per-class', type=int, default=100, help='topk per class to keep in NMS') + parser.add_argument('--topk-all', type=int, default=100, help='topk for all classes to keep in NMS') + parser.add_argument('--iou-thres', type=float, default=0.5, help='IOU threshold for NMS') + parser.add_argument('--score-thres', type=float, default=0.4, help='score threshold for NMS') + opt = parser.parse_args() + opt.cfg = check_file(opt.cfg) # check file + opt.img_size *= 2 if len(opt.img_size) == 1 else 1 # expand + print(opt) + + # Input + img = torch.zeros((opt.batch_size, 3, *opt.img_size)) # image size(1,3,320,192) iDetection + + # Load PyTorch model + model = attempt_load(opt.weights, map_location=torch.device('cpu'), inplace=True, fuse=False) + model.model[-1].export = False # set Detect() layer export=False + y = model(img) # dry run + nc = y[0].shape[-1] - 5 + + # TensorFlow saved_model export + try: + print('\nStarting TensorFlow saved_model export with TensorFlow %s...' % tf.__version__) + tf_model = tf_Model(opt.cfg, model=model, nc=nc) + img = tf.zeros((opt.batch_size, *opt.img_size, 3)) # NHWC Input for TensorFlow + + m = tf_model.model.layers[-1] + assert isinstance(m, tf_Detect), "the last layer must be Detect" + m.training = False + y = tf_model.predict(img) + + inputs = keras.Input(shape=(*opt.img_size, 3), batch_size=None if opt.dynamic_batch_size else opt.batch_size) + keras_model = keras.Model(inputs=inputs, outputs=tf_model.predict(inputs)) + keras_model.summary() + path = opt.weights.replace('.pt', '_saved_model') # filename + keras_model.save(path, save_format='tf') + print('TensorFlow saved_model export success, saved as %s' % path) + except Exception as e: + print('TensorFlow saved_model export failure: %s' % e) + traceback.print_exc(file=sys.stdout) + + # TensorFlow GraphDef export + try: + print('\nStarting TensorFlow GraphDef export with TensorFlow %s...'
% tf.__version__) + + # https://github.com/leimao/Frozen_Graph_TensorFlow + full_model = tf.function(lambda x: keras_model(x)) + full_model = full_model.get_concrete_function( + tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype)) + + frozen_func = convert_variables_to_constants_v2(full_model) + frozen_func.graph.as_graph_def() + f = opt.weights.replace('.pt', '.pb') # filename + tf.io.write_graph(graph_or_graph_def=frozen_func.graph, + logdir=os.path.dirname(f), + name=os.path.basename(f), + as_text=False) + + print('TensorFlow GraphDef export success, saved as %s' % f) + except Exception as e: + print('TensorFlow GraphDef export failure: %s' % e) + traceback.print_exc(file=sys.stdout) + + # TFLite model export + if not opt.tf_nms: + try: + print('\nStarting TFLite export with TensorFlow %s...' % tf.__version__) + + # fp32 TFLite model export --------------------------------------------------------------------------------- + # converter = tf.lite.TFLiteConverter.from_keras_model(keras_model) + # converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS] + # converter.allow_custom_ops = False + # converter.experimental_new_converter = True + # tflite_model = converter.convert() + # f = opt.weights.replace('.pt', '.tflite') # filename + # open(f, "wb").write(tflite_model) + + # fp16 TFLite model export --------------------------------------------------------------------------------- + converter = tf.lite.TFLiteConverter.from_keras_model(keras_model) + converter.optimizations = [tf.lite.Optimize.DEFAULT] + # converter.representative_dataset = representative_dataset_gen + # converter.target_spec.supported_types = [tf.float16] + converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS] + converter.allow_custom_ops = False + converter.experimental_new_converter = True + tflite_model = converter.convert() + f = opt.weights.replace('.pt', '-fp16.tflite') # filename + open(f, "wb").write(tflite_model) + print('\nTFLite export success, saved as %s' % f) + + # int8 TFLite model export --------------------------------------------------------------------------------- + if opt.tfl_int8: + # Representative Dataset + if opt.source.endswith('.yaml'): + with open(check_file(opt.source)) as f: + data = yaml.load(f, Loader=yaml.FullLoader) # data dict + check_dataset(data) # check + opt.source = data['train'] + dataset = LoadImages(opt.source, img_size=opt.img_size, auto=False) + converter = tf.lite.TFLiteConverter.from_keras_model(keras_model) + converter.optimizations = [tf.lite.Optimize.DEFAULT] + converter.representative_dataset = representative_dataset_gen + converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] + converter.inference_input_type = tf.uint8 # or tf.int8 + converter.inference_output_type = tf.uint8 # or tf.int8 + converter.allow_custom_ops = False + converter.experimental_new_converter = True + converter.experimental_new_quantizer = False + tflite_model = converter.convert() + f = opt.weights.replace('.pt', '-int8.tflite') # filename + open(f, "wb").write(tflite_model) + print('\nTFLite (int8) export success, saved as %s' % f) + + except Exception as e: + print('\nTFLite export failure: %s' % e) + traceback.print_exc(file=sys.stdout) diff --git a/requirements.txt b/requirements.txt index f1629eafc65a..f6361d591f1b 100755 --- a/requirements.txt +++ b/requirements.txt @@ -23,6 +23,7 @@ pandas # coremltools>=4.1 # onnx>=1.9.0 # scikit-learn==0.19.2 # for coreml quantization +# tensorflow==2.4.1 # for TFLite export # extras 
-------------------------------------- # Cython # for pycocotools https://github.com/cocodataset/cocoapi/issues/172 diff --git a/utils/datasets.py b/utils/datasets.py index 7d831cd63230..52b028994325 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -155,7 +155,7 @@ def __iter__(self): class LoadImages: # for inference - def __init__(self, path, img_size=640, stride=32): + def __init__(self, path, img_size=640, stride=32, auto=True): p = str(Path(path).absolute()) # os-agnostic absolute path if '*' in p: files = sorted(glob.glob(p, recursive=True)) # glob @@ -176,6 +176,7 @@ def __init__(self, path, img_size=640, stride=32): self.nf = ni + nv # number of files self.video_flag = [False] * ni + [True] * nv self.mode = 'image' + self.auto = auto if any(videos): self.new_video(videos[0]) # new video else: @@ -217,7 +218,7 @@ def __next__(self): print(f'image {self.count}/{self.nf} {path}: ', end='') # Padded resize - img = letterbox(img0, self.img_size, stride=self.stride)[0] + img = letterbox(img0, self.img_size, stride=self.stride, auto=self.auto)[0] # Convert img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB @@ -276,7 +277,7 @@ def __len__(self): class LoadStreams: # multiple IP or RTSP cameras - def __init__(self, sources='streams.txt', img_size=640, stride=32): + def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True): self.mode = 'stream' self.img_size = img_size self.stride = stride @@ -290,6 +291,7 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32): n = len(sources) self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n self.sources = [clean_str(x) for x in sources] # clean source names for later + self.auto = auto for i, s in enumerate(sources): # index, source # Start thread to read frames from video stream print(f'{i + 1}/{n}: {s}... ', end='') @@ -312,7 +314,7 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32): print('') # newline # check for common shapes - s = np.stack([letterbox(x, self.img_size, stride=self.stride)[0].shape for x in self.imgs], 0) # shapes + s = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0].shape for x in self.imgs], 0) # shapes self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal if not self.rect: print('WARNING: Different stream shapes detected. 
For optimal performance supply similarly-shaped streams.') @@ -341,7 +343,7 @@ def __next__(self): # Letterbox img0 = self.imgs.copy() - img = [letterbox(x, self.img_size, auto=self.rect, stride=self.stride)[0] for x in img0] + img = [letterbox(x, self.img_size, stride=self.stride, auto=self.rect and self.auto)[0] for x in img0] # Stack img = np.stack(img, 0) From 6dd7dd8dd3c27ac8b986578f91fa14aab12357d7 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 17 Aug 2021 23:29:07 +0200 Subject: [PATCH 0491/1976] Fix default `--weights yolov5s.pt` (#4458) --- detect.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/detect.py b/detect.py index a2331e23b43e..601d5daf9852 100644 --- a/detect.py +++ b/detect.py @@ -244,7 +244,7 @@ def wrap_frozen_graph(gd, inputs, outputs): def parse_opt(): parser = argparse.ArgumentParser() - parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pb', help='model.pt path(s)') + parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)') parser.add_argument('--source', type=str, default='data/images', help='file/dir/URL/glob, 0 for webcam') parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w') parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold') From 1d65e8194d183f4e1537cf64a7292f8ab57d1d55 Mon Sep 17 00:00:00 2001 From: "Huu Quan, CAP" Date: Wed, 18 Aug 2021 19:07:09 +0900 Subject: [PATCH 0492/1976] Fix missing labels after albumentations (#4455) * fix missing labels after augmentation * Update datasets.py Cleanup Co-authored-by: Huu Quan Co-authored-by: Glenn Jocher --- utils/datasets.py | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/datasets.py b/utils/datasets.py index 52b028994325..25a2ba6f9561 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -568,6 +568,7 @@ def __getitem__(self, index): if self.augment: # Albumentations img, labels = self.albumentations(img, labels) + nl = len(labels) # update after albumentations # HSV color-space augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v']) From d1182c4f29e2141be856b85c5d613480a2fffc5d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 18 Aug 2021 21:16:57 +0200 Subject: [PATCH 0493/1976] `check_requirements(('coremltools',))` (#4478) * `check_requirements(('coremltools',))` * Update ci-testing.yml * Update ci-testing.yml --- .github/workflows/ci-testing.yml | 4 ++-- export.py | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 02e8f74bf56c..ecd6f9bbd625 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -48,7 +48,7 @@ jobs: run: | python -m pip install --upgrade pip pip install -qr requirements.txt -f https://download.pytorch.org/whl/cpu/torch_stable.html - pip install -q onnx + pip install -q onnx onnx-simplifier coremltools # for export python --version pip --version pip list @@ -76,5 +76,5 @@ jobs: python hubconf.py # hub python models/yolo.py --cfg ${{ matrix.model }}.yaml # inspect - python export.py --img 128 --batch 1 --weights ${{ matrix.model }}.pt # export + python export.py --img 128 --batch 1 --weights ${{ matrix.model }}.pt --include onnx torchscript # export shell: bash diff --git a/export.py b/export.py index db805cb45e6e..674609463a9d 100644 --- a/export.py +++ b/export.py @@ -87,6 +87,7 @@ def export_coreml(model, img, file): # CoreML model export 
prefix = colorstr('CoreML:') try: + check_requirements(('coremltools',)) import coremltools as ct print(f'\n{prefix} starting export with coremltools {ct.__version__}...') From 7316b78e36a004bfe1272c4d7fc63e7e76f90cc8 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Mon, 23 Aug 2021 16:40:07 +0530 Subject: [PATCH 0494/1976] W&B: Refactor the wandb_utils.py file (#4496) * Improve docstrings and run names * default wandb login prompt with timeout * return key * Update api_key check logic * Properly support zipped dataset feature * update docstring * Revert tutorial change * extend changes to log_dataset * add run name * bug fix * bug fix * Update comment * fix import check * remove unused import * Hardcode .yaml file extension * reduce code * Reformat using pycharm * Remove redundant try catch * More refactoring and bug fixes * retry * Reformat using pycharm * respect LOGGERS include list * Fix * fix * refactor constructor * refactor * refactor * refactor * PyCharm reformat Co-authored-by: Glenn Jocher --- utils/loggers/wandb/wandb_utils.py | 77 +++++++++++++++++------------- 1 file changed, 43 insertions(+), 34 deletions(-) diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index 4631e8a1f8fd..8b2095afcb8b 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -38,6 +38,19 @@ def check_wandb_config_file(data_config_file): return data_config_file +def check_wandb_dataset(data_file): + is_wandb_artifact = False + if check_file(data_file) and data_file.endswith('.yaml'): + with open(data_file, errors='ignore') as f: + data_dict = yaml.safe_load(f) + is_wandb_artifact = (data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX) or + data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX)) + if is_wandb_artifact: + return data_dict + else: + return check_dataset(data_file) + + def get_run_info(run_path): run_path = Path(remove_prefix(run_path, WANDB_ARTIFACT_PREFIX)) run_id = run_path.stem @@ -104,7 +117,7 @@ def __init__(self, opt, run_id, job_type='Training'): - Initialize WandbLogger instance - Upload dataset if opt.upload_dataset is True - Setup training processes if job_type is 'Training' - + arguments: opt (namespace) -- Commandline arguments for this run run_id (str) -- Run ID of W&B run to be resumed @@ -147,26 +160,24 @@ def __init__(self, opt, run_id, job_type='Training'): allow_val_change=True) if not wandb.run else wandb.run if self.wandb_run: if self.job_type == 'Training': - if not opt.resume: - if opt.upload_dataset: + if opt.upload_dataset: + if not opt.resume: self.wandb_artifact_data_dict = self.check_and_upload_dataset(opt) - elif opt.data.endswith('_wandb.yaml'): # When dataset is W&B artifact - with open(opt.data, errors='ignore') as f: - data_dict = yaml.safe_load(f) - self.data_dict = data_dict - else: # Local .yaml dataset file or .zip file - self.data_dict = check_dataset(opt.data) + if opt.resume: + # resume from artifact + if isinstance(opt.resume, str) and opt.resume.startswith(WANDB_ARTIFACT_PREFIX): + self.data_dict = dict(self.wandb_run.config.data_dict) + else: # local resume + self.data_dict = check_wandb_dataset(opt.data) else: - self.data_dict = check_dataset(opt.data) + self.data_dict = check_wandb_dataset(opt.data) + self.wandb_artifact_data_dict = self.wandb_artifact_data_dict or self.data_dict - self.setup_training(opt) - if not self.wandb_artifact_data_dict: - self.wandb_artifact_data_dict = self.data_dict - # write data_dict to config. useful for resuming from artifacts.
Do this only when not resuming. + if not opt.resume: self.wandb_run.config.update({'data_dict': self.wandb_artifact_data_dict}, allow_val_change=True) + self.setup_training(opt) if self.job_type == 'Dataset Creation': self.data_dict = self.check_and_upload_dataset(opt) @@ -174,10 +185,10 @@ def __init__(self, opt, run_id, job_type='Training'): def check_and_upload_dataset(self, opt): """ Check if the dataset format is compatible and upload it as W&B artifact - + arguments: opt (namespace)-- Commandline arguments for current run - + returns: Updated dataset info dictionary where local dataset paths are replaced by WANDB_ARTIFACT_PREFIX links. """ @@ -196,10 +207,10 @@ def setup_training(self, opt): - Attempt to download model checkpoint and dataset artifacts if opt.resume starts with WANDB_ARTIFACT_PREFIX - Update data_dict, to contain info of previous run if resumed and the paths of dataset artifact if downloaded - Setup log_dict, initialize bbox_interval - + arguments: opt (namespace) -- commandline arguments for this run - + """ self.log_dict, self.current_epoch = {}, 0 self.bbox_interval = opt.bbox_interval @@ -211,9 +222,7 @@ def setup_training(self, opt): opt.weights, opt.save_period, opt.batch_size, opt.bbox_interval, opt.epochs, opt.hyp = str( self.weights), config.save_period, config.batch_size, config.bbox_interval, config.epochs, \ config.hyp - data_dict = dict(self.wandb_run.config.data_dict) # eliminates the need for config file to resume - else: - data_dict = self.data_dict + data_dict = self.data_dict if self.val_artifact is None: # If --upload_dataset is set, use the existing artifact, don't download self.train_artifact_path, self.train_artifact = self.download_dataset_artifact(data_dict.get('train'), opt.artifact_alias) @@ -243,11 +252,11 @@ def setup_training(self, opt): def download_dataset_artifact(self, path, alias): """ download the dataset artifact if the path starts with WANDB_ARTIFACT_PREFIX - + arguments: path -- path of the dataset to be used for training alias (str)-- alias of the artifact to be downloaded/used for training - + returns: (str, wandb.Artifact) -- path of the downloaded dataset and its corresponding artifact object if dataset is found otherwise returns (None, None) @@ -263,7 +272,7 @@ def download_dataset_artifact(self, path, alias): def download_model_artifact(self, opt): """ download the model checkpoint artifact if the resume path starts with WANDB_ARTIFACT_PREFIX - + arguments: opt (namespace) -- Commandline arguments for this run """ @@ -281,7 +290,7 @@ def download_model_artifact(self, opt): def log_model(self, path, opt, epoch, fitness_score, best_model=False): """ Log the model checkpoint as W&B artifact - + arguments: path (Path) -- Path of directory containing the checkpoints opt (namespace) -- Command line arguments for this run @@ -305,14 +314,14 @@ def log_model(self, path, opt, epoch, fitness_score, best_model=False): def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False): """ Log the dataset as W&B artifact and return the new data file with W&B links - + arguments: data_file (str) -- the .yaml file with information about the dataset like - path, classes etc. single_class (boolean) -- train multi-class data as single-class project (str) -- project name.
Used to construct the artifact path overwrite_config (boolean) -- overwrites the data.yaml file if set to true otherwise creates a new file with _wandb postfix. Eg -> data_wandb.yaml - + returns: the new .yaml file with artifact links. it can be used to start training directly from artifacts """ @@ -359,12 +368,12 @@ def map_val_table_path(self): def create_dataset_table(self, dataset, class_to_id, name='dataset'): """ Create and return W&B artifact containing W&B Table of the dataset. - + arguments: dataset (LoadImagesAndLabels) -- instance of LoadImagesAndLabels class used to iterate over the data to build Table class_to_id (dict(int, str)) -- hash map that maps class ids to labels name (str) -- name of the artifact - + returns: dataset artifact to be logged or used """ @@ -401,7 +410,7 @@ def create_dataset_table(self, dataset, class_to_id, name='dataset'): def log_training_progress(self, predn, path, names): """ Build evaluation Table. Uses reference from validation dataset table. - + arguments: predn (list): list of predictions in the native space in the format - [xmin, ymin, xmax, ymax, confidence, class] path (str): local path of the current evaluation image @@ -431,7 +440,7 @@ def log_training_progress(self, predn, path, names): def val_one_image(self, pred, predn, path, names, im): """ Log validation data for one image. updates the result Table if validation dataset is uploaded and log bbox media panel - + arguments: pred (list): list of scaled predictions in the format - [xmin, ymin, xmax, ymax, confidence, class] predn (list): list of predictions in the native space - [xmin, ymin, xmax, ymax, confidence, class] @@ -453,7 +462,7 @@ def val_one_image(self, pred, predn, path, names, im): def log(self, log_dict): """ save the metrics to the logging dictionary - + arguments: log_dict (Dict) -- metrics/media to be logged in current step """ @@ -464,7 +473,7 @@ def log(self, log_dict): def end_epoch(self, best_result=False): """ commit the log_dict, model artifacts and Tables to W&B and flush the log_dict. 
- + arguments: best_result (boolean): Boolean representing if the result of this evaluation is best or not """ From 7b1643b5b563596440beccf7d8ed066f51e1cf83 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 23 Aug 2021 14:38:30 +0200 Subject: [PATCH 0495/1976] Add `install=True` argument to `check_requirements` (#4512) * Add `install=True` argument to `check_requirements` * Update general.py --- utils/general.py | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/utils/general.py b/utils/general.py index 0b6e8fc7fb9a..16244903575a 100755 --- a/utils/general.py +++ b/utils/general.py @@ -172,7 +172,7 @@ def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=Fals @try_except -def check_requirements(requirements='requirements.txt', exclude=()): +def check_requirements(requirements='requirements.txt', exclude=(), install=True): # Check installed dependencies meet requirements (pass *.txt file or list of packages) prefix = colorstr('red', 'bold', 'requirements:') check_python() # check python version @@ -188,13 +188,17 @@ def check_requirements(requirements='requirements.txt', exclude=()): try: pkg.require(r) except Exception as e: # DistributionNotFound or VersionConflict if requirements not met - print(f"{prefix} {r} not found and is required by YOLOv5, attempting auto-update...") - try: - assert check_online(), f"'pip install {r}' skipped (offline)" - print(check_output(f"pip install '{r}'", shell=True).decode()) - n += 1 - except Exception as e: - print(f'{prefix} {e}') + s = f"{prefix} {r} not found and is required by YOLOv5" + if install: + print(f"{s}, attempting auto-update...") + try: + assert check_online(), f"'pip install {r}' skipped (offline)" + print(check_output(f"pip install '{r}'", shell=True).decode()) + n += 1 + except Exception as e: + print(f'{prefix} {e}') + else: + print(f'{s}. Please install and rerun your command.') if n: # if packages updated source = file.resolve() if 'file' in locals() else requirements From 79af1144c270ac7169553d450b9170f9c60f92e4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 23 Aug 2021 17:05:53 +0200 Subject: [PATCH 0496/1976] Automatic TFLite uint8 determination (#4515) * Auto TFLite uint8 detection This PR automatically determines if TFLite models are uint8 quantized rather than accepting a manual argument. 
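In essence (a sketch of the check and the affine-quantization arithmetic it enables; the interpreter handles follow the standard tf.lite.Interpreter API, variable names here are illustrative):

interpreter = tf.lite.Interpreter(model_path='yolov5s-int8.tflite')
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()  # one dict per input tensor
int8 = input_details[0]['dtype'] == np.uint8  # uint8 input dtype implies a quantized model
scale, zero_point = input_details[0]['quantization']  # real = scale * (quantized - zero_point)
x_uint8 = (x_float / scale + zero_point).astype(np.uint8)  # quantize a float32 input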
The quantization determination is based on @zldrobit comment https://github.com/ultralytics/yolov5/pull/1127#issuecomment-901713847 * Cleanup --- detect.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/detect.py b/detect.py index 601d5daf9852..15ddc1ffb6a4 100644 --- a/detect.py +++ b/detect.py @@ -52,7 +52,6 @@ def run(weights='yolov5s.pt', # model.pt path(s) hide_labels=False, # hide labels hide_conf=False, # hide confidences half=False, # use FP16 half-precision inference - tfl_int8=False, # INT8 quantized TFLite model ): save_img = not nosave and not source.endswith('.txt') # save inference images webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith( @@ -104,6 +103,7 @@ def wrap_frozen_graph(gd, inputs, outputs): interpreter.allocate_tensors() # allocate input_details = interpreter.get_input_details() # inputs output_details = interpreter.get_output_details() # outputs + int8 = input_details[0]['dtype'] == np.uint8 # is TFLite quantized uint8 model imgsz = check_img_size(imgsz, s=stride) # check image size # Dataloader @@ -145,15 +145,15 @@ def wrap_frozen_graph(gd, inputs, outputs): elif saved_model: pred = model(imn, training=False).numpy() elif tflite: - if tfl_int8: + if int8: scale, zero_point = input_details[0]['quantization'] - imn = (imn / scale + zero_point).astype(np.uint8) + imn = (imn / scale + zero_point).astype(np.uint8) # de-scale interpreter.set_tensor(input_details[0]['index'], imn) interpreter.invoke() pred = interpreter.get_tensor(output_details[0]['index']) - if tfl_int8: + if int8: scale, zero_point = output_details[0]['quantization'] - pred = (pred.astype(np.float32) - zero_point) * scale + pred = (pred.astype(np.float32) - zero_point) * scale # re-scale pred[..., 0] *= imgsz[1] # x pred[..., 1] *= imgsz[0] # y pred[..., 2] *= imgsz[1] # w @@ -268,7 +268,6 @@ def parse_opt(): parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels') parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences') parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') - parser.add_argument('--tfl-int8', action='store_true', help='INT8 quantized TFLite model') opt = parser.parse_args() opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand return opt From 2da6444c9251f77cfd3e410369cd067245d961b5 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 25 Aug 2021 21:23:28 +0200 Subject: [PATCH 0497/1976] Fix for `python models/yolo.py --profile` (#4541) Profiling fix copies input to Detect layer to circumvent inplace changes to the feature maps. --- models/yolo.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/models/yolo.py b/models/yolo.py index dee6032d069d..8618401b3455 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -48,7 +48,6 @@ def __init__(self, nc=80, anchors=(), ch=(), inplace=True): # detection layer self.inplace = inplace # use in-place ops (e.g. 
slice assignment) def forward(self, x): - # x = x.copy() # for profiling z = [] # inference output for i in range(self.nl): x[i] = self.m[i](x[i]) # conv @@ -143,10 +142,11 @@ def forward_once(self, x, profile=False, visualize=False): x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers if profile: - o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPs + c = isinstance(m, Detect) # copy input as inplace fix + o = thop.profile(m, inputs=(x.copy() if c else x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPs t = time_sync() for _ in range(10): - _ = m(x) + m(x.copy() if c else x) dt.append((time_sync() - t) * 100) if m == self.model[0]: LOGGER.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s} {'module'}") From 11f85e7e71d91810460a2eee22235a2264b458eb Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 26 Aug 2021 15:51:04 +0200 Subject: [PATCH 0498/1976] Auto-fix corrupt JPEGs (#4548) * Autofix corrupt JPEGs This PR automatically re-saves corrupt JPEGs and trains with the resaved images. WARNING: this will overwrite the existing corrupt JPEGs in a dataset and replace them with correct JPEGs, though the filesize may increase and the image contents may not be exactly the same due to lossy JPEG compression schemes. Results may vary by JPEG decoder and hardware. Current behavior is to exclude corrupt JPEGs from training with a warning to the user, but many users have been complaining about large parts of their dataset being excluded from training. * Clarify re-save reason --- utils/datasets.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index 25a2ba6f9561..eea8ad348452 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -314,7 +314,7 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True): print('') # newline # check for common shapes - s = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0].shape for x in self.imgs], 0) # shapes + s = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0].shape for x in self.imgs]) self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal if not self.rect: print('WARNING: Different stream shapes detected. 
For optimal performance supply similarly-shaped streams.') @@ -568,7 +568,7 @@ def __getitem__(self, index): if self.augment: # Albumentations img, labels = self.albumentations(img, labels) - nl = len(labels) # update after albumentations + nl = len(labels) # update after albumentations # HSV color-space augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v']) @@ -861,7 +861,7 @@ def autosplit(path='../datasets/coco128/images', weights=(0.9, 0.1, 0.0), annota def verify_image_label(args): # Verify one image-label pair im_file, lb_file, prefix = args - nm, nf, ne, nc = 0, 0, 0, 0 # number missing, found, empty, corrupt + nm, nf, ne, nc, msg, segments = 0, 0, 0, 0, '', [] # number (missing, found, empty, corrupt), message, segments try: # verify images im = Image.open(im_file) @@ -872,10 +872,11 @@ def verify_image_label(args): if im.format.lower() in ('jpg', 'jpeg'): with open(im_file, 'rb') as f: f.seek(-2, 2) - assert f.read() == b'\xff\xd9', 'corrupted JPEG' + if f.read() != b'\xff\xd9': # corrupt JPEG + im.save(im_file, format='JPEG', subsampling=0, quality=100) # re-save image + msg = f'{prefix}WARNING: corrupt JPEG restored and saved {im_file}' # verify labels - segments = [] # instance segments if os.path.isfile(lb_file): nf = 1 # label found with open(lb_file, 'r') as f: @@ -896,7 +897,7 @@ def verify_image_label(args): else: nm = 1 # label missing l = np.zeros((0, 5), dtype=np.float32) - return im_file, l, shape, segments, nm, nf, ne, nc, '' + return im_file, l, shape, segments, nm, nf, ne, nc, msg except Exception as e: nc = 1 msg = f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}' From e899d6e8fbfc990f60a822fdd482b350f2d162a4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 27 Aug 2021 13:01:21 +0200 Subject: [PATCH 0499/1976] Fix for corrupt JPEGs auto-fix PR (#4560) Auto-fix corrupt JPEGs PR introduced a bug whereby the f.seek() operation read all of the bytes in the image, resulting in the PIL image having nothing to read upon the .save() operation. Fix was to re-open the image using PIL before saving. --- utils/datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/datasets.py b/utils/datasets.py index eea8ad348452..852bb7c04aa8 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -873,7 +873,7 @@ def verify_image_label(args): with open(im_file, 'rb') as f: f.seek(-2, 2) if f.read() != b'\xff\xd9': # corrupt JPEG - im.save(im_file, format='JPEG', subsampling=0, quality=100) # re-save image + Image.open(im_file).save(im_file, format='JPEG', subsampling=0, quality=100) # re-save image msg = f'{prefix}WARNING: corrupt JPEG restored and saved {im_file}' # verify labels From 8ac96b797538d6a5e882e56f9a48f3d015bcf952 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 27 Aug 2021 13:23:29 +0200 Subject: [PATCH 0500/1976] Fix for AP calculation limits 0.0 - 1.0 (#4563) This PR brings alignment in AP computation practices with Detectron2 and MMDetection. 
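In effect, the precision envelope is now integrated over the full recall axis r in [0, 1], anchored at p=1 for r=0 and p=0 for r=1. A sketch of the surrounding compute_ap() flow (the 101-point interpolation step shown is an assumption about the unchanged remainder of the function):

mrec = np.concatenate(([0.0], recall, [1.0]))  # recall sentinels
mpre = np.concatenate(([1.0], precision, [0.0]))  # precision sentinels
mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))  # monotonic precision envelope
x = np.linspace(0, 1, 101)  # COCO-style 101-point recall grid
ap = np.trapz(np.interp(x, mrec, mpre), x)  # area under p(r) over [0, 1]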
Problem first noted by @yusiyoh in https://github.com/ultralytics/yolov5/issues/4546 --- utils/metrics.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/metrics.py b/utils/metrics.py index ddc425910a75..44b9a3c16488 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -91,8 +91,8 @@ def compute_ap(recall, precision): """ # Append sentinel values to beginning and end - mrec = np.concatenate(([0.], recall, [recall[-1] + 0.01])) - mpre = np.concatenate(([1.], precision, [0.])) + mrec = np.concatenate(([0.0], recall, [1.0])) + mpre = np.concatenate(([1.0], precision, [0.0])) # Compute the precision envelope mpre = np.flip(np.maximum.accumulate(np.flip(mpre))) From 8b18b66304317276f4bfc7cc7741bd535dc5fa7a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 27 Aug 2021 16:00:39 +0200 Subject: [PATCH 0501/1976] ONNX opset 13 (#4566) --- export.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/export.py b/export.py index 674609463a9d..5db09884bae8 100644 --- a/export.py +++ b/export.py @@ -176,7 +176,7 @@ def parse_opt(): parser.add_argument('--optimize', action='store_true', help='TorchScript: optimize for mobile') parser.add_argument('--dynamic', action='store_true', help='ONNX: dynamic axes') parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model') - parser.add_argument('--opset', type=int, default=12, help='ONNX: opset version') + parser.add_argument('--opset', type=int, default=13, help='ONNX: opset version') opt = parser.parse_args() return opt From 93cc0157483bf206d23797a4326ce4e1aaab9bea Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 28 Aug 2021 19:03:52 +0200 Subject: [PATCH 0502/1976] Add EarlyStopping feature (#4576) * Add EarlyStopping feature * Add comment * Cleanup * Cleanup2 * debug * debug2 * debug3 * debug3 * debug4 * debug5 * debug6 * debug7 * debug8 * debug9 * debug10 * debug11 * debug12 * Cleanup * Add TODO for known DDP issue --- train.py | 19 ++++++++++++++++++- utils/torch_utils.py | 17 +++++++++++++++++ 2 files changed, 35 insertions(+), 1 deletion(-) diff --git a/train.py b/train.py index 275e0a4b1a8e..a6c34cbc466c 100644 --- a/train.py +++ b/train.py @@ -40,7 +40,8 @@ from utils.downloads import attempt_download from utils.loss import ComputeLoss from utils.plots import plot_labels, plot_evolve -from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, de_parallel +from utils.torch_utils import EarlyStopping, ModelEMA, de_parallel, intersect_dicts, select_device, \ torch_distributed_zero_first from utils.loggers.wandb.wandb_utils import check_wandb_resume from utils.metrics import fitness from utils.loggers import Loggers @@ -255,6 +256,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary results = (0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls) scheduler.last_epoch = start_epoch - 1 # do not move scaler = amp.GradScaler(enabled=cuda) + stopper = EarlyStopping(patience=opt.patience) compute_loss = ComputeLoss(model) # init loss class LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n' f'Using {train_loader.num_workers} dataloader workers\n' @@ -389,6 +391,20 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary del ckpt callbacks.on_model_save(last, epoch, final_epoch, best_fitness, fi) + # Stop Single-GPU + if stopper(epoch=epoch, fitness=fi): + break + + # Stop DDP TODO: known issues https://github.com/ultralytics/yolov5/pull/4576 + # stop = stopper(epoch=epoch, fitness=fi) + # if RANK == 0: + #
dist.broadcast_object_list([stop], 0) # broadcast 'stop' to all ranks + + # Stop DDP + # with torch_distributed_zero_first(RANK): + # if stop: + # break # must break all DDP ranks + # end epoch ---------------------------------------------------------------------------------------------------- # end training ----------------------------------------------------------------------------------------------------- if RANK in [-1, 0]: @@ -454,6 +470,7 @@ def parse_opt(known=False): parser.add_argument('--artifact_alias', type=str, default="latest", help='version of dataset artifact to be used') parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify') parser.add_argument('--freeze', type=int, default=0, help='Number of layers to freeze. backbone=10, all=24') + parser.add_argument('--patience', type=int, default=30, help='EarlyStopping patience (epochs)') opt = parser.parse_known_args()[0] if known else parser.parse_args() return opt diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 2eb51d80f34e..2e153921eb10 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -293,6 +293,23 @@ def copy_attr(a, b, include=(), exclude=()): setattr(a, k, v) +class EarlyStopping: + # YOLOv5 simple early stopper + def __init__(self, patience=30): + self.best_fitness = 0.0 # i.e. mAP + self.best_epoch = 0 + self.patience = patience # epochs to wait after fitness stops improving to stop + + def __call__(self, epoch, fitness): + if fitness >= self.best_fitness: # >= 0 to allow for early zero-fitness stage of training + self.best_epoch = epoch + self.best_fitness = fitness + stop = (epoch - self.best_epoch) >= self.patience # stop training if patience exceeded + if stop: + LOGGER.info(f'EarlyStopping patience {self.patience} exceeded, stopping training.') + return stop + + class ModelEMA: """ Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models Keep a moving average of everything in the model state_dict (parameters and buffers).
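For reference, a minimal sketch of driving the new EarlyStopping above from a training loop (train_one_epoch() and evaluate() are illustrative stand-ins for the train.py epoch body and the val.run()/fitness() calls):

stopper = EarlyStopping(patience=30)
for epoch in range(epochs):
    train_one_epoch(model)  # hypothetical epoch step
    fi = evaluate(model)  # fitness scalar, e.g. 0.1*mAP@0.5 + 0.9*mAP@0.5:0.95
    if stopper(epoch=epoch, fitness=fi):
        break  # no fitness improvement in the last `patience` epochs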
From d7aa3f153d049b89267b1b594a481a1a27fe27e6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 28 Aug 2021 19:17:21 +0200 Subject: [PATCH 0503/1976] Remove `image_weights` DDP code (#4579) * Initial commit * Update --- train.py | 20 ++++++-------------- 1 file changed, 6 insertions(+), 14 deletions(-) diff --git a/train.py b/train.py index a6c34cbc466c..aaee41a499e4 100644 --- a/train.py +++ b/train.py @@ -265,21 +265,13 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------ model.train() - # Update image weights (optional) + # Update image weights (optional, single-GPU only) if opt.image_weights: - # Generate indices - if RANK in [-1, 0]: - cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights - iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights - dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx - # Broadcast if DDP - if RANK != -1: - indices = (torch.tensor(dataset.indices) if RANK == 0 else torch.zeros(dataset.n)).int() - dist.broadcast(indices, 0) - if RANK != 0: - dataset.indices = indices.cpu().numpy() - - # Update mosaic border + cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights + iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights + dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx + + # Update mosaic border (optional) # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs) # dataset.mosaic_border = [b - imgsz, -b] # height, width borders From bbfafeabdbf7785f8da5e4f9880df27869a71218 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 29 Aug 2021 13:49:04 +0200 Subject: [PATCH 0504/1976] Add `Profile()` profiler (#4587) * Add `Profile()` profiler * CamelCase Timeout --- utils/general.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/utils/general.py b/utils/general.py index 16244903575a..c74d8bb299de 100755 --- a/utils/general.py +++ b/utils/general.py @@ -39,8 +39,17 @@ os.environ['NUMEXPR_MAX_THREADS'] = str(min(os.cpu_count(), 8)) # NumExpr max threads -class timeout(contextlib.ContextDecorator): - # Usage: @timeout(seconds) decorator or 'with timeout(seconds):' context manager +class Profile(contextlib.ContextDecorator): + # Usage: @Profile() decorator or 'with Profile():' context manager + def __enter__(self): + self.start = time.time() + + def __exit__(self, type, value, traceback): + print(f'Profile results: {time.time() - self.start:.5f}s') + + +class Timeout(contextlib.ContextDecorator): + # Usage: @Timeout(seconds) decorator or 'with Timeout(seconds):' context manager def __init__(self, seconds, *, timeout_msg='', suppress_timeout_errors=True): self.seconds = int(seconds) self.timeout_message = timeout_msg From 7b35971ba5942aea0ad81d2c2663629d3e733cf8 Mon Sep 17 00:00:00 2001 From: Takumi Karasawa Date: Sun, 29 Aug 2021 22:08:27 +0900 Subject: [PATCH 0505/1976] Fix bug in `plot_one_box` when label is `None` (#4588) --- utils/plots.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/plots.py b/utils/plots.py index 76c161a13d1a..25d70dbabc75 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -72,7 +72,7 @@ def plot_one_box(box, im, color=(128, 128, 128), txt_color=(255, 255, 255), labe assert im.data.contiguous, 'Image not contiguous. 
Apply np.ascontiguousarray(im) to plot_on_box() input image.' lw = line_width or max(int(min(im.size) / 200), 2) # line width - if use_pil or not is_ascii(label): # use PIL + if use_pil or (label is not None and not is_ascii(label)): # use PIL im = Image.fromarray(im) draw = ImageDraw.Draw(im) draw.rectangle(box, width=lw + 1, outline=color) # plot From de44376d1b0a091a5970c52864a6555978e2ff79 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 29 Aug 2021 16:46:13 +0200 Subject: [PATCH 0506/1976] Create `Annotator()` class (#4591) * Add Annotator() class * Download Arial * 2x for loop * Cleanup * tuple 2 list * max_size=1920 * bold logging results to * tolist() * im = annotator.im * PIL save in detect.py * Smart asarray in detect.py * revert to cv2.imwrite * Cleanup * Return result asarray * Add `Profile()` profiler * CamelCase Timeout * Resize after mosaic * pillow>=8.0.0 * daemon imwrite * Add cv2 support * Remove plot_wh_methods and plot_one_box * pil=False for hubconf.py annotations * im.shape bug fix * colorstr common.py * join daemons * Update t.daemon * Removed daemon saving --- detect.py | 6 +- models/common.py | 11 ++- requirements.txt | 2 +- train.py | 2 +- utils/general.py | 5 +- utils/plots.py | 189 ++++++++++++++++++++++------------------------- 6 files changed, 106 insertions(+), 109 deletions(-) diff --git a/detect.py b/detect.py index 15ddc1ffb6a4..77502b0c5bee 100644 --- a/detect.py +++ b/detect.py @@ -23,7 +23,7 @@ from utils.datasets import LoadStreams, LoadImages from utils.general import check_img_size, check_requirements, check_imshow, colorstr, non_max_suppression, \ apply_classifier, scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path, save_one_box -from utils.plots import colors, plot_one_box +from utils.plots import colors, Annotator from utils.torch_utils import select_device, load_classifier, time_sync @@ -181,6 +181,7 @@ def wrap_frozen_graph(gd, inputs, outputs): s += '%gx%g ' % img.shape[2:] # print string gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh imc = im0.copy() if save_crop else im0 # for save_crop + annotator = Annotator(im0, line_width=line_thickness, pil=False) if len(det): # Rescale boxes from img_size to im0 size det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round() @@ -201,7 +202,7 @@ def wrap_frozen_graph(gd, inputs, outputs): if save_img or save_crop or view_img: # Add bbox to image c = int(cls) # integer class label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') - im0 = plot_one_box(xyxy, im0, label=label, color=colors(c, True), line_width=line_thickness) + annotator.box_label(xyxy, label, color=colors(c, True)) if save_crop: save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) @@ -209,6 +210,7 @@ def wrap_frozen_graph(gd, inputs, outputs): print(f'{s}Done. 
({t2 - t1:.3f}s)') # Stream results + im0 = annotator.result() if view_img: cv2.imshow(str(p), im0) cv2.waitKey(1) # 1 millisecond diff --git a/models/common.py b/models/common.py index e1f5aea3abed..0c60b39a483d 100644 --- a/models/common.py +++ b/models/common.py @@ -18,8 +18,9 @@ from torch.cuda import amp from utils.datasets import exif_transpose, letterbox -from utils.general import non_max_suppression, make_divisible, scale_coords, increment_path, xyxy2xywh, save_one_box -from utils.plots import colors, plot_one_box +from utils.general import colorstr, non_max_suppression, make_divisible, scale_coords, increment_path, xyxy2xywh, \ + save_one_box +from utils.plots import colors, Annotator from utils.torch_utils import time_sync LOGGER = logging.getLogger(__name__) @@ -370,12 +371,14 @@ def display(self, pprint=False, show=False, save=False, crop=False, render=False n = (pred[:, -1] == c).sum() # detections per class str += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string if show or save or render or crop: + annotator = Annotator(im, pil=False) for *box, conf, cls in reversed(pred): # xyxy, confidence, class label = f'{self.names[int(cls)]} {conf:.2f}' if crop: save_one_box(box, im, file=save_dir / 'crops' / self.names[int(cls)] / self.files[i]) else: # all others - im = plot_one_box(box, im, label=label, color=colors(cls)) + annotator.box_label(box, label, color=colors(cls)) + im = annotator.im else: str += '(no detections)' @@ -388,7 +391,7 @@ def display(self, pprint=False, show=False, save=False, crop=False, render=False f = self.files[i] im.save(save_dir / f) # save if i == self.n - 1: - LOGGER.info(f"Saved {self.n} image{'s' * (self.n > 1)} to '{save_dir}'") + LOGGER.info(f"Saved {self.n} image{'s' * (self.n > 1)} to {colorstr('bold', save_dir)}") if render: self.imgs[i] = np.asarray(im) diff --git a/requirements.txt b/requirements.txt index f6361d591f1b..2ad65ba53e29 100755 --- a/requirements.txt +++ b/requirements.txt @@ -4,7 +4,7 @@ matplotlib>=3.2.2 numpy>=1.18.5 opencv-python>=4.1.2 -Pillow +Pillow>=8.0.0 PyYAML>=5.3.1 scipy>=1.4.1 torch>=1.7.0 diff --git a/train.py b/train.py index aaee41a499e4..2fe38ef043d0 100644 --- a/train.py +++ b/train.py @@ -260,7 +260,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary compute_loss = ComputeLoss(model) # init loss class LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n' f'Using {train_loader.num_workers} dataloader workers\n' - f'Logging results to {save_dir}\n' + f"Logging results to {colorstr('bold', save_dir)}\n" f'Starting training for {epochs} epochs...') for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------ model.train() diff --git a/utils/general.py b/utils/general.py index c74d8bb299de..fe9a8ac537fb 100755 --- a/utils/general.py +++ b/utils/general.py @@ -122,9 +122,10 @@ def is_pip(): return 'site-packages' in Path(__file__).absolute().parts -def is_ascii(str=''): +def is_ascii(s=''): # Is string composed of all ASCII (no UTF) characters? - return len(str.encode().decode('ascii', 'ignore')) == len(str) + s = str(s) # convert to str() in case of None, etc. 
+ return len(s.encode().decode('ascii', 'ignore')) == len(s) def emojis(str=''): diff --git a/utils/plots.py b/utils/plots.py index 25d70dbabc75..696d32345dd5 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -67,51 +67,59 @@ def butter_lowpass(cutoff, fs, order): return filtfilt(b, a, data) # forward-backward filter -def plot_one_box(box, im, color=(128, 128, 128), txt_color=(255, 255, 255), label=None, line_width=3, use_pil=False): - # Plots one xyxy box on image im with label - assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to plot_on_box() input image.' - lw = line_width or max(int(min(im.size) / 200), 2) # line width - - if use_pil or (label is not None and not is_ascii(label)): # use PIL - im = Image.fromarray(im) - draw = ImageDraw.Draw(im) - draw.rectangle(box, width=lw + 1, outline=color) # plot - if label: - font = ImageFont.truetype("Arial.ttf", size=max(round(max(im.size) / 40), 12)) - txt_width, txt_height = font.getsize(label) - draw.rectangle([box[0], box[1] - txt_height + 4, box[0] + txt_width, box[1]], fill=color) - draw.text((box[0], box[1] - txt_height + 1), label, fill=txt_color, font=font) - return np.asarray(im) - else: # use OpenCV - c1, c2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3])) - cv2.rectangle(im, c1, c2, color, thickness=lw, lineType=cv2.LINE_AA) - if label: - tf = max(lw - 1, 1) # font thickness - txt_width, txt_height = cv2.getTextSize(label, 0, fontScale=lw / 3, thickness=tf)[0] - c2 = c1[0] + txt_width, c1[1] - txt_height - 3 - cv2.rectangle(im, c1, c2, color, -1, cv2.LINE_AA) # filled - cv2.putText(im, label, (c1[0], c1[1] - 2), 0, lw / 3, txt_color, thickness=tf, lineType=cv2.LINE_AA) - return im - - -def plot_wh_methods(): # from utils.plots import *; plot_wh_methods() - # Compares the two methods for width-height anchor multiplication - # https://github.com/ultralytics/yolov3/issues/168 - x = np.arange(-4.0, 4.0, .1) - ya = np.exp(x) - yb = torch.sigmoid(torch.from_numpy(x)).numpy() * 2 - - fig = plt.figure(figsize=(6, 3), tight_layout=True) - plt.plot(x, ya, '.-', label='YOLOv3') - plt.plot(x, yb ** 2, '.-', label='YOLOv5 ^2') - plt.plot(x, yb ** 1.6, '.-', label='YOLOv5 ^1.6') - plt.xlim(left=-4, right=4) - plt.ylim(bottom=0, top=6) - plt.xlabel('input') - plt.ylabel('output') - plt.grid() - plt.legend() - fig.savefig('comparison.png', dpi=200) +class Annotator: + # YOLOv5 PIL Annotator class + def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=True): + assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to plot_on_box() input image.' 
+ self.pil = pil + if self.pil: # use PIL + self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) + self.draw = ImageDraw.Draw(self.im) + s = sum(self.im.size) / 2 # mean shape + f = font_size or max(round(s * 0.035), 12) + try: + self.font = ImageFont.truetype(font, size=f) + except: # download TTF + url = "https://github.com/ultralytics/yolov5/releases/download/v1.0/" + font + torch.hub.download_url_to_file(url, font) + self.font = ImageFont.truetype(font, size=f) + self.fh = self.font.getsize('a')[1] - 3 # font height + else: # use cv2 + self.im = im + s = sum(im.shape) / 2 # mean shape + self.lw = line_width or max(round(s * 0.003), 2) # line width + + def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)): + # Add one xyxy box to image with label + if self.pil or not is_ascii(label): + self.draw.rectangle(box, width=self.lw, outline=color) # box + if label: + w = self.font.getsize(label)[0] # text width + self.draw.rectangle([box[0], box[1] - self.fh, box[0] + w + 1, box[1] + 1], fill=color) + self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls') + else: # cv2 + c1, c2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3])) + cv2.rectangle(self.im, c1, c2, color, thickness=self.lw, lineType=cv2.LINE_AA) + if label: + tf = max(self.lw - 1, 1) # font thickness + w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[0] + c2 = c1[0] + w, c1[1] - h - 3 + cv2.rectangle(self.im, c1, c2, color, -1, cv2.LINE_AA) # filled + cv2.putText(self.im, label, (c1[0], c1[1] - 2), 0, self.lw / 3, txt_color, thickness=tf, + lineType=cv2.LINE_AA) + + def rectangle(self, xy, fill=None, outline=None, width=1): + # Add rectangle to image (PIL-only) + self.draw.rectangle(xy, fill, outline, width) + + def text(self, xy, text, txt_color=(255, 255, 255)): + # Add text to image (PIL-only) + w, h = self.font.getsize(text) # text width, height + self.draw.text((xy[0], xy[1] - h + 1), text, fill=txt_color, font=self.font) + + def result(self): + # Return annotated image as array + return np.asarray(self.im) def output_to_target(output): @@ -123,82 +131,65 @@ def output_to_target(output): return np.array(targets) -def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=640, max_subplots=16): +def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=1920, max_subplots=16): # Plot image grid with labels - if isinstance(images, torch.Tensor): images = images.cpu().float().numpy() if isinstance(targets, torch.Tensor): targets = targets.cpu().numpy() - - # un-normalise if np.max(images[0]) <= 1: - images *= 255 - - tl = 3 # line thickness - tf = max(tl - 1, 1) # font thickness + images *= 255.0 # de-normalise (optional) bs, _, h, w = images.shape # batch size, _, height, width bs = min(bs, max_subplots) # limit plot images ns = np.ceil(bs ** 0.5) # number of subplots (square) - # Check if we should resize - scale_factor = max_size / max(h, w) - if scale_factor < 1: - h = math.ceil(scale_factor * h) - w = math.ceil(scale_factor * w) - + # Build Image mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init - for i, img in enumerate(images): + for i, im in enumerate(images): if i == max_subplots: # if last batch has fewer images than we expect break - - block_x = int(w * (i // ns)) - block_y = int(h * (i % ns)) - - img = img.transpose(1, 2, 0) - if scale_factor < 1: - img = cv2.resize(img, (w, h)) - - mosaic[block_y:block_y + h, block_x:block_x + w, :] = 
img + x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin + im = im.transpose(1, 2, 0) + mosaic[y:y + h, x:x + w, :] = im + + # Resize (optional) + scale = max_size / ns / max(h, w) + if scale < 1: + h = math.ceil(scale * h) + w = math.ceil(scale * w) + mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h))) + + # Annotate + fs = int(h * ns * 0.02) # font size + annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs) + for i in range(i + 1): + x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin + annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders + if paths: + annotator.text((x + 5, y + 5 + h), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames if len(targets) > 0: - image_targets = targets[targets[:, 0] == i] - boxes = xywh2xyxy(image_targets[:, 2:6]).T - classes = image_targets[:, 1].astype('int') - labels = image_targets.shape[1] == 6 # labels if no conf column - conf = None if labels else image_targets[:, 6] # check for confidence presence (label vs pred) + ti = targets[targets[:, 0] == i] # image targets + boxes = xywh2xyxy(ti[:, 2:6]).T + classes = ti[:, 1].astype('int') + labels = ti.shape[1] == 6 # labels if no conf column + conf = None if labels else ti[:, 6] # check for confidence presence (label vs pred) if boxes.shape[1]: if boxes.max() <= 1.01: # if normalized with tolerance 0.01 boxes[[0, 2]] *= w # scale to pixels boxes[[1, 3]] *= h - elif scale_factor < 1: # absolute coords need scale if image scales - boxes *= scale_factor - boxes[[0, 2]] += block_x - boxes[[1, 3]] += block_y - for j, box in enumerate(boxes.T): - cls = int(classes[j]) + elif scale < 1: # absolute coords need scale if image scales + boxes *= scale + boxes[[0, 2]] += x + boxes[[1, 3]] += y + for j, box in enumerate(boxes.T.tolist()): + cls = classes[j] color = colors(cls) cls = names[cls] if names else cls if labels or conf[j] > 0.25: # 0.25 conf thresh - label = '%s' % cls if labels else '%s %.1f' % (cls, conf[j]) - mosaic = plot_one_box(box, mosaic, label=label, color=color, line_width=tl) - - # Draw image filename labels - if paths: - label = Path(paths[i]).name[:40] # trim to 40 char - t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0] - cv2.putText(mosaic, label, (block_x + 5, block_y + t_size[1] + 5), 0, tl / 3, [220, 220, 220], thickness=tf, - lineType=cv2.LINE_AA) - - # Image border - cv2.rectangle(mosaic, (block_x, block_y), (block_x + w, block_y + h), (255, 255, 255), thickness=3) - - if fname: - r = min(1280. 
/ max(h, w) / ns, 1.0) # ratio to limit image size - mosaic = cv2.resize(mosaic, (int(ns * w * r), int(ns * h * r)), interpolation=cv2.INTER_AREA) - # cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB)) # cv2 save - Image.fromarray(mosaic).save(fname) # PIL save - return mosaic + label = f'{cls}' if labels else f'{cls} {conf[j]:.1f}' + annotator.box_label(box, label, color=color) + annotator.im.save(fname) # save def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''): From e5e5ebc7999e26ec0d5f96bb6d12de25e412d98e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 29 Aug 2021 17:15:18 +0200 Subject: [PATCH 0507/1976] Auto-UTF handling (#4594) --- detect.py | 7 ++++--- models/common.py | 9 +++++---- utils/general.py | 2 +- 3 files changed, 10 insertions(+), 8 deletions(-) diff --git a/detect.py b/detect.py index 77502b0c5bee..0b1d93897d4c 100644 --- a/detect.py +++ b/detect.py @@ -21,9 +21,9 @@ from models.experimental import attempt_load from utils.datasets import LoadStreams, LoadImages -from utils.general import check_img_size, check_requirements, check_imshow, colorstr, non_max_suppression, \ +from utils.general import check_img_size, check_requirements, check_imshow, colorstr, is_ascii, non_max_suppression, \ apply_classifier, scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path, save_one_box -from utils.plots import colors, Annotator +from utils.plots import Annotator, colors from utils.torch_utils import select_device, load_classifier, time_sync @@ -105,6 +105,7 @@ def wrap_frozen_graph(gd, inputs, outputs): output_details = interpreter.get_output_details() # outputs int8 = input_details[0]['dtype'] == np.uint8 # is TFLite quantized uint8 model imgsz = check_img_size(imgsz, s=stride) # check image size + ascii = is_ascii(names) # names are ascii (use PIL for UTF-8) # Dataloader if webcam: @@ -181,7 +182,7 @@ def wrap_frozen_graph(gd, inputs, outputs): s += '%gx%g ' % img.shape[2:] # print string gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh imc = im0.copy() if save_crop else im0 # for save_crop - annotator = Annotator(im0, line_width=line_thickness, pil=False) + annotator = Annotator(im0, line_width=line_thickness, pil=not ascii) if len(det): # Rescale boxes from img_size to im0 size det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round() diff --git a/models/common.py b/models/common.py index 0c60b39a483d..90bfef5124b3 100644 --- a/models/common.py +++ b/models/common.py @@ -18,9 +18,9 @@ from torch.cuda import amp from utils.datasets import exif_transpose, letterbox -from utils.general import colorstr, non_max_suppression, make_divisible, scale_coords, increment_path, xyxy2xywh, \ - save_one_box -from utils.plots import colors, Annotator +from utils.general import colorstr, increment_path, is_ascii, make_divisible, non_max_suppression, save_one_box, \ + scale_coords, xyxy2xywh +from utils.plots import Annotator, colors from utils.torch_utils import time_sync LOGGER = logging.getLogger(__name__) @@ -354,6 +354,7 @@ def __init__(self, imgs, pred, files, times=None, names=None, shape=None): self.imgs = imgs # list of images as numpy arrays self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls) self.names = names # class names + self.ascii = is_ascii(names) # names are ascii (use PIL for UTF-8) self.files = files # image filenames self.xyxy = pred # xyxy pixels self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels @@ -371,7 +372,7 @@ def display(self, pprint=False, show=False, save=False, 
crop=False, render=False n = (pred[:, -1] == c).sum() # detections per class str += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string if show or save or render or crop: - annotator = Annotator(im, pil=False) + annotator = Annotator(im, pil=not self.ascii) for *box, conf, cls in reversed(pred): # xyxy, confidence, class label = f'{self.names[int(cls)]} {conf:.2f}' if crop: diff --git a/utils/general.py b/utils/general.py index fe9a8ac537fb..ba1e4f58cd86 100755 --- a/utils/general.py +++ b/utils/general.py @@ -124,7 +124,7 @@ def is_pip(): def is_ascii(s=''): # Is string composed of all ASCII (no UTF) characters? - s = str(s) # convert to str() in case of None, etc. + s = str(s) # convert list, tuple, None, etc. to str return len(s.encode().decode('ascii', 'ignore')) == len(s) From dbbc6b5c48c2f2ff75501e5fec7fd78bcbb632fb Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 29 Aug 2021 17:44:51 +0200 Subject: [PATCH 0508/1976] Re-order `plots.py` to class-first (#4595) --- utils/plots.py | 50 ++++++++++++++++++++++++++------------------------ 1 file changed, 26 insertions(+), 24 deletions(-) diff --git a/utils/plots.py b/utils/plots.py index 696d32345dd5..99c8cc2f7044 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -45,30 +45,8 @@ def hex2rgb(h): # rgb order (PIL) colors = Colors() # create instance for 'from utils.plots import colors' -def hist2d(x, y, n=100): - # 2d histogram used in labels.png and evolve.png - xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n) - hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges)) - xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1) - yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1) - return np.log(hist[xidx, yidx]) - - -def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5): - from scipy.signal import butter, filtfilt - - # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy - def butter_lowpass(cutoff, fs, order): - nyq = 0.5 * fs - normal_cutoff = cutoff / nyq - return butter(order, normal_cutoff, btype='low', analog=False) - - b, a = butter_lowpass(cutoff, fs, order=order) - return filtfilt(b, a, data) # forward-backward filter - - class Annotator: - # YOLOv5 PIL Annotator class + # YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=True): assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to plot_on_box() input image.' 
self.pil = pil @@ -79,9 +57,11 @@ def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=Tr f = font_size or max(round(s * 0.035), 12) try: self.font = ImageFont.truetype(font, size=f) - except: # download TTF + except Exception as e: # download TTF if missing + print(f'WARNING: Annotator font {font} not found: {e}') url = "https://github.com/ultralytics/yolov5/releases/download/v1.0/" + font torch.hub.download_url_to_file(url, font) + print(f'Annotator font successfully downloaded from {url} to {font}') self.font = ImageFont.truetype(font, size=f) self.fh = self.font.getsize('a')[1] - 3 # font height else: # use cv2 @@ -122,6 +102,28 @@ def result(self): return np.asarray(self.im) +def hist2d(x, y, n=100): + # 2d histogram used in labels.png and evolve.png + xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n) + hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges)) + xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1) + yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1) + return np.log(hist[xidx, yidx]) + + +def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5): + from scipy.signal import butter, filtfilt + + # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy + def butter_lowpass(cutoff, fs, order): + nyq = 0.5 * fs + normal_cutoff = cutoff / nyq + return butter(order, normal_cutoff, btype='low', analog=False) + + b, a = butter_lowpass(cutoff, fs, order=order) + return filtfilt(b, a, data) # forward-backward filter + + def output_to_target(output): # Convert model output to target format [batch_id, class_id, x, y, w, h, conf] targets = [] From bfad3644555c2e8dd82d194ca11842e5d7723798 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 29 Aug 2021 17:56:35 +0200 Subject: [PATCH 0509/1976] Created using Colaboratory --- tutorial.ipynb | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index ba6d19113a93..d3388670f56c 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -415,7 +415,7 @@ "clear_output()\n", "print(f\"Setup complete. 
Using torch {torch.__version__} ({torch.cuda.get_device_properties(0).name if torch.cuda.is_available() else 'CPU'})\")" ], - "execution_count": 1, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -461,7 +461,7 @@ "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images/\n", "#Image(filename='runs/detect/exp/zidane.jpg', width=600)" ], - "execution_count": 4, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -538,7 +538,7 @@ "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017val.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../datasets && rm tmp.zip" ], - "execution_count": 5, + "execution_count": null, "outputs": [ { "output_type": "display_data", @@ -571,7 +571,7 @@ "# Run YOLOv5x on COCO val2017\n", "!python val.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65 --half" ], - "execution_count": 6, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -734,7 +734,7 @@ "# Train YOLOv5s on COCO128 for 3 epochs\n", "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache" ], - "execution_count": 8, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -853,13 +853,13 @@ "\n", "All results are logged by default to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc. View train and val jpgs to see mosaics, labels, predictions and augmentation effects. Note an Ultralytics **Mosaic Dataloader** is used for training (shown below), which combines 4 images into 1 mosaic during training.\n", "\n", - "> \n", + "> \n", "`train_batch0.jpg` shows train batch 0 mosaics and labels\n", "\n", - "> \n", + "> \n", "`test_batch0_labels.jpg` shows val batch 0 labels\n", "\n", - "> \n", + "> \n", "`test_batch0_pred.jpg` shows val batch 0 _predictions_\n", "\n", "Training results are automatically logged to [Tensorboard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) as `results.csv`, which is plotted as `results.png` (below) after training completes. 
You can also plot any `results.csv` file manually:\n", From b894e69dfc341fcbfe4a307a15d6af90d90367df Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 29 Aug 2021 18:05:49 +0200 Subject: [PATCH 0510/1976] Update mosaic plots font size (#4596) --- utils/plots.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/plots.py b/utils/plots.py index 99c8cc2f7044..ddfdb42201ee 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -162,7 +162,7 @@ def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h))) # Annotate - fs = int(h * ns * 0.02) # font size + fs = int((h + w) * ns * 0.01) # font size annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs) for i in range(i + 1): x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin From dc13820c9d561bf112e773795cd75d7c40dbbff7 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 30 Aug 2021 13:01:41 +0200 Subject: [PATCH 0511/1976] TensorBoard `on_train_end()` speed improvements (#4605) --- utils/loggers/__init__.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 3d67e9307b4c..16d0348d86f3 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -131,10 +131,9 @@ def on_train_end(self, last, best, plots, epoch): files = [(self.save_dir / f) for f in files if (self.save_dir / f).exists()] # filter if self.tb: - from PIL import Image - import numpy as np + import cv2 for f in files: - self.tb.add_image(f.stem, np.asarray(Image.open(f)), epoch, dataformats='HWC') + self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats='HWC') if self.wandb: self.wandb.log({"Results": [wandb.Image(str(f), caption=f.name) for f in files]}) From bb4da083d1b2d19fbe482ed91064498aa8f942e6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 30 Aug 2021 14:33:53 +0200 Subject: [PATCH 0512/1976] Created using Colaboratory --- tutorial.ipynb | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index d3388670f56c..38e8fd4389ea 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -457,9 +457,8 @@ "outputId": "8b728908-81ab-4861-edb0-4d0c46c439fb" }, "source": [ - "%rm -rf runs\n", "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images/\n", - "#Image(filename='runs/detect/exp/zidane.jpg', width=600)" + "Image(filename='runs/detect/exp/zidane.jpg', width=600)" ], "execution_count": null, "outputs": [ From e7d1842a983b0cd98ea22f2f2d5a2b362bd7ebfd Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 30 Aug 2021 14:58:22 +0200 Subject: [PATCH 0513/1976] Auto-download Arial.ttf on init (#4606) * Auto-download Arial.ttf on init * Fix ROOT --- utils/__init__.py | 16 ++++++++++++++++ utils/plots.py | 9 ++++----- 2 files changed, 20 insertions(+), 5 deletions(-) diff --git a/utils/__init__.py b/utils/__init__.py index e69de29bb2d1..649b288b3588 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -0,0 +1,16 @@ +from pathlib import Path + +import torch +from PIL import ImageFont + +FILE = Path(__file__).absolute() +ROOT = FILE.parents[1] # yolov5/ dir + +# Check YOLOv5 Annotator font +font = 'Arial.ttf' +try: + ImageFont.truetype(font) +except Exception as e: # download if missing + url = "https://ultralytics.com/assets/" + font + print(f'Downloading {url} to {ROOT / font}...') + torch.hub.download_url_to_file(url, str(ROOT / font)) diff --git a/utils/plots.py 
b/utils/plots.py index ddfdb42201ee..eae295e09bed 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -48,7 +48,7 @@ def hex2rgb(h): # rgb order (PIL) class Annotator: # YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=True): - assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to plot_on_box() input image.' + assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.' self.pil = pil if self.pil: # use PIL self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) @@ -57,11 +57,10 @@ def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=Tr f = font_size or max(round(s * 0.035), 12) try: self.font = ImageFont.truetype(font, size=f) - except Exception as e: # download TTF if missing - print(f'WARNING: Annotator font {font} not found: {e}') - url = "https://github.com/ultralytics/yolov5/releases/download/v1.0/" + font + except Exception as e: # download if missing + url = "https://ultralytics.com/assets/" + font + print(f'Downloading {url} to {font}...') torch.hub.download_url_to_file(url, font) - print(f'Annotator font successfully downloaded from {url} to {font}') self.font = ImageFont.truetype(font, size=f) self.fh = self.font.getsize('a')[1] - 3 # font height else: # use cv2 From 35fe03146187cd8b1c09dd3e72ae678bb9ec5b86 Mon Sep 17 00:00:00 2001 From: Yukun Xia Date: Mon, 30 Aug 2021 09:46:33 -0400 Subject: [PATCH 0514/1976] Fix: add P2 layer 21 to yolov5-p2.yaml `Detect()` inputs (#4608) Layer 21 includes the information of xsmall objects --- models/hub/yolov5-p2.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/hub/yolov5-p2.yaml b/models/hub/yolov5-p2.yaml index 44d8da55dafb..759e9f92fb29 100644 --- a/models/hub/yolov5-p2.yaml +++ b/models/hub/yolov5-p2.yaml @@ -50,5 +50,5 @@ head: [[-1, 10], 1, Concat, [1]], # cat head P5 [-1, 3, C3, [1024, False]], # 30 (P5/32-large) - [[24, 27, 30], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + [[21, 24, 27, 30], 1, Detect, [nc, anchors]], # Detect(P2, P3, P4, P5) ] From 3a72d4a7e33e55a3a505832eb44fd7f7b630fffc Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 30 Aug 2021 17:05:45 +0200 Subject: [PATCH 0515/1976] Update `check_git_status()` warning (#4610) --- utils/general.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/utils/general.py b/utils/general.py index ba1e4f58cd86..cc316cd103aa 100755 --- a/utils/general.py +++ b/utils/general.py @@ -162,8 +162,7 @@ def check_git_status(): branch = check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out n = int(check_output(f'git rev-list {branch}..origin/master --count', shell=True)) # commits behind if n > 0: - s = f"⚠️ WARNING: code is out of date by {n} commit{'s' * (n > 1)}. " \ - f"Use 'git pull' to update or 'git clone {url}' to download latest." + s = f"⚠️ YOLOv5 is out of date by {n} commit{'s' * (n > 1)}. Use `git pull` or `git clone {url}` to update." 
else: s = f'up to date with {url} ✅' print(emojis(s)) # emoji-safe From 11e7c7b48d7e94a45c0bf46d35efa7da1581f8e9 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Mon, 30 Aug 2021 20:37:20 +0530 Subject: [PATCH 0516/1976] W&B: Don't log models in evolve operation (#4611) --- utils/loggers/__init__.py | 12 ++++++++---- utils/loggers/wandb/wandb_utils.py | 2 +- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 16d0348d86f3..775803abf068 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -138,7 +138,11 @@ def on_train_end(self, last, best, plots, epoch): if self.wandb: self.wandb.log({"Results": [wandb.Image(str(f), caption=f.name) for f in files]}) # Calling wandb.log. TODO: Refactor this into WandbLogger.log_model - wandb.log_artifact(str(best if best.exists() else last), type='model', - name='run_' + self.wandb.wandb_run.id + '_model', - aliases=['latest', 'best', 'stripped']) - self.wandb.finish_run() + if not self.opt.evolve: + wandb.log_artifact(str(best if best.exists() else last), type='model', + name='run_' + self.wandb.wandb_run.id + '_model', + aliases=['latest', 'best', 'stripped']) + self.wandb.finish_run() + else: + self.wandb.finish_run() + self.wandb = WandbLogger(self.opt) diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index 8b2095afcb8b..5d495c70517b 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -112,7 +112,7 @@ class WandbLogger(): https://docs.wandb.com/guides/integrations/yolov5 """ - def __init__(self, opt, run_id, job_type='Training'): + def __init__(self, opt, run_id=None, job_type='Training'): """ - Initialize WandbLogger instance - Upload dataset if opt.upload_dataset is True From bb5ebc290e5d630a081d7cbc5a9725ed8cea0a24 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 30 Aug 2021 17:22:21 +0200 Subject: [PATCH 0517/1976] Close `matplotlib` plots after opening (#4612) * Close plots * Replace fig.close() for plt.close() --- utils/metrics.py | 3 +++ utils/plots.py | 4 +++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/utils/metrics.py b/utils/metrics.py index 44b9a3c16488..4f1b5e2d2c2d 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -178,6 +178,7 @@ def plot(self, normalize=True, save_dir='', names=()): fig.axes[0].set_xlabel('True') fig.axes[0].set_ylabel('Predicted') fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250) + plt.close() except Exception as e: print(f'WARNING: ConfusionMatrix plot failure: {e}') @@ -308,6 +309,7 @@ def plot_pr_curve(px, py, ap, save_dir='pr_curve.png', names=()): ax.set_ylim(0, 1) plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left") fig.savefig(Path(save_dir), dpi=250) + plt.close() def plot_mc_curve(px, py, save_dir='mc_curve.png', names=(), xlabel='Confidence', ylabel='Metric'): @@ -328,3 +330,4 @@ def plot_mc_curve(px, py, save_dir='mc_curve.png', names=(), xlabel='Confidence' ax.set_ylim(0, 1) plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left") fig.savefig(Path(save_dir), dpi=250) + plt.close() diff --git a/utils/plots.py b/utils/plots.py index eae295e09bed..e470329f5473 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -345,7 +345,6 @@ def profile_idetection(start=0, stop=0, labels=(), save_dir=''): a.remove() except Exception as e: print('Warning: Plotting error for %s; %s' % (f, e)) - ax[1].legend() plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200) @@ -371,6 +370,7 @@ def 
plot_evolve(evolve_csv=Path('path/to/evolve.csv')): # from utils.plots impo print('%15s: %.3g' % (k, mu)) f = evolve_csv.with_suffix('.png') # filename plt.savefig(f, dpi=200) + plt.close() print(f'Saved {f}') @@ -397,6 +397,7 @@ def plot_results(file='path/to/results.csv', dir=''): print(f'Warning: Plotting error for {f}: {e}') ax[1].legend() fig.savefig(save_dir / 'results.png', dpi=200) + plt.close() def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')): @@ -423,3 +424,4 @@ def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detec print(f'Saving {save_dir / f}... ({n}/{channels})') plt.savefig(save_dir / f, dpi=300, bbox_inches='tight') + plt.close() From 50a9828679d075772a0875a5b2488fb9febb1082 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 30 Aug 2021 18:35:07 +0200 Subject: [PATCH 0518/1976] DDP `torch.jit.trace()` `--sync-bn` fix (#4615) * Remove assert * debug0 * trace=not opt.sync * sync to sync_bn fix * Cleanup --- train.py | 3 +-- utils/loggers/__init__.py | 9 +++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/train.py b/train.py index 2fe38ef043d0..36492edb8f0b 100644 --- a/train.py +++ b/train.py @@ -333,7 +333,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G' # (GB) pbar.set_description(('%10s' * 2 + '%10.4g' * 5) % ( f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1])) - callbacks.on_train_batch_end(ni, model, imgs, targets, paths, plots) + callbacks.on_train_batch_end(ni, model, imgs, targets, paths, plots, opt.sync_bn) # end batch ------------------------------------------------------------------------------------------------ # Scheduler @@ -499,7 +499,6 @@ def main(opt): assert opt.batch_size % WORLD_SIZE == 0, '--batch-size must be multiple of CUDA device count' assert not opt.image_weights, '--image-weights argument is not compatible with DDP training' assert not opt.evolve, '--evolve argument is not compatible with DDP training' - assert not opt.sync_bn, '--sync-bn known training issue, see https://github.com/ultralytics/yolov5/issues/3998' torch.cuda.set_device(LOCAL_RANK) device = torch.device('cuda', LOCAL_RANK) dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 775803abf068..0750be6c8828 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -69,13 +69,14 @@ def on_pretrain_routine_end(self): if self.wandb: self.wandb.log({"Labels": [wandb.Image(str(x), caption=x.name) for x in paths]}) - def on_train_batch_end(self, ni, model, imgs, targets, paths, plots): + def on_train_batch_end(self, ni, model, imgs, targets, paths, plots, sync_bn): # Callback runs on train batch end if plots: if ni == 0: - with warnings.catch_warnings(): - warnings.simplefilter('ignore') # suppress jit trace warning - self.tb.add_graph(torch.jit.trace(de_parallel(model), imgs[0:1], strict=False), []) + if not sync_bn: # tb.add_graph() --sync known issue https://github.com/ultralytics/yolov5/issues/3754 + with warnings.catch_warnings(): + warnings.simplefilter('ignore') # suppress jit trace warning + self.tb.add_graph(torch.jit.trace(de_parallel(model), imgs[0:1], strict=False), []) if ni < 3: f = self.save_dir / f'train_batch{ni}.jpg' # filename Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start() From ba0f80874fc5c515fa31a3b0d384a65dd2efdce0 Mon Sep 
17 00:00:00 2001 From: Glenn Jocher Date: Tue, 31 Aug 2021 15:01:41 +0200 Subject: [PATCH 0519/1976] Fix for Arial.ttf redownloads with hub inference (#4627) --- utils/__init__.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/utils/__init__.py b/utils/__init__.py index 649b288b3588..2af1466f1f1d 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -1,3 +1,4 @@ +import sys from pathlib import Path import torch @@ -5,6 +6,8 @@ FILE = Path(__file__).absolute() ROOT = FILE.parents[1] # yolov5/ dir +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH # Check YOLOv5 Annotator font font = 'Arial.ttf' From a4e8f78c5eba7500ba36f70c805ce76de5b4b0a9 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 31 Aug 2021 20:05:17 +0200 Subject: [PATCH 0520/1976] Fix 2 for Arial.ttf redownloads with hub inference (#4628) --- utils/__init__.py | 38 +++++++++++++++++++------------------- utils/plots.py | 11 ++++++++--- 2 files changed, 27 insertions(+), 22 deletions(-) diff --git a/utils/__init__.py b/utils/__init__.py index 2af1466f1f1d..4a61057e8083 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -1,19 +1,19 @@ -import sys -from pathlib import Path - -import torch -from PIL import ImageFont - -FILE = Path(__file__).absolute() -ROOT = FILE.parents[1] # yolov5/ dir -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH - -# Check YOLOv5 Annotator font -font = 'Arial.ttf' -try: - ImageFont.truetype(font) -except Exception as e: # download if missing - url = "https://ultralytics.com/assets/" + font - print(f'Downloading {url} to {ROOT / font}...') - torch.hub.download_url_to_file(url, str(ROOT / font)) +# import sys +# from pathlib import Path +# +# import torch +# from PIL import ImageFont +# +# FILE = Path(__file__).absolute() +# ROOT = FILE.parents[1] # yolov5/ dir +# if str(ROOT) not in sys.path: +# sys.path.append(str(ROOT)) # add ROOT to PATH +# +# # Check YOLOv5 Annotator font +# font = 'Arial.ttf' +# try: +# ImageFont.truetype(font) +# except Exception as e: # download if missing +# url = "https://ultralytics.com/assets/" + font +# print(f'Downloading {url} to {ROOT / font}...') +# torch.hub.download_url_to_file(url, str(ROOT / font)) diff --git a/utils/plots.py b/utils/plots.py index e470329f5473..9e14e765a647 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -23,6 +23,9 @@ matplotlib.rc('font', **{'size': 11}) matplotlib.use('Agg') # for writing to files only +FILE = Path(__file__).absolute() +ROOT = FILE.parents[1] # yolov5/ dir + class Colors: # Ultralytics color palette https://ultralytics.com/ @@ -55,12 +58,14 @@ def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=Tr self.draw = ImageDraw.Draw(self.im) s = sum(self.im.size) / 2 # mean shape f = font_size or max(round(s * 0.035), 12) + font = Path(font) # font handling + font = font if font.exists() else (ROOT / font.name) try: - self.font = ImageFont.truetype(font, size=f) + self.font = ImageFont.truetype(str(font) if font.exists() else font.name, size=f) except Exception as e: # download if missing - url = "https://ultralytics.com/assets/" + font + url = "https://ultralytics.com/assets/" + font.name print(f'Downloading {url} to {font}...') - torch.hub.download_url_to_file(url, font) + torch.hub.download_url_to_file(url, str(font)) self.font = ImageFont.truetype(font, size=f) self.fh = self.font.getsize('a')[1] - 3 # font height else: # use cv2 From de534e922120b2da876e8214b976af1f82019e28 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 31 
Aug 2021 20:54:10 +0200 Subject: [PATCH 0521/1976] Fix 3 for Arial.ttf redownloads with hub inference (#4629) Fix 3 for Arial.ttf redownloads with hub inference, follow-on to #4628. --- utils/plots.py | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/utils/plots.py b/utils/plots.py index 9e14e765a647..fd120b1d427f 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -48,7 +48,22 @@ def hex2rgb(h): # rgb order (PIL) colors = Colors() # create instance for 'from utils.plots import colors' +def check_font(font='Arial.ttf', size=10): + # Return a PIL TrueType Font, downloading to ROOT dir if necessary + font = Path(font) + font = font if font.exists() else (ROOT / font.name) + try: + return ImageFont.truetype(str(font) if font.exists() else font.name, size) + except Exception as e: # download if missing + url = "https://ultralytics.com/assets/" + font.name + print(f'Downloading {url} to {font}...') + torch.hub.download_url_to_file(url, str(font)) + return ImageFont.truetype(str(font), size) + + class Annotator: + check_font() # download TTF if necessary + # YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=True): assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.' @@ -56,22 +71,11 @@ def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=Tr if self.pil: # use PIL self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) self.draw = ImageDraw.Draw(self.im) - s = sum(self.im.size) / 2 # mean shape - f = font_size or max(round(s * 0.035), 12) - font = Path(font) # font handling - font = font if font.exists() else (ROOT / font.name) - try: - self.font = ImageFont.truetype(str(font) if font.exists() else font.name, size=f) - except Exception as e: # download if missing - url = "https://ultralytics.com/assets/" + font.name - print(f'Downloading {url} to {font}...') - torch.hub.download_url_to_file(url, str(font)) - self.font = ImageFont.truetype(font, size=f) + self.font = check_font(font, size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12)) self.fh = self.font.getsize('a')[1] - 3 # font height else: # use cv2 self.im = im - s = sum(im.shape) / 2 # mean shape - self.lw = line_width or max(round(s * 0.003), 2) # line width + self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2) # line width def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)): # Add one xyxy box to image with label From 234e8ae6fabc827ef41aec42d05aceedaf228ebc Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 1 Sep 2021 15:00:13 +0200 Subject: [PATCH 0522/1976] Fix for `plot_evolve()` string argument (#4639) --- utils/plots.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/plots.py b/utils/plots.py index fd120b1d427f..d8a561a71dcf 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -358,8 +358,9 @@ def profile_idetection(start=0, stop=0, labels=(), save_dir=''): plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200) -def plot_evolve(evolve_csv=Path('path/to/evolve.csv')): # from utils.plots import *; plot_evolve() +def plot_evolve(evolve_csv='path/to/evolve.csv'): # from utils.plots import *; plot_evolve() # Plot evolve.csv hyp evolution results + evolve_csv = Path(evolve_csv) data = pd.read_csv(evolve_csv) keys = [x.strip() for x in data.columns] x = data.values From 
fad57c29cd27c0fcbc0038b7b7312b9b6ef922a8 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 1 Sep 2021 16:30:14 +0200 Subject: [PATCH 0523/1976] Fix `is_coco` on missing `data['val']` key (#4642) --- val.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/val.py b/val.py index cbee8cf1c026..1aa37d12dfac 100644 --- a/val.py +++ b/val.py @@ -134,7 +134,7 @@ def run(data, # Configure model.eval() - is_coco = type(data['val']) is str and data['val'].endswith('coco/val2017.txt') # COCO dataset + is_coco = isinstance(data.get('val'), str) and data['val'].endswith('coco/val2017.txt') # COCO dataset nc = 1 if single_cls else int(data['nc']) # number of classes iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95 niou = iouv.numel() From f64fab58251c58d8aac2772dbc005d569bf72edc Mon Sep 17 00:00:00 2001 From: imyhxy Date: Sun, 5 Sep 2021 23:43:09 +0800 Subject: [PATCH 0524/1976] Fixed 'meta' and 'hyp' may out of order when using evolve (#4657) * Fixed 'meta' and 'hyp' may out of order when using evolve * Update gitignore --- .gitignore | 2 +- train.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.gitignore b/.gitignore index e5d02af960af..9c270c7dabe7 100755 --- a/.gitignore +++ b/.gitignore @@ -76,7 +76,7 @@ sdist/ var/ wheels/ *.egg-info/ -wandb/ +/wandb/ .installed.cfg *.egg diff --git a/train.py b/train.py index 36492edb8f0b..89f86401c187 100644 --- a/train.py +++ b/train.py @@ -570,7 +570,7 @@ def main(opt): mp, s = 0.8, 0.2 # mutation probability, sigma npr = np.random npr.seed(int(time.time())) - g = np.array([x[0] for x in meta.values()]) # gains 0-1 + g = np.array([meta[k][0] for k in hyp.keys()]) # gains 0-1 ng = len(meta) v = np.ones(ng) while all(v == 1): # mutate until a change occurs (prevent duplicates) From 548745181a4d30db3d4fe81a952ca6dbb54c7578 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 5 Sep 2021 19:09:53 +0200 Subject: [PATCH 0525/1976] EarlyStopper updates (#4679) --- train.py | 6 +++--- utils/torch_utils.py | 7 +++++-- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/train.py b/train.py index 89f86401c187..72aee2cb8883 100644 --- a/train.py +++ b/train.py @@ -344,7 +344,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # mAP callbacks.on_train_epoch_end(epoch=epoch) ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride', 'class_weights']) - final_epoch = epoch + 1 == epochs + final_epoch = (epoch + 1 == epochs) or stopper.possible_stop if not noval or final_epoch: # Calculate mAP results, maps, _ = val.run(data_dict, batch_size=batch_size // WORLD_SIZE * 2, @@ -384,7 +384,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary callbacks.on_model_save(last, epoch, final_epoch, best_fitness, fi) # Stop Single-GPU - if stopper(epoch=epoch, fitness=fi): + if RANK == -1 and stopper(epoch=epoch, fitness=fi): break # Stop DDP TODO: known issues https://github.com/ultralytics/yolov5/pull/4576 @@ -462,7 +462,7 @@ def parse_opt(known=False): parser.add_argument('--artifact_alias', type=str, default="latest", help='version of dataset artifact to be used') parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify') parser.add_argument('--freeze', type=int, default=0, help='Number of layers to freeze. 
backbone=10, all=24') - parser.add_argument('--patience', type=int, default=30, help='EarlyStopping patience (epochs)') + parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)') opt = parser.parse_known_args()[0] if known else parser.parse_args() return opt diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 2e153921eb10..04e1446bb908 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -298,13 +298,16 @@ class EarlyStopping: def __init__(self, patience=30): self.best_fitness = 0.0 # i.e. mAP self.best_epoch = 0 - self.patience = patience # epochs to wait after fitness stops improving to stop + self.patience = patience or float('inf') # epochs to wait after fitness stops improving to stop + self.possible_stop = False # possible stop may occur next epoch def __call__(self, epoch, fitness): if fitness >= self.best_fitness: # >= 0 to allow for early zero-fitness stage of training self.best_epoch = epoch self.best_fitness = fitness - stop = (epoch - self.best_epoch) >= self.patience # stop training if patience exceeded + delta = epoch - self.best_epoch # epochs without improvement + self.possible_stop = delta >= (self.patience - 1) # possible stop may occur next epoch + stop = delta >= self.patience # stop training if patience exceeded if stop: LOGGER.info(f'EarlyStopping patience {self.patience} exceeded, stopping training.') return stop From 2317f86ca4ee140ed6a50fc0cc9857383f755ecd Mon Sep 17 00:00:00 2001 From: Kalen Michael Date: Tue, 7 Sep 2021 18:32:15 +0200 Subject: [PATCH 0526/1976] Optimised Callback Class to Reduce Code and Fix Errors (#4688) * added callbacks * added back callback to main * added save_dir to callback output * reduced code count * updated callbacks * added default callback class to main, added missing parameters to on_model_save * Glenn updates Co-authored-by: Glenn Jocher --- train.py | 20 ++++---- utils/callbacks.py | 123 ++++----------------------------------------- val.py | 4 +- 3 files changed, 22 insertions(+), 125 deletions(-) diff --git a/train.py b/train.py index 72aee2cb8883..f9aa3d4b5f69 100644 --- a/train.py +++ b/train.py @@ -56,7 +56,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary opt, device, - callbacks=Callbacks() + callbacks ): save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze, = \ Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \ @@ -231,7 +231,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz) model.half().float() # pre-reduce anchor precision - callbacks.on_pretrain_routine_end() + callbacks.run('on_pretrain_routine_end') # DDP mode if cuda and RANK != -1: @@ -333,7 +333,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G' # (GB) pbar.set_description(('%10s' * 2 + '%10.4g' * 5) % ( f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1])) - callbacks.on_train_batch_end(ni, model, imgs, targets, paths, plots, opt.sync_bn) + callbacks.run('on_train_batch_end', ni, model, imgs, targets, paths, plots, opt.sync_bn) # end batch ------------------------------------------------------------------------------------------------ # Scheduler @@ -342,7 +342,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary if RANK in [-1, 0]: # mAP - 
callbacks.on_train_epoch_end(epoch=epoch) + callbacks.run('on_train_epoch_end', epoch=epoch) ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride', 'class_weights']) final_epoch = (epoch + 1 == epochs) or stopper.possible_stop if not noval or final_epoch: # Calculate mAP @@ -364,7 +364,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary if fi > best_fitness: best_fitness = fi log_vals = list(mloss) + list(results) + lr - callbacks.on_fit_epoch_end(log_vals, epoch, best_fitness, fi) + callbacks.run('on_fit_epoch_end', log_vals, epoch, best_fitness, fi) # Save model if (not nosave) or (final_epoch and not evolve): # if save @@ -381,7 +381,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary if best_fitness == fi: torch.save(ckpt, best) del ckpt - callbacks.on_model_save(last, epoch, final_epoch, best_fitness, fi) + callbacks.run('on_model_save', last, epoch, final_epoch, best_fitness, fi) # Stop Single-GPU if RANK == -1 and stopper(epoch=epoch, fitness=fi): @@ -418,7 +418,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary for f in last, best: if f.exists(): strip_optimizer(f) # strip optimizers - callbacks.on_train_end(last, best, plots, epoch) + callbacks.run('on_train_end', last, best, plots, epoch) LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}") torch.cuda.empty_cache() @@ -467,7 +467,7 @@ def parse_opt(known=False): return opt -def main(opt): +def main(opt, callbacks=Callbacks()): # Checks set_logging(RANK) if RANK in [-1, 0]: @@ -505,7 +505,7 @@ def main(opt): # Train if not opt.evolve: - train(opt.hyp, opt, device) + train(opt.hyp, opt, device, callbacks) if WORLD_SIZE > 1 and RANK == 0: _ = [print('Destroying process group... ', end=''), dist.destroy_process_group(), print('Done.')] @@ -585,7 +585,7 @@ def main(opt): hyp[k] = round(hyp[k], 5) # significant digits # Train mutation - results = train(hyp.copy(), opt, device) + results = train(hyp.copy(), opt, device, callbacks) # Write mutation results print_mutation(results, hyp.copy(), save_dir, opt.bucket) diff --git a/utils/callbacks.py b/utils/callbacks.py index 19c334430b5d..327b8639b60c 100644 --- a/utils/callbacks.py +++ b/utils/callbacks.py @@ -9,6 +9,7 @@ class Callbacks: Handles all registered callbacks for YOLOv5 Hooks """ + # Define the available callbacks _callbacks = { 'on_pretrain_routine_start': [], 'on_pretrain_routine_end': [], @@ -34,16 +35,13 @@ class Callbacks: 'teardown': [], } - def __init__(self): - return - def register_action(self, hook, name='', callback=None): """ Register a new action to a callback hook Args: hook The callback hook name to register the action to - name The name of the action + name The name of the action for later reference callback The callback to fire """ assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}" @@ -62,118 +60,17 @@ def get_registered_actions(self, hook=None): else: return self._callbacks - def run_callbacks(self, hook, *args, **kwargs): + def run(self, hook, *args, **kwargs): """ Loop through the registered actions and fire all callbacks - """ - for logger in self._callbacks[hook]: - # print(f"Running callbacks.{logger['callback'].__name__}()") - logger['callback'](*args, **kwargs) - - def on_pretrain_routine_start(self, *args, **kwargs): - """ - Fires all registered callbacks at the start of each pretraining routine - """ - self.run_callbacks('on_pretrain_routine_start', *args, **kwargs) - - def on_pretrain_routine_end(self, *args, **kwargs): - """ - Fires all registered callbacks at the end of 
each pretraining routine
-        """
-        self.run_callbacks('on_pretrain_routine_end', *args, **kwargs)
-
-    def on_train_start(self, *args, **kwargs):
-        """
-        Fires all registered callbacks at the start of each training
-        """
-        self.run_callbacks('on_train_start', *args, **kwargs)
-
-    def on_train_epoch_start(self, *args, **kwargs):
-        """
-        Fires all registered callbacks at the start of each training epoch
-        """
-        self.run_callbacks('on_train_epoch_start', *args, **kwargs)
-
-    def on_train_batch_start(self, *args, **kwargs):
-        """
-        Fires all registered callbacks at the start of each training batch
-        """
-        self.run_callbacks('on_train_batch_start', *args, **kwargs)
-
-    def optimizer_step(self, *args, **kwargs):
-        """
-        Fires all registered callbacks on each optimizer step
-        """
-        self.run_callbacks('optimizer_step', *args, **kwargs)
-
-    def on_before_zero_grad(self, *args, **kwargs):
-        """
-        Fires all registered callbacks before zero grad
-        """
-        self.run_callbacks('on_before_zero_grad', *args, **kwargs)
-
-    def on_train_batch_end(self, *args, **kwargs):
-        """
-        Fires all registered callbacks at the end of each training batch
-        """
-        self.run_callbacks('on_train_batch_end', *args, **kwargs)
-
-    def on_train_epoch_end(self, *args, **kwargs):
-        """
-        Fires all registered callbacks at the end of each training epoch
-        """
-        self.run_callbacks('on_train_epoch_end', *args, **kwargs)
-
-    def on_val_start(self, *args, **kwargs):
-        """
-        Fires all registered callbacks at the start of the validation
-        """
-        self.run_callbacks('on_val_start', *args, **kwargs)
-
-    def on_val_batch_start(self, *args, **kwargs):
-        """
-        Fires all registered callbacks at the start of each validation batch
-        """
-        self.run_callbacks('on_val_batch_start', *args, **kwargs)
-
-    def on_val_image_end(self, *args, **kwargs):
-        """
-        Fires all registered callbacks at the end of each val image
-        """
-        self.run_callbacks('on_val_image_end', *args, **kwargs)
-
-    def on_val_batch_end(self, *args, **kwargs):
-        """
-        Fires all registered callbacks at the end of each validation batch
-        """
-        self.run_callbacks('on_val_batch_end', *args, **kwargs)
-
-    def on_val_end(self, *args, **kwargs):
-        """
-        Fires all registered callbacks at the end of the validation
-        """
-        self.run_callbacks('on_val_end', *args, **kwargs)
-
-    def on_fit_epoch_end(self, *args, **kwargs):
-        """
-        Fires all registered callbacks at the end of each fit (train+val) epoch
-        """
-        self.run_callbacks('on_fit_epoch_end', *args, **kwargs)
-
-    def on_model_save(self, *args, **kwargs):
-        """
-        Fires all registered callbacks after each model save
+        Args:
+            hook The name of the hook to fire the registered callbacks for
+            args Arguments to receive from YOLOv5
+            kwargs Keyword Arguments to receive from YOLOv5
         """
-        self.run_callbacks('on_model_save', *args, **kwargs)

-    def on_train_end(self, *args, **kwargs):
-        """
-        Fires all registered callbacks at the end of training
-        """
-        self.run_callbacks('on_train_end', *args, **kwargs)
+        assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}"

-    def teardown(self, *args, **kwargs):
-        """
-        Fires all registered callbacks before teardown
-        """
-        self.run_callbacks('teardown', *args, **kwargs)
+        for logger in self._callbacks[hook]:
+            logger['callback'](*args, **kwargs)
diff --git a/val.py b/val.py
index 1aa37d12dfac..947cd78f7e1f 100644
--- a/val.py
+++ b/val.py
@@ -216,7 +216,7 @@ def run(data,
                 save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / (path.stem + '.txt'))
             if save_json:
                 save_one_json(predn, jdict,
path, class_map) # append to COCO-JSON dictionary - callbacks.on_val_image_end(pred, predn, path, names, img[si]) + callbacks.run('on_val_image_end', pred, predn, path, names, img[si]) # Plot images if plots and batch_i < 3: @@ -253,7 +253,7 @@ def run(data, # Plots if plots: confusion_matrix.plot(save_dir=save_dir, names=list(names.values())) - callbacks.on_val_end() + callbacks.run('on_val_end') # Save JSON if save_json and len(jdict): From 8e5f9ddbdb3375d00483db1616ce891886072055 Mon Sep 17 00:00:00 2001 From: Zhiqiang Wang Date: Wed, 8 Sep 2021 18:48:33 +0800 Subject: [PATCH 0527/1976] Remove redundant `ComputeLoss` code (#4701) --- utils/loss.py | 1 - 1 file changed, 1 deletion(-) diff --git a/utils/loss.py b/utils/loss.py index 29aac3191c10..fac432d0edc3 100644 --- a/utils/loss.py +++ b/utils/loss.py @@ -91,7 +91,6 @@ def forward(self, pred, true): class ComputeLoss: # Compute losses def __init__(self, model, autobalance=False): - super(ComputeLoss, self).__init__() self.sort_obj_iou = False device = next(model.parameters()).device # get model device h = model.hyp # hyperparameters From a2b3c71636c41141c244ec43f70adbd7387b15d6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 8 Sep 2021 14:36:12 +0200 Subject: [PATCH 0528/1976] Add suffix checks (#4711) * Add suffix checks * Cleanup * Cleanup2 * Cleanup3 --- detect.py | 10 ++++++---- models/tf.py | 6 +++--- models/yolo.py | 8 ++++---- train.py | 7 ++++--- utils/datasets.py | 6 +++--- utils/general.py | 17 ++++++++++++++++- val.py | 8 +++++--- 7 files changed, 41 insertions(+), 21 deletions(-) diff --git a/detect.py b/detect.py index 0b1d93897d4c..8acd5bf71593 100644 --- a/detect.py +++ b/detect.py @@ -21,8 +21,9 @@ from models.experimental import attempt_load from utils.datasets import LoadStreams, LoadImages -from utils.general import check_img_size, check_requirements, check_imshow, colorstr, is_ascii, non_max_suppression, \ - apply_classifier, scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path, save_one_box +from utils.general import check_img_size, check_imshow, check_requirements, check_suffix, colorstr, is_ascii, \ + non_max_suppression, apply_classifier, scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path, \ + save_one_box from utils.plots import Annotator, colors from utils.torch_utils import select_device, load_classifier, time_sync @@ -68,8 +69,9 @@ def run(weights='yolov5s.pt', # model.pt path(s) # Load model w = weights[0] if isinstance(weights, list) else weights - classify, suffix = False, Path(w).suffix.lower() - pt, onnx, tflite, pb, saved_model = (suffix == x for x in ['.pt', '.onnx', '.tflite', '.pb', '']) # backend + classify, suffix, suffixes = False, Path(w).suffix.lower(), ['.pt', '.onnx', '.tflite', '.pb', ''] + check_suffix(w, suffixes) # check weights have acceptable suffix + pt, onnx, tflite, pb, saved_model = (suffix == x for x in suffixes) # backend booleans stride, names = 64, [f'class{i}' for i in range(1000)] # assign defaults if pt: model = attempt_load(weights, map_location=device) # load FP32 model diff --git a/models/tf.py b/models/tf.py index 40e7d20a9d84..d6d0f26210b2 100644 --- a/models/tf.py +++ b/models/tf.py @@ -53,7 +53,7 @@ from models.experimental import MixConv2d, CrossConv, attempt_load from models.yolo import Detect from utils.datasets import LoadImages -from utils.general import make_divisible, check_file, check_dataset +from utils.general import check_dataset, check_yaml, make_divisible logger = logging.getLogger(__name__) @@ -447,7 +447,7 @@ 
def representative_dataset_gen(): parser.add_argument('--iou-thres', type=float, default=0.5, help='IOU threshold for NMS') parser.add_argument('--score-thres', type=float, default=0.4, help='score threshold for NMS') opt = parser.parse_args() - opt.cfg = check_file(opt.cfg) # check file + opt.cfg = check_yaml(opt.cfg) # check YAML opt.img_size *= 2 if len(opt.img_size) == 1 else 1 # expand print(opt) @@ -534,7 +534,7 @@ def representative_dataset_gen(): if opt.tfl_int8: # Representative Dataset if opt.source.endswith('.yaml'): - with open(check_file(opt.source)) as f: + with open(check_yaml(opt.source)) as f: data = yaml.load(f, Loader=yaml.FullLoader) # data dict check_dataset(data) # check opt.source = data['train'] diff --git a/models/yolo.py b/models/yolo.py index 8618401b3455..25118a92bb2d 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -17,10 +17,10 @@ from models.common import * from models.experimental import * from utils.autoanchor import check_anchor_order -from utils.general import make_divisible, check_file, set_logging +from utils.general import check_yaml, make_divisible, set_logging from utils.plots import feature_visualization -from utils.torch_utils import time_sync, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \ - select_device, copy_attr +from utils.torch_utils import copy_attr, fuse_conv_and_bn, initialize_weights, model_info, scale_img, \ + select_device, time_sync try: import thop # for FLOPs computation @@ -281,7 +281,7 @@ def parse_model(d, ch): # model_dict, input_channels(3) parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') parser.add_argument('--profile', action='store_true', help='profile model speed') opt = parser.parse_args() - opt.cfg = check_file(opt.cfg) # check file + opt.cfg = check_yaml(opt.cfg) # check YAML set_logging() device = select_device(opt.device) diff --git a/train.py b/train.py index f9aa3d4b5f69..c32664832d8b 100644 --- a/train.py +++ b/train.py @@ -35,8 +35,8 @@ from utils.autoanchor import check_anchors from utils.datasets import create_dataloader from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \ - strip_optimizer, get_latest_run, check_dataset, check_file, check_git_status, check_img_size, \ - check_requirements, print_mutation, set_logging, one_cycle, colorstr, methods + strip_optimizer, get_latest_run, check_dataset, check_git_status, check_img_size, check_requirements, \ + check_yaml, check_suffix, print_mutation, set_logging, one_cycle, colorstr, methods from utils.downloads import attempt_download from utils.loss import ComputeLoss from utils.plots import plot_labels, plot_evolve @@ -484,7 +484,8 @@ def main(opt, callbacks=Callbacks()): opt.cfg, opt.weights, opt.resume = '', ckpt, True # reinstate LOGGER.info(f'Resuming training from {ckpt}') else: - opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp) # check files + check_suffix(opt.weights, '.pt') # check weights + opt.data, opt.cfg, opt.hyp = check_yaml(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp) # check YAMLs assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified' if opt.evolve: opt.project = 'runs/evolve' diff --git a/utils/datasets.py b/utils/datasets.py index 852bb7c04aa8..0c6b9b5e2893 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -26,8 +26,8 @@ from tqdm import tqdm from utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, 
random_perspective -from utils.general import check_requirements, check_file, check_dataset, xywh2xyxy, xywhn2xyxy, xyxy2xywhn, \ - xyn2xy, segments2boxes, clean_str +from utils.general import check_dataset, check_requirements, check_yaml, clean_str, segments2boxes, \ + xywh2xyxy, xywhn2xyxy, xyxy2xywhn, xyn2xy from utils.torch_utils import torch_distributed_zero_first # Parameters @@ -938,7 +938,7 @@ def hub_ops(f, max_dim=1920): im.save(im_dir / Path(f).name, quality=75) # save zipped, data_dir, yaml_path = unzip(Path(path)) - with open(check_file(yaml_path), errors='ignore') as f: + with open(check_yaml(yaml_path), errors='ignore') as f: data = yaml.safe_load(f) # data dict if zipped: data['path'] = data_dir # TODO: should this be dir.resolve()? diff --git a/utils/general.py b/utils/general.py index cc316cd103aa..06c62daa32f1 100755 --- a/utils/general.py +++ b/utils/general.py @@ -242,8 +242,23 @@ def check_imshow(): return False -def check_file(file): +def check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''): + # Check file(s) for acceptable suffixes + if any(suffix): + if isinstance(suffix, str): + suffix = [suffix] + for f in file if isinstance(file, (list, tuple)) else [file]: + assert Path(f).suffix.lower() in suffix, f"{msg}{f} acceptable suffix is {suffix}" + + +def check_yaml(file, suffix=('.yaml', '.yml')): + # Check YAML file(s) for acceptable suffixes + return check_file(file, suffix) + + +def check_file(file, suffix=''): # Search/download file (if necessary) and return path + check_suffix(file, suffix) file = str(file) # convert to str() if Path(file).is_file() or file == '': # exists return file diff --git a/val.py b/val.py index 947cd78f7e1f..b7068e041e57 100644 --- a/val.py +++ b/val.py @@ -22,8 +22,9 @@ from models.experimental import attempt_load from utils.datasets import create_dataloader -from utils.general import coco80_to_coco91_class, check_dataset, check_file, check_img_size, check_requirements, \ - box_iou, non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, set_logging, increment_path, colorstr +from utils.general import coco80_to_coco91_class, check_dataset, check_img_size, check_requirements, \ + check_suffix, check_yaml, box_iou, non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, set_logging, \ + increment_path, colorstr from utils.metrics import ap_per_class, ConfusionMatrix from utils.plots import plot_images, output_to_target, plot_study_txt from utils.torch_utils import select_device, time_sync @@ -116,6 +117,7 @@ def run(data, (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model + check_suffix(weights, '.pt') model = attempt_load(weights, map_location=device) # load FP32 model gs = max(int(model.stride.max()), 32) # grid size (max stride) imgsz = check_img_size(imgsz, s=gs) # check image size @@ -316,7 +318,7 @@ def parse_opt(): opt = parser.parse_args() opt.save_json |= opt.data.endswith('coco.yaml') opt.save_txt |= opt.save_hybrid - opt.data = check_file(opt.data) # check file + opt.data = check_yaml(opt.data) # check YAML return opt From f984cce52a465f7377f2d9188e728496a83821af Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 8 Sep 2021 15:06:31 +0200 Subject: [PATCH 0529/1976] Fix `check_suffix()` (#4712) Fix a bug when `file=''` --- utils/general.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/general.py b/utils/general.py index 06c62daa32f1..2033f76126f8 100755 --- a/utils/general.py +++ b/utils/general.py @@ -244,7 +244,7 @@ def 
check_imshow(): def check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''): # Check file(s) for acceptable suffixes - if any(suffix): + if file and suffix: if isinstance(suffix, str): suffix = [suffix] for f in file if isinstance(file, (list, tuple)) else [file]: @@ -258,7 +258,7 @@ def check_yaml(file, suffix=('.yaml', '.yml')): def check_file(file, suffix=''): # Search/download file (if necessary) and return path - check_suffix(file, suffix) + check_suffix(file, suffix) # optional file = str(file) # convert to str() if Path(file).is_file() or file == '': # exists return file From 25a7e1dae59e75d2c401a49cd2c7b76a7cf07139 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 8 Sep 2021 16:01:03 +0200 Subject: [PATCH 0530/1976] Update `check_yaml()` comment (#4713) * Update `check_yaml()` comment * Cleanup --- utils/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index 2033f76126f8..f2af386a7d93 100755 --- a/utils/general.py +++ b/utils/general.py @@ -252,7 +252,7 @@ def check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''): def check_yaml(file, suffix=('.yaml', '.yml')): - # Check YAML file(s) for acceptable suffixes + # Search/download YAML file (if necessary) and return path, checking suffix return check_file(file, suffix) From 8e94bf62d9aa588982daec58e89dd2bb682a1f0e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 8 Sep 2021 18:13:59 +0200 Subject: [PATCH 0531/1976] Add `user_config_dir('Ultralytics')` (#4715) * Add `user_config_dir` * Linux to .config --- utils/general.py | 9 +++++++++ utils/plots.py | 10 ++++------ 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/utils/general.py b/utils/general.py index f2af386a7d93..06bf088582dc 100755 --- a/utils/general.py +++ b/utils/general.py @@ -103,6 +103,15 @@ def get_latest_run(search_dir='.'): return max(last_list, key=os.path.getctime) if last_list else '' +def user_config_dir(dir='Ultralytics'): + # Return path of user configuration directory (make if necessary) + settings = {'Windows': 'AppData/Roaming', 'Linux': '.config', 'Darwin': 'Library/Application Support'} + path = Path.home() / settings.get(platform.system(), '') / dir + if not path.is_dir(): + path.mkdir() # make dir if required + return path + + def is_docker(): # Is environment a Docker container? 
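    # (/workspace is provided by the NVIDIA base images the YOLOv5 Dockerfile builds on; /.dockerenv is the more general marker)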
return Path('/workspace').exists() # or Path('/.dockerenv').exists() diff --git a/utils/plots.py b/utils/plots.py index d8a561a71dcf..1ed88ea7c832 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -16,16 +16,14 @@ import torch from PIL import Image, ImageDraw, ImageFont -from utils.general import is_ascii, xyxy2xywh, xywh2xyxy +from utils.general import user_config_dir, is_ascii, xywh2xyxy, xyxy2xywh from utils.metrics import fitness # Settings +CONFIG_DIR = user_config_dir() # Ultralytics settings dir matplotlib.rc('font', **{'size': 11}) matplotlib.use('Agg') # for writing to files only -FILE = Path(__file__).absolute() -ROOT = FILE.parents[1] # yolov5/ dir - class Colors: # Ultralytics color palette https://ultralytics.com/ @@ -49,9 +47,9 @@ def hex2rgb(h): # rgb order (PIL) def check_font(font='Arial.ttf', size=10): - # Return a PIL TrueType Font, downloading to ROOT dir if necessary + # Return a PIL TrueType Font, downloading to CONFIG_DIR if necessary font = Path(font) - font = font if font.exists() else (ROOT / font.name) + font = font if font.exists() else (CONFIG_DIR / font.name) try: return ImageFont.truetype(str(font) if font.exists() else font.name, size) except Exception as e: # download if missing From 0d8a1842373e55f8f639adede0c3d378f1ffbea5 Mon Sep 17 00:00:00 2001 From: ELHoussineT Date: Wed, 8 Sep 2021 19:42:28 +0200 Subject: [PATCH 0532/1976] Add `crops = results.crops()` dictionary (#4676) * adding get cropped functionality * Add target logic in existing functions * Crops cleanup * Add dictionary keys: conf, cls, box * Bug fixes - avoid return after first image Co-authored-by: Glenn Jocher --- models/common.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/models/common.py b/models/common.py index 90bfef5124b3..e79b8a9d2644 100644 --- a/models/common.py +++ b/models/common.py @@ -365,6 +365,7 @@ def __init__(self, imgs, pred, files, times=None, names=None, shape=None): self.s = shape # inference BCHW shape def display(self, pprint=False, show=False, save=False, crop=False, render=False, save_dir=Path('')): + crops = [] for i, (im, pred) in enumerate(zip(self.imgs, self.pred)): str = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} ' if pred.shape[0]: @@ -376,7 +377,9 @@ def display(self, pprint=False, show=False, save=False, crop=False, render=False for *box, conf, cls in reversed(pred): # xyxy, confidence, class label = f'{self.names[int(cls)]} {conf:.2f}' if crop: - save_one_box(box, im, file=save_dir / 'crops' / self.names[int(cls)] / self.files[i]) + file = save_dir / 'crops' / self.names[int(cls)] / self.files[i] if save else None + crops.append({'box': box, 'conf': conf, 'cls': cls, 'label': label, + 'im': save_one_box(box, im, file=file, save=save)}) else: # all others annotator.box_label(box, label, color=colors(cls)) im = annotator.im @@ -395,6 +398,10 @@ def display(self, pprint=False, show=False, save=False, crop=False, render=False LOGGER.info(f"Saved {self.n} image{'s' * (self.n > 1)} to {colorstr('bold', save_dir)}") if render: self.imgs[i] = np.asarray(im) + if crop: + if save: + LOGGER.info(f'Saved results to {save_dir}\n') + return crops def print(self): self.display(pprint=True) # print results @@ -408,10 +415,9 @@ def save(self, save_dir='runs/detect/exp'): save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) # increment save_dir self.display(save=True, save_dir=save_dir) # save results - def crop(self, save_dir='runs/detect/exp'): - save_dir = 
increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) # increment save_dir - self.display(crop=True, save_dir=save_dir) # crop results - LOGGER.info(f'Saved results to {save_dir}\n') + def crop(self, save=True, save_dir='runs/detect/exp'): + save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) if save else None + return self.display(crop=True, save=save, save_dir=save_dir) # crop results def render(self): self.display(render=True) # render results From deb434aefad43be38aa0252bbdece501919108ea Mon Sep 17 00:00:00 2001 From: JD Costa Date: Thu, 9 Sep 2021 14:29:18 +0100 Subject: [PATCH 0533/1976] Make CONFIG_DIR configurable per environment variable (#4727) --- utils/plots.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/plots.py b/utils/plots.py index 1ed88ea7c832..141a9ac2b09e 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -4,6 +4,7 @@ """ import math +import os from copy import copy from pathlib import Path @@ -20,7 +21,7 @@ from utils.metrics import fitness # Settings -CONFIG_DIR = user_config_dir() # Ultralytics settings dir +CONFIG_DIR = Path(os.getenv('YOLOV5_CONFIG_DIR') or user_config_dir()) # Ultralytics settings dir matplotlib.rc('font', **{'size': 11}) matplotlib.use('Agg') # for writing to files only From 1cad0ce2c7d3fb0917b4e392be377b5a370c26ef Mon Sep 17 00:00:00 2001 From: Jean-Baptiste Martin Date: Thu, 9 Sep 2021 15:32:04 +0200 Subject: [PATCH 0534/1976] Allow `multi_label` option for NMS with PyTorch Hub (#4728) * Allow specifying multi_label option for NMS when using torch hub * Reformat Co-authored-by: Glenn Jocher --- models/common.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/models/common.py b/models/common.py index e79b8a9d2644..5305b03d5389 100644 --- a/models/common.py +++ b/models/common.py @@ -278,6 +278,7 @@ class AutoShape(nn.Module): conf = 0.25 # NMS confidence threshold iou = 0.45 # NMS IoU threshold classes = None # (optional list) filter by class + multi_label = False # NMS multiple labels per box max_det = 1000 # maximum number of detections per image def __init__(self, model): @@ -337,7 +338,8 @@ def forward(self, imgs, size=640, augment=False, profile=False): t.append(time_sync()) # Post-process - y = non_max_suppression(y, self.conf, iou_thres=self.iou, classes=self.classes, max_det=self.max_det) # NMS + y = non_max_suppression(y, self.conf, iou_thres=self.iou, classes=self.classes, + multi_label=self.multi_label, max_det=self.max_det) # NMS for i in range(n): scale_coords(shape1, y[i][:, :4], shape0[i]) From 2d9411dbb85ae63b8ca9913726844767898eb021 Mon Sep 17 00:00:00 2001 From: Zegorax Date: Thu, 9 Sep 2021 16:49:10 +0200 Subject: [PATCH 0535/1976] Scope `onnx-simplifier` requirements check (#4730) * Changed onnx-simplifier check behavior Export.py has been updated to check for onnx-simplifier requirement only when the --simplify argument is added. Allows for better flexibility and one less requirement if simplify is not needed. 
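A minimal standalone sketch of this lazy-dependency pattern (the `require()` helper and `export_model()` wrapper below are illustrative only, not YOLOv5 code): the optional package is validated, and installed if missing, only on the branch that actually uses it.

import importlib
import subprocess
import sys


def require(package, import_name=None):
    # Hypothetical helper: pip-install `package` only if importing it fails
    name = import_name or package
    try:
        importlib.import_module(name)
    except ImportError:
        subprocess.run([sys.executable, '-m', 'pip', 'install', package], check=True)


def export_model(simplify=False):
    require('onnx')  # always required on this export path
    import onnx  # noqa: F401
    if simplify:  # optional extra, only checked when the flag is set
        require('onnx-simplifier', import_name='onnxsim')  # pip package and module names differ
        import onnxsim  # noqa: F401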
* Fix single-element tuples

Co-authored-by: Glenn Jocher
---
 export.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/export.py b/export.py
index 5db09884bae8..b9b32b55ac7f 100644
--- a/export.py
+++ b/export.py
@@ -44,7 +44,7 @@ def export_onnx(model, img, file, opset, train, dynamic, simplify):
     # ONNX model export
     prefix = colorstr('ONNX:')
     try:
-        check_requirements(('onnx', 'onnx-simplifier'))
+        check_requirements(('onnx',))
         import onnx

         print(f'\n{prefix} starting export with onnx {onnx.__version__}...')
@@ -66,6 +66,7 @@ def export_onnx(model, img, file, opset, train, dynamic, simplify):
     # Simplify
     if simplify:
         try:
+            check_requirements(('onnx-simplifier',))
             import onnxsim

             print(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...')

From 4a025ae97f0ae274fa25699c6e3a050a82e5bb08 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Thu, 9 Sep 2021 17:57:46 +0200
Subject: [PATCH 0536/1976] Fix `user_config_dir()` for GCP/AWS functions
 (#4726)

* Fix `user_config_dir()` for GCP/AWS functions

Compatibility fix for GCP functions and AWS lambda for user config directory in
https://github.com/ultralytics/yolov5/pull/4628

* Windows skip check
---
 utils/general.py | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/utils/general.py b/utils/general.py
index 06bf088582dc..e3fc31e0bd81 100755
--- a/utils/general.py
+++ b/utils/general.py
@@ -105,13 +105,21 @@ def get_latest_run(search_dir='.'):
 def user_config_dir(dir='Ultralytics'):
     # Return path of user configuration directory (make if necessary)
-    settings = {'Windows': 'AppData/Roaming', 'Linux': '.config', 'Darwin': 'Library/Application Support'}
-    path = Path.home() / settings.get(platform.system(), '') / dir
+    system = platform.system()
+    cfg = {'Windows': 'AppData/Roaming', 'Linux': '.config', 'Darwin': 'Library/Application Support'}
+    path = Path.home() / cfg.get(system, '') / dir
+    if system == 'Linux' and not is_writeable(path):  # GCP functions and AWS lambda solution, only /tmp is writeable
+        path = Path('/tmp') / dir
     if not path.is_dir():
         path.mkdir()  # make dir if required
     return path


+def is_writeable(path):
+    # Return True if path has write permissions (Warning: known issue on Windows)
+    return os.access(path, os.R_OK)
+
+
 def is_docker():
     # Is environment a Docker container?
return Path('/workspace').exists() # or Path('/.dockerenv').exists() From c5360f6e7009eb4d05f14d1cc9dae0963e949213 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 9 Sep 2021 18:01:59 +0200 Subject: [PATCH 0537/1976] Fix `--data from_HUB.zip` (#4732) @KalenMike --- train.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/train.py b/train.py index c32664832d8b..e5410eeeba9f 100644 --- a/train.py +++ b/train.py @@ -36,7 +36,7 @@ from utils.datasets import create_dataloader from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \ strip_optimizer, get_latest_run, check_dataset, check_git_status, check_img_size, check_requirements, \ - check_yaml, check_suffix, print_mutation, set_logging, one_cycle, colorstr, methods + check_file, check_yaml, check_suffix, print_mutation, set_logging, one_cycle, colorstr, methods from utils.downloads import attempt_download from utils.loss import ComputeLoss from utils.plots import plot_labels, plot_evolve @@ -105,6 +105,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary is_coco = data.endswith('coco.yaml') and nc == 80 # COCO dataset # Model + check_suffix(weights, '.pt') # check weights pretrained = weights.endswith('.pt') if pretrained: with torch_distributed_zero_first(RANK): @@ -484,8 +485,7 @@ def main(opt, callbacks=Callbacks()): opt.cfg, opt.weights, opt.resume = '', ckpt, True # reinstate LOGGER.info(f'Resuming training from {ckpt}') else: - check_suffix(opt.weights, '.pt') # check weights - opt.data, opt.cfg, opt.hyp = check_yaml(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp) # check YAMLs + opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp) # check YAMLs assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified' if opt.evolve: opt.project = 'runs/evolve' From 7af1b4c266fef1a0554c2077509b3be16d972e1b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 10 Sep 2021 14:34:09 +0200 Subject: [PATCH 0538/1976] Improved `detect.py` timing (#4741) * Improved detect.py timing * Eliminate 1 time_sync() call * Inference-only time * dash * #Save section * Cleanup --- detect.py | 24 ++++++++++++++---------- val.py | 16 ++++++++-------- 2 files changed, 22 insertions(+), 18 deletions(-) diff --git a/detect.py b/detect.py index 8acd5bf71593..5cb131220e89 100644 --- a/detect.py +++ b/detect.py @@ -8,7 +8,6 @@ import argparse import sys -import time from pathlib import Path import cv2 @@ -123,8 +122,9 @@ def wrap_frozen_graph(gd, inputs, outputs): # Run inference if pt and device.type != 'cpu': model(torch.zeros(1, 3, *imgsz).to(device).type_as(next(model.parameters()))) # run once - t0 = time.time() + dt, seen = [0.0, 0.0, 0.0], 0 for path, img, im0s, vid_cap in dataset: + t1 = time_sync() if onnx: img = img.astype('float32') else: @@ -133,9 +133,10 @@ def wrap_frozen_graph(gd, inputs, outputs): img = img / 255.0 # 0 - 255 to 0.0 - 1.0 if len(img.shape) == 3: img = img[None] # expand for batch dim + t2 = time_sync() + dt[0] += t2 - t1 # Inference - t1 = time_sync() if pt: visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False pred = model(img, augment=augment, visualize=visualize)[0] @@ -162,17 +163,20 @@ def wrap_frozen_graph(gd, inputs, outputs): pred[..., 2] *= imgsz[1] # w pred[..., 3] *= imgsz[0] # h pred = torch.tensor(pred) + t3 = time_sync() + dt[1] += t3 - t2 # NMS pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det) - 
t2 = time_sync() + dt[2] += time_sync() - t3 # Second-stage classifier (optional) if classify: pred = apply_classifier(pred, modelc, img, im0s) # Process predictions - for i, det in enumerate(pred): # detections per image + for i, det in enumerate(pred): # per image + seen += 1 if webcam: # batch_size >= 1 p, s, im0, frame = path[i], f'{i}: ', im0s[i].copy(), dataset.count else: @@ -209,8 +213,8 @@ def wrap_frozen_graph(gd, inputs, outputs): if save_crop: save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) - # Print time (inference + NMS) - print(f'{s}Done. ({t2 - t1:.3f}s)') + # Print time (inference-only) + print(f'{s}Done. ({t3 - t2:.3f}s)') # Stream results im0 = annotator.result() @@ -237,15 +241,15 @@ def wrap_frozen_graph(gd, inputs, outputs): vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) vid_writer[i].write(im0) + # Print results + t = tuple(x / seen * 1E3 for x in dt) # speeds per image + print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) if save_txt or save_img: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' print(f"Results saved to {colorstr('bold', save_dir)}{s}") - if update: strip_optimizer(weights) # update model (to fix SourceChangeWarning) - print(f'Done. ({time.time() - t0:.3f}s)') - def parse_opt(): parser = argparse.ArgumentParser() diff --git a/val.py b/val.py index b7068e041e57..c8f503351ad9 100644 --- a/val.py +++ b/val.py @@ -154,22 +154,22 @@ def run(data, names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)} class_map = coco80_to_coco91_class() if is_coco else list(range(1000)) s = ('%20s' + '%11s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95') - p, r, f1, mp, mr, map50, map, t0, t1, t2 = 0., 0., 0., 0., 0., 0., 0., 0., 0., 0. 
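+    # dt holds cumulative [pre-process, inference, NMS] times in seconds; they are normalized to ms per image when printed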
+ dt, p, r, f1, mp, mr, map50, map = [0.0, 0.0, 0.0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 loss = torch.zeros(3, device=device) jdict, stats, ap, ap_class = [], [], [], [] for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)): - t_ = time_sync() + t1 = time_sync() img = img.to(device, non_blocking=True) img = img.half() if half else img.float() # uint8 to fp16/32 img /= 255.0 # 0 - 255 to 0.0 - 1.0 targets = targets.to(device) nb, _, height, width = img.shape # batch size, channels, height, width - t = time_sync() - t0 += t - t_ + t2 = time_sync() + dt[0] += t2 - t1 # Run model out, train_out = model(img, augment=augment) # inference and training outputs - t1 += time_sync() - t + dt[1] += time_sync() - t2 # Compute loss if compute_loss: @@ -178,9 +178,9 @@ def run(data, # Run NMS targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling - t = time_sync() + t3 = time_sync() out = non_max_suppression(out, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls) - t2 += time_sync() - t + dt[2] += time_sync() - t3 # Statistics per image for si, pred in enumerate(out): @@ -247,7 +247,7 @@ def run(data, print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i])) # Print speeds - t = tuple(x / seen * 1E3 for x in (t0, t1, t2)) # speeds per image + t = tuple(x / seen * 1E3 for x in dt) # speeds per image if not training: shape = (batch_size, 3, imgsz, imgsz) print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t) From 19e28e3bfe29c3570313bd069a214edae293c7ea Mon Sep 17 00:00:00 2001 From: Josh Veitch-Michaelis Date: Sat, 11 Sep 2021 02:28:52 +1200 Subject: [PATCH 0539/1976] Add `callbacks` to train function in W&B sweep (#4742) * add callbacks to train function in wandb sweep Fix following https://github.com/ultralytics/yolov5/pull/4688 which modified the function signature to `train` * Cleanup Co-authored-by: Glenn Jocher --- utils/loggers/wandb/sweep.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/loggers/wandb/sweep.py b/utils/loggers/wandb/sweep.py index 2dcda508eb50..0ca704dd28dd 100644 --- a/utils/loggers/wandb/sweep.py +++ b/utils/loggers/wandb/sweep.py @@ -9,6 +9,7 @@ from train import train, parse_opt from utils.general import increment_path from utils.torch_utils import select_device +from utils.callbacks import Callbacks def sweep(): @@ -26,7 +27,7 @@ def sweep(): device = select_device(opt.device, batch_size=opt.batch_size) # train - train(hyp_dict, opt, device) + train(hyp_dict, opt, device, callbacks=Callbacks()) if __name__ == "__main__": From a144536f881b0ba36ed865ceaed74f11949ca93c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 10 Sep 2021 17:06:22 +0200 Subject: [PATCH 0540/1976] Fix `is_writeable()` for 3 OS support (#4743) * Fix `is_writeable()` for 3 OS support * Update general.py --- utils/general.py | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/utils/general.py b/utils/general.py index e3fc31e0bd81..6201320d3c63 100755 --- a/utils/general.py +++ b/utils/general.py @@ -105,19 +105,24 @@ def get_latest_run(search_dir='.'): def user_config_dir(dir='Ultralytics'): # Return path of user configuration directory (make if necessary) - system = platform.system() - cfg = {'Windows': 'AppData/Roaming', 'Linux': '.config', 'Darwin': 'Library/Application Support'} - path = Path.home() / 
cfg.get(system, '') / dir - if system == 'Linux' and not is_writeable(path): # GCP functions and AWS lambda solution, only /tmp is writeable - path = Path('/tmp') / dir - if not path.is_dir(): - path.mkdir() # make dir if required + cfg = {'Windows': 'AppData/Roaming', 'Linux': '.config', 'Darwin': 'Library/Application Support'} # 3 config dirs + path = Path.home() / cfg.get(platform.system(), '') # OS-specific config dir + path = (path if is_writeable(path) else Path('/tmp')) / dir # GCP and AWS lambda fix, only /tmp is writeable + path.mkdir(exist_ok=True) # make if required return path -def is_writeable(path): - # Return True if path has write permissions (Warning: known issue on Windows) - return os.access(path, os.R_OK) +def is_writeable(dir): + # Return True if directory has write permissions + # return os.access(path, os.R_OK) # known issue on Windows + file = Path(dir) / 'tmp.txt' + try: + with open(file, 'w'): + pass + file.unlink() # remove file + return True + except IOError: + return False def is_docker(): From 6c554b75eaa77e26cec8759335df3a6bf24175c5 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 10 Sep 2021 17:51:22 +0200 Subject: [PATCH 0541/1976] Add TF and TFLite models to `.gitignore` (#4747) --- .gitignore | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.gitignore b/.gitignore index 9c270c7dabe7..f8a2437973f0 100755 --- a/.gitignore +++ b/.gitignore @@ -45,9 +45,13 @@ VOC/ # Neural Network weights ----------------------------------------------------------------------------------------------- *.weights *.pt +*.pb *.onnx *.mlmodel *.torchscript +*.tflite +*.h5 +*_saved_model/ darknet53.conv.74 yolov3-tiny.conv.15 From ff3529252077310bf51604294797fe8d3e973d11 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 10 Sep 2021 17:52:21 +0200 Subject: [PATCH 0542/1976] Add TF and TFLite models to `.dockerignore` (#4748) --- .dockerignore | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.dockerignore b/.dockerignore index 4248cb098cf4..8d60b462e7d1 100644 --- a/.dockerignore +++ b/.dockerignore @@ -18,7 +18,10 @@ data/samples/* **/*.mlmodel **/*.torchscript **/*.torchscript.pt - +**/*.tflite +**/*.h5 +**/*.pb +*_saved_model/ # Below Copied From .gitignore ----------------------------------------------------------------------------------------- # Below Copied From .gitignore ----------------------------------------------------------------------------------------- From 22ee6fb7c186853710ebc57a0dbd716d45b7eef0 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 10 Sep 2021 17:52:33 +0200 Subject: [PATCH 0543/1976] Update `is_writeable()` for 2 methods (#4744) * Writeable test * Fix * Cleanup --- utils/general.py | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/utils/general.py b/utils/general.py index 6201320d3c63..e8b158a773d4 100755 --- a/utils/general.py +++ b/utils/general.py @@ -112,17 +112,19 @@ def user_config_dir(dir='Ultralytics'): return path -def is_writeable(dir): - # Return True if directory has write permissions - # return os.access(path, os.R_OK) # known issue on Windows - file = Path(dir) / 'tmp.txt' - try: - with open(file, 'w'): - pass - file.unlink() # remove file - return True - except IOError: - return False +def is_writeable(dir, test=False): + # Return True if directory has write permissions, test opening a file with write permissions if test=True + if test: # method 1 + file = Path(dir) / 'tmp.txt' + try: + with open(file, 'w'): # open file with write permissions + pass + 
file.unlink() # remove file + return True + except IOError: + return False + else: # method 2 + return os.access(dir, os.R_OK) # possible issues on Windows def is_docker(): From cd810c82866006c1bbe5de797532dcd469641813 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 11 Sep 2021 16:32:08 +0200 Subject: [PATCH 0544/1976] Centralize `user_config_dir()` decision making (#4755) --- utils/general.py | 14 +++++++++----- utils/plots.py | 3 +-- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/utils/general.py b/utils/general.py index e8b158a773d4..229b7a7db168 100755 --- a/utils/general.py +++ b/utils/general.py @@ -103,11 +103,15 @@ def get_latest_run(search_dir='.'): return max(last_list, key=os.path.getctime) if last_list else '' -def user_config_dir(dir='Ultralytics'): - # Return path of user configuration directory (make if necessary) - cfg = {'Windows': 'AppData/Roaming', 'Linux': '.config', 'Darwin': 'Library/Application Support'} # 3 config dirs - path = Path.home() / cfg.get(platform.system(), '') # OS-specific config dir - path = (path if is_writeable(path) else Path('/tmp')) / dir # GCP and AWS lambda fix, only /tmp is writeable +def user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'): + # Return path of user configuration directory. Prefer environment variable if exists. Make dir if required. + env = os.getenv(env_var) + if env: + path = Path(env) # use environment variable + else: + cfg = {'Windows': 'AppData/Roaming', 'Linux': '.config', 'Darwin': 'Library/Application Support'} # 3 OS dirs + path = Path.home() / cfg.get(platform.system(), '') # OS-specific config dir + path = (path if is_writeable(path) else Path('/tmp')) / dir # GCP and AWS lambda fix, only /tmp is writeable path.mkdir(exist_ok=True) # make if required return path diff --git a/utils/plots.py b/utils/plots.py index 141a9ac2b09e..1ed88ea7c832 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -4,7 +4,6 @@ """ import math -import os from copy import copy from pathlib import Path @@ -21,7 +20,7 @@ from utils.metrics import fitness # Settings -CONFIG_DIR = Path(os.getenv('YOLOV5_CONFIG_DIR') or user_config_dir()) # Ultralytics settings dir +CONFIG_DIR = user_config_dir() # Ultralytics settings dir matplotlib.rc('font', **{'size': 11}) matplotlib.use('Agg') # for writing to files only From c47be26f34327e667ad13e5bfc45389bdf21b593 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 11 Sep 2021 22:46:33 +0200 Subject: [PATCH 0545/1976] Replace `path.absolute()` with `path.resolve()` (#4763) --- detect.py | 2 +- export.py | 2 +- hubconf.py | 2 +- models/yolo.py | 2 +- train.py | 2 +- utils/__init__.py | 2 +- utils/datasets.py | 2 +- utils/general.py | 2 +- utils/loggers/wandb/sweep.py | 2 +- utils/loggers/wandb/wandb_utils.py | 2 +- val.py | 2 +- 11 files changed, 11 insertions(+), 11 deletions(-) diff --git a/detect.py b/detect.py index 5cb131220e89..b6597c1662f9 100644 --- a/detect.py +++ b/detect.py @@ -15,7 +15,7 @@ import torch import torch.backends.cudnn as cudnn -FILE = Path(__file__).absolute() +FILE = Path(__file__).resolve() sys.path.append(FILE.parents[0].as_posix()) # add yolov5/ to path from models.experimental import attempt_load diff --git a/export.py b/export.py index b9b32b55ac7f..935bdb40bc9b 100644 --- a/export.py +++ b/export.py @@ -15,7 +15,7 @@ import torch.nn as nn from torch.utils.mobile_optimizer import optimize_for_mobile -FILE = Path(__file__).absolute() +FILE = Path(__file__).resolve() sys.path.append(FILE.parents[0].as_posix()) # add yolov5/ to path from 
models.common import Conv diff --git a/hubconf.py b/hubconf.py index 799c83ec8400..9c5fa63809d1 100644 --- a/hubconf.py +++ b/hubconf.py @@ -33,7 +33,7 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo from utils.downloads import attempt_download from utils.torch_utils import select_device - file = Path(__file__).absolute() + file = Path(__file__).resolve() check_requirements(requirements=file.parent / 'requirements.txt', exclude=('tensorboard', 'thop', 'opencv-python')) set_logging(verbose=verbose) diff --git a/models/yolo.py b/models/yolo.py index 25118a92bb2d..9eddf4a08e49 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -11,7 +11,7 @@ from copy import deepcopy from pathlib import Path -FILE = Path(__file__).absolute() +FILE = Path(__file__).resolve() sys.path.append(FILE.parents[1].as_posix()) # add yolov5/ to path from models.common import * diff --git a/train.py b/train.py index e5410eeeba9f..d243a9cb010f 100644 --- a/train.py +++ b/train.py @@ -26,7 +26,7 @@ from torch.optim import Adam, SGD, lr_scheduler from tqdm import tqdm -FILE = Path(__file__).absolute() +FILE = Path(__file__).resolve() sys.path.append(FILE.parents[0].as_posix()) # add yolov5/ to path import val # for end-of-epoch mAP diff --git a/utils/__init__.py b/utils/__init__.py index 4a61057e8083..74260ad1e5b4 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -4,7 +4,7 @@ # import torch # from PIL import ImageFont # -# FILE = Path(__file__).absolute() +# FILE = Path(__file__).resolve() # ROOT = FILE.parents[1] # yolov5/ dir # if str(ROOT) not in sys.path: # sys.path.append(str(ROOT)) # add ROOT to PATH diff --git a/utils/datasets.py b/utils/datasets.py index 0c6b9b5e2893..cb6ad29e4652 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -156,7 +156,7 @@ def __iter__(self): class LoadImages: # for inference def __init__(self, path, img_size=640, stride=32, auto=True): - p = str(Path(path).absolute()) # os-agnostic absolute path + p = str(Path(path).resolve()) # os-agnostic absolute path if '*' in p: files = sorted(glob.glob(p, recursive=True)) # glob elif os.path.isdir(p): diff --git a/utils/general.py b/utils/general.py index 229b7a7db168..5c3d8d117dc3 100755 --- a/utils/general.py +++ b/utils/general.py @@ -147,7 +147,7 @@ def is_colab(): def is_pip(): # Is file in a pip package? 
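    # pip-installed modules resolve to paths under .../site-packages, unlike a git clone of the repo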
- return 'site-packages' in Path(__file__).absolute().parts + return 'site-packages' in Path(__file__).resolve().parts def is_ascii(s=''): diff --git a/utils/loggers/wandb/sweep.py b/utils/loggers/wandb/sweep.py index 0ca704dd28dd..4d5df5c8e00a 100644 --- a/utils/loggers/wandb/sweep.py +++ b/utils/loggers/wandb/sweep.py @@ -3,7 +3,7 @@ import wandb -FILE = Path(__file__).absolute() +FILE = Path(__file__).resolve() sys.path.append(FILE.parents[3].as_posix()) # add utils/ to path from train import train, parse_opt diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index 5d495c70517b..504a518f75ea 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -9,7 +9,7 @@ import yaml from tqdm import tqdm -FILE = Path(__file__).absolute() +FILE = Path(__file__).resolve() sys.path.append(FILE.parents[3].as_posix()) # add yolov5/ to path from utils.datasets import LoadImagesAndLabels diff --git a/val.py b/val.py index c8f503351ad9..00eb92bb096a 100644 --- a/val.py +++ b/val.py @@ -17,7 +17,7 @@ import torch from tqdm import tqdm -FILE = Path(__file__).absolute() +FILE = Path(__file__).resolve() sys.path.append(FILE.parents[0].as_posix()) # add yolov5/ to path from models.experimental import attempt_load From c3a93d783d1a1e920d346f62b5de9f500e4540e4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 12 Sep 2021 15:52:24 +0200 Subject: [PATCH 0546/1976] Add TensorFlow formats to `export.py` (#4479) * Initial commit * Remove unused export_torchscript return * ROOT variable * Add prefix to fcn arg * fix ROOT * check_yaml into run() * interim fixes * imgsz=(320, 320) * Hardcode tf_raw_resize False * Finish opt elimination * Update representative_dataset_gen() * Update export.py with TF methods * SiLU and GraphDef fixes * file_size() directory handling feature * export fixes * add lambda: to representative_dataset * Detect training False default * Fuse false for TF models * Embed agnostic NMS arguments * Remove lambda * TensorFlow.js export success * Add pb to Usage * Add *_tfjs_model/ to ignore files * prepend YOLOv5 to function headers * Remove end --- comments * parameterize tfjs export pb file * update run() data default /ROOT * update --include help * update imports * return ct_model * Consolidate TFLite export * pb prerequisite to tfjs * TF modules CamelCase * Remove exports from tf.py and cleanup * pass agnostic NMS arguments * CI * CI * ignore *_web_model/ * Add tensorflow to CI dependencies * CI tensorflow-cpu * Update requirements.txt * Remove tensorflow check_requirement * CI coreml tfjs * export only onnx torchscript * reorder exports torchscript first --- .dockerignore | 1 + .github/workflows/ci-testing.yml | 7 +- .gitignore | 1 + detect.py | 2 +- export.py | 219 +++++++++++++--- models/tf.py | 433 ++++++++++++------------------- requirements.txt | 20 +- utils/general.py | 12 +- 8 files changed, 366 insertions(+), 329 deletions(-) diff --git a/.dockerignore b/.dockerignore index 8d60b462e7d1..6c2f2b9b7725 100644 --- a/.dockerignore +++ b/.dockerignore @@ -22,6 +22,7 @@ data/samples/* **/*.h5 **/*.pb *_saved_model/ +*_web_model/ # Below Copied From .gitignore ----------------------------------------------------------------------------------------- # Below Copied From .gitignore ----------------------------------------------------------------------------------------- diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index ecd6f9bbd625..54b230a13e6b 100644 --- a/.github/workflows/ci-testing.yml 
+++ b/.github/workflows/ci-testing.yml @@ -48,7 +48,7 @@ jobs: run: | python -m pip install --upgrade pip pip install -qr requirements.txt -f https://download.pytorch.org/whl/cpu/torch_stable.html - pip install -q onnx onnx-simplifier coremltools # for export + pip install -q onnx tensorflow-cpu # for export python --version pip --version pip list @@ -75,6 +75,7 @@ jobs: python val.py --img 128 --batch 16 --weights runs/train/exp/weights/last.pt --device $di python hubconf.py # hub - python models/yolo.py --cfg ${{ matrix.model }}.yaml # inspect - python export.py --img 128 --batch 1 --weights ${{ matrix.model }}.pt --include onnx torchscript # export + python models/yolo.py --cfg ${{ matrix.model }}.yaml # build PyTorch model + python models/tf.py --weights ${{ matrix.model }}.pt # build TensorFlow model + python export.py --img 128 --batch 1 --weights ${{ matrix.model }}.pt --include torchscript onnx # export shell: bash diff --git a/.gitignore b/.gitignore index f8a2437973f0..375b71807588 100755 --- a/.gitignore +++ b/.gitignore @@ -52,6 +52,7 @@ VOC/ *.tflite *.h5 *_saved_model/ +*_web_model/ darknet53.conv.74 yolov3-tiny.conv.15 diff --git a/detect.py b/detect.py index b6597c1662f9..ef7458d52db3 100644 --- a/detect.py +++ b/detect.py @@ -253,7 +253,7 @@ def wrap_frozen_graph(gd, inputs, outputs): def parse_opt(): parser = argparse.ArgumentParser() - parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)') + parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model path(s)') parser.add_argument('--source', type=str, default='data/images', help='file/dir/URL/glob, 0 for webcam') parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w') parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold') diff --git a/export.py b/export.py index 935bdb40bc9b..8d6805893d1e 100644 --- a/export.py +++ b/export.py @@ -1,12 +1,28 @@ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ -Export a PyTorch model to TorchScript, ONNX, CoreML formats +Export a YOLOv5 PyTorch model to TorchScript, ONNX, CoreML, TensorFlow (saved_model, pb, TFLite, TF.js,) formats +TensorFlow exports authored by https://github.com/zldrobit Usage: - $ python path/to/export.py --weights yolov5s.pt --img 640 --batch 1 + $ python path/to/export.py --weights yolov5s.pt --include torchscript onnx coreml saved_model pb tflite tfjs + +Inference: + $ python path/to/detect.py --weights yolov5s.pt + yolov5s.onnx (must export with --dynamic) + yolov5s_saved_model + yolov5s.pb + yolov5s.tflite + +TensorFlow.js: + $ # Edit yolov5s_web_model/model.json to sort Identity* in ascending order + $ cd .. 
&& git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example + $ npm install + $ ln -s ../../yolov5/yolov5s_web_model public/yolov5s_web_model + $ npm start """ import argparse +import subprocess import sys import time from pathlib import Path @@ -16,40 +32,42 @@ from torch.utils.mobile_optimizer import optimize_for_mobile FILE = Path(__file__).resolve() -sys.path.append(FILE.parents[0].as_posix()) # add yolov5/ to path +ROOT = FILE.parents[0] # yolov5/ dir +sys.path.append(ROOT.as_posix()) # add yolov5/ to path from models.common import Conv -from models.yolo import Detect from models.experimental import attempt_load -from utils.activations import Hardswish, SiLU -from utils.general import colorstr, check_img_size, check_requirements, file_size, set_logging +from models.yolo import Detect +from utils.activations import SiLU +from utils.datasets import LoadImages +from utils.general import colorstr, check_dataset, check_img_size, check_requirements, file_size, set_logging from utils.torch_utils import select_device -def export_torchscript(model, img, file, optimize): - # TorchScript model export - prefix = colorstr('TorchScript:') +def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:')): + # YOLOv5 TorchScript model export try: print(f'\n{prefix} starting export with torch {torch.__version__}...') f = file.with_suffix('.torchscript.pt') - ts = torch.jit.trace(model, img, strict=False) + + ts = torch.jit.trace(model, im, strict=False) (optimize_for_mobile(ts) if optimize else ts).save(f) + print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - return ts except Exception as e: print(f'{prefix} export failure: {e}') -def export_onnx(model, img, file, opset, train, dynamic, simplify): - # ONNX model export - prefix = colorstr('ONNX:') +def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorstr('ONNX:')): + # YOLOv5 ONNX export try: check_requirements(('onnx',)) import onnx print(f'\n{prefix} starting export with onnx {onnx.__version__}...') f = file.with_suffix('.onnx') - torch.onnx.export(model, img, f, verbose=False, opset_version=opset, + + torch.onnx.export(model, im, f, verbose=False, opset_version=opset, training=torch.onnx.TrainingMode.TRAINING if train else torch.onnx.TrainingMode.EVAL, do_constant_folding=not train, input_names=['images'], @@ -73,7 +91,7 @@ def export_onnx(model, img, file, opset, train, dynamic, simplify): model_onnx, check = onnxsim.simplify( model_onnx, dynamic_input_shape=dynamic, - input_shapes={'images': list(img.shape)} if dynamic else None) + input_shapes={'images': list(im.shape)} if dynamic else None) assert check, 'assert check failed' onnx.save(model_onnx, f) except Exception as e: @@ -84,26 +102,131 @@ def export_onnx(model, img, file, opset, train, dynamic, simplify): print(f'{prefix} export failure: {e}') -def export_coreml(model, img, file): - # CoreML model export - prefix = colorstr('CoreML:') +def export_coreml(model, im, file, prefix=colorstr('CoreML:')): + # YOLOv5 CoreML export + ct_model = None try: check_requirements(('coremltools',)) import coremltools as ct print(f'\n{prefix} starting export with coremltools {ct.__version__}...') f = file.with_suffix('.mlmodel') + model.train() # CoreML exports should be placed in model.train() mode - ts = torch.jit.trace(model, img, strict=False) # TorchScript model - model = ct.convert(ts, inputs=[ct.ImageType('image', shape=img.shape, scale=1 / 255.0, bias=[0, 0, 0])]) - model.save(f) + ts = 
torch.jit.trace(model, im, strict=False) # TorchScript model + ct_model = ct.convert(ts, inputs=[ct.ImageType('image', shape=im.shape, scale=1 / 255.0, bias=[0, 0, 0])]) + ct_model.save(f) + print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') except Exception as e: print(f'\n{prefix} export failure: {e}') + return ct_model + + +def export_saved_model(model, im, file, dynamic, + tf_nms=False, agnostic_nms=False, topk_per_class=100, topk_all=100, iou_thres=0.45, + conf_thres=0.25, prefix=colorstr('TensorFlow saved_model:')): + # YOLOv5 TensorFlow saved_model export + keras_model = None + try: + import tensorflow as tf + from tensorflow import keras + from models.tf import TFModel, TFDetect + + print(f'\n{prefix} starting export with tensorflow {tf.__version__}...') + f = str(file).replace('.pt', '_saved_model') + batch_size, ch, *imgsz = list(im.shape) # BCHW -def run(weights='./yolov5s.pt', # weights path - img_size=(640, 640), # image (height, width) + tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz) + im = tf.zeros((batch_size, *imgsz, 3)) # BHWC order for TensorFlow + y = tf_model.predict(im, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres) + inputs = keras.Input(shape=(*imgsz, 3), batch_size=None if dynamic else batch_size) + outputs = tf_model.predict(inputs, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres) + keras_model = keras.Model(inputs=inputs, outputs=outputs) + keras_model.summary() + keras_model.save(f, save_format='tf') + + print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + except Exception as e: + print(f'\n{prefix} export failure: {e}') + + return keras_model + + +def export_pb(keras_model, im, file, prefix=colorstr('TensorFlow GraphDef:')): + # YOLOv5 TensorFlow GraphDef *.pb export https://github.com/leimao/Frozen_Graph_TensorFlow + try: + import tensorflow as tf + from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 + + print(f'\n{prefix} starting export with tensorflow {tf.__version__}...') + f = file.with_suffix('.pb') + + m = tf.function(lambda x: keras_model(x)) # full model + m = m.get_concrete_function(tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype)) + frozen_func = convert_variables_to_constants_v2(m) + frozen_func.graph.as_graph_def() + tf.io.write_graph(graph_or_graph_def=frozen_func.graph, logdir=str(f.parent), name=f.name, as_text=False) + + print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + except Exception as e: + print(f'\n{prefix} export failure: {e}') + + +def export_tflite(keras_model, im, file, tfl_int8, data, ncalib, prefix=colorstr('TensorFlow Lite:')): + # YOLOv5 TensorFlow Lite export + try: + import tensorflow as tf + from models.tf import representative_dataset_gen + + print(f'\n{prefix} starting export with tensorflow {tf.__version__}...') + batch_size, ch, *imgsz = list(im.shape) # BCHW + f = file.with_suffix('.tflite') + + converter = tf.lite.TFLiteConverter.from_keras_model(keras_model) + converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS] + converter.optimizations = [tf.lite.Optimize.DEFAULT] + if tfl_int8: + dataset = LoadImages(check_dataset(data)['train'], img_size=imgsz, auto=False) # representative data + converter.representative_dataset = lambda: representative_dataset_gen(dataset, ncalib) + converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] + converter.inference_input_type = tf.uint8 # or tf.int8 
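+        # uint8 input/output keeps the fully integer-quantized graph free of float conversion at the model boundary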
+ converter.inference_output_type = tf.uint8 # or tf.int8 + converter.experimental_new_quantizer = False + f = str(file).replace('.pt', '-int8.tflite') + + tflite_model = converter.convert() + open(f, "wb").write(tflite_model) + print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + + except Exception as e: + print(f'\n{prefix} export failure: {e}') + + +def export_tfjs(keras_model, im, file, prefix=colorstr('TensorFlow.js:')): + # YOLOv5 TensorFlow.js export + try: + check_requirements(('tensorflowjs',)) + import tensorflowjs as tfjs + + print(f'\n{prefix} starting export with tensorflowjs {tfjs.__version__}...') + f = str(file).replace('.pt', '_web_model') # js dir + f_pb = file.with_suffix('.pb') # *.pb path + + cmd = f"tensorflowjs_converter --input_format=tf_frozen_model " \ + f"--output_node_names='Identity,Identity_1,Identity_2,Identity_3' {f_pb} {f}" + subprocess.run(cmd, shell=True) + + print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + except Exception as e: + print(f'\n{prefix} export failure: {e}') + + +@torch.no_grad() +def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' + weights=ROOT / 'yolov5s.pt', # weights path + imgsz=(640, 640), # image (height, width) batch_size=1, # batch size device='cpu', # cuda device, i.e. 0 or 0,1,2,3 or cpu include=('torchscript', 'onnx', 'coreml'), # include formats @@ -117,29 +240,28 @@ def run(weights='./yolov5s.pt', # weights path ): t = time.time() include = [x.lower() for x in include] - img_size *= 2 if len(img_size) == 1 else 1 # expand + tf_exports = list(x in include for x in ('saved_model', 'pb', 'tflite', 'tfjs')) # TensorFlow exports + imgsz *= 2 if len(imgsz) == 1 else 1 # expand file = Path(weights) # Load PyTorch model device = select_device(device) assert not (device.type == 'cpu' and half), '--half only compatible with GPU export, i.e. 
use --device 0' - model = attempt_load(weights, map_location=device) # load FP32 model - names = model.names + model = attempt_load(weights, map_location=device, inplace=True, fuse=not any(tf_exports)) # load FP32 model + nc, names = model.nc, model.names # number of classes, class names # Input gs = int(max(model.stride)) # grid size (max stride) - img_size = [check_img_size(x, gs) for x in img_size] # verify img_size are gs-multiples - img = torch.zeros(batch_size, 3, *img_size).to(device) # image size(1,3,320,192) iDetection + imgsz = [check_img_size(x, gs) for x in imgsz] # verify img_size are gs-multiples + im = torch.zeros(batch_size, 3, *imgsz).to(device) # image size(1,3,320,192) BCHW iDetection # Update model if half: - img, model = img.half(), model.half() # to FP16 + im, model = im.half(), model.half() # to FP16 model.train() if train else model.eval() # training mode = no Detect() layer grid construction for k, m in model.named_modules(): if isinstance(m, Conv): # assign export-friendly activations - if isinstance(m.act, nn.Hardswish): - m.act = Hardswish() - elif isinstance(m.act, nn.SiLU): + if isinstance(m.act, nn.SiLU): m.act = SiLU() elif isinstance(m, Detect): m.inplace = inplace @@ -147,16 +269,28 @@ def run(weights='./yolov5s.pt', # weights path # m.forward = m.forward_export # assign forward (optional) for _ in range(2): - y = model(img) # dry runs + y = model(im) # dry runs print(f"\n{colorstr('PyTorch:')} starting from {weights} ({file_size(weights):.1f} MB)") # Exports if 'torchscript' in include: - export_torchscript(model, img, file, optimize) + export_torchscript(model, im, file, optimize) if 'onnx' in include: - export_onnx(model, img, file, opset, train, dynamic, simplify) + export_onnx(model, im, file, opset, train, dynamic, simplify) if 'coreml' in include: - export_coreml(model, img, file) + export_coreml(model, im, file) + + # TensorFlow Exports + if any(tf_exports): + pb, tflite, tfjs = tf_exports[1:] + assert not (tflite and tfjs), 'TFLite and TF.js models must be exported separately, please pass only one type.' + model = export_saved_model(model, im, file, dynamic, tf_nms=tfjs, agnostic_nms=tfjs) # keras model + if pb or tfjs: # pb prerequisite to tfjs + export_pb(model, im, file) + if tflite: + export_tflite(model, im, file, tfl_int8=False, data=data, ncalib=100) + if tfjs: + export_tfjs(model, im, file) # Finish print(f'\nExport complete ({time.time() - t:.2f}s)' @@ -166,18 +300,21 @@ def run(weights='./yolov5s.pt', # weights path def parse_opt(): parser = argparse.ArgumentParser() - parser.add_argument('--weights', type=str, default='./yolov5s.pt', help='weights path') - parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image (height, width)') + parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') + parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='weights path') + parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640, 640], help='image (h, w)') parser.add_argument('--batch-size', type=int, default=1, help='batch size') parser.add_argument('--device', default='cpu', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') - parser.add_argument('--include', nargs='+', default=['torchscript', 'onnx', 'coreml'], help='include formats') parser.add_argument('--half', action='store_true', help='FP16 half-precision export') parser.add_argument('--inplace', action='store_true', help='set YOLOv5 Detect() inplace=True') parser.add_argument('--train', action='store_true', help='model.train() mode') parser.add_argument('--optimize', action='store_true', help='TorchScript: optimize for mobile') - parser.add_argument('--dynamic', action='store_true', help='ONNX: dynamic axes') + parser.add_argument('--dynamic', action='store_true', help='ONNX/TF: dynamic axes') parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model') parser.add_argument('--opset', type=int, default=13, help='ONNX: opset version') + parser.add_argument('--include', nargs='+', + default=['torchscript', 'onnx'], + help='available formats are (torchscript, onnx, coreml, saved_model, pb, tflite, tfjs)') opt = parser.parse_args() return opt diff --git a/models/tf.py b/models/tf.py index d6d0f26210b2..621236240f10 100644 --- a/models/tf.py +++ b/models/tf.py @@ -1,67 +1,44 @@ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ -TensorFlow/Keras and TFLite versions of YOLOv5 +TensorFlow, Keras and TFLite versions of YOLOv5 Authored by https://github.com/zldrobit in PR https://github.com/ultralytics/yolov5/pull/1127 Usage: - $ python models/tf.py --weights yolov5s.pt --cfg yolov5s.yaml - -Export int8 TFLite models: - $ python models/tf.py --weights yolov5s.pt --cfg models/yolov5s.yaml --tfl-int8 \ - --source path/to/images/ --ncalib 100 - -Detection: - $ python detect.py --weights yolov5s.pb --img 320 - $ python detect.py --weights yolov5s_saved_model --img 320 - $ python detect.py --weights yolov5s-fp16.tflite --img 320 - $ python detect.py --weights yolov5s-int8.tflite --img 320 --tfl-int8 - -For TensorFlow.js: - $ python models/tf.py --weights yolov5s.pt --cfg models/yolov5s.yaml --img 320 --tf-nms --agnostic-nms - $ pip install tensorflowjs - $ tensorflowjs_converter \ - --input_format=tf_frozen_model \ - --output_node_names='Identity,Identity_1,Identity_2,Identity_3' \ - yolov5s.pb \ - web_model - $ # Edit web_model/model.json to sort Identity* in ascending order - $ cd .. 
&& git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example - $ npm install - $ ln -s ../../yolov5/web_model public/web_model - $ npm start + $ python models/tf.py --weights yolov5s.pt + +Export: + $ python path/to/export.py --weights yolov5s.pt --include saved_model pb tflite tfjs """ import argparse import logging -import os import sys -import traceback from copy import deepcopy from pathlib import Path -sys.path.append('./') # to run '$ python *.py' files in subdirectories +FILE = Path(__file__).resolve() +ROOT = FILE.parents[1] # yolov5/ dir +sys.path.append(ROOT.as_posix()) # add yolov5/ to path import numpy as np import tensorflow as tf import torch import torch.nn as nn -import yaml from tensorflow import keras -from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 from models.common import Conv, Bottleneck, SPP, DWConv, Focus, BottleneckCSP, Concat, autopad, C3 from models.experimental import MixConv2d, CrossConv, attempt_load from models.yolo import Detect -from utils.datasets import LoadImages -from utils.general import check_dataset, check_yaml, make_divisible +from utils.general import colorstr, make_divisible, set_logging +from utils.activations import SiLU -logger = logging.getLogger(__name__) +LOGGER = logging.getLogger(__name__) -class tf_BN(keras.layers.Layer): +class TFBN(keras.layers.Layer): # TensorFlow BatchNormalization wrapper def __init__(self, w=None): - super(tf_BN, self).__init__() + super(TFBN, self).__init__() self.bn = keras.layers.BatchNormalization( beta_initializer=keras.initializers.Constant(w.bias.numpy()), gamma_initializer=keras.initializers.Constant(w.weight.numpy()), @@ -73,20 +50,20 @@ def call(self, inputs): return self.bn(inputs) -class tf_Pad(keras.layers.Layer): +class TFPad(keras.layers.Layer): def __init__(self, pad): - super(tf_Pad, self).__init__() + super(TFPad, self).__init__() self.pad = tf.constant([[0, 0], [pad, pad], [pad, pad], [0, 0]]) def call(self, inputs): return tf.pad(inputs, self.pad, mode='constant', constant_values=0) -class tf_Conv(keras.layers.Layer): +class TFConv(keras.layers.Layer): # Standard convolution def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None): # ch_in, ch_out, weights, kernel, stride, padding, groups - super(tf_Conv, self).__init__() + super(TFConv, self).__init__() assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument" assert isinstance(k, int), "Convolution with multiple kernels are not allowed." # TensorFlow convolution padding is inconsistent with PyTorch (e.g. 
k=3 s=2 'SAME' padding) @@ -95,27 +72,29 @@ def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None): conv = keras.layers.Conv2D( c2, k, s, 'SAME' if s == 1 else 'VALID', use_bias=False, kernel_initializer=keras.initializers.Constant(w.conv.weight.permute(2, 3, 1, 0).numpy())) - self.conv = conv if s == 1 else keras.Sequential([tf_Pad(autopad(k, p)), conv]) - self.bn = tf_BN(w.bn) if hasattr(w, 'bn') else tf.identity + self.conv = conv if s == 1 else keras.Sequential([TFPad(autopad(k, p)), conv]) + self.bn = TFBN(w.bn) if hasattr(w, 'bn') else tf.identity # YOLOv5 activations if isinstance(w.act, nn.LeakyReLU): self.act = (lambda x: keras.activations.relu(x, alpha=0.1)) if act else tf.identity elif isinstance(w.act, nn.Hardswish): self.act = (lambda x: x * tf.nn.relu6(x + 3) * 0.166666667) if act else tf.identity - elif isinstance(w.act, nn.SiLU): + elif isinstance(w.act, (nn.SiLU, SiLU)): self.act = (lambda x: keras.activations.swish(x)) if act else tf.identity + else: + raise Exception(f'no matching TensorFlow activation found for {w.act}') def call(self, inputs): return self.act(self.bn(self.conv(inputs))) -class tf_Focus(keras.layers.Layer): +class TFFocus(keras.layers.Layer): # Focus wh information into c-space def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None): # ch_in, ch_out, kernel, stride, padding, groups - super(tf_Focus, self).__init__() - self.conv = tf_Conv(c1 * 4, c2, k, s, p, g, act, w.conv) + super(TFFocus, self).__init__() + self.conv = TFConv(c1 * 4, c2, k, s, p, g, act, w.conv) def call(self, inputs): # x(b,w,h,c) -> y(b,w/2,h/2,4c) # inputs = inputs / 255. # normalize 0-255 to 0-1 @@ -125,23 +104,23 @@ def call(self, inputs): # x(b,w,h,c) -> y(b,w/2,h/2,4c) inputs[:, 1::2, 1::2, :]], 3)) -class tf_Bottleneck(keras.layers.Layer): +class TFBottleneck(keras.layers.Layer): # Standard bottleneck def __init__(self, c1, c2, shortcut=True, g=1, e=0.5, w=None): # ch_in, ch_out, shortcut, groups, expansion - super(tf_Bottleneck, self).__init__() + super(TFBottleneck, self).__init__() c_ = int(c2 * e) # hidden channels - self.cv1 = tf_Conv(c1, c_, 1, 1, w=w.cv1) - self.cv2 = tf_Conv(c_, c2, 3, 1, g=g, w=w.cv2) + self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) + self.cv2 = TFConv(c_, c2, 3, 1, g=g, w=w.cv2) self.add = shortcut and c1 == c2 def call(self, inputs): return inputs + self.cv2(self.cv1(inputs)) if self.add else self.cv2(self.cv1(inputs)) -class tf_Conv2d(keras.layers.Layer): +class TFConv2d(keras.layers.Layer): # Substitution for PyTorch nn.Conv2D def __init__(self, c1, c2, k, s=1, g=1, bias=True, w=None): - super(tf_Conv2d, self).__init__() + super(TFConv2d, self).__init__() assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument" self.conv = keras.layers.Conv2D( c2, k, s, 'VALID', use_bias=bias, @@ -152,19 +131,19 @@ def call(self, inputs): return self.conv(inputs) -class tf_BottleneckCSP(keras.layers.Layer): +class TFBottleneckCSP(keras.layers.Layer): # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None): # ch_in, ch_out, number, shortcut, groups, expansion - super(tf_BottleneckCSP, self).__init__() + super(TFBottleneckCSP, self).__init__() c_ = int(c2 * e) # hidden channels - self.cv1 = tf_Conv(c1, c_, 1, 1, w=w.cv1) - self.cv2 = tf_Conv2d(c1, c_, 1, 1, bias=False, w=w.cv2) - self.cv3 = tf_Conv2d(c_, c_, 1, 1, bias=False, w=w.cv3) - self.cv4 = tf_Conv(2 * c_, c2, 1, 1, w=w.cv4) - self.bn = tf_BN(w.bn) + self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) + 
self.cv2 = TFConv2d(c1, c_, 1, 1, bias=False, w=w.cv2) + self.cv3 = TFConv2d(c_, c_, 1, 1, bias=False, w=w.cv3) + self.cv4 = TFConv(2 * c_, c2, 1, 1, w=w.cv4) + self.bn = TFBN(w.bn) self.act = lambda x: keras.activations.relu(x, alpha=0.1) - self.m = keras.Sequential([tf_Bottleneck(c_, c_, shortcut, g, e=1.0, w=w.m[j]) for j in range(n)]) + self.m = keras.Sequential([TFBottleneck(c_, c_, shortcut, g, e=1.0, w=w.m[j]) for j in range(n)]) def call(self, inputs): y1 = self.cv3(self.m(self.cv1(inputs))) @@ -172,28 +151,28 @@ def call(self, inputs): return self.cv4(self.act(self.bn(tf.concat((y1, y2), axis=3)))) -class tf_C3(keras.layers.Layer): +class TFC3(keras.layers.Layer): # CSP Bottleneck with 3 convolutions def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None): # ch_in, ch_out, number, shortcut, groups, expansion - super(tf_C3, self).__init__() + super(TFC3, self).__init__() c_ = int(c2 * e) # hidden channels - self.cv1 = tf_Conv(c1, c_, 1, 1, w=w.cv1) - self.cv2 = tf_Conv(c1, c_, 1, 1, w=w.cv2) - self.cv3 = tf_Conv(2 * c_, c2, 1, 1, w=w.cv3) - self.m = keras.Sequential([tf_Bottleneck(c_, c_, shortcut, g, e=1.0, w=w.m[j]) for j in range(n)]) + self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) + self.cv2 = TFConv(c1, c_, 1, 1, w=w.cv2) + self.cv3 = TFConv(2 * c_, c2, 1, 1, w=w.cv3) + self.m = keras.Sequential([TFBottleneck(c_, c_, shortcut, g, e=1.0, w=w.m[j]) for j in range(n)]) def call(self, inputs): return self.cv3(tf.concat((self.m(self.cv1(inputs)), self.cv2(inputs)), axis=3)) -class tf_SPP(keras.layers.Layer): +class TFSPP(keras.layers.Layer): # Spatial pyramid pooling layer used in YOLOv3-SPP def __init__(self, c1, c2, k=(5, 9, 13), w=None): - super(tf_SPP, self).__init__() + super(TFSPP, self).__init__() c_ = c1 // 2 # hidden channels - self.cv1 = tf_Conv(c1, c_, 1, 1, w=w.cv1) - self.cv2 = tf_Conv(c_ * (len(k) + 1), c2, 1, 1, w=w.cv2) + self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) + self.cv2 = TFConv(c_ * (len(k) + 1), c2, 1, 1, w=w.cv2) self.m = [keras.layers.MaxPool2D(pool_size=x, strides=1, padding='SAME') for x in k] def call(self, inputs): @@ -201,9 +180,9 @@ def call(self, inputs): return self.cv2(tf.concat([x] + [m(x) for m in self.m], 3)) -class tf_Detect(keras.layers.Layer): - def __init__(self, nc=80, anchors=(), ch=(), w=None): # detection layer - super(tf_Detect, self).__init__() +class TFDetect(keras.layers.Layer): + def __init__(self, nc=80, anchors=(), ch=(), imgsz=(640, 640), w=None): # detection layer + super(TFDetect, self).__init__() self.stride = tf.convert_to_tensor(w.stride.numpy(), dtype=tf.float32) self.nc = nc # number of classes self.no = nc + 5 # number of outputs per anchor @@ -213,22 +192,20 @@ def __init__(self, nc=80, anchors=(), ch=(), w=None): # detection layer self.anchors = tf.convert_to_tensor(w.anchors.numpy(), dtype=tf.float32) self.anchor_grid = tf.reshape(tf.convert_to_tensor(w.anchor_grid.numpy(), dtype=tf.float32), [self.nl, 1, -1, 1, 2]) - self.m = [tf_Conv2d(x, self.no * self.na, 1, w=w.m[i]) for i, x in enumerate(ch)] - self.export = False # onnx export - self.training = True # set to False after building model + self.m = [TFConv2d(x, self.no * self.na, 1, w=w.m[i]) for i, x in enumerate(ch)] + self.training = False # set to False after building model + self.imgsz = imgsz for i in range(self.nl): - ny, nx = opt.img_size[0] // self.stride[i], opt.img_size[1] // self.stride[i] + ny, nx = self.imgsz[0] // self.stride[i], self.imgsz[1] // self.stride[i] self.grid[i] = self._make_grid(nx, ny) def call(self, inputs): - # x = x.copy() # for 
profiling z = [] # inference output - self.training |= self.export x = [] for i in range(self.nl): x.append(self.m[i](inputs[i])) # x(bs,20,20,255) to x(bs,3,20,20,85) - ny, nx = opt.img_size[0] // self.stride[i], opt.img_size[1] // self.stride[i] + ny, nx = self.imgsz[0] // self.stride[i], self.imgsz[1] // self.stride[i] x[i] = tf.transpose(tf.reshape(x[i], [-1, ny * nx, self.na, self.no]), [0, 2, 1, 3]) if not self.training: # inference @@ -236,8 +213,8 @@ def call(self, inputs): xy = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # Normalize xywh to 0-1 to reduce calibration error - xy /= tf.constant([[opt.img_size[1], opt.img_size[0]]], dtype=tf.float32) - wh /= tf.constant([[opt.img_size[1], opt.img_size[0]]], dtype=tf.float32) + xy /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32) + wh /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32) y = tf.concat([xy, wh, y[..., 4:]], -1) z.append(tf.reshape(y, [-1, 3 * ny * nx, self.no])) @@ -251,25 +228,23 @@ def _make_grid(nx=20, ny=20): return tf.cast(tf.reshape(tf.stack([xv, yv], 2), [1, 1, ny * nx, 2]), dtype=tf.float32) -class tf_Upsample(keras.layers.Layer): - def __init__(self, size, scale_factor, mode, w=None): - super(tf_Upsample, self).__init__() +class TFUpsample(keras.layers.Layer): + def __init__(self, size, scale_factor, mode, w=None): # warning: all arguments needed including 'w' + super(TFUpsample, self).__init__() assert scale_factor == 2, "scale_factor must be 2" + self.upsample = lambda x: tf.image.resize(x, (x.shape[1] * 2, x.shape[2] * 2), method=mode) # self.upsample = keras.layers.UpSampling2D(size=scale_factor, interpolation=mode) - if opt.tf_raw_resize: - # with default arguments: align_corners=False, half_pixel_centers=False - self.upsample = lambda x: tf.raw_ops.ResizeNearestNeighbor(images=x, - size=(x.shape[1] * 2, x.shape[2] * 2)) - else: - self.upsample = lambda x: tf.image.resize(x, (x.shape[1] * 2, x.shape[2] * 2), method=mode) + # with default arguments: align_corners=False, half_pixel_centers=False + # self.upsample = lambda x: tf.raw_ops.ResizeNearestNeighbor(images=x, + # size=(x.shape[1] * 2, x.shape[2] * 2)) def call(self, inputs): return self.upsample(inputs) -class tf_Concat(keras.layers.Layer): +class TFConcat(keras.layers.Layer): def __init__(self, dimension=1, w=None): - super(tf_Concat, self).__init__() + super(TFConcat, self).__init__() assert dimension == 1, "convert only NCHW to NHWC concat" self.d = 3 @@ -277,8 +252,8 @@ def call(self, inputs): return tf.concat(inputs, self.d) -def parse_model(d, ch, model): # model_dict, input_channels(3) - logger.info('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments')) +def parse_model(d, ch, model, imgsz): # model_dict, input_channels(3) + LOGGER.info('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments')) anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'] na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors no = na * (nc + 5) # number of outputs = anchors * (classes + 5) @@ -310,10 +285,11 @@ def parse_model(d, ch, model): # model_dict, input_channels(3) args.append([ch[x + 1] for x in f]) if isinstance(args[1], int): # number of anchors args[1] = [list(range(args[1] * 2))] * len(f) + args.append(imgsz) else: c2 = ch[f] - tf_m = eval('tf_' + m_str.replace('nn.', '')) + tf_m = eval('TF' + m_str.replace('nn.', '')) m_ = 
keras.Sequential([tf_m(*args, w=model.model[i][j]) for j in range(n)]) if n > 1 \ else tf_m(*args, w=model.model[i]) # module @@ -321,16 +297,16 @@ def parse_model(d, ch, model): # model_dict, input_channels(3) t = str(m)[8:-2].replace('__main__.', '') # module type np = sum([x.numel() for x in torch_m_.parameters()]) # number params m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params - logger.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args)) # print + LOGGER.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args)) # print save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist layers.append(m_) ch.append(c2) return keras.Sequential(layers), sorted(save) -class tf_Model(): - def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, model=None): # model, input channels, number of classes - super(tf_Model, self).__init__() +class TFModel: + def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, model=None, imgsz=(640, 640)): # model, channels, classes + super(TFModel, self).__init__() if isinstance(cfg, dict): self.yaml = cfg # model dict else: # is *.yaml @@ -343,9 +319,10 @@ def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, model=None): # model, inp if nc and nc != self.yaml['nc']: print('Overriding %s nc=%g with nc=%g' % (cfg, self.yaml['nc'], nc)) self.yaml['nc'] = nc # override yaml value - self.model, self.savelist = parse_model(deepcopy(self.yaml), ch=[ch], model=model) # model, savelist, ch_out + self.model, self.savelist = parse_model(deepcopy(self.yaml), ch=[ch], model=model, imgsz=imgsz) - def predict(self, inputs, profile=False): + def predict(self, inputs, tf_nms=False, agnostic_nms=False, topk_per_class=100, topk_all=100, iou_thres=0.45, + conf_thres=0.25): y = [] # outputs x = inputs for i, m in enumerate(self.model.layers): @@ -356,18 +333,18 @@ def predict(self, inputs, profile=False): y.append(x if m.i in self.savelist else None) # save output # Add TensorFlow NMS - if opt.tf_nms: - boxes = xywh2xyxy(x[0][..., :4]) + if tf_nms: + boxes = self._xywh2xyxy(x[0][..., :4]) probs = x[0][:, :, 4:5] classes = x[0][:, :, 5:] scores = probs * classes - if opt.agnostic_nms: - nms = agnostic_nms_layer()((boxes, classes, scores)) + if agnostic_nms: + nms = AgnosticNMS()((boxes, classes, scores), topk_all, iou_thres, conf_thres) return nms, x[1] else: boxes = tf.expand_dims(boxes, 2) nms = tf.image.combined_non_max_suppression( - boxes, scores, opt.topk_per_class, opt.topk_all, opt.iou_thres, opt.score_thres, clip_boxes=False) + boxes, scores, topk_per_class, topk_all, iou_thres, conf_thres, clip_boxes=False) return nms, x[1] return x[0] # output only first tensor [1,6300,85] = [xywh, conf, class0, class1, ...] 
@@ -377,182 +354,94 @@ def predict(self, inputs, profile=False): # cls = tf.reshape(tf.cast(tf.argmax(x[..., 5:], axis=1), tf.float32), (-1, 1)) # x(6300,1) classes # return tf.concat([conf, cls, xywh], 1) + @staticmethod + def _xywh2xyxy(xywh): + # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right + x, y, w, h = tf.split(xywh, num_or_size_splits=4, axis=-1) + return tf.concat([x - w / 2, y - h / 2, x + w / 2, y + h / 2], axis=-1) + -class agnostic_nms_layer(keras.layers.Layer): - # wrap map_fn to avoid TypeSpec related error https://stackoverflow.com/a/65809989/3036450 - def call(self, input): - return tf.map_fn(agnostic_nms, input, +class AgnosticNMS(keras.layers.Layer): + # TF Agnostic NMS + def call(self, input, topk_all, iou_thres, conf_thres): + # wrap map_fn to avoid TypeSpec related error https://stackoverflow.com/a/65809989/3036450 + return tf.map_fn(self._nms, input, fn_output_signature=(tf.float32, tf.float32, tf.float32, tf.int32), name='agnostic_nms') - -def agnostic_nms(x): - boxes, classes, scores = x - class_inds = tf.cast(tf.argmax(classes, axis=-1), tf.float32) - scores_inp = tf.reduce_max(scores, -1) - selected_inds = tf.image.non_max_suppression( - boxes, scores_inp, max_output_size=opt.topk_all, iou_threshold=opt.iou_thres, score_threshold=opt.score_thres) - selected_boxes = tf.gather(boxes, selected_inds) - padded_boxes = tf.pad(selected_boxes, - paddings=[[0, opt.topk_all - tf.shape(selected_boxes)[0]], [0, 0]], - mode="CONSTANT", constant_values=0.0) - selected_scores = tf.gather(scores_inp, selected_inds) - padded_scores = tf.pad(selected_scores, - paddings=[[0, opt.topk_all - tf.shape(selected_boxes)[0]]], - mode="CONSTANT", constant_values=-1.0) - selected_classes = tf.gather(class_inds, selected_inds) - padded_classes = tf.pad(selected_classes, - paddings=[[0, opt.topk_all - tf.shape(selected_boxes)[0]]], - mode="CONSTANT", constant_values=-1.0) - valid_detections = tf.shape(selected_inds)[0] - return padded_boxes, padded_scores, padded_classes, valid_detections - - -def xywh2xyxy(xywh): - # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right - x, y, w, h = tf.split(xywh, num_or_size_splits=4, axis=-1) - return tf.concat([x - w / 2, y - h / 2, x + w / 2, y + h / 2], axis=-1) - - -def representative_dataset_gen(): - # Representative dataset for use with converter.representative_dataset - n = 0 - for path, img, im0s, vid_cap in dataset: - # Get sample input data as a numpy array in a method of your choosing. 
- n += 1 + @staticmethod + def _nms(x, topk_all=100, iou_thres=0.45, conf_thres=0.25): # agnostic NMS + boxes, classes, scores = x + class_inds = tf.cast(tf.argmax(classes, axis=-1), tf.float32) + scores_inp = tf.reduce_max(scores, -1) + selected_inds = tf.image.non_max_suppression( + boxes, scores_inp, max_output_size=topk_all, iou_threshold=iou_thres, score_threshold=conf_thres) + selected_boxes = tf.gather(boxes, selected_inds) + padded_boxes = tf.pad(selected_boxes, + paddings=[[0, topk_all - tf.shape(selected_boxes)[0]], [0, 0]], + mode="CONSTANT", constant_values=0.0) + selected_scores = tf.gather(scores_inp, selected_inds) + padded_scores = tf.pad(selected_scores, + paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]], + mode="CONSTANT", constant_values=-1.0) + selected_classes = tf.gather(class_inds, selected_inds) + padded_classes = tf.pad(selected_classes, + paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]], + mode="CONSTANT", constant_values=-1.0) + valid_detections = tf.shape(selected_inds)[0] + return padded_boxes, padded_scores, padded_classes, valid_detections + + +def representative_dataset_gen(dataset, ncalib=100): + # Representative dataset generator for use with converter.representative_dataset, returns a generator of np arrays + for n, (path, img, im0s, vid_cap) in enumerate(dataset): input = np.transpose(img, [1, 2, 0]) input = np.expand_dims(input, axis=0).astype(np.float32) input /= 255.0 yield [input] - if n >= opt.ncalib: + if n >= ncalib: break -if __name__ == "__main__": +def run(weights=ROOT / 'yolov5s.pt', # weights path + imgsz=(640, 640), # inference size h,w + batch_size=1, # batch size + dynamic=False, # dynamic batch size + ): + # PyTorch model + im = torch.zeros((batch_size, 3, *imgsz)) # BCHW image + model = attempt_load(weights, map_location=torch.device('cpu'), inplace=True, fuse=False) + y = model(im) # inference + model.info() + + # TensorFlow model + im = tf.zeros((batch_size, *imgsz, 3)) # BHWC image + tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz) + y = tf_model.predict(im) # inference + + # Keras model + im = keras.Input(shape=(*imgsz, 3), batch_size=None if dynamic else batch_size) + keras_model = keras.Model(inputs=im, outputs=tf_model.predict(im)) + keras_model.summary() + + +def parse_opt(): parser = argparse.ArgumentParser() - parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='cfg path') - parser.add_argument('--weights', type=str, default='yolov5s.pt', help='weights path') - parser.add_argument('--img-size', nargs='+', type=int, default=[320, 320], help='image size') # height, width + parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='weights path') + parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w') parser.add_argument('--batch-size', type=int, default=1, help='batch size') - parser.add_argument('--dynamic-batch-size', action='store_true', help='dynamic batch size') - parser.add_argument('--source', type=str, default='../data/coco128.yaml', help='dir of images or data.yaml file') - parser.add_argument('--ncalib', type=int, default=100, help='number of calibration images') - parser.add_argument('--tfl-int8', action='store_true', dest='tfl_int8', help='export TFLite int8 model') - parser.add_argument('--tf-nms', action='store_true', dest='tf_nms', help='TF NMS (without TFLite export)') - parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') - 
parser.add_argument('--tf-raw-resize', action='store_true', dest='tf_raw_resize', - help='use tf.raw_ops.ResizeNearestNeighbor for resize') - parser.add_argument('--topk-per-class', type=int, default=100, help='topk per class to keep in NMS') - parser.add_argument('--topk-all', type=int, default=100, help='topk for all classes to keep in NMS') - parser.add_argument('--iou-thres', type=float, default=0.5, help='IOU threshold for NMS') - parser.add_argument('--score-thres', type=float, default=0.4, help='score threshold for NMS') + parser.add_argument('--dynamic', action='store_true', help='dynamic batch size') opt = parser.parse_args() - opt.cfg = check_yaml(opt.cfg) # check YAML - opt.img_size *= 2 if len(opt.img_size) == 1 else 1 # expand - print(opt) - - # Input - img = torch.zeros((opt.batch_size, 3, *opt.img_size)) # image size(1,3,320,192) iDetection - - # Load PyTorch model - model = attempt_load(opt.weights, map_location=torch.device('cpu'), inplace=True, fuse=False) - model.model[-1].export = False # set Detect() layer export=True - y = model(img) # dry run - nc = y[0].shape[-1] - 5 - - # TensorFlow saved_model export - try: - print('\nStarting TensorFlow saved_model export with TensorFlow %s...' % tf.__version__) - tf_model = tf_Model(opt.cfg, model=model, nc=nc) - img = tf.zeros((opt.batch_size, *opt.img_size, 3)) # NHWC Input for TensorFlow - - m = tf_model.model.layers[-1] - assert isinstance(m, tf_Detect), "the last layer must be Detect" - m.training = False - y = tf_model.predict(img) - - inputs = keras.Input(shape=(*opt.img_size, 3), batch_size=None if opt.dynamic_batch_size else opt.batch_size) - keras_model = keras.Model(inputs=inputs, outputs=tf_model.predict(inputs)) - keras_model.summary() - path = opt.weights.replace('.pt', '_saved_model') # filename - keras_model.save(path, save_format='tf') - print('TensorFlow saved_model export success, saved as %s' % path) - except Exception as e: - print('TensorFlow saved_model export failure: %s' % e) - traceback.print_exc(file=sys.stdout) - - # TensorFlow GraphDef export - try: - print('\nStarting TensorFlow GraphDef export with TensorFlow %s...' % tf.__version__) - - # https://github.com/leimao/Frozen_Graph_TensorFlow - full_model = tf.function(lambda x: keras_model(x)) - full_model = full_model.get_concrete_function( - tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype)) - - frozen_func = convert_variables_to_constants_v2(full_model) - frozen_func.graph.as_graph_def() - f = opt.weights.replace('.pt', '.pb') # filename - tf.io.write_graph(graph_or_graph_def=frozen_func.graph, - logdir=os.path.dirname(f), - name=os.path.basename(f), - as_text=False) - - print('TensorFlow GraphDef export success, saved as %s' % f) - except Exception as e: - print('TensorFlow GraphDef export failure: %s' % e) - traceback.print_exc(file=sys.stdout) - - # TFLite model export - if not opt.tf_nms: - try: - print('\nStarting TFLite export with TensorFlow %s...' 
% tf.__version__) - - # fp32 TFLite model export --------------------------------------------------------------------------------- - # converter = tf.lite.TFLiteConverter.from_keras_model(keras_model) - # converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS] - # converter.allow_custom_ops = False - # converter.experimental_new_converter = True - # tflite_model = converter.convert() - # f = opt.weights.replace('.pt', '.tflite') # filename - # open(f, "wb").write(tflite_model) - - # fp16 TFLite model export --------------------------------------------------------------------------------- - converter = tf.lite.TFLiteConverter.from_keras_model(keras_model) - converter.optimizations = [tf.lite.Optimize.DEFAULT] - # converter.representative_dataset = representative_dataset_gen - # converter.target_spec.supported_types = [tf.float16] - converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS] - converter.allow_custom_ops = False - converter.experimental_new_converter = True - tflite_model = converter.convert() - f = opt.weights.replace('.pt', '-fp16.tflite') # filename - open(f, "wb").write(tflite_model) - print('\nTFLite export success, saved as %s' % f) - - # int8 TFLite model export --------------------------------------------------------------------------------- - if opt.tfl_int8: - # Representative Dataset - if opt.source.endswith('.yaml'): - with open(check_yaml(opt.source)) as f: - data = yaml.load(f, Loader=yaml.FullLoader) # data dict - check_dataset(data) # check - opt.source = data['train'] - dataset = LoadImages(opt.source, img_size=opt.img_size, auto=False) - converter = tf.lite.TFLiteConverter.from_keras_model(keras_model) - converter.optimizations = [tf.lite.Optimize.DEFAULT] - converter.representative_dataset = representative_dataset_gen - converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] - converter.inference_input_type = tf.uint8 # or tf.int8 - converter.inference_output_type = tf.uint8 # or tf.int8 - converter.allow_custom_ops = False - converter.experimental_new_converter = True - converter.experimental_new_quantizer = False - tflite_model = converter.convert() - f = opt.weights.replace('.pt', '-int8.tflite') # filename - open(f, "wb").write(tflite_model) - print('\nTFLite (int8) export success, saved as %s' % f) - - except Exception as e: - print('\nTFLite export failure: %s' % e) - traceback.print_exc(file=sys.stdout) + opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand + return opt + + +def main(opt): + set_logging() + print(colorstr('tf.py: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items())) + run(**vars(opt)) + + +if __name__ == "__main__": + opt = parse_opt() + main(opt) diff --git a/requirements.txt b/requirements.txt index 2ad65ba53e29..b84b353f75f3 100755 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ # pip install -r requirements.txt -# base ---------------------------------------- +# Base ---------------------------------------- matplotlib>=3.2.2 numpy>=1.18.5 opencv-python>=4.1.2 @@ -11,21 +11,23 @@ torch>=1.7.0 torchvision>=0.8.1 tqdm>=4.41.0 -# logging ------------------------------------- +# Logging ------------------------------------- tensorboard>=2.4.1 # wandb -# plotting ------------------------------------ +# Plotting ------------------------------------ seaborn>=0.11.0 pandas -# export -------------------------------------- -# coremltools>=4.1 -# onnx>=1.9.0 -# scikit-learn==0.19.2 # for coreml quantization -# tensorflow==2.4.1 # for TFLite export +# Export 
--------------------------------------
+# coremltools>=4.1  # CoreML export
+# onnx>=1.9.0  # ONNX export
+# onnx-simplifier>=0.3.6  # ONNX simplifier
+# scikit-learn==0.19.2  # CoreML quantization
+# tensorflow>=2.4.1  # TFLite export
+# tensorflowjs>=3.9.0  # TF.js export

-# extras --------------------------------------
+# Extras --------------------------------------
 # Cython  # for pycocotools https://github.com/cocodataset/cocoapi/issues/172
 # pycocotools>=2.0  # COCO mAP
 # albumentations>=1.0.3
diff --git a/utils/general.py b/utils/general.py
index 5c3d8d117dc3..7a80b2ea81bc 100755
--- a/utils/general.py
+++ b/utils/general.py
@@ -161,9 +161,15 @@ def emojis(str=''):
     return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str

-def file_size(file):
-    # Return file size in MB
-    return Path(file).stat().st_size / 1e6
+def file_size(path):
+    # Return file/dir size (MB)
+    path = Path(path)
+    if path.is_file():
+        return path.stat().st_size / 1E6
+    elif path.is_dir():
+        return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / 1E6
+    else:
+        return 0.0

 def check_online():

From b161edf8738c4020ca4ffb6f73ce2d881cc47d59 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Sun, 12 Sep 2021 17:55:41 +0200
Subject: [PATCH 0547/1976] Update ci-testing.yml (#4770)

---
 .github/workflows/ci-testing.yml | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml
index 54b230a13e6b..3272c0316113 100644
--- a/.github/workflows/ci-testing.yml
+++ b/.github/workflows/ci-testing.yml
@@ -8,6 +8,8 @@ on:
   # https://help.github.com/en/actions/reference/events-that-trigger-workflows
   pull_request:
     # The branches below must be a subset of the branches above
     branches: [master, develop]
+  schedule:
+    - cron: '0 0 * * *'  # Runs at 00:00 UTC every day

 jobs:
   cpu-tests:

From aa1859909c96d5e1fc839b2746b45038ee8465c9 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Sun, 12 Sep 2021 23:40:28 +0200
Subject: [PATCH 0548/1976] Update ci-testing.yml (#4772)

---
 .github/workflows/ci-testing.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml
index 3272c0316113..71f39c16c4ed 100644
--- a/.github/workflows/ci-testing.yml
+++ b/.github/workflows/ci-testing.yml
@@ -4,10 +4,10 @@ name: CI CPU testing
 on:
   # https://help.github.com/en/actions/reference/events-that-trigger-workflows
   push:
-    branches: [master, develop]
+    branches: [master]
   pull_request:
     # The branches below must be a subset of the branches above
-    branches: [master, develop]
+    branches: [master]
   schedule:
     - cron: '0 0 * * *'  # Runs at 00:00 UTC every day

From fcb225c1c81a17fbaeff027b1f4be5300049e8a8 Mon Sep 17 00:00:00 2001
From: Jihoon Kim <41357160+kimnamu@users.noreply.github.com>
Date: Wed, 15 Sep 2021 07:57:06 +0900
Subject: [PATCH 0549/1976] Shuffle all 4(or 9) images in mosaic augmentation (#4787)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Thank you for sharing these nice open-source codes 👍 This PR shuffles the order of all 4 (or 9) images in mosaic augmentation. Currently the ordering is not completely random: only the images after the first are randomly arranged, so the first loaded image always occupies the same mosaic position. Shuffling all of the indices increases the diversity of data composition.
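For illustration, a minimal standalone sketch of the effect (an editorial example, not part of the patch: `population` is a hypothetical stand-in for `self.indices`, and the counting loop exists only to demonstrate the behavior). Without the added `random.shuffle(indices)`, the anchor image always lands in the first mosaic tile; with it, every tile is equally likely:

    import random
    from collections import Counter

    population = list(range(1000))   # hypothetical stand-in for self.indices
    tile_of_anchor = Counter()
    for _ in range(10_000):
        indices = [-1] + random.choices(population, k=3)  # -1 marks the anchor image
        random.shuffle(indices)                           # the one-line change in this patch
        tile_of_anchor[indices.index(-1)] += 1
    print(tile_of_anchor)  # ~2500 per tile position; without the shuffle, all counts fall on tile 0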
--- utils/datasets.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/utils/datasets.py b/utils/datasets.py index cb6ad29e4652..4a4b187da345 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -661,6 +661,7 @@ def load_mosaic(self, index): s = self.img_size yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices + random.shuffle(indices) for i, index in enumerate(indices): # Load image img, _, (h, w) = load_image(self, index) @@ -717,6 +718,7 @@ def load_mosaic9(self, index): labels9, segments9 = [], [] s = self.img_size indices = [index] + random.choices(self.indices, k=8) # 8 additional image indices + random.shuffle(indices) for i, index in enumerate(indices): # Load image img, _, (h, w) = load_image(self, index) From b74dd4ba4f295eaacc8cc3ac75270ba40a2d9ef6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 15 Sep 2021 11:33:46 +0200 Subject: [PATCH 0550/1976] Add `--int8` argument (#4799) * Add `--int8` argument * parents[0] bug fix * Fix order --- export.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/export.py b/export.py index 8d6805893d1e..ea7f1ebd0b1f 100644 --- a/export.py +++ b/export.py @@ -33,7 +33,8 @@ FILE = Path(__file__).resolve() ROOT = FILE.parents[0] # yolov5/ dir -sys.path.append(ROOT.as_posix()) # add yolov5/ to path +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH from models.common import Conv from models.experimental import attempt_load @@ -174,7 +175,7 @@ def export_pb(keras_model, im, file, prefix=colorstr('TensorFlow GraphDef:')): print(f'\n{prefix} export failure: {e}') -def export_tflite(keras_model, im, file, tfl_int8, data, ncalib, prefix=colorstr('TensorFlow Lite:')): +def export_tflite(keras_model, im, file, int8, data, ncalib, prefix=colorstr('TensorFlow Lite:')): # YOLOv5 TensorFlow Lite export try: import tensorflow as tf @@ -187,7 +188,7 @@ def export_tflite(keras_model, im, file, tfl_int8, data, ncalib, prefix=colorstr converter = tf.lite.TFLiteConverter.from_keras_model(keras_model) converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS] converter.optimizations = [tf.lite.Optimize.DEFAULT] - if tfl_int8: + if int8: dataset = LoadImages(check_dataset(data)['train'], img_size=imgsz, auto=False) # representative data converter.representative_dataset = lambda: representative_dataset_gen(dataset, ncalib) converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] @@ -234,7 +235,8 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' inplace=False, # set YOLOv5 Detect() inplace=True train=False, # model.train() mode optimize=False, # TorchScript: optimize for mobile - dynamic=False, # ONNX: dynamic axes + int8=False, # CoreML/TF INT8 quantization + dynamic=False, # ONNX/TF: dynamic axes simplify=False, # ONNX: simplify model opset=12, # ONNX: opset version ): @@ -288,7 +290,7 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' if pb or tfjs: # pb prerequisite to tfjs export_pb(model, im, file) if tflite: - export_tflite(model, im, file, tfl_int8=False, data=data, ncalib=100) + export_tflite(model, im, file, int8=int8, data=data, ncalib=100) if tfjs: export_tfjs(model, im, file) @@ -309,6 +311,7 @@ def parse_opt(): parser.add_argument('--inplace', action='store_true', help='set YOLOv5 Detect() inplace=True') parser.add_argument('--train', action='store_true', help='model.train() mode') 
parser.add_argument('--optimize', action='store_true', help='TorchScript: optimize for mobile')
+    parser.add_argument('--int8', action='store_true', help='CoreML/TF INT8 quantization')
     parser.add_argument('--dynamic', action='store_true', help='ONNX/TF: dynamic axes')
     parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model')
     parser.add_argument('--opset', type=int, default=13, help='ONNX: opset version')

From 621b6d5ba80707ca98242dd7c71d738e5594b41e Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Wed, 15 Sep 2021 13:13:33 +0200
Subject: [PATCH 0551/1976] Evolution `--resume` fix (#4802)

Also disable `/weights` dir creation when evolving, since no weights are saved during evolution and an empty weights folder would wrongly suggest to users that weights exist.

---
 train.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/train.py b/train.py
index d243a9cb010f..1d0c2c608878 100644
--- a/train.py
+++ b/train.py
@@ -64,7 +64,7 @@ def train(hyp,  # path/to/hyp.yaml or hyp dictionary

     # Directories
     w = save_dir / 'weights'  # weights dir
-    w.mkdir(parents=True, exist_ok=True)  # make dir
+    (w.parent if evolve else w).mkdir(parents=True, exist_ok=True)  # make dir
     last, best = w / 'last.pt', w / 'best.pt'

     # Hyperparameters
@@ -489,7 +489,7 @@ def main(opt, callbacks=Callbacks()):
         assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
         if opt.evolve:
             opt.project = 'runs/evolve'
-            opt.exist_ok = opt.resume
+            opt.exist_ok, opt.resume = opt.resume, False  # pass resume to exist_ok and disable resume
         opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok))

     # DDP mode

From 0dc725e3dc36283ef657088bbc9e05461311c921 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Thu, 16 Sep 2021 09:47:34 +0200
Subject: [PATCH 0552/1976] Refactor `forward()` method profiling (#4816)

---
 models/yolo.py | 41 +++++++++++++++++++----------------------
 1 file changed, 19 insertions(+), 22 deletions(-)

diff --git a/models/yolo.py b/models/yolo.py
index 9eddf4a08e49..0a27b24dede7 100644
--- a/models/yolo.py
+++ b/models/yolo.py
@@ -98,7 +98,6 @@ def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None):  # model, i
         self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch])  # model, savelist
         self.names = [str(i) for i in range(self.yaml['nc'])]  # default names
         self.inplace = self.yaml.get('inplace', True)
-        # LOGGER.info([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))])

         # Build strides, anchors
         m = self.model[-1]  # Detect()
@@ -110,7 +109,6 @@ def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None):  # model, i
             check_anchor_order(m)
             self.stride = m.stride
             self._initialize_biases()  # only run once
-            # LOGGER.info('Strides: %s' % m.stride.tolist())

         # Init weights, biases
         initialize_weights(self)
@@ -119,47 +117,33 @@ def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None):  # model, i

     def forward(self, x, augment=False, profile=False, visualize=False):
         if augment:
-            return self.forward_augment(x)  # augmented inference, None
-        return self.forward_once(x, profile, visualize)  # single-scale inference, train
+            return self._forward_augment(x)  # augmented inference, None
+        return self._forward_once(x, profile, visualize)  # single-scale inference, train

-    def forward_augment(self, x):
+    def _forward_augment(self, x):
         img_size = x.shape[-2:]  # height, width
         s = [1, 0.83, 0.67]  # scales
         f = [None, 3, None]  # flips (2-ud, 3-lr)
         y = []  # outputs
         for si, fi in zip(s, f):
             xi = scale_img(x.flip(fi) if fi else x, si,
gs=int(self.stride.max())) - yi = self.forward_once(xi)[0] # forward + yi = self._forward_once(xi)[0] # forward # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1]) # save yi = self._descale_pred(yi, fi, si, img_size) y.append(yi) return torch.cat(y, 1), None # augmented inference, train - def forward_once(self, x, profile=False, visualize=False): + def _forward_once(self, x, profile=False, visualize=False): y, dt = [], [] # outputs for m in self.model: if m.f != -1: # if not from previous layer x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers - if profile: - c = isinstance(m, Detect) # copy input as inplace fix - o = thop.profile(m, inputs=(x.copy() if c else x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPs - t = time_sync() - for _ in range(10): - m(x.copy() if c else x) - dt.append((time_sync() - t) * 100) - if m == self.model[0]: - LOGGER.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s} {'module'}") - LOGGER.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f} {m.type}') - + self._profile_one_layer(m, x, dt) x = m(x) # run y.append(x if m.i in self.save else None) # save output - if visualize: feature_visualization(x, m.type, m.i, save_dir=visualize) - - if profile: - LOGGER.info('%.1fms total' % sum(dt)) return x def _descale_pred(self, p, flips, scale, img_size): @@ -179,6 +163,19 @@ def _descale_pred(self, p, flips, scale, img_size): p = torch.cat((x, y, wh, p[..., 4:]), -1) return p + def _profile_one_layer(self, m, x, dt): + c = isinstance(m, Detect) # is final layer, copy input as inplace fix + o = thop.profile(m, inputs=(x.copy() if c else x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPs + t = time_sync() + for _ in range(10): + m(x.copy() if c else x) + dt.append((time_sync() - t) * 100) + if m == self.model[0]: + LOGGER.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s} {'module'}") + LOGGER.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f} {m.type}') + if c: + LOGGER.info(f"{sum(dt):10.2f} {'-':>10s} {'-':>10s} Total") + def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency # https://arxiv.org/abs/1708.02002 section 3.3 # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1. 
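The `_profile_one_layer()` refactor above gathers per-layer GFLOPs via thop and wall-clock time averaged over 10 repeated forward passes. A rough standalone sketch of the same pattern (assuming the `thop` package is installed; `profile_layer` is a hypothetical helper for any `nn.Module`, and plain `time.time()` stands in for the CUDA-synchronized `time_sync()` used in the patch):

    import time

    import torch
    import torch.nn as nn
    import thop

    def profile_layer(m, x, runs=10):
        # thop reports multiply-accumulates, so double them for FLOPs
        flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2  # GFLOPs
        t = time.time()
        for _ in range(runs):
            m(x)
        dt = (time.time() - t) * 1000 / runs  # ms per forward pass
        n_params = sum(p.numel() for p in m.parameters())
        print(f'{dt:10.2f} ms {flops:10.2f} GFLOPs {n_params:10.0f} params  {type(m).__name__}')

    profile_layer(nn.Conv2d(3, 64, 3, padding=1), torch.zeros(1, 3, 64, 64))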
From 43b2817f6e615497a10a0921a8df8b0e3d286210 Mon Sep 17 00:00:00 2001 From: Kalen Michael Date: Thu, 16 Sep 2021 13:33:54 +0200 Subject: [PATCH 0553/1976] Feature/fix export on url (#4823) * added callbacks * added back callback to main * added save_dir to callback output * merged in upstream * removed ghost code * added url check * Add url2file() * Update file-only Co-authored-by: Glenn Jocher --- export.py | 4 ++-- utils/general.py | 7 +++++++ 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/export.py b/export.py index ea7f1ebd0b1f..4ec3c3e0c711 100644 --- a/export.py +++ b/export.py @@ -41,7 +41,7 @@ from models.yolo import Detect from utils.activations import SiLU from utils.datasets import LoadImages -from utils.general import colorstr, check_dataset, check_img_size, check_requirements, file_size, set_logging +from utils.general import colorstr, check_dataset, check_img_size, check_requirements, file_size, set_logging, url2file from utils.torch_utils import select_device @@ -244,7 +244,7 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' include = [x.lower() for x in include] tf_exports = list(x in include for x in ('saved_model', 'pb', 'tflite', 'tfjs')) # TensorFlow exports imgsz *= 2 if len(imgsz) == 1 else 1 # expand - file = Path(weights) + file = Path(url2file(weights) if str(weights).startswith(('http:/', 'https:/')) else weights) # Load PyTorch model device = select_device(device) diff --git a/utils/general.py b/utils/general.py index 7a80b2ea81bc..dc9a10fe8617 100755 --- a/utils/general.py +++ b/utils/general.py @@ -360,6 +360,13 @@ def check_dataset(data, autodownload=True): return data # dictionary +def url2file(url): + # Convert URL to filename, i.e. https://url.com/file.txt?auth -> file.txt + url = str(Path(url)).replace(':/', '://') # Pathlib turns :// -> :/ + file = Path(urllib.parse.unquote(url)).name.split('?')[0] # '%2F' to '/', split https://url.com/file.txt?auth + return file + + def download(url, dir='.', unzip=True, delete=True, curl=False, threads=1): # Multi-threaded file download and unzip function, used in data.yaml for autodownload def download_one(url, dir): From 6b44ecd53dd299ccaa54cff4194e0f4e323bbc40 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 16 Sep 2021 14:38:35 +0200 Subject: [PATCH 0554/1976] Fix 'PyTorch starting from' for URL weights (#4828) Follows #4823 --- export.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/export.py b/export.py index 4ec3c3e0c711..a5139c0a965e 100644 --- a/export.py +++ b/export.py @@ -272,7 +272,7 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' for _ in range(2): y = model(im) # dry runs - print(f"\n{colorstr('PyTorch:')} starting from {weights} ({file_size(weights):.1f} MB)") + print(f"\n{colorstr('PyTorch:')} starting from {file} ({file_size(file):.1f} MB)") # Exports if 'torchscript' in include: From 3beb871ba4558c9e720388e6632798c4eb267d4f Mon Sep 17 00:00:00 2001 From: Jiacong Fang Date: Thu, 16 Sep 2021 21:27:22 +0800 Subject: [PATCH 0555/1976] Multiple TF export improvements (#4824) * Add fused conv support * Set all saved_model values to non trainable * Fix TFLite fp16 model export * Fix int8 TFLite conversion --- export.py | 7 +++++-- models/tf.py | 5 +++-- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/export.py b/export.py index a5139c0a965e..dd7eefc51702 100644 --- a/export.py +++ b/export.py @@ -145,6 +145,7 @@ def export_saved_model(model, im, file, dynamic, inputs = keras.Input(shape=(*imgsz, 3), batch_size=None if 
dynamic else batch_size) outputs = tf_model.predict(inputs, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres) keras_model = keras.Model(inputs=inputs, outputs=outputs) + keras_model.trainable = False keras_model.summary() keras_model.save(f, save_format='tf') @@ -183,15 +184,17 @@ def export_tflite(keras_model, im, file, int8, data, ncalib, prefix=colorstr('Te print(f'\n{prefix} starting export with tensorflow {tf.__version__}...') batch_size, ch, *imgsz = list(im.shape) # BCHW - f = file.with_suffix('.tflite') + f = str(file).replace('.pt', '-fp16.tflite') converter = tf.lite.TFLiteConverter.from_keras_model(keras_model) converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS] + converter.target_spec.supported_types = [tf.float16] converter.optimizations = [tf.lite.Optimize.DEFAULT] if int8: dataset = LoadImages(check_dataset(data)['train'], img_size=imgsz, auto=False) # representative data converter.representative_dataset = lambda: representative_dataset_gen(dataset, ncalib) converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] + converter.target_spec.supported_types = [] converter.inference_input_type = tf.uint8 # or tf.int8 converter.inference_output_type = tf.uint8 # or tf.int8 converter.experimental_new_quantizer = False @@ -249,7 +252,7 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' # Load PyTorch model device = select_device(device) assert not (device.type == 'cpu' and half), '--half only compatible with GPU export, i.e. use --device 0' - model = attempt_load(weights, map_location=device, inplace=True, fuse=not any(tf_exports)) # load FP32 model + model = attempt_load(weights, map_location=device, inplace=True, fuse=True) # load FP32 model nc, names = model.nc, model.names # number of classes, class names # Input diff --git a/models/tf.py b/models/tf.py index 621236240f10..5d7153f246eb 100644 --- a/models/tf.py +++ b/models/tf.py @@ -70,8 +70,9 @@ def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None): # see https://stackoverflow.com/questions/52975843/comparing-conv2d-with-padding-between-tensorflow-and-pytorch conv = keras.layers.Conv2D( - c2, k, s, 'SAME' if s == 1 else 'VALID', use_bias=False, - kernel_initializer=keras.initializers.Constant(w.conv.weight.permute(2, 3, 1, 0).numpy())) + c2, k, s, 'SAME' if s == 1 else 'VALID', use_bias=False if hasattr(w, 'bn') else True, + kernel_initializer=keras.initializers.Constant(w.conv.weight.permute(2, 3, 1, 0).numpy()), + bias_initializer='zeros' if hasattr(w, 'bn') else keras.initializers.Constant(w.conv.bias.numpy())) self.conv = conv if s == 1 else keras.Sequential([TFPad(autopad(k, p)), conv]) self.bn = TFBN(w.bn) if hasattr(w, 'bn') else tf.identity From 27a4736e968158063a87024be74534a560fc8e84 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 16 Sep 2021 17:55:58 +0200 Subject: [PATCH 0556/1976] Fix val.py study plot (#4831) * Fix val.py study plot * call plot_val_study * Rename plot_study_txt to plot_val_study --- utils/plots.py | 11 +++++++---- val.py | 4 ++-- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/utils/plots.py b/utils/plots.py index 1ed88ea7c832..9570fdf27a63 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -247,15 +247,16 @@ def plot_targets_txt(): # from utils.plots import *; plot_targets_txt() plt.savefig('targets.jpg', dpi=200) -def plot_study_txt(path='', x=None): # from utils.plots import *; plot_study_txt() - # Plot study.txt generated by val.py +def plot_val_study(file='', dir='', x=None): # from 
utils.plots import *; plot_val_study() + # Plot file=study.txt generated by val.py (or plot all study*.txt in dir) + save_dir = Path(file).parent if file else Path(dir) plot2 = False # plot additional results if plot2: ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)[1].ravel() fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True) # for f in [Path(path) / f'study_coco_{x}.txt' for x in ['yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]: - for f in sorted(Path(path).glob('study*.txt')): + for f in sorted(save_dir.glob('study*.txt')): y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T x = np.arange(y.shape[1]) if x is None else np.array(x) if plot2: @@ -278,7 +279,9 @@ def plot_study_txt(path='', x=None): # from utils.plots import *; plot_study_tx ax2.set_xlabel('GPU Speed (ms/img)') ax2.set_ylabel('COCO AP val') ax2.legend(loc='lower right') - plt.savefig(str(Path(path).name) + '.png', dpi=300) + f = save_dir / 'study.png' + print(f'Saving {f}...') + plt.savefig(f, dpi=300) def plot_labels(labels, names=(), save_dir=Path('')): diff --git a/val.py b/val.py index 00eb92bb096a..16dd76d680f7 100644 --- a/val.py +++ b/val.py @@ -26,7 +26,7 @@ check_suffix, check_yaml, box_iou, non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, set_logging, \ increment_path, colorstr from utils.metrics import ap_per_class, ConfusionMatrix -from utils.plots import plot_images, output_to_target, plot_study_txt +from utils.plots import output_to_target, plot_images, plot_val_study from utils.torch_utils import select_device, time_sync from utils.callbacks import Callbacks @@ -348,7 +348,7 @@ def main(opt): y.append(r + t) # results and times np.savetxt(f, y, fmt='%10.4g') # save os.system('zip -r study.zip study_*.txt') - plot_study_txt(x=x) # plot + plot_val_study(x=x) # plot if __name__ == "__main__": From 850f98f5085a7c3425ab91412fbd136b407ab2d0 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 17 Sep 2021 11:39:00 +0200 Subject: [PATCH 0557/1976] Created using Colaboratory --- tutorial.ipynb | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 38e8fd4389ea..6d6a1e77dc30 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -957,7 +957,6 @@ "# Unit tests\n", "%%shell\n", "export PYTHONPATH=\"$PWD\" # to run *.py. 
files in subdirectories\n", - "\n", "rm -rf runs # remove runs/\n", "for m in yolov5s; do # models\n", " python train.py --weights $m.pt --epochs 3 --img 320 --device 0 # train pretrained\n", @@ -968,9 +967,10 @@ " python val.py --weights $m.pt --device $d # val official\n", " python val.py --weights runs/train/exp/weights/best.pt --device $d # val custom\n", " done\n", - " python hubconf.py # hub\n", - " python models/yolo.py --cfg $m.yaml # inspect\n", - " python export.py --weights $m.pt --img 640 --batch 1 # export\n", + "python hubconf.py # hub\n", + "python models/yolo.py --cfg $m.yaml # build PyTorch model\n", + "python models/tf.py --weights $m.pt # build TensorFlow model\n", + "python export.py --img 128 --batch 1 --weights $m.pt --include torchscript onnx # export\n", "done" ], "execution_count": null, From e83792e65ce8b2ac51c29733e111302680dbceb7 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 17 Sep 2021 11:40:47 +0200 Subject: [PATCH 0558/1976] Created using Colaboratory --- tutorial.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 6d6a1e77dc30..cfa96914c713 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -954,7 +954,7 @@ "id": "FGH0ZjkGjejy" }, "source": [ - "# Unit tests\n", + "# CI Checks\n", "%%shell\n", "export PYTHONPATH=\"$PWD\" # to run *.py. files in subdirectories\n", "rm -rf runs # remove runs/\n", From 3a822a22ceedaca367f7a790c7d1091c0b426758 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 17 Sep 2021 13:02:37 +0200 Subject: [PATCH 0559/1976] `PIL.ImageDraw.text(anchor=...)` removal, reduce to `>=7.1.2` (#4842) * Unpin Pillow * Update requirements.txt * Update plots.py --- requirements.txt | 2 +- utils/plots.py | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/requirements.txt b/requirements.txt index b84b353f75f3..42d5dfc49354 100755 --- a/requirements.txt +++ b/requirements.txt @@ -4,7 +4,7 @@ matplotlib>=3.2.2 numpy>=1.18.5 opencv-python>=4.1.2 -Pillow>=8.0.0 +Pillow>=7.1.2 PyYAML>=5.3.1 scipy>=1.4.1 torch>=1.7.0 diff --git a/utils/plots.py b/utils/plots.py index 9570fdf27a63..5ff72cb144e2 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -3,11 +3,11 @@ Plotting utils """ -import math from copy import copy from pathlib import Path import cv2 +import math import matplotlib import matplotlib.pyplot as plt import numpy as np @@ -80,9 +80,10 @@ def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 2 if self.pil or not is_ascii(label): self.draw.rectangle(box, width=self.lw, outline=color) # box if label: - w = self.font.getsize(label)[0] # text width + w, h = self.font.getsize(label) # text width self.draw.rectangle([box[0], box[1] - self.fh, box[0] + w + 1, box[1] + 1], fill=color) - self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls') + # self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls') # for PIL>8.0 + self.draw.text((box[0], box[1] - h), label, fill=txt_color, font=self.font) else: # cv2 c1, c2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3])) cv2.rectangle(self.im, c1, c2, color, thickness=self.lw, lineType=cv2.LINE_AA) From fe39562a5f4009c59dde343502b4dd20a6aff823 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 17 Sep 2021 17:35:45 +0200 Subject: [PATCH 0560/1976] Sorted datasets update to `cache_labels()` (#4845) PR should produce datasets sorted alphabetically by filename. Cache version incremented to 0.5. 
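The load-or-rebuild pattern that this patch and the follow-up #4846 below converge on, as a minimal sketch (illustrative helper names, not the exact utils/datasets.py code):

# Versioned label-cache pattern: bump CACHE_VERSION whenever the cache layout
# changes, and any stale cache silently rebuilds itself exactly once.
import numpy as np

CACHE_VERSION = 0.5  # single shared definition, as consolidated in #4846 below

def load_or_build_cache(path, expected_hash, build_fn):
    try:
        cache = np.load(path, allow_pickle=True).item()  # load cached dict
        assert cache['version'] == CACHE_VERSION  # same cache layout
        assert cache['hash'] == expected_hash  # same image/label files
    except (FileNotFoundError, AssertionError, KeyError):  # real code uses a bare except
        cache = build_fn(path)  # rebuild from scratch
    return cache
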
Note: will force a one-time re-caching of existing datasets on first-use. --- utils/datasets.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index 4a4b187da345..adcdafe69df7 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -487,7 +487,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels..." with Pool(NUM_THREADS) as pool: - pbar = tqdm(pool.imap_unordered(verify_image_label, zip(self.img_files, self.label_files, repeat(prefix))), + pbar = tqdm(pool.imap(verify_image_label, zip(self.img_files, self.label_files, repeat(prefix))), desc=desc, total=len(self.img_files)) for im_file, l, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar: nm += nm_f @@ -508,7 +508,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): x['hash'] = get_hash(self.label_files + self.img_files) x['results'] = nf, nm, ne, nc, len(self.img_files) x['msgs'] = msgs # warnings - x['version'] = 0.4 # cache version + x['version'] = 0.5 # cache version try: np.save(path, x) # save cache for next time path.with_suffix('.cache.npy').rename(path) # remove .npy suffix From 4fdaec0449817000803d6ab7e7512b0e58c4d7d4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 17 Sep 2021 18:01:52 +0200 Subject: [PATCH 0561/1976] Single `cache_version` definition (#4846) Defines dataset labels *.cache version in a single place, fixing a bug introduced in #4845. --- utils/datasets.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index adcdafe69df7..d253cb177b82 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -365,6 +365,8 @@ def img2label_paths(img_paths): class LoadImagesAndLabels(Dataset): # for training/testing + cache_version = 0.5 # dataset labels *.cache version + def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False, cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''): self.img_size = img_size @@ -404,7 +406,8 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') try: cache, exists = np.load(cache_path, allow_pickle=True).item(), True # load dict - assert cache['version'] == 0.4 and cache['hash'] == get_hash(self.label_files + self.img_files) + assert cache['version'] == self.cache_version # same version + assert cache['hash'] == get_hash(self.label_files + self.img_files) # same hash except: cache, exists = self.cache_labels(cache_path, prefix), False # cache @@ -508,7 +511,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): x['hash'] = get_hash(self.label_files + self.img_files) x['results'] = nf, nm, ne, nc, len(self.img_files) x['msgs'] = msgs # warnings - x['version'] = 0.5 # cache version + x['version'] = self.cache_version # cache version try: np.save(path, x) # save cache for next time path.with_suffix('.cache.npy').rename(path) # remove .npy suffix From 302a1b0bb03b3dbae0cb41f43b6c6c998070ff49 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Fri, 17 Sep 2021 22:29:34 +0530 Subject: [PATCH 0562/1976] W&B: Enable login timeout (#4843) * evolve fix * Enable login timeout * fix pkg --- Dockerfile | 2 +- utils/loggers/wandb/wandb_utils.py | 3 +++ 2 files changed, 4 insertions(+), 1 
deletion(-) diff --git a/Dockerfile b/Dockerfile index 858b22bc6383..e9cd304376ed 100644 --- a/Dockerfile +++ b/Dockerfile @@ -10,7 +10,7 @@ RUN apt update && apt install -y zip htop screen libgl1-mesa-glx COPY requirements.txt . RUN python -m pip install --upgrade pip RUN pip uninstall -y nvidia-tensorboard nvidia-tensorboard-plugin-dlprof -RUN pip install --no-cache -r requirements.txt coremltools onnx gsutil notebook +RUN pip install --no-cache -r requirements.txt coremltools onnx gsutil notebook wandb>=0.12.2 RUN pip install --no-cache -U torch torchvision numpy # RUN pip install --no-cache torch==1.9.0+cu111 torchvision==0.10.0+cu111 -f https://download.pytorch.org/whl/torch_stable.html diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index 504a518f75ea..e7b0d82213f0 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -5,6 +5,7 @@ import sys from contextlib import contextmanager from pathlib import Path +import pkg_resources as pkg import yaml from tqdm import tqdm @@ -20,6 +21,8 @@ import wandb assert hasattr(wandb, '__version__') # verify package import not local dir + if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.2'): + wandb.login(timeout=30) except (ImportError, AssertionError): wandb = None From 84bfa892365cd9d5938ea78494727783482dcad4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 18 Sep 2021 13:28:42 +0200 Subject: [PATCH 0563/1976] Consolidate `init_seeds()` (#4849) --- utils/general.py | 8 +++++--- utils/torch_utils.py | 10 ---------- 2 files changed, 5 insertions(+), 13 deletions(-) diff --git a/utils/general.py b/utils/general.py index dc9a10fe8617..561602323ab2 100755 --- a/utils/general.py +++ b/utils/general.py @@ -29,7 +29,6 @@ from utils.downloads import gsutil_getsize from utils.metrics import box_iou, fitness -from utils.torch_utils import init_torch_seeds # Settings torch.set_printoptions(linewidth=320, precision=5, profile='long') @@ -91,10 +90,13 @@ def set_logging(rank=-1, verbose=True): def init_seeds(seed=0): - # Initialize random number generator (RNG) seeds + # Initialize random number generator (RNG) seeds https://pytorch.org/docs/stable/notes/randomness.html + # cudnn seed 0 settings are slower and more reproducible, else faster and less reproducible + import torch.backends.cudnn as cudnn random.seed(seed) np.random.seed(seed) - init_torch_seeds(seed) + torch.manual_seed(seed) + cudnn.benchmark, cudnn.deterministic = (False, True) if seed == 0 else (True, False) def get_latest_run(search_dir='.'): diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 04e1446bb908..352ecf572c9f 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -15,7 +15,6 @@ from pathlib import Path import torch -import torch.backends.cudnn as cudnn import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F @@ -41,15 +40,6 @@ def torch_distributed_zero_first(local_rank: int): dist.barrier(device_ids=[0]) -def init_torch_seeds(seed=0): - # Speed-reproducibility tradeoff https://pytorch.org/docs/stable/notes/randomness.html - torch.manual_seed(seed) - if seed == 0: # slower, more reproducible - cudnn.benchmark, cudnn.deterministic = False, True - else: # faster, less reproducible - cudnn.benchmark, cudnn.deterministic = True, False - - def date_modified(path=__file__): # return human-readable file modification date, i.e. 
'2021-3-26' t = datetime.datetime.fromtimestamp(Path(path).stat().st_mtime) From 3732f9ac8a73eeae6ca80795c0ce435a56a5a18d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 18 Sep 2021 14:16:19 +0200 Subject: [PATCH 0564/1976] Refactor argparser printing to `print_args()` (#4850) * Refactor argparser printing to `print_args()` * Cleanup --- detect.py | 12 ++++++------ export.py | 5 +++-- models/tf.py | 6 +++--- train.py | 7 +++---- utils/general.py | 5 +++++ val.py | 6 +++--- 6 files changed, 23 insertions(+), 18 deletions(-) diff --git a/detect.py b/detect.py index ef7458d52db3..57bd6eea9ec4 100644 --- a/detect.py +++ b/detect.py @@ -19,12 +19,12 @@ sys.path.append(FILE.parents[0].as_posix()) # add yolov5/ to path from models.experimental import attempt_load -from utils.datasets import LoadStreams, LoadImages -from utils.general import check_img_size, check_imshow, check_requirements, check_suffix, colorstr, is_ascii, \ - non_max_suppression, apply_classifier, scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path, \ - save_one_box +from utils.datasets import LoadImages, LoadStreams +from utils.general import apply_classifier, check_img_size, check_imshow, check_requirements, check_suffix, colorstr, \ + increment_path, is_ascii, non_max_suppression, print_args, save_one_box, scale_coords, set_logging, \ + strip_optimizer, xyxy2xywh from utils.plots import Annotator, colors -from utils.torch_utils import select_device, load_classifier, time_sync +from utils.torch_utils import load_classifier, select_device, time_sync @torch.no_grad() @@ -279,11 +279,11 @@ def parse_opt(): parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') opt = parser.parse_args() opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand + print_args(FILE.stem, opt) return opt def main(opt): - print(colorstr('detect: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items())) check_requirements(exclude=('tensorboard', 'thop')) run(**vars(opt)) diff --git a/export.py b/export.py index dd7eefc51702..9d7576bf383a 100644 --- a/export.py +++ b/export.py @@ -41,7 +41,8 @@ from models.yolo import Detect from utils.activations import SiLU from utils.datasets import LoadImages -from utils.general import colorstr, check_dataset, check_img_size, check_requirements, file_size, set_logging, url2file +from utils.general import colorstr, check_dataset, check_img_size, check_requirements, file_size, print_args, \ + set_logging, url2file from utils.torch_utils import select_device @@ -322,12 +323,12 @@ def parse_opt(): default=['torchscript', 'onnx'], help='available formats are (torchscript, onnx, coreml, saved_model, pb, tflite, tfjs)') opt = parser.parse_args() + print_args(FILE.stem, opt) return opt def main(opt): set_logging() - print(colorstr('export: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items())) run(**vars(opt)) diff --git a/models/tf.py b/models/tf.py index 5d7153f246eb..5b918ee3c34a 100644 --- a/models/tf.py +++ b/models/tf.py @@ -27,9 +27,9 @@ from tensorflow import keras from models.common import Conv, Bottleneck, SPP, DWConv, Focus, BottleneckCSP, Concat, autopad, C3 -from models.experimental import MixConv2d, CrossConv, attempt_load +from models.experimental import CrossConv, MixConv2d, attempt_load from models.yolo import Detect -from utils.general import colorstr, make_divisible, set_logging +from utils.general import make_divisible, print_args, set_logging from utils.activations import SiLU LOGGER = logging.getLogger(__name__) @@ -434,12 +434,12 @@ def 
parse_opt(): parser.add_argument('--dynamic', action='store_true', help='dynamic batch size') opt = parser.parse_args() opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand + print_args(FILE.stem, opt) return opt def main(opt): set_logging() - print(colorstr('tf.py: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items())) run(**vars(opt)) diff --git a/train.py b/train.py index 1d0c2c608878..281a3c0bad7a 100644 --- a/train.py +++ b/train.py @@ -36,7 +36,7 @@ from utils.datasets import create_dataloader from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \ strip_optimizer, get_latest_run, check_dataset, check_git_status, check_img_size, check_requirements, \ - check_file, check_yaml, check_suffix, print_mutation, set_logging, one_cycle, colorstr, methods + check_file, check_yaml, check_suffix, print_args, print_mutation, set_logging, one_cycle, colorstr, methods from utils.downloads import attempt_download from utils.loss import ComputeLoss from utils.plots import plot_labels, plot_evolve @@ -470,9 +470,8 @@ def parse_opt(known=False): def main(opt, callbacks=Callbacks()): # Checks - set_logging(RANK) if RANK in [-1, 0]: - print(colorstr('train: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items())) + print_args(FILE.stem, opt) check_git_status() check_requirements(requirements=FILE.parent / 'requirements.txt', exclude=['thop']) @@ -508,7 +507,7 @@ def main(opt, callbacks=Callbacks()): if not opt.evolve: train(opt.hyp, opt, device, callbacks) if WORLD_SIZE > 1 and RANK == 0: - _ = [print('Destroying process group... ', end=''), dist.destroy_process_group(), print('Done.')] + _ = LOGGER.info('Destroying process group... ', end=''), dist.destroy_process_group(), LOGGER.info('Done.') # Evolve hyperparameters (optional) else: diff --git a/utils/general.py b/utils/general.py index 561602323ab2..d4d8e2064d08 100755 --- a/utils/general.py +++ b/utils/general.py @@ -89,6 +89,11 @@ def set_logging(rank=-1, verbose=True): level=logging.INFO if (verbose and rank in [-1, 0]) else logging.WARN) +def print_args(name, opt): + # Print argparser arguments + print(colorstr(f'{name}: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items())) + + def init_seeds(seed=0): # Initialize random number generator (RNG) seeds https://pytorch.org/docs/stable/notes/randomness.html # cudnn seed 0 settings are slower and more reproducible, else faster and less reproducible diff --git a/val.py b/val.py index 16dd76d680f7..3574fb085c07 100644 --- a/val.py +++ b/val.py @@ -24,7 +24,7 @@ from utils.datasets import create_dataloader from utils.general import coco80_to_coco91_class, check_dataset, check_img_size, check_requirements, \ check_suffix, check_yaml, box_iou, non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, set_logging, \ - increment_path, colorstr + increment_path, colorstr, print_args from utils.metrics import ap_per_class, ConfusionMatrix from utils.plots import output_to_target, plot_images, plot_val_study from utils.torch_utils import select_device, time_sync @@ -295,7 +295,7 @@ def run(data, def parse_opt(): - parser = argparse.ArgumentParser(prog='val.py') + parser = argparse.ArgumentParser() parser.add_argument('--data', type=str, default='data/coco128.yaml', help='dataset.yaml path') parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)') parser.add_argument('--batch-size', type=int, default=32, help='batch size') @@ -319,12 +319,12 @@ def parse_opt(): opt.save_json |= opt.data.endswith('coco.yaml') 
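# Note: print_args(FILE.stem, opt), added a few lines below, prints one
# colorized summary line built from vars(opt); for a default `python val.py`
# run it looks like
#   val: data=data/coco128.yaml, weights=yolov5s.pt, batch_size=32, ...
# so every argparse flag, present or future, is logged automatically.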
opt.save_txt |= opt.save_hybrid opt.data = check_yaml(opt.data) # check YAML + print_args(FILE.stem, opt) return opt def main(opt): set_logging() - print(colorstr('val: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items())) check_requirements(requirements=FILE.parent / 'requirements.txt', exclude=('tensorboard', 'thop')) if opt.task in ('train', 'val', 'test'): # run normally From 4d1a2ac87eb2c9c37978584f4f93af2af0260738 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 18 Sep 2021 15:02:08 +0200 Subject: [PATCH 0565/1976] Update `sys.path.append(str(ROOT))` (#4852) * Update `sys.path.append(str(ROOT))` * Cleanup --- detect.py | 6 ++++-- export.py | 2 +- models/tf.py | 5 +++-- models/yolo.py | 4 +++- train.py | 7 +++++-- utils/__init__.py | 19 ------------------- utils/aws/resume.py | 5 ++++- utils/loggers/wandb/sweep.py | 4 +++- utils/loggers/wandb/wandb_utils.py | 4 +++- val.py | 6 ++++-- 10 files changed, 30 insertions(+), 32 deletions(-) diff --git a/detect.py b/detect.py index 57bd6eea9ec4..92fcd064d53d 100644 --- a/detect.py +++ b/detect.py @@ -16,7 +16,9 @@ import torch.backends.cudnn as cudnn FILE = Path(__file__).resolve() -sys.path.append(FILE.parents[0].as_posix()) # add yolov5/ to path +ROOT = FILE.parents[0] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH from models.experimental import attempt_load from utils.datasets import LoadImages, LoadStreams @@ -284,7 +286,7 @@ def parse_opt(): def main(opt): - check_requirements(exclude=('tensorboard', 'thop')) + check_requirements(requirements=ROOT / 'requirements.txt', exclude=('tensorboard', 'thop')) run(**vars(opt)) diff --git a/export.py b/export.py index 9d7576bf383a..e876af234592 100644 --- a/export.py +++ b/export.py @@ -32,7 +32,7 @@ from torch.utils.mobile_optimizer import optimize_for_mobile FILE = Path(__file__).resolve() -ROOT = FILE.parents[0] # yolov5/ dir +ROOT = FILE.parents[0] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH diff --git a/models/tf.py b/models/tf.py index 5b918ee3c34a..3265b7b75f55 100644 --- a/models/tf.py +++ b/models/tf.py @@ -17,8 +17,9 @@ from pathlib import Path FILE = Path(__file__).resolve() -ROOT = FILE.parents[1] # yolov5/ dir -sys.path.append(ROOT.as_posix()) # add yolov5/ to path +ROOT = FILE.parents[1] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH import numpy as np import tensorflow as tf diff --git a/models/yolo.py b/models/yolo.py index 0a27b24dede7..a7590c57816c 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -12,7 +12,9 @@ from pathlib import Path FILE = Path(__file__).resolve() -sys.path.append(FILE.parents[1].as_posix()) # add yolov5/ to path +ROOT = FILE.parents[1] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH from models.common import * from models.experimental import * diff --git a/train.py b/train.py index 281a3c0bad7a..89c0c507b8bf 100644 --- a/train.py +++ b/train.py @@ -27,7 +27,9 @@ from tqdm import tqdm FILE = Path(__file__).resolve() -sys.path.append(FILE.parents[0].as_posix()) # add yolov5/ to path +ROOT = FILE.parents[0] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH import val # for end-of-epoch mAP from models.experimental import attempt_load @@ -470,10 +472,11 @@ def parse_opt(known=False): def main(opt, callbacks=Callbacks()): # Checks + set_logging(RANK) if RANK in [-1, 0]: 
print_args(FILE.stem, opt) check_git_status() - check_requirements(requirements=FILE.parent / 'requirements.txt', exclude=['thop']) + check_requirements(requirements=ROOT / 'requirements.txt', exclude=['thop']) # Resume if opt.resume and not check_wandb_resume(opt) and not opt.evolve: # resume an interrupted run diff --git a/utils/__init__.py b/utils/__init__.py index 74260ad1e5b4..e69de29bb2d1 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -1,19 +0,0 @@ -# import sys -# from pathlib import Path -# -# import torch -# from PIL import ImageFont -# -# FILE = Path(__file__).resolve() -# ROOT = FILE.parents[1] # yolov5/ dir -# if str(ROOT) not in sys.path: -# sys.path.append(str(ROOT)) # add ROOT to PATH -# -# # Check YOLOv5 Annotator font -# font = 'Arial.ttf' -# try: -# ImageFont.truetype(font) -# except Exception as e: # download if missing -# url = "https://ultralytics.com/assets/" + font -# print(f'Downloading {url} to {ROOT / font}...') -# torch.hub.download_url_to_file(url, str(ROOT / font)) diff --git a/utils/aws/resume.py b/utils/aws/resume.py index e869834e96e7..e1a8bd896a58 100644 --- a/utils/aws/resume.py +++ b/utils/aws/resume.py @@ -8,7 +8,10 @@ import torch import yaml -sys.path.append('./') # to run '$ python *.py' files in subdirectories +FILE = Path(__file__).resolve() +ROOT = FILE.parents[2] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH port = 0 # --master_port path = Path('').resolve() diff --git a/utils/loggers/wandb/sweep.py b/utils/loggers/wandb/sweep.py index 4d5df5c8e00a..fdabec4eb63b 100644 --- a/utils/loggers/wandb/sweep.py +++ b/utils/loggers/wandb/sweep.py @@ -4,7 +4,9 @@ import wandb FILE = Path(__file__).resolve() -sys.path.append(FILE.parents[3].as_posix()) # add utils/ to path +ROOT = FILE.parents[3] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH from train import train, parse_opt from utils.general import increment_path diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index e7b0d82213f0..ab2c20d520b0 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -11,7 +11,9 @@ from tqdm import tqdm FILE = Path(__file__).resolve() -sys.path.append(FILE.parents[3].as_posix()) # add yolov5/ to path +ROOT = FILE.parents[3] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH from utils.datasets import LoadImagesAndLabels from utils.datasets import img2label_paths diff --git a/val.py b/val.py index 3574fb085c07..2dbf570f1e6e 100644 --- a/val.py +++ b/val.py @@ -18,7 +18,9 @@ from tqdm import tqdm FILE = Path(__file__).resolve() -sys.path.append(FILE.parents[0].as_posix()) # add yolov5/ to path +ROOT = FILE.parents[0] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH from models.experimental import attempt_load from utils.datasets import create_dataloader @@ -325,7 +327,7 @@ def parse_opt(): def main(opt): set_logging() - check_requirements(requirements=FILE.parent / 'requirements.txt', exclude=('tensorboard', 'thop')) + check_requirements(requirements=ROOT / 'requirements.txt', exclude=('tensorboard', 'thop')) if opt.task in ('train', 'val', 'test'): # run normally run(**vars(opt)) From 4c839eeb1030e0d5c77609de646c1361e4dfdd61 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 18 Sep 2021 18:34:30 +0200 Subject: [PATCH 0566/1976] Simplify `check_requirements()` usage (#4855) 
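The path preamble that #4852 above standardizes across entry points, shown in isolation as a sketch; the parents[] index varies with each file's depth (0 for top-level scripts like detect.py, 1 for models/*, 3 for utils/loggers/wandb/*):

import sys
from pathlib import Path

FILE = Path(__file__).resolve()
ROOT = FILE.parents[0]  # YOLOv5 root directory; adjust the index to file depth
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH so repo-relative imports work
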
* Simplify `check_requirements()` usage * remove assert, print() --- detect.py | 2 +- hubconf.py | 2 +- train.py | 2 +- utils/autoanchor.py | 2 +- utils/general.py | 5 ++++- val.py | 2 +- 6 files changed, 9 insertions(+), 6 deletions(-) diff --git a/detect.py b/detect.py index 92fcd064d53d..9b9e7c74644b 100644 --- a/detect.py +++ b/detect.py @@ -286,7 +286,7 @@ def parse_opt(): def main(opt): - check_requirements(requirements=ROOT / 'requirements.txt', exclude=('tensorboard', 'thop')) + check_requirements(exclude=('tensorboard', 'thop')) run(**vars(opt)) diff --git a/hubconf.py b/hubconf.py index 9c5fa63809d1..3a89cf9763da 100644 --- a/hubconf.py +++ b/hubconf.py @@ -34,7 +34,7 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo from utils.torch_utils import select_device file = Path(__file__).resolve() - check_requirements(requirements=file.parent / 'requirements.txt', exclude=('tensorboard', 'thop', 'opencv-python')) + check_requirements(exclude=('tensorboard', 'thop', 'opencv-python')) set_logging(verbose=verbose) save_dir = Path('') if str(name).endswith('.pt') else file.parent diff --git a/train.py b/train.py index 89c0c507b8bf..6f98a64124d7 100644 --- a/train.py +++ b/train.py @@ -476,7 +476,7 @@ def main(opt, callbacks=Callbacks()): if RANK in [-1, 0]: print_args(FILE.stem, opt) check_git_status() - check_requirements(requirements=ROOT / 'requirements.txt', exclude=['thop']) + check_requirements(exclude=['thop']) # Resume if opt.resume and not check_wandb_resume(opt) and not opt.evolve: # resume an interrupted run diff --git a/utils/autoanchor.py b/utils/autoanchor.py index 66a2712dfd5d..1706fcb8e735 100644 --- a/utils/autoanchor.py +++ b/utils/autoanchor.py @@ -127,7 +127,7 @@ def print_results(k): print(f'{prefix}Running kmeans for {n} anchors on {len(wh)} points...') s = wh.std(0) # sigmas for whitening k, dist = kmeans(wh / s, n, iter=30) # points, mean distance - assert len(k) == n, print(f'{prefix}ERROR: scipy.cluster.vq.kmeans requested {n} points but returned only {len(k)}') + assert len(k) == n, f'{prefix}ERROR: scipy.cluster.vq.kmeans requested {n} points but returned only {len(k)}' k *= s wh = torch.tensor(wh, dtype=torch.float32) # filtered wh0 = torch.tensor(wh0, dtype=torch.float32) # unfiltered diff --git a/utils/general.py b/utils/general.py index d4d8e2064d08..dcaa3c71b3f5 100755 --- a/utils/general.py +++ b/utils/general.py @@ -37,6 +37,9 @@ cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader) os.environ['NUMEXPR_MAX_THREADS'] = str(min(os.cpu_count(), 8)) # NumExpr max threads +FILE = Path(__file__).resolve() +ROOT = FILE.parents[1] # YOLOv5 root directory + class Profile(contextlib.ContextDecorator): # Usage: @Profile() decorator or 'with Profile():' context manager @@ -222,7 +225,7 @@ def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=Fals @try_except -def check_requirements(requirements='requirements.txt', exclude=(), install=True): +def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True): # Check installed dependencies meet requirements (pass *.txt file or list of packages) prefix = colorstr('red', 'bold', 'requirements:') check_python() # check python version diff --git a/val.py b/val.py index 2dbf570f1e6e..f8c4f9e1cdd5 100644 --- a/val.py +++ b/val.py @@ -327,7 +327,7 @@ def parse_opt(): def main(opt): set_logging() - check_requirements(requirements=ROOT / 'requirements.txt', exclude=('tensorboard', 'thop')) + 
check_requirements(exclude=('tensorboard', 'thop')) if opt.task in ('train', 'val', 'test'): # run normally run(**vars(opt)) From 9ef94940aa5e9618e7e804f0758f9a6cebfc63a9 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 18 Sep 2021 18:43:26 +0200 Subject: [PATCH 0567/1976] Update greetings.yml (#4856) --- .github/workflows/greetings.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index c557e77f3b70..a40d0a50c8ac 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -18,7 +18,7 @@ jobs: git remote add upstream https://github.com/ultralytics/yolov5.git git fetch upstream git checkout feature # <----- replace 'feature' with local branch name - git rebase upstream/master + git merge upstream/master git push -u origin -f ``` - ✅ Verify all Continuous Integration (CI) **checks are passing**. From 40d1c805031c4feba8ea9c0c1d5cb4eb8170afcc Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 20 Sep 2021 12:22:24 +0200 Subject: [PATCH 0568/1976] Update Dockerfile (#4861) --- Dockerfile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Dockerfile b/Dockerfile index e9cd304376ed..543e02e34124 100644 --- a/Dockerfile +++ b/Dockerfile @@ -50,3 +50,6 @@ ENV HOME=/usr/src/app # Clean up # docker system prune -a --volumes + +# Update Ubuntu drivers +# https://www.maketecheasier.com/install-nvidia-drivers-ubuntu/ From 28096163451a7bf3dc964228c557c3b8d010de2d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 20 Sep 2021 13:10:41 +0200 Subject: [PATCH 0569/1976] Update Dockerfile (#4862) --- Dockerfile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Dockerfile b/Dockerfile index 543e02e34124..95c098f9f513 100644 --- a/Dockerfile +++ b/Dockerfile @@ -53,3 +53,6 @@ ENV HOME=/usr/src/app # Update Ubuntu drivers # https://www.maketecheasier.com/install-nvidia-drivers-ubuntu/ + +# DDP test +# python -m torch.distributed.run --nproc_per_node 2 --master_port 1 train.py --epochs 3 From 2b3109eeb05a609538e4b378f0e00a4fa78d11c0 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 20 Sep 2021 13:12:48 +0200 Subject: [PATCH 0570/1976] Fix DDP destruction `LOGGER.info()` (#4863) --- train.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/train.py b/train.py index 6f98a64124d7..51a0e127486a 100644 --- a/train.py +++ b/train.py @@ -510,7 +510,8 @@ def main(opt, callbacks=Callbacks()): if not opt.evolve: train(opt.hyp, opt, device, callbacks) if WORLD_SIZE > 1 and RANK == 0: - _ = LOGGER.info('Destroying process group... ', end=''), dist.destroy_process_group(), LOGGER.info('Done.') + LOGGER.info('Destroying process group... 
') + dist.destroy_process_group() # Evolve hyperparameters (optional) else: From 8ad9e4ed5be3cba275bcd624b14d53fe4985f262 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 20 Sep 2021 13:57:23 +0200 Subject: [PATCH 0571/1976] Annotator `check_font()` RANK -1 remove progress (#4864) * Annotator `check_font()` RANK -1 remove progress * Cleanup --- utils/plots.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/utils/plots.py b/utils/plots.py index 5ff72cb144e2..1e6ee516387a 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -3,11 +3,12 @@ Plotting utils """ +import math +import os from copy import copy from pathlib import Path import cv2 -import math import matplotlib import matplotlib.pyplot as plt import numpy as np @@ -21,6 +22,7 @@ # Settings CONFIG_DIR = user_config_dir() # Ultralytics settings dir +RANK = int(os.getenv('RANK', -1)) matplotlib.rc('font', **{'size': 11}) matplotlib.use('Agg') # for writing to files only @@ -55,12 +57,13 @@ def check_font(font='Arial.ttf', size=10): except Exception as e: # download if missing url = "https://ultralytics.com/assets/" + font.name print(f'Downloading {url} to {font}...') - torch.hub.download_url_to_file(url, str(font)) + torch.hub.download_url_to_file(url, str(font), progress=False) return ImageFont.truetype(str(font), size) class Annotator: - check_font() # download TTF if necessary + if RANK in (-1, 0): + check_font() # download TTF if necessary # YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=True): From 9febea79de895191bd7a375e5c5a61bfa2886c89 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Mon, 20 Sep 2021 19:49:34 +0530 Subject: [PATCH 0572/1976] W&B: Login only in master processes (#4866) * evolve fix * Enable login timeout * fix pkg * check rank * don't relogin --- utils/loggers/wandb/wandb_utils.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index ab2c20d520b0..f520fbba8850 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -19,16 +19,17 @@ from utils.datasets import img2label_paths from utils.general import check_dataset, check_file +RANK = int(os.getenv('RANK', -1)) + try: import wandb assert hasattr(wandb, '__version__') # verify package import not local dir - if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.2'): + if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.2') and RANK in [0, -1]: wandb.login(timeout=30) except (ImportError, AssertionError): wandb = None -RANK = int(os.getenv('RANK', -1)) WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' From 59aae85a7e40701bb872df673a6ef288e99a4ae3 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Tue, 21 Sep 2021 23:32:32 +0530 Subject: [PATCH 0573/1976] W&B: Fix dataset check (#4879) * evolve fix * Enable login timeout * fix pkg * check rank * don't relogin * fix * reformat --- utils/loggers/wandb/wandb_utils.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index f520fbba8850..9a80dc42ca95 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -5,8 +5,8 @@ import sys from contextlib import contextmanager from pathlib import Path -import pkg_resources as pkg +import pkg_resources as pkg import yaml from tqdm import tqdm @@ -49,9 
+49,11 @@ def check_wandb_dataset(data_file): if check_file(data_file) and data_file.endswith('.yaml'): with open(data_file, errors='ignore') as f: data_dict = yaml.safe_load(f) - is_wandb_artifact = (data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX) or - data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX)) - if is_wandb_artifact: + is_trainset_wandb_artifact = (isinstance(data_dict['train'], str) and + data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX)) + is_valset_wandb_artifact = (isinstance(data_dict['val'], str) and + data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX)) + if is_trainset_wandb_artifact or is_valset_wandb_artifact: return data_dict else: return check_dataset(data_file) From dad8660540e47d3331a2ae1c78bda8670ecdd737 Mon Sep 17 00:00:00 2001 From: NauchtanRobotics Date: Fri, 24 Sep 2021 23:44:01 +1000 Subject: [PATCH 0574/1976] Fix arg help string to match 'classes' arg name (#4893) --- detect.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/detect.py b/detect.py index 9b9e7c74644b..08e78bf64a3b 100644 --- a/detect.py +++ b/detect.py @@ -267,7 +267,7 @@ def parse_opt(): parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes') parser.add_argument('--nosave', action='store_true', help='do not save images/videos') - parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3') + parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3') parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') parser.add_argument('--augment', action='store_true', help='augmented inference') parser.add_argument('--visualize', action='store_true', help='visualize features') From ce7fa81d4e342ca97f7459f6dd10036b3449321b Mon Sep 17 00:00:00 2001 From: Jiacong Fang Date: Sat, 25 Sep 2021 00:23:28 +0800 Subject: [PATCH 0575/1976] Avoid out-of-image class labels (#4902) * Avoid out-of-image class labels * Update plots.py * Cleanup Co-authored-by: Glenn Jocher --- utils/plots.py | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/utils/plots.py b/utils/plots.py index 1e6ee516387a..d8e7c07f39b1 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -73,7 +73,6 @@ def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=Tr self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) self.draw = ImageDraw.Draw(self.im) self.font = check_font(font, size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12)) - self.fh = self.font.getsize('a')[1] - 3 # font height else: # use cv2 self.im = im self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2) # line width @@ -83,20 +82,25 @@ def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 2 if self.pil or not is_ascii(label): self.draw.rectangle(box, width=self.lw, outline=color) # box if label: - w, h = self.font.getsize(label) # text width - self.draw.rectangle([box[0], box[1] - self.fh, box[0] + w + 1, box[1] + 1], fill=color) + w, h = self.font.getsize(label) # text width, height + outside = box[1] - h >= 0 # label fits outside box + self.draw.rectangle([box[0], + box[1] - h if outside else box[1], + box[0] + w + 1, + box[1] + 1 if outside else box[1] + h + 1], fill=color) # self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls') # for 
PIL>8.0 - self.draw.text((box[0], box[1] - h), label, fill=txt_color, font=self.font) + self.draw.text((box[0], box[1] - h if outside else box[1]), label, fill=txt_color, font=self.font) else: # cv2 - c1, c2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3])) - cv2.rectangle(self.im, c1, c2, color, thickness=self.lw, lineType=cv2.LINE_AA) + p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3])) + cv2.rectangle(self.im, p1, p2, color, thickness=self.lw, lineType=cv2.LINE_AA) if label: tf = max(self.lw - 1, 1) # font thickness - w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[0] - c2 = c1[0] + w, c1[1] - h - 3 - cv2.rectangle(self.im, c1, c2, color, -1, cv2.LINE_AA) # filled - cv2.putText(self.im, label, (c1[0], c1[1] - 2), 0, self.lw / 3, txt_color, thickness=tf, - lineType=cv2.LINE_AA) + w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[0] # text width, height + outside = p1[1] - h - 3 >= 0 # label fits outside box + p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3 + cv2.rectangle(self.im, p1, p2, color, -1, cv2.LINE_AA) # filled + cv2.putText(self.im, label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2), 0, self.lw / 3, txt_color, + thickness=tf, lineType=cv2.LINE_AA) def rectangle(self, xy, fill=None, outline=None, width=1): # Add rectangle to image (PIL-only) From 2c2ef25f8bb351b34aef89f8fce75742c698e847 Mon Sep 17 00:00:00 2001 From: Jiacong Fang Date: Sat, 25 Sep 2021 05:18:15 +0800 Subject: [PATCH 0576/1976] TensorFlow.js export enhancements (#4905) * Add arguments to TensorFlow NMS call * Add regex substitution to reorder Identity_* * Delete reorder in docstring * Cleanup * Cleanup2 * Removed `+ \` on string ends (not needed) Co-authored-by: Glenn Jocher --- export.py | 29 +++++++++++++++++++++++++++-- models/tf.py | 2 +- 2 files changed, 28 insertions(+), 3 deletions(-) diff --git a/export.py b/export.py index e876af234592..d5b63c410af8 100644 --- a/export.py +++ b/export.py @@ -14,7 +14,6 @@ yolov5s.tflite TensorFlow.js: - $ # Edit yolov5s_web_model/model.json to sort Identity* in ascending order $ cd .. 
&& git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example $ npm install $ ln -s ../../yolov5/yolov5s_web_model public/yolov5s_web_model @@ -213,16 +212,32 @@ def export_tfjs(keras_model, im, file, prefix=colorstr('TensorFlow.js:')): # YOLOv5 TensorFlow.js export try: check_requirements(('tensorflowjs',)) + import re import tensorflowjs as tfjs print(f'\n{prefix} starting export with tensorflowjs {tfjs.__version__}...') f = str(file).replace('.pt', '_web_model') # js dir f_pb = file.with_suffix('.pb') # *.pb path + f_json = f + '/model.json' # *.json path cmd = f"tensorflowjs_converter --input_format=tf_frozen_model " \ f"--output_node_names='Identity,Identity_1,Identity_2,Identity_3' {f_pb} {f}" subprocess.run(cmd, shell=True) + json = open(f_json).read() + with open(f_json, 'w') as j: # sort JSON Identity_* in ascending order + subst = re.sub( + r'{"outputs": {"Identity.?.?": {"name": "Identity.?.?"}, ' + r'"Identity.?.?": {"name": "Identity.?.?"}, ' + r'"Identity.?.?": {"name": "Identity.?.?"}, ' + r'"Identity.?.?": {"name": "Identity.?.?"}}}', + r'{"outputs": {"Identity": {"name": "Identity"}, ' + r'"Identity_1": {"name": "Identity_1"}, ' + r'"Identity_2": {"name": "Identity_2"}, ' + r'"Identity_3": {"name": "Identity_3"}}}', + json) + j.write(subst) + print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') except Exception as e: print(f'\n{prefix} export failure: {e}') @@ -243,6 +258,10 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' dynamic=False, # ONNX/TF: dynamic axes simplify=False, # ONNX: simplify model opset=12, # ONNX: opset version + topk_per_class=100, # TF.js NMS: topk per class to keep + topk_all=100, # TF.js NMS: topk for all classes to keep + iou_thres=0.45, # TF.js NMS: IoU threshold + conf_thres=0.25 # TF.js NMS: confidence threshold ): t = time.time() include = [x.lower() for x in include] @@ -290,7 +309,9 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' if any(tf_exports): pb, tflite, tfjs = tf_exports[1:] assert not (tflite and tfjs), 'TFLite and TF.js models must be exported separately, please pass only one type.' 
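# The NMS thresholds threaded into export_saved_model() below map one-to-one
# onto the CLI flags added further down in this patch; a hypothetical TF.js
# export invocation would be:
#   python export.py --weights yolov5s.pt --include tfjs \
#       --topk-per-class 100 --topk-all 100 --iou-thres 0.45 --conf-thres 0.25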
- model = export_saved_model(model, im, file, dynamic, tf_nms=tfjs, agnostic_nms=tfjs) # keras model + model = export_saved_model(model, im, file, dynamic, tf_nms=tfjs, agnostic_nms=tfjs, + topk_per_class=topk_per_class, topk_all=topk_all, conf_thres=conf_thres, + iou_thres=iou_thres) # keras model if pb or tfjs: # pb prerequisite to tfjs export_pb(model, im, file) if tflite: @@ -319,6 +340,10 @@ def parse_opt(): parser.add_argument('--dynamic', action='store_true', help='ONNX/TF: dynamic axes') parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model') parser.add_argument('--opset', type=int, default=13, help='ONNX: opset version') + parser.add_argument('--topk-per-class', type=int, default=100, help='TF.js NMS: topk per class to keep') + parser.add_argument('--topk-all', type=int, default=100, help='TF.js NMS: topk for all classes to keep') + parser.add_argument('--iou-thres', type=float, default=0.45, help='TF.js NMS: IoU threshold') + parser.add_argument('--conf-thres', type=float, default=0.25, help='TF.js NMS: confidence threshold') parser.add_argument('--include', nargs='+', default=['torchscript', 'onnx'], help='available formats are (torchscript, onnx, coreml, saved_model, pb, tflite, tfjs)') diff --git a/models/tf.py b/models/tf.py index 3265b7b75f55..b7d99359c863 100644 --- a/models/tf.py +++ b/models/tf.py @@ -367,7 +367,7 @@ class AgnosticNMS(keras.layers.Layer): # TF Agnostic NMS def call(self, input, topk_all, iou_thres, conf_thres): # wrap map_fn to avoid TypeSpec related error https://stackoverflow.com/a/65809989/3036450 - return tf.map_fn(self._nms, input, + return tf.map_fn(lambda x: self._nms(x, topk_all, iou_thres, conf_thres), input, fn_output_signature=(tf.float32, tf.float32, tf.float32, tf.int32), name='agnostic_nms') From 149263279655d26dc3c74192773de1c9c5a772c9 Mon Sep 17 00:00:00 2001 From: Jebastin Nadar Date: Sat, 25 Sep 2021 19:13:24 +0530 Subject: [PATCH 0577/1976] Fix zipfile name for coco128-segments (#4914) --- data/scripts/get_coco128.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/scripts/get_coco128.sh b/data/scripts/get_coco128.sh index 4238e3634dbb..ee05a867e564 100644 --- a/data/scripts/get_coco128.sh +++ b/data/scripts/get_coco128.sh @@ -10,7 +10,7 @@ # Download/unzip images and labels d='../datasets' # unzip directory url=https://github.com/ultralytics/yolov5/releases/download/v1.0/ -f='coco128.zip' # or 'coco2017labels-segments.zip', 68 MB +f='coco128.zip' # or 'coco128-segments.zip', 68 MB echo 'Downloading' $url$f ' ...' 
curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & From a64a4c839fa15baf5ea2be933e7034a1607878b2 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 25 Sep 2021 08:52:36 -0700 Subject: [PATCH 0578/1976] Replace `os.system('unzip file.zip')` -> `ZipFile.extractall()` (#4919) * Replace `os.system('unzip file.zip')` -> `ZipFile.extractall()` * Cleanup --- utils/datasets.py | 5 +++-- utils/downloads.py | 5 +++-- utils/general.py | 18 ++++++++++-------- 3 files changed, 16 insertions(+), 12 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index d253cb177b82..a54e29fd2908 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -15,6 +15,7 @@ from multiprocessing.pool import ThreadPool, Pool from pathlib import Path from threading import Thread +from zipfile import ZipFile import cv2 import numpy as np @@ -928,8 +929,8 @@ def unzip(path): # Unzip data.zip TODO: CONSTRAINT: path/to/abc.zip MUST unzip to 'path/to/abc/' if str(path).endswith('.zip'): # path is data.zip assert Path(path).is_file(), f'Error unzipping {path}, file not found' - assert os.system(f'unzip -q {path} -d {path.parent}') == 0, f'Error unzipping {path}' - dir = path.with_suffix('') # dataset directory + ZipFile(path).extractall(path=path.parent) # unzip + dir = path.with_suffix('') # dataset directory == zip name return True, str(dir), next(dir.rglob('*.yaml')) # zipped, data_dir, yaml_path else: # path is data.yaml return False, None, path diff --git a/utils/downloads.py b/utils/downloads.py index 27cb899cd606..eafa3b7ac309 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -9,6 +9,7 @@ import time import urllib from pathlib import Path +from zipfile import ZipFile import requests import torch @@ -104,8 +105,8 @@ def gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', file='tmp.zip'): # Unzip if archive if file.suffix == '.zip': print('unzipping... ', end='') - os.system(f'unzip -q {file}') # unzip - file.unlink() # remove zip to free space + ZipFile(file).extractall(path=file.parent) # unzip + file.unlink() # remove zip print(f'Done ({time.time() - t:.1f}s)') return r diff --git a/utils/general.py b/utils/general.py index dcaa3c71b3f5..2e2cdf389075 100755 --- a/utils/general.py +++ b/utils/general.py @@ -18,6 +18,7 @@ from multiprocessing.pool import ThreadPool from pathlib import Path from subprocess import check_output +from zipfile import ZipFile import cv2 import numpy as np @@ -353,17 +354,19 @@ def check_dataset(data, autodownload=True): if s and autodownload: # download script if s.startswith('http') and s.endswith('.zip'): # URL f = Path(s).name # filename - print(f'Downloading {s} ...') + print(f'Downloading {s} to {f}...') torch.hub.download_url_to_file(s, f) root = path.parent if 'path' in data else '..' # unzip directory i.e. 
'../' Path(root).mkdir(parents=True, exist_ok=True) # create root - r = os.system(f'unzip -q {f} -d {root} && rm {f}') # unzip + ZipFile(f).extractall(path=root) # unzip + Path(f).unlink() # remove zip + r = None # success elif s.startswith('bash '): # bash script print(f'Running {s} ...') r = os.system(s) else: # python script r = exec(s, {'yaml': data}) # return None - print('Dataset autodownload %s\n' % ('success' if r in (0, None) else 'failure')) # print result + print(f"Dataset autodownload {f'success, saved to {root}' if r in (0, None) else 'failure'}") else: raise Exception('Dataset not found.') @@ -393,12 +396,11 @@ def download_one(url, dir): if unzip and f.suffix in ('.zip', '.gz'): print(f'Unzipping {f}...') if f.suffix == '.zip': - s = f'unzip -qo {f} -d {dir}' # unzip -quiet -overwrite + ZipFile(f).extractall(path=dir) # unzip elif f.suffix == '.gz': - s = f'tar xfz {f} --directory {f.parent}' # unzip - if delete: # delete zip file after unzip - s += f' && rm {f}' - os.system(s) + os.system(f'tar xfz {f} --directory {f.parent}') # unzip + if delete: + f.unlink() # remove zip dir = Path(dir) dir.mkdir(parents=True, exist_ok=True) # make directory From 39c17ce0b922bbafcf9f8da64f286fef01040727 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 25 Sep 2021 09:16:14 -0700 Subject: [PATCH 0579/1976] Fix `root` referenced before assignment (#4920) * Fix `root` referenced before assignment Fix for bug introduced by #4919 discovered on VOC autodownload: ``` python train.py --data VOC.yaml ``` * Cleanup --- utils/general.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/general.py b/utils/general.py index 2e2cdf389075..3c5cbff13d55 100755 --- a/utils/general.py +++ b/utils/general.py @@ -352,11 +352,11 @@ def check_dataset(data, autodownload=True): if not all(x.exists() for x in val): print('\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()]) if s and autodownload: # download script + root = path.parent if 'path' in data else '..' # unzip directory i.e. '../' if s.startswith('http') and s.endswith('.zip'): # URL f = Path(s).name # filename print(f'Downloading {s} to {f}...') torch.hub.download_url_to_file(s, f) - root = path.parent if 'path' in data else '..' # unzip directory i.e. '../' Path(root).mkdir(parents=True, exist_ok=True) # create root ZipFile(f).extractall(path=root) # unzip Path(f).unlink() # remove zip @@ -366,7 +366,7 @@ def check_dataset(data, autodownload=True): r = os.system(s) else: # python script r = exec(s, {'yaml': data}) # return None - print(f"Dataset autodownload {f'success, saved to {root}' if r in (0, None) else 'failure'}") + print(f"Dataset autodownload {f'success, saved to {root}' if r in (0, None) else 'failure'}\n") else: raise Exception('Dataset not found.') From e687873436eb7d4259c46387993fb0d50034c18b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 26 Sep 2021 10:54:55 -0700 Subject: [PATCH 0580/1976] Add Slack Forum badge to README (#4930) Add badge with link to join the new YOLOv5 Slack Forum! https://join.slack.com/t/ultralytics/shared_invite/zt-w29ei8bp-jczz7QYUmDtgo6r6KcMIAg --- README.md | 67 ++++++++++++++++++++++++++++--------------------------- 1 file changed, 34 insertions(+), 33 deletions(-) diff --git a/README.md b/README.md index febf4bff9b40..a01c2c688aa4 100644 --- a/README.md +++ b/README.md @@ -1,42 +1,43 @@

[README badge block: the original hunk is HTML (<a>/<img> shield badges) that did not survive extraction. Recoverable content: the CI CPU testing, YOLOv5 Citation, Docker Pulls, Open In Colab and Open In Kaggle badges are regrouped, and a new "Join Forum" Slack badge is added, matching the invite link in the commit message above.]
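The download-and-extract flow that #4919 and #4920 above converge on, as a standalone sketch (the function name, URL handling and default root are placeholders, not the exact utils/general.py code):

from pathlib import Path
from zipfile import ZipFile

import torch

def download_and_extract(url, root='../datasets'):
    f = Path(url).name  # e.g. 'coco128.zip'
    root = Path(root)
    root.mkdir(parents=True, exist_ok=True)  # create root before extracting (#4920 fix)
    torch.hub.download_url_to_file(url, f)
    ZipFile(f).extractall(path=root)  # pure-Python replacement for os.system('unzip ...')
    Path(f).unlink()  # remove zip to free space
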
From d856c4829837dd6ef004e1defc789e44d24f7b6c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 26 Sep 2021 11:54:53 -0700 Subject: [PATCH 0581/1976] Validate `best.pt` on train end (#4889) * Validate best.pt on train end * 0.7 iou for COCO only * pass callbacks * active model.float() if not half * print Validating best.pt... * add newline --- train.py | 27 +++++++++++++-------------- val.py | 3 +-- 2 files changed, 14 insertions(+), 16 deletions(-) diff --git a/train.py b/train.py index 51a0e127486a..40f58bfafb4a 100644 --- a/train.py +++ b/train.py @@ -356,9 +356,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary single_cls=single_cls, dataloader=val_loader, save_dir=save_dir, - save_json=is_coco and final_epoch, - verbose=nc < 50 and final_epoch, - plots=plots and final_epoch, + plots=False, callbacks=callbacks, compute_loss=compute_loss) @@ -404,23 +402,24 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # end training ----------------------------------------------------------------------------------------------------- if RANK in [-1, 0]: LOGGER.info(f'\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.') - if not evolve: - if is_coco: # COCO dataset - for m in [last, best] if best.exists() else [last]: # speed, mAP tests + for f in last, best: + if f.exists(): + strip_optimizer(f) # strip optimizers + if f is best: + LOGGER.info(f'\nValidating {f}...') results, _, _ = val.run(data_dict, batch_size=batch_size // WORLD_SIZE * 2, imgsz=imgsz, - model=attempt_load(m, device).half(), - iou_thres=0.7, # NMS IoU threshold for best pycocotools results + model=attempt_load(f, device).half(), + iou_thres=0.7 if is_coco else 0.6, # best pycocotools results at 0.7 single_cls=single_cls, dataloader=val_loader, save_dir=save_dir, - save_json=True, - plots=False) - # Strip optimizers - for f in last, best: - if f.exists(): - strip_optimizer(f) # strip optimizers + save_json=is_coco, + verbose=True, + plots=True, + callbacks=callbacks) # val best model with plots + callbacks.run('on_train_end', last, best, plots, epoch) LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}") diff --git a/val.py b/val.py index f8c4f9e1cdd5..92e0e3b13ae9 100644 --- a/val.py +++ b/val.py @@ -133,8 +133,7 @@ def run(data, # Half half &= device.type != 'cpu' # half precision only supported on CUDA - if half: - model.half() + model.half() if half else model.float() # Configure model.eval() From 793383232fd52382d7bbd2a1ce771516afc15fe5 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 26 Sep 2021 12:10:33 -0700 Subject: [PATCH 0582/1976] Update default Albumentations (#4931) --- utils/augmentations.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/utils/augmentations.py b/utils/augmentations.py index 49f957e6fd62..04192d1ec5cd 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -23,9 +23,13 @@ def __init__(self): check_version(A.__version__, '1.0.3') # version requirement self.transform = A.Compose([ - A.Blur(p=0.1), - A.MedianBlur(p=0.1), - A.ToGray(p=0.01)], + A.Blur(p=0.01), + A.MedianBlur(p=0.01), + A.ToGray(p=0.01), + A.CLAHE(p=0.01), + A.RandomBrightnessContrast(p=0.0), + A.RandomGamma(p=0.0), + A.ImageCompression(quality_lower=75, p=0.0)], bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels'])) logging.info(colorstr('albumentations: ') + ', '.join(f'{x}' for x in self.transform.transforms if x.p)) From 5a8e4343d80de4ece38cdb5807a7187ec937c57c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: 
Sun, 26 Sep 2021 17:11:46 -0700 Subject: [PATCH 0583/1976] Scope `check_file()` search space (#4933) `check_file()` is now limited to searching opt-in directories: /data, /models, /utils. This prevents large non-project directories like /.git and /venv from being searched, which may cause `check_file()` to slow significantly. --- utils/general.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index 3c5cbff13d55..00bafb1e9537 100755 --- a/utils/general.py +++ b/utils/general.py @@ -315,7 +315,9 @@ def check_file(file, suffix=''): assert Path(file).exists() and Path(file).stat().st_size > 0, f'File download failed: {url}' # check return file else: # search - files = glob.glob('./**/' + file, recursive=True) # find file + files = [] + for d in 'data', 'models', 'utils': # search directories + files.extend(glob.glob(str(ROOT / d / '**' / file), recursive=True)) # find file assert len(files), f'File not found: {file}' # assert file was found assert len(files) == 1, f"Multiple files match '{file}', specify exact path: {files}" # assert unique return files[0] # return file From c5ba2abb4afb9fe8c671f14eb5200647893efe30 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 26 Sep 2021 19:16:14 -0700 Subject: [PATCH 0584/1976] Update Dockerfile (#4935) --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 95c098f9f513..e0653e0f9b3a 100644 --- a/Dockerfile +++ b/Dockerfile @@ -12,7 +12,7 @@ RUN python -m pip install --upgrade pip RUN pip uninstall -y nvidia-tensorboard nvidia-tensorboard-plugin-dlprof RUN pip install --no-cache -r requirements.txt coremltools onnx gsutil notebook wandb>=0.12.2 RUN pip install --no-cache -U torch torchvision numpy -# RUN pip install --no-cache torch==1.9.0+cu111 torchvision==0.10.0+cu111 -f https://download.pytorch.org/whl/torch_stable.html +# RUN pip install --no-cache torch==1.9.1+cu111 torchvision==0.10.1+cu111 -f https://download.pytorch.org/whl/torch_stable.html # Create working directory RUN mkdir -p /usr/src/app From a820b43aca3816c9552e9beaf14a77955742b0ec Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 27 Sep 2021 13:48:15 -0700 Subject: [PATCH 0585/1976] Automatic Chinese fonts plotting (#4951) * Automatic Chinese fonts plotting * Default PIL=False --- detect.py | 5 ++--- models/common.py | 13 ++++++------- utils/general.py | 7 +++---- utils/plots.py | 13 +++++++------ 4 files changed, 18 insertions(+), 20 deletions(-) diff --git a/detect.py b/detect.py index 08e78bf64a3b..fae82833c5f6 100644 --- a/detect.py +++ b/detect.py @@ -23,7 +23,7 @@ from models.experimental import attempt_load from utils.datasets import LoadImages, LoadStreams from utils.general import apply_classifier, check_img_size, check_imshow, check_requirements, check_suffix, colorstr, \ - increment_path, is_ascii, non_max_suppression, print_args, save_one_box, scale_coords, set_logging, \ + increment_path, non_max_suppression, print_args, save_one_box, scale_coords, set_logging, \ strip_optimizer, xyxy2xywh from utils.plots import Annotator, colors from utils.torch_utils import load_classifier, select_device, time_sync @@ -108,7 +108,6 @@ def wrap_frozen_graph(gd, inputs, outputs): output_details = interpreter.get_output_details() # outputs int8 = input_details[0]['dtype'] == np.uint8 # is TFLite quantized uint8 model imgsz = check_img_size(imgsz, s=stride) # check image size - ascii = is_ascii(names) # names are ascii (use PIL for UTF-8) # Dataloader if webcam: @@ 
-190,7 +189,7 @@ def wrap_frozen_graph(gd, inputs, outputs): s += '%gx%g ' % img.shape[2:] # print string gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh imc = im0.copy() if save_crop else im0 # for save_crop - annotator = Annotator(im0, line_width=line_thickness, pil=not ascii) + annotator = Annotator(im0, line_width=line_thickness, example=str(names)) if len(det): # Rescale boxes from img_size to im0 size det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round() diff --git a/models/common.py b/models/common.py index 5305b03d5389..2acf6281f475 100644 --- a/models/common.py +++ b/models/common.py @@ -18,7 +18,7 @@ from torch.cuda import amp from utils.datasets import exif_transpose, letterbox -from utils.general import colorstr, increment_path, is_ascii, make_divisible, non_max_suppression, save_one_box, \ +from utils.general import colorstr, increment_path, make_divisible, non_max_suppression, save_one_box, \ scale_coords, xyxy2xywh from utils.plots import Annotator, colors from utils.torch_utils import time_sync @@ -356,7 +356,6 @@ def __init__(self, imgs, pred, files, times=None, names=None, shape=None): self.imgs = imgs # list of images as numpy arrays self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls) self.names = names # class names - self.ascii = is_ascii(names) # names are ascii (use PIL for UTF-8) self.files = files # image filenames self.xyxy = pred # xyxy pixels self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels @@ -369,13 +368,13 @@ def __init__(self, imgs, pred, files, times=None, names=None, shape=None): def display(self, pprint=False, show=False, save=False, crop=False, render=False, save_dir=Path('')): crops = [] for i, (im, pred) in enumerate(zip(self.imgs, self.pred)): - str = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} ' + s = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} ' # string if pred.shape[0]: for c in pred[:, -1].unique(): n = (pred[:, -1] == c).sum() # detections per class - str += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string + s += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string if show or save or render or crop: - annotator = Annotator(im, pil=not self.ascii) + annotator = Annotator(im, example=str(self.names)) for *box, conf, cls in reversed(pred): # xyxy, confidence, class label = f'{self.names[int(cls)]} {conf:.2f}' if crop: @@ -386,11 +385,11 @@ def display(self, pprint=False, show=False, save=False, crop=False, render=False annotator.box_label(box, label, color=colors(cls)) im = annotator.im else: - str += '(no detections)' + s += '(no detections)' im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np if pprint: - LOGGER.info(str.rstrip(', ')) + LOGGER.info(s.rstrip(', ')) if show: im.show(self.files[i]) # show if save: diff --git a/utils/general.py b/utils/general.py index 00bafb1e9537..8421981147f7 100755 --- a/utils/general.py +++ b/utils/general.py @@ -161,10 +161,9 @@ def is_pip(): return 'site-packages' in Path(__file__).resolve().parts -def is_ascii(s=''): - # Is string composed of all ASCII (no UTF) characters? - s = str(s) # convert list, tuple, None, etc. to str - return len(s.encode().decode('ascii', 'ignore')) == len(s) +def is_chinese(s='人工智能'): + # Is string composed of any Chinese characters? 
+ return re.search('[\u4e00-\u9fff]', s) def emojis(str=''): diff --git a/utils/plots.py b/utils/plots.py index d8e7c07f39b1..491c5704d67b 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -17,7 +17,7 @@ import torch from PIL import Image, ImageDraw, ImageFont -from utils.general import user_config_dir, is_ascii, xywh2xyxy, xyxy2xywh +from utils.general import user_config_dir, is_chinese, xywh2xyxy, xyxy2xywh from utils.metrics import fitness # Settings @@ -66,20 +66,21 @@ class Annotator: check_font() # download TTF if necessary # YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations - def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=True): + def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'): assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.' - self.pil = pil + self.pil = pil or not example.isascii() or is_chinese(example) if self.pil: # use PIL self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) self.draw = ImageDraw.Draw(self.im) - self.font = check_font(font, size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12)) + self.font = check_font(font='Arial.Unicode.ttf' if is_chinese(example) else font, + size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12)) else: # use cv2 self.im = im self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2) # line width def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)): # Add one xyxy box to image with label - if self.pil or not is_ascii(label): + if self.pil or not label.isascii(): self.draw.rectangle(box, width=self.lw, outline=color) # box if label: w, h = self.font.getsize(label) # text width, height @@ -177,7 +178,7 @@ def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max # Annotate fs = int((h + w) * ns * 0.01) # font size - annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs) + annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True) for i in range(i + 1): x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders From 4e04cb0dc8cae357ad14be1b1507b88fe08c453a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 27 Sep 2021 17:40:20 -0700 Subject: [PATCH 0586/1976] Allow YOLOv5 execution from arbitrary `cwd` (#4954) * Allow YOLOv5 execution from arbitrary `cwd` * Fix str bugs --- detect.py | 14 ++++++++------ export.py | 1 + train.py | 14 ++++++++------ val.py | 11 ++++++----- 4 files changed, 23 insertions(+), 17 deletions(-) diff --git a/detect.py b/detect.py index fae82833c5f6..75ec3ecc5ff3 100644 --- a/detect.py +++ b/detect.py @@ -19,6 +19,7 @@ ROOT = FILE.parents[0] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = ROOT.relative_to(Path.cwd()) # relative from models.experimental import attempt_load from utils.datasets import LoadImages, LoadStreams @@ -30,8 +31,8 @@ @torch.no_grad() -def run(weights='yolov5s.pt', # model.pt path(s) - source='data/images', # file/dir/URL/glob, 0 for webcam +def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s) + source=ROOT / 'data/images', # file/dir/URL/glob, 0 for webcam imgsz=640, # inference size (pixels) conf_thres=0.25, # confidence threshold iou_thres=0.45, # NMS IOU threshold @@ -47,7 +48,7 @@ def run(weights='yolov5s.pt', # model.pt path(s) 
augment=False, # augmented inference visualize=False, # visualize features update=False, # update all models - project='runs/detect', # save results to project/name + project=ROOT / 'runs/detect', # save results to project/name name='exp', # save results to project/name exist_ok=False, # existing project/name ok, do not increment line_thickness=3, # bounding box thickness (pixels) @@ -55,6 +56,7 @@ def run(weights='yolov5s.pt', # model.pt path(s) hide_conf=False, # hide confidences half=False, # use FP16 half-precision inference ): + source = str(source) save_img = not nosave and not source.endswith('.txt') # save inference images webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith( ('rtsp://', 'rtmp://', 'http://', 'https://')) @@ -254,8 +256,8 @@ def wrap_frozen_graph(gd, inputs, outputs): def parse_opt(): parser = argparse.ArgumentParser() - parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model path(s)') - parser.add_argument('--source', type=str, default='data/images', help='file/dir/URL/glob, 0 for webcam') + parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path(s)') + parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob, 0 for webcam') parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w') parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold') parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold') @@ -271,7 +273,7 @@ def parse_opt(): parser.add_argument('--augment', action='store_true', help='augmented inference') parser.add_argument('--visualize', action='store_true', help='visualize features') parser.add_argument('--update', action='store_true', help='update all models') - parser.add_argument('--project', default='runs/detect', help='save results to project/name') + parser.add_argument('--project', default=ROOT / 'runs/detect', help='save results to project/name') parser.add_argument('--name', default='exp', help='save results to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)') diff --git a/export.py b/export.py index d5b63c410af8..74aca4b6c30a 100644 --- a/export.py +++ b/export.py @@ -34,6 +34,7 @@ ROOT = FILE.parents[0] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = ROOT.relative_to(Path.cwd()) # relative from models.common import Conv from models.experimental import attempt_load diff --git a/train.py b/train.py index 40f58bfafb4a..39fe1a0cb14b 100644 --- a/train.py +++ b/train.py @@ -30,6 +30,7 @@ ROOT = FILE.parents[0] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = ROOT.relative_to(Path.cwd()) # relative import val # for end-of-epoch mAP from models.experimental import attempt_load @@ -429,10 +430,10 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary def parse_opt(known=False): parser = argparse.ArgumentParser() - parser.add_argument('--weights', type=str, default='yolov5s.pt', help='initial weights path') + parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='initial weights path') parser.add_argument('--cfg', type=str, default='', help='model.yaml path') - 
parser.add_argument('--data', type=str, default='data/coco128.yaml', help='dataset.yaml path') - parser.add_argument('--hyp', type=str, default='data/hyps/hyp.scratch.yaml', help='hyperparameters path') + parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') + parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch.yaml', help='hyperparameters path') parser.add_argument('--epochs', type=int, default=300) parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs') parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)') @@ -451,8 +452,8 @@ def parse_opt(known=False): parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer') parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode') parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers') - parser.add_argument('--project', default='runs/train', help='save to project/name') parser.add_argument('--entity', default=None, help='W&B entity') + parser.add_argument('--project', default=ROOT / 'runs/train', help='save to project/name') parser.add_argument('--name', default='exp', help='save to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') parser.add_argument('--quad', action='store_true', help='quad dataloader') @@ -486,10 +487,11 @@ def main(opt, callbacks=Callbacks()): opt.cfg, opt.weights, opt.resume = '', ckpt, True # reinstate LOGGER.info(f'Resuming training from {ckpt}') else: - opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp) # check YAMLs + opt.data, opt.cfg, opt.hyp, opt.weights, opt.project = \ + check_file(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp), str(opt.weights), str(opt.project) # checks assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified' if opt.evolve: - opt.project = 'runs/evolve' + opt.project = str(ROOT / 'runs/evolve') opt.exist_ok, opt.resume = opt.resume, False # pass resume to exist_ok and disable resume opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) diff --git a/val.py b/val.py index 92e0e3b13ae9..4f0b49ae2ca7 100644 --- a/val.py +++ b/val.py @@ -21,6 +21,7 @@ ROOT = FILE.parents[0] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = ROOT.relative_to(Path.cwd()) # relative from models.experimental import attempt_load from utils.datasets import create_dataloader @@ -95,7 +96,7 @@ def run(data, save_hybrid=False, # save label+prediction hybrid results to *.txt save_conf=False, # save confidences in --save-txt labels save_json=False, # save a COCO-JSON results file - project='runs/val', # save to project/name + project=ROOT / 'runs/val', # save to project/name name='exp', # save to project/name exist_ok=False, # existing project/name ok, do not increment half=True, # use FP16 half-precision inference @@ -297,8 +298,8 @@ def run(data, def parse_opt(): parser = argparse.ArgumentParser() - parser.add_argument('--data', type=str, default='data/coco128.yaml', help='dataset.yaml path') - parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)') + parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') + 
parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model.pt path(s)') parser.add_argument('--batch-size', type=int, default=32, help='batch size') parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)') parser.add_argument('--conf-thres', type=float, default=0.001, help='confidence threshold') @@ -312,14 +313,14 @@ def parse_opt(): parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt') parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file') - parser.add_argument('--project', default='runs/val', help='save to project/name') + parser.add_argument('--project', default=ROOT / 'runs/val', help='save to project/name') parser.add_argument('--name', default='exp', help='save to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') opt = parser.parse_args() + opt.data = check_yaml(opt.data) # check YAML opt.save_json |= opt.data.endswith('coco.yaml') opt.save_txt |= opt.save_hybrid - opt.data = check_yaml(opt.data) # check YAML print_args(FILE.stem, opt) return opt From 5ed28603cf94185c28da02b3d8bb433118ac33d4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 27 Sep 2021 18:26:41 -0700 Subject: [PATCH 0587/1976] Update relative `ROOT` logic (#4955) * Update relative `ROOT` logic * python 3.9 Path().is_relative_to() removal --- models/tf.py | 1 + models/yolo.py | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/models/tf.py b/models/tf.py index b7d99359c863..4c082cb8a15e 100644 --- a/models/tf.py +++ b/models/tf.py @@ -20,6 +20,7 @@ ROOT = FILE.parents[1] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = ROOT.relative_to(Path.cwd()) # relative import numpy as np import tensorflow as tf diff --git a/models/yolo.py b/models/yolo.py index a7590c57816c..b4ec1eda8376 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -15,11 +15,12 @@ ROOT = FILE.parents[1] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = ROOT.relative_to(Path.cwd()) # relative from models.common import * from models.experimental import * from utils.autoanchor import check_anchor_order -from utils.general import check_yaml, make_divisible, set_logging +from utils.general import check_yaml, make_divisible, print_args, set_logging from utils.plots import feature_visualization from utils.torch_utils import copy_attr, fuse_conv_and_bn, initialize_weights, model_info, scale_img, \ select_device, time_sync @@ -281,6 +282,7 @@ def parse_model(d, ch): # model_dict, input_channels(3) parser.add_argument('--profile', action='store_true', help='profile model speed') opt = parser.parse_args() opt.cfg = check_yaml(opt.cfg) # check YAML + print_args(FILE.stem, opt) set_logging() device = select_device(opt.device) From 3aeb57d66ff2297d20aab4b5cd3e954fa3ef3e19 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 27 Sep 2021 18:32:22 -0700 Subject: [PATCH 0588/1976] Created using Colaboratory --- tutorial.ipynb | 31 +++++++------------------------ 1 file changed, 7 insertions(+), 24 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index cfa96914c713..5663f151cef8 
100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -666,31 +666,14 @@ "source": [ "# 3. Train\n", "\n", - "Download [COCO128](https://www.kaggle.com/ultralytics/coco128), a small 128-image tutorial dataset, start tensorboard and train YOLOv5s from a pretrained checkpoint for 3 epochs (note actual training is typically much longer, around **300-1000 epochs**, depending on your dataset)." - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "Knxi2ncxWffW" - }, - "source": [ - "# Download COCO128\n", - "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip', 'tmp.zip')\n", - "!unzip -q tmp.zip -d ../datasets && rm tmp.zip" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "_pOkGLv1dMqh" - }, - "source": [ - "Train a YOLOv5s model on [COCO128](https://www.kaggle.com/ultralytics/coco128) with `--data coco128.yaml`, starting from pretrained `--weights yolov5s.pt`, or from randomly initialized `--weights '' --cfg yolov5s.yaml`. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and **COCO, COCO128, and VOC datasets are downloaded automatically** on first use.\n", + "Train a YOLOv5s model on the [COCO128](https://www.kaggle.com/ultralytics/coco128) dataset with `--data coco128.yaml`, starting from pretrained `--weights yolov5s.pt`, or from randomly initialized `--weights '' --cfg yolov5s.yaml`. \n", + "\n", + "* **Pretrained [Models](https://github.com/ultralytics/yolov5/tree/master/models)** are downloaded \n", + "automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\n", + "\n", + "* **[Datasets](https://github.com/ultralytics/yolov5/tree/master/data)** available for autodownload include: [COCO](https://github.com/ultralytics/yolov5/blob/master/data/coco.yaml), [COCO128](https://github.com/ultralytics/yolov5/blob/master/data/coco128.yaml), [VOC](https://github.com/ultralytics/yolov5/blob/master/data/VOC.yaml), [Argoverse](https://github.com/ultralytics/yolov5/blob/master/data/Argoverse.yaml), [VisDrone](https://github.com/ultralytics/yolov5/blob/master/data/VisDrone.yaml), [GlobalWheat](https://github.com/ultralytics/yolov5/blob/master/data/GlobalWheat2020.yaml), [xView](https://github.com/ultralytics/yolov5/blob/master/data/xView.yaml), [Objects365](https://github.com/ultralytics/yolov5/blob/master/data/Objects365.yaml), [SKU-110K](https://github.com/ultralytics/yolov5/blob/master/data/SKU-110K.yaml).\n", "\n", - "All training results are saved to `runs/train/` with incrementing run directories, i.e. `runs/train/exp2`, `runs/train/exp3` etc.\n" + "* **Training Results** are saved to `runs/train/` with incrementing run directories, i.e. `runs/train/exp2`, `runs/train/exp3` etc." 
] }, { From 2993c3fa7af7a76dd82349e3cf85e35e4254576b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 27 Sep 2021 19:00:05 -0700 Subject: [PATCH 0589/1976] Add `roboflow` (#4956) --- requirements.txt | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index 42d5dfc49354..a80d46789943 100755 --- a/requirements.txt +++ b/requirements.txt @@ -16,8 +16,8 @@ tensorboard>=2.4.1 # wandb # Plotting ------------------------------------ -seaborn>=0.11.0 pandas +seaborn>=0.11.0 # Export -------------------------------------- # coremltools>=4.1 # CoreML export @@ -28,7 +28,8 @@ pandas # tensorflowjs>=3.9.0 # TF.js export # Extras -------------------------------------- +# albumentations>=1.0.3 # Cython # for pycocotools https://github.com/cocodataset/cocoapi/issues/172 # pycocotools>=2.0 # COCO mAP -# albumentations>=1.0.3 +# roboflow thop # FLOPs computation From c1bed601e9b9a3f5fa8fb529cfa40df7a3a0b903 Mon Sep 17 00:00:00 2001 From: Diego Montes <54745152+d57montes@users.noreply.github.com> Date: Mon, 27 Sep 2021 23:16:23 -0400 Subject: [PATCH 0590/1976] Fix `isascii()` method calls for python 3.6 (#4958) * fix isascii for python3.6 * update comment with python 3.7 note Co-authored-by: Glenn Jocher --- utils/general.py | 5 +++++ utils/plots.py | 6 +++--- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/utils/general.py b/utils/general.py index 8421981147f7..28301f8573bb 100755 --- a/utils/general.py +++ b/utils/general.py @@ -160,6 +160,11 @@ def is_pip(): # Is file in a pip package? return 'site-packages' in Path(__file__).resolve().parts +def is_ascii(s=''): + # Is string composed of all ASCII (no UTF) characters? (note str().isascii() introduced in python 3.7) + s = str(s) # convert list, tuple, None, etc. to str + return len(s.encode().decode('ascii', 'ignore')) == len(s) + def is_chinese(s='人工智能'): # Is string composed of any Chinese characters? diff --git a/utils/plots.py b/utils/plots.py index 491c5704d67b..2f98d5b7e630 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -17,7 +17,7 @@ import torch from PIL import Image, ImageDraw, ImageFont -from utils.general import user_config_dir, is_chinese, xywh2xyxy, xyxy2xywh +from utils.general import user_config_dir, is_ascii, is_chinese, xywh2xyxy, xyxy2xywh from utils.metrics import fitness # Settings @@ -68,7 +68,7 @@ class Annotator: # YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'): assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.' 
- self.pil = pil or not example.isascii() or is_chinese(example) + self.pil = pil or not is_ascii(example) or is_chinese(example) if self.pil: # use PIL self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) self.draw = ImageDraw.Draw(self.im) @@ -80,7 +80,7 @@ def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=Fa def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)): # Add one xyxy box to image with label - if self.pil or not label.isascii(): + if self.pil or not is_ascii(label): self.draw.rectangle(box, width=self.lw, outline=color) # box if label: w, h = self.font.getsize(label) # text width, height From fb982d6030a700703649311937d9d08e68006b58 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 28 Sep 2021 06:36:12 -0700 Subject: [PATCH 0591/1976] Fix relative `ROOT` Pytorch Hub custom model bug (#4974) * Fix relative `ROOT` Pytorch Hub custom model bug * Update yolo.py --- models/tf.py | 2 +- models/yolo.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/models/tf.py b/models/tf.py index 4c082cb8a15e..bc6134291aca 100644 --- a/models/tf.py +++ b/models/tf.py @@ -20,7 +20,7 @@ ROOT = FILE.parents[1] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH -ROOT = ROOT.relative_to(Path.cwd()) # relative +# ROOT = ROOT.relative_to(Path.cwd()) # relative import numpy as np import tensorflow as tf diff --git a/models/yolo.py b/models/yolo.py index b4ec1eda8376..5d19aad5369f 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -15,7 +15,7 @@ ROOT = FILE.parents[1] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH -ROOT = ROOT.relative_to(Path.cwd()) # relative +# ROOT = ROOT.relative_to(Path.cwd()) # relative from models.common import * from models.experimental import * From 29acedf7dd4b805ef29aff08746d4cf75c1b5eb7 Mon Sep 17 00:00:00 2001 From: Kalen Michael Date: Tue, 28 Sep 2021 18:35:46 +0200 Subject: [PATCH 0592/1976] Add Roboflow to README (#4972) * added callbacks * added back callback to main * added save_dir to callback output * merged in upstream * removed ghost code * added integrations section removed competition * attempt column build * attempt column build * moved new to header * removed center column * utm * Update README.md * Update README.md * updated logo sizes * resized logos * fixed links * Update README.md * Update README.md * Update README.md Co-authored-by: Glenn Jocher --- README.md | 28 +++++++++++++++++++--------- 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index a01c2c688aa4..8314aea0849a 100644 --- a/README.md +++ b/README.md @@ -145,7 +145,7 @@ $ python train.py --data coco.yaml --cfg yolov5s.yaml --weights '' --batch-size * [Tips for Best Training Results](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)  ☘️ RECOMMENDED * [Weights & Biases Logging](https://github.com/ultralytics/yolov5/issues/1289)  🌟 NEW -* [Supervisely Ecosystem](https://github.com/ultralytics/yolov5/issues/2518)  🌟 NEW +* [Roboflow for Datasets, Labeling, and Active Learning](https://github.com/ultralytics/yolov5/issues/4975)  🌟 NEW * [Multi-GPU Training](https://github.com/ultralytics/yolov5/issues/475) * [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36)  ⭐ NEW * [TorchScript, ONNX, CoreML Export](https://github.com/ultralytics/yolov5/issues/251) 🚀 @@ -158,11 +158,9 @@ $ python train.py --data coco.yaml --cfg yolov5s.yaml 
--weights '' --batch-size
-## <div align="center">Environments and Integrations</div>
+## <div align="center">Environments</div>
-Get started in seconds with our verified environments and integrations,
-including [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme) for automatic YOLOv5 experiment
-logging. Click each icon below for details.
+Get started in seconds with our verified environments. Click each icon below for details.
+
+## <div align="center">Integrations</div>
+
+
+
+
+
+
+|Weights and Biases|Roboflow - ⭐ NEW|
|:-:|:-:|
|Automatically track and visualize all your YOLOv5 training runs in the cloud with [Weights & Biases.](https://wandb.ai/site?utm_campaign=repo_yolo_readme)|Label and automatically export your custom datasets directly to YOLOv5 for training using [Roboflow](https://roboflow.com/?ref=ultralytics) |
-## <div align="center">Compete and Win</div>
+
+
## <div align="center">Why YOLOv5</div>
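
Taken together, the font-handling patches above (#4951, #4958) reduce to a small backend-selection rule in `Annotator`. A minimal, self-contained sketch of that rule — helper bodies as in the `utils/general.py` hunks above; the example string is illustrative, not from the source:

```python
import re


def is_ascii(s=''):
    # Is string composed of all ASCII (no UTF) characters?
    # (str.isascii() only arrives in Python 3.7, hence this shim)
    s = str(s)  # convert list, tuple, None, etc. to str
    return len(s.encode().decode('ascii', 'ignore')) == len(s)


def is_chinese(s='人工智能'):
    # Is string composed of any Chinese characters?
    return re.search('[\u4e00-\u9fff]', s)


# Backend choice as in Annotator.__init__: use PIL when explicitly requested,
# when class names contain any non-ASCII text, or when they contain Chinese
# characters (the Chinese case also selects the 'Arial.Unicode.ttf' font).
example = "['person', '人']"  # illustrative stand-in for str(names)
pil = False or not is_ascii(example) or bool(is_chinese(example))
assert pil  # non-ASCII class names -> PIL drawing path
```
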
From 38c779b09950a7a8349d1d0891d414ced176dd4e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 28 Sep 2021 10:04:56 -0700 Subject: [PATCH 0593/1976] Created using Colaboratory --- tutorial.ipynb | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 5663f151cef8..17cf192e4832 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -665,15 +665,31 @@ }, "source": [ "# 3. Train\n", + "[](https://roboflow.com/?ref=ultralytics)\n", + "*Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package*\n", "\n", - "Train a YOLOv5s model on the [COCO128](https://www.kaggle.com/ultralytics/coco128) dataset with `--data coco128.yaml`, starting from pretrained `--weights yolov5s.pt`, or from randomly initialized `--weights '' --cfg yolov5s.yaml`. \n", + "
\n", "\n", - "* **Pretrained [Models](https://github.com/ultralytics/yolov5/tree/master/models)** are downloaded \n", + "Train a YOLOv5s model on the [COCO128](https://www.kaggle.com/ultralytics/coco128) dataset with `--data coco128.yaml`, starting from pretrained `--weights yolov5s.pt`, or from randomly initialized `--weights '' --cfg yolov5s.yaml`.\n", + "\n", + "- **Pretrained [Models](https://github.com/ultralytics/yolov5/tree/master/models)** are downloaded\n", "automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\n", + "- **[Datasets](https://github.com/ultralytics/yolov5/tree/master/data)** available for autodownload include: [COCO](https://github.com/ultralytics/yolov5/blob/master/data/coco.yaml), [COCO128](https://github.com/ultralytics/yolov5/blob/master/data/coco128.yaml), [VOC](https://github.com/ultralytics/yolov5/blob/master/data/VOC.yaml), [Argoverse](https://github.com/ultralytics/yolov5/blob/master/data/Argoverse.yaml), [VisDrone](https://github.com/ultralytics/yolov5/blob/master/data/VisDrone.yaml), [GlobalWheat](https://github.com/ultralytics/yolov5/blob/master/data/GlobalWheat2020.yaml), [xView](https://github.com/ultralytics/yolov5/blob/master/data/xView.yaml), [Objects365](https://github.com/ultralytics/yolov5/blob/master/data/Objects365.yaml), [SKU-110K](https://github.com/ultralytics/yolov5/blob/master/data/SKU-110K.yaml).\n", + "- **Training Results** are saved to `runs/train/` with incrementing run directories, i.e. `runs/train/exp2`, `runs/train/exp3` etc.\n", + "
\n", + "\n", + "## Train on Custom Data with Roboflow 🌟 NEW\n", + "\n", + "[Roboflow](https://roboflow.com/?ref=ultralytics) enables you to easily **organize, label, and prepare** a high quality dataset with your own custom data. Roboflow also makes it easy to establish an active learning pipeline, collaborate with your team on dataset improvement, and integrate directly into your model building workflow with the `roboflow` pip package.\n", + "\n", + "- Custom Training Example: [https://blog.roboflow.com/how-to-train-yolov5-on-a-custom-dataset/](https://blog.roboflow.com/how-to-train-yolov5-on-a-custom-dataset/?ref=ultralytics)\n", + "- Custom Training Notebook: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/roboflow-ai/yolov5-custom-training-tutorial/blob/main/yolov5-custom-training.ipynb)\n", + "\n", + "
\n", "\n", - "* **[Datasets](https://github.com/ultralytics/yolov5/tree/master/data)** available for autodownload include: [COCO](https://github.com/ultralytics/yolov5/blob/master/data/coco.yaml), [COCO128](https://github.com/ultralytics/yolov5/blob/master/data/coco128.yaml), [VOC](https://github.com/ultralytics/yolov5/blob/master/data/VOC.yaml), [Argoverse](https://github.com/ultralytics/yolov5/blob/master/data/Argoverse.yaml), [VisDrone](https://github.com/ultralytics/yolov5/blob/master/data/VisDrone.yaml), [GlobalWheat](https://github.com/ultralytics/yolov5/blob/master/data/GlobalWheat2020.yaml), [xView](https://github.com/ultralytics/yolov5/blob/master/data/xView.yaml), [Objects365](https://github.com/ultralytics/yolov5/blob/master/data/Objects365.yaml), [SKU-110K](https://github.com/ultralytics/yolov5/blob/master/data/SKU-110K.yaml).\n", + "[](https://roboflow.com/?ref=ultralytics)\n", "\n", - "* **Training Results** are saved to `runs/train/` with incrementing run directories, i.e. `runs/train/exp2`, `runs/train/exp3` etc." + "*Label images lightning fast (including with model-assisted labeling)*" ] }, { From 0c87478713b3db7583da0ae950587e3316291004 Mon Sep 17 00:00:00 2001 From: Diego Montes <54745152+d57montes@users.noreply.github.com> Date: Tue, 28 Sep 2021 15:22:31 -0400 Subject: [PATCH 0594/1976] Update wandb_utils.py (#4953) `is_valset_wandb_artifact` and `is_trainset_wandb_artifact` were referenced before assignment causing wandb to be unusable. --- utils/loggers/wandb/wandb_utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index 9a80dc42ca95..92fdd27bb004 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -45,7 +45,8 @@ def check_wandb_config_file(data_config_file): def check_wandb_dataset(data_file): - is_wandb_artifact = False + is_trainset_wandb_artifact = False + is_valset_wandb_artifact = False if check_file(data_file) and data_file.endswith('.yaml'): with open(data_file, errors='ignore') as f: data_dict = yaml.safe_load(f) From 94705a952861d8a70ec8be2fb90f3375150a873d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 28 Sep 2021 12:28:18 -0700 Subject: [PATCH 0595/1976] Add Hub custom models to CI tests (#4978) * Update ci-testing.yml for Hub custom model tests * Update ci-testing.yml --- .github/workflows/ci-testing.yml | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 71f39c16c4ed..da695395fe69 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -4,10 +4,10 @@ name: CI CPU testing on: # https://help.github.com/en/actions/reference/events-that-trigger-workflows push: - branches: [master] + branches: [ master ] pull_request: # The branches below must be a subset of the branches above - branches: [master] + branches: [ master ] schedule: - cron: '0 0 * * *' # Runs at 00:00 UTC every day @@ -18,9 +18,9 @@ jobs: strategy: fail-fast: false matrix: - os: [ubuntu-latest, macos-latest, windows-latest] - python-version: [3.8] - model: ['yolov5s'] # models to test + os: [ ubuntu-latest, macos-latest, windows-latest ] + python-version: [ 3.8 ] + model: [ 'yolov5s' ] # models to test # Timeout: https://stackoverflow.com/a/59076067/4521646 timeout-minutes: 50 @@ -65,19 +65,25 @@ jobs: - name: Tests workflow run: | # export PYTHONPATH="$PWD" # to run '$ python *.py' files in 
subdirectories - di=cpu # inference devices # define device + di=cpu # device - # train + # Train python train.py --img 128 --batch 16 --weights ${{ matrix.model }}.pt --cfg ${{ matrix.model }}.yaml --epochs 1 --device $di - # detect - python detect.py --weights ${{ matrix.model }}.pt --device $di - python detect.py --weights runs/train/exp/weights/last.pt --device $di - # val + # Val python val.py --img 128 --batch 16 --weights ${{ matrix.model }}.pt --device $di python val.py --img 128 --batch 16 --weights runs/train/exp/weights/last.pt --device $di - + # Detect + python detect.py --weights ${{ matrix.model }}.pt --device $di + python detect.py --weights runs/train/exp/weights/last.pt --device $di python hubconf.py # hub + # Export python models/yolo.py --cfg ${{ matrix.model }}.yaml # build PyTorch model python models/tf.py --weights ${{ matrix.model }}.pt # build TensorFlow model python export.py --img 128 --batch 1 --weights ${{ matrix.model }}.pt --include torchscript onnx # export + # Python + python - < Date: Tue, 28 Sep 2021 12:41:28 -0700 Subject: [PATCH 0596/1976] Faster `--img 64` CI tests (#4979) --- .github/workflows/ci-testing.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index da695395fe69..c44e23995c3b 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -68,10 +68,10 @@ jobs: di=cpu # device # Train - python train.py --img 128 --batch 16 --weights ${{ matrix.model }}.pt --cfg ${{ matrix.model }}.yaml --epochs 1 --device $di + python train.py --img 64 --batch 32 --weights ${{ matrix.model }}.pt --cfg ${{ matrix.model }}.yaml --epochs 1 --device $di # Val - python val.py --img 128 --batch 16 --weights ${{ matrix.model }}.pt --device $di - python val.py --img 128 --batch 16 --weights runs/train/exp/weights/last.pt --device $di + python val.py --img 64 --batch 32 --weights ${{ matrix.model }}.pt --device $di + python val.py --img 64 --batch 32 --weights runs/train/exp/weights/last.pt --device $di # Detect python detect.py --weights ${{ matrix.model }}.pt --device $di python detect.py --weights runs/train/exp/weights/last.pt --device $di @@ -79,7 +79,7 @@ jobs: # Export python models/yolo.py --cfg ${{ matrix.model }}.yaml # build PyTorch model python models/tf.py --weights ${{ matrix.model }}.pt # build TensorFlow model - python export.py --img 128 --batch 1 --weights ${{ matrix.model }}.pt --include torchscript onnx # export + python export.py --img 64 --batch 1 --weights ${{ matrix.model }}.pt --include torchscript onnx # export # Python python - < Date: Tue, 28 Sep 2021 17:29:05 -0700 Subject: [PATCH 0597/1976] Created using Colaboratory --- tutorial.ipynb | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 17cf192e4832..9cfb61b0e682 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -661,14 +661,14 @@ { "cell_type": "markdown", "metadata": { - "id": "VUOiNLtMP5aG" + "id": "ZY2VXXXu74w5" }, "source": [ "# 3. Train\n", - "[](https://roboflow.com/?ref=ultralytics)\n", - "*Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package*\n", "\n", - "
\n", + "

\n", + "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", + "

\n", "\n", "Train a YOLOv5s model on the [COCO128](https://www.kaggle.com/ultralytics/coco128) dataset with `--data coco128.yaml`, starting from pretrained `--weights yolov5s.pt`, or from randomly initialized `--weights '' --cfg yolov5s.yaml`.\n", "\n", @@ -676,7 +676,7 @@ "automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\n", "- **[Datasets](https://github.com/ultralytics/yolov5/tree/master/data)** available for autodownload include: [COCO](https://github.com/ultralytics/yolov5/blob/master/data/coco.yaml), [COCO128](https://github.com/ultralytics/yolov5/blob/master/data/coco128.yaml), [VOC](https://github.com/ultralytics/yolov5/blob/master/data/VOC.yaml), [Argoverse](https://github.com/ultralytics/yolov5/blob/master/data/Argoverse.yaml), [VisDrone](https://github.com/ultralytics/yolov5/blob/master/data/VisDrone.yaml), [GlobalWheat](https://github.com/ultralytics/yolov5/blob/master/data/GlobalWheat2020.yaml), [xView](https://github.com/ultralytics/yolov5/blob/master/data/xView.yaml), [Objects365](https://github.com/ultralytics/yolov5/blob/master/data/Objects365.yaml), [SKU-110K](https://github.com/ultralytics/yolov5/blob/master/data/SKU-110K.yaml).\n", "- **Training Results** are saved to `runs/train/` with incrementing run directories, i.e. `runs/train/exp2`, `runs/train/exp3` etc.\n", - "
\n", + "

\n", "\n", "## Train on Custom Data with Roboflow 🌟 NEW\n", "\n", @@ -684,12 +684,9 @@ "\n", "- Custom Training Example: [https://blog.roboflow.com/how-to-train-yolov5-on-a-custom-dataset/](https://blog.roboflow.com/how-to-train-yolov5-on-a-custom-dataset/?ref=ultralytics)\n", "- Custom Training Notebook: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/roboflow-ai/yolov5-custom-training-tutorial/blob/main/yolov5-custom-training.ipynb)\n", - "\n", "
\n", "\n", - "[](https://roboflow.com/?ref=ultralytics)\n", - "\n", - "*Label images lightning fast (including with model-assisted labeling)*" + "

Label images lightning fast (including with model-assisted labeling)" ] }, { From 6b19f728a1d422a721d3094f746caacbc24c3fed Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 28 Sep 2021 17:33:48 -0700 Subject: [PATCH 0598/1976] Created using Colaboratory --- tutorial.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 9cfb61b0e682..b7a06845ea99 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -378,7 +378,7 @@ }, "source": [ "\n", - "\n", + "\n", "\n", "This is the **official YOLOv5 🚀 notebook** by **Ultralytics**, and is freely available for redistribution under the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/). \n", "For more information please visit https://github.com/ultralytics/yolov5 and https://ultralytics.com. Thank you!" From 9988059b1063a8375de76179ff31a273f58b53bc Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 28 Sep 2021 17:46:14 -0700 Subject: [PATCH 0599/1976] Clickable CI badge (#4985) --- .github/workflows/greetings.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index a40d0a50c8ac..0bbc49ba2508 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -54,7 +54,7 @@ jobs: ## Status - ![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg) + CI CPU testing If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), validation ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on MacOS, Windows, and Ubuntu every 24 hours and on every commit. From cd35a009ba964331abccd30f6fa0614224105d39 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 28 Sep 2021 20:11:20 -0700 Subject: [PATCH 0600/1976] Revert `torch.hub.load()` test (#4986) Temporarily reverts https://github.com/ultralytics/yolov5/pull/4978 until torch 1.10 is released, which should resolve `urllib.error.HTTPError: HTTP Error 403: rate limit exceeded` errors generated by torch hub from GitHub actions runners. 
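
For context, the disabled CI step loads a custom checkpoint through PyTorch Hub; a minimal sketch of that call follows (the weights path is illustrative):

```python
import torch

# Loading a custom YOLOv5 checkpoint through torch.hub resolves the
# 'ultralytics/yolov5' repo via the GitHub API, which is what returns
# HTTP 403 when hosted runners exhaust the unauthenticated rate limit.
model = torch.hub.load('ultralytics/yolov5', 'custom', path='runs/train/exp/weights/last.pt')
```
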
--- .github/workflows/ci-testing.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index c44e23995c3b..23a742fc08dd 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -83,7 +83,8 @@ jobs: # Python python - < Date: Wed, 29 Sep 2021 19:48:45 +0200 Subject: [PATCH 0601/1976] Fix URL parsing bug (#4998) * added callbacks * added back callback to main * added save_dir to callback output * merged in upstream * removed ghost code * fixed parsing error for google temp links Co-authored-by: Glenn Jocher --- utils/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index 28301f8573bb..f2afb480cc63 100755 --- a/utils/general.py +++ b/utils/general.py @@ -313,7 +313,7 @@ def check_file(file, suffix=''): return file elif file.startswith(('http:/', 'https:/')): # download url = str(Path(file)).replace(':/', '://') # Pathlib turns :// -> :/ - file = Path(urllib.parse.unquote(file)).name.split('?')[0] # '%2F' to '/', split https://url.com/file.txt?auth + file = Path(urllib.parse.unquote(file).split('?')[0]).name # '%2F' to '/', split https://url.com/file.txt?auth print(f'Downloading {url} to {file}...') torch.hub.download_url_to_file(url, file) assert Path(file).exists() and Path(file).stat().st_size > 0, f'File download failed: {url}' # check From 42354d70252be81dedf0b889eaf5f4b071eb7694 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 29 Sep 2021 16:56:10 -0700 Subject: [PATCH 0602/1976] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 8314aea0849a..9510fb278cd7 100644 --- a/README.md +++ b/README.md @@ -192,7 +192,7 @@ Get started in seconds with our verified environments. 
Click each icon below for |Weights and Biases|Roboflow - ⭐ NEW| |:-:|:-:| -|Automatically track and visualize all your YOLOv5 training runs in the cloud with [Weights & Biases.](https://wandb.ai/site?utm_campaign=repo_yolo_readme)|Label and automatically export your custom datasets directly to YOLOv5 for training using [Roboflow](https://roboflow.com/?ref=ultralytics) | +|Automatically track and visualize all your YOLOv5 training runs in the cloud with [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme)|Label and automatically export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) | ## Pitch diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 6d1603880f4d..8ebfdeca8d74 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -83,7 +83,7 @@ jobs: # Python python - <> $GITHUB_ENV + - uses: actions/cache@v2 + with: + path: ~/.cache/pre-commit + key: pre-commit|${{ env.PY }}|${{ hashFiles('.pre-commit-config.yaml') }} + + - uses: pre-commit/action@v2.0.3 + # this action also provides an additional behaviour when used in private repositories + # when configured with a github token, the action will push back fixes to the pull request branch + with: + token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 2305ea07e902..67f51f0e8bce 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -1,4 +1,4 @@ -# This action runs GitHub's industry-leading static analysis engine, CodeQL, against a repository's source code to find security vulnerabilities. +# This action runs GitHub's industry-leading static analysis engine, CodeQL, against a repository's source code to find security vulnerabilities. # https://github.com/github/codeql-action name: "CodeQL" diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index a00ee8da66e1..0daf9514d3c5 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -57,4 +57,3 @@ jobs: CI CPU testing If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), validation ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on MacOS, Windows, and Ubuntu every 24 hours and on every commit. 
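
The `pyupgrade` hook (args `--py36-plus`) configured in the `.pre-commit-config.yaml` below automates rewrites consistent with the zero-argument `super()` changes seen in the `models/tf.py` hunks further down; a small before/after sketch with hypothetical class names:

```python
class PadOld:  # hypothetical illustration class
    def __init__(self, pad):
        super(PadOld, self).__init__()  # legacy Python 2-style super call
        self.pad = pad


class PadNew:  # same class after a pyupgrade-style rewrite
    def __init__(self, pad):
        super().__init__()  # zero-argument form, valid on Python 3
        self.pad = pad
```
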
- diff --git a/.gitignore b/.gitignore index 375b71807588..5f8cab550021 100755 --- a/.gitignore +++ b/.gitignore @@ -20,6 +20,7 @@ *.data *.json *.cfg +!setup.cfg !cfg/yolov3*.cfg storage.googleapis.com diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 000000000000..2eb78aa17ef4 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,67 @@ +# Define hooks for code formations +# Will be applied on any updated commit files if a user has installed and linked commit hook + +default_language_version: + python: python3.8 + +# Define bot property if installed via https://github.com/marketplace/pre-commit-ci +ci: + autofix_prs: true + autoupdate_commit_msg: '[pre-commit.ci] pre-commit suggestions' + autoupdate_schedule: quarterly + # submodules: true + +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.0.1 + hooks: + - id: end-of-file-fixer + - id: trailing-whitespace + - id: check-case-conflict + - id: check-yaml + - id: check-toml + - id: pretty-format-json + - id: check-docstring-first + + - repo: https://github.com/asottile/pyupgrade + rev: v2.23.1 + hooks: + - id: pyupgrade + args: [--py36-plus] + name: Upgrade code + + # TODO + #- repo: https://github.com/PyCQA/isort + # rev: 5.9.3 + # hooks: + # - id: isort + # name: imports + + # TODO + #- repo: https://github.com/pre-commit/mirrors-yapf + # rev: v0.31.0 + # hooks: + # - id: yapf + # name: formatting + + # TODO + #- repo: https://github.com/executablebooks/mdformat + # rev: 0.7.7 + # hooks: + # - id: mdformat + # additional_dependencies: + # - mdformat-gfm + # - mdformat-black + # - mdformat_frontmatter + + # TODO + #- repo: https://github.com/asottile/yesqa + # rev: v1.2.3 + # hooks: + # - id: yesqa + + - repo: https://github.com/PyCQA/flake8 + rev: 3.9.2 + hooks: + - id: flake8 + name: PEP8 diff --git a/LICENSE b/LICENSE index 9e419e042146..92b370f0e0e1 100644 --- a/LICENSE +++ b/LICENSE @@ -671,4 +671,4 @@ into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read -. \ No newline at end of file +. diff --git a/README.md b/README.md index 0d474cb4a09b..d3fd7e9a92f5 100644 --- a/README.md +++ b/README.md @@ -46,7 +46,7 @@ YOLOv5 🚀 is a family of object detection architectures and models pretrained open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development.

- @@ -109,7 +109,7 @@ the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases) and ```bash $ python detect.py --source 0 # webcam - file.jpg # image + file.jpg # image file.mp4 # video path/ # directory path/*.jpg # glob @@ -136,7 +136,7 @@ $ python train.py --data coco.yaml --cfg yolov5s.yaml --weights '' --batch-size - +
Tutorials @@ -178,7 +178,7 @@ Get started in seconds with our verified environments. Click each icon below for - + ##
Integrations
@@ -239,7 +239,7 @@ We are super excited about our first-ever Ultralytics YOLOv5 🚀 EXPORT Competi
|[YOLOv5s6][assets] |1280 |44.5 |63.0 |385 |8.2 |3.6 |16.8 |12.6
|[YOLOv5m6][assets] |1280 |51.0 |69.0 |887 |11.1 |6.8 |35.7 |50.0
|[YOLOv5l6][assets] |1280 |53.6 |71.6 |1784 |15.8 |10.5 |76.8 |111.4
-|[YOLOv5x6][assets]<br>+ [TTA][TTA]|1280<br>1536 |54.7<br>**55.4** |**72.4**<br>72.3 |3136<br>- |26.2<br>- |19.4<br>- |140.7<br>- |209.8<br>-
+|[YOLOv5x6][assets]<br>+ [TTA][TTA]|1280<br>1536 |54.7<br>**55.4** |**72.4**<br>72.3 |3136<br>- |26.2<br>- |19.4<br>- |140.7<br>- |209.8<br>-
Table Notes (click to expand) diff --git a/data/Objects365.yaml b/data/Objects365.yaml index 97a424fd03a0..b10c28e764c1 100644 --- a/data/Objects365.yaml +++ b/data/Objects365.yaml @@ -62,21 +62,21 @@ names: ['Person', 'Sneakers', 'Chair', 'Other Shoes', 'Hat', 'Car', 'Lamp', 'Gla download: | from pycocotools.coco import COCO from tqdm import tqdm - + from utils.general import Path, download, np, xyxy2xywhn - + # Make Directories dir = Path(yaml['path']) # dataset root dir for p in 'images', 'labels': (dir / p).mkdir(parents=True, exist_ok=True) for q in 'train', 'val': (dir / p / q).mkdir(parents=True, exist_ok=True) - + # Train, Val Splits for split, patches in [('train', 50 + 1), ('val', 43 + 1)]: print(f"Processing {split} in {patches} patches ...") images, labels = dir / 'images' / split, dir / 'labels' / split - + # Download url = f"https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/{split}/" if split == 'train': @@ -86,11 +86,11 @@ download: | download([f'{url}zhiyuan_objv2_{split}.json'], dir=dir, delete=False) # annotations json download([f'{url}images/v1/patch{i}.tar.gz' for i in range(15 + 1)], dir=images, curl=True, delete=False, threads=8) download([f'{url}images/v2/patch{i}.tar.gz' for i in range(16, patches)], dir=images, curl=True, delete=False, threads=8) - + # Move for f in tqdm(images.rglob('*.jpg'), desc=f'Moving {split} images'): f.rename(images / f.name) # move to /images/{split} - + # Labels coco = COCO(dir / f'zhiyuan_objv2_{split}.json') names = [x["name"] for x in coco.loadCats(coco.getCatIds())] diff --git a/data/coco128.yaml b/data/coco128.yaml index 70cf52c397af..b1dfb004afa1 100644 --- a/data/coco128.yaml +++ b/data/coco128.yaml @@ -27,4 +27,4 @@ names: ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 't # Download script/URL (optional) -download: https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip \ No newline at end of file +download: https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip diff --git a/data/hyps/hyp.scratch-high.yaml b/data/hyps/hyp.scratch-high.yaml index 519c82687e09..5a586cc63fae 100644 --- a/data/hyps/hyp.scratch-high.yaml +++ b/data/hyps/hyp.scratch-high.yaml @@ -31,4 +31,4 @@ flipud: 0.0 # image flip up-down (probability) fliplr: 0.5 # image flip left-right (probability) mosaic: 1.0 # image mosaic (probability) mixup: 0.1 # image mixup (probability) -copy_paste: 0.1 # segment copy-paste (probability) \ No newline at end of file +copy_paste: 0.1 # segment copy-paste (probability) diff --git a/data/hyps/hyp.scratch-low.yaml b/data/hyps/hyp.scratch-low.yaml index b093a95ac53b..b9ef1d55a3b6 100644 --- a/data/hyps/hyp.scratch-low.yaml +++ b/data/hyps/hyp.scratch-low.yaml @@ -31,4 +31,4 @@ flipud: 0.0 # image flip up-down (probability) fliplr: 0.5 # image flip left-right (probability) mosaic: 1.0 # image mosaic (probability) mixup: 0.0 # image mixup (probability) -copy_paste: 0.0 # segment copy-paste (probability) \ No newline at end of file +copy_paste: 0.0 # segment copy-paste (probability) diff --git a/models/common.py b/models/common.py index 5da35690a4ec..d0fb0e8596ed 100644 --- a/models/common.py +++ b/models/common.py @@ -79,7 +79,7 @@ def __init__(self, c1, c2, num_heads, num_layers): if c1 != c2: self.conv = Conv(c1, c2) self.linear = nn.Linear(c2, c2) # learnable position embedding - self.tr = nn.Sequential(*[TransformerLayer(c2, num_heads) for _ in range(num_layers)]) + self.tr = nn.Sequential(*(TransformerLayer(c2, num_heads) for _ 
in range(num_layers))) self.c2 = c2 def forward(self, x): @@ -114,7 +114,7 @@ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, nu self.cv4 = Conv(2 * c_, c2, 1, 1) self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3) self.act = nn.LeakyReLU(0.1, inplace=True) - self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) + self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n))) def forward(self, x): y1 = self.cv3(self.m(self.cv1(x))) @@ -130,7 +130,7 @@ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, nu self.cv1 = Conv(c1, c_, 1, 1) self.cv2 = Conv(c1, c_, 1, 1) self.cv3 = Conv(2 * c_, c2, 1) # act=FReLU(c2) - self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) + self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n))) # self.m = nn.Sequential(*[CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)]) def forward(self, x): @@ -158,7 +158,7 @@ class C3Ghost(C3): def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): super().__init__(c1, c2, n, shortcut, g, e) c_ = int(c2 * e) # hidden channels - self.m = nn.Sequential(*[GhostBottleneck(c_, c_) for _ in range(n)]) + self.m = nn.Sequential(*(GhostBottleneck(c_, c_) for _ in range(n))) class SPP(nn.Module): @@ -362,7 +362,7 @@ class Detections: def __init__(self, imgs, pred, files, times=None, names=None, shape=None): super().__init__() d = pred[0].device # device - gn = [torch.tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.], device=d) for im in imgs] # normalizations + gn = [torch.tensor([*(im.shape[i] for i in [1, 0, 1, 0]), 1., 1.], device=d) for im in imgs] # normalizations self.imgs = imgs # list of images as numpy arrays self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls) self.names = names # class names diff --git a/models/experimental.py b/models/experimental.py index edccc9632fb5..adb86c81fc06 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -97,7 +97,6 @@ def attempt_load(weights, map_location=None, inplace=True, fuse=True): else: model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().eval()) # without layer fuse - # Compatibility updates for m in model.modules(): if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model]: diff --git a/models/hub/yolov5-bifpn.yaml b/models/hub/yolov5-bifpn.yaml index 119aebb1523a..2f2c82c70122 100644 --- a/models/hub/yolov5-bifpn.yaml +++ b/models/hub/yolov5-bifpn.yaml @@ -18,7 +18,7 @@ backbone: [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 [-1, 9, C3, [256]], [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 - [-1, 9, C3, [512]] + [-1, 9, C3, [512]], [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 [-1, 1, SPP, [1024, [5, 9, 13]]], [-1, 3, C3, [1024, False]], # 9 diff --git a/models/tf.py b/models/tf.py index 1c6da43adaac..5599ff5cce91 100644 --- a/models/tf.py +++ b/models/tf.py @@ -40,7 +40,7 @@ class TFBN(keras.layers.Layer): # TensorFlow BatchNormalization wrapper def __init__(self, w=None): - super(TFBN, self).__init__() + super().__init__() self.bn = keras.layers.BatchNormalization( beta_initializer=keras.initializers.Constant(w.bias.numpy()), gamma_initializer=keras.initializers.Constant(w.weight.numpy()), @@ -54,7 +54,7 @@ def call(self, inputs): class TFPad(keras.layers.Layer): def __init__(self, pad): - super(TFPad, self).__init__() + super().__init__() self.pad = tf.constant([[0, 0], [pad, pad], [pad, pad], [0, 0]]) def call(self, inputs): @@ -65,7 +65,7 @@ class 
TFConv(keras.layers.Layer): # Standard convolution def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None): # ch_in, ch_out, weights, kernel, stride, padding, groups - super(TFConv, self).__init__() + super().__init__() assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument" assert isinstance(k, int), "Convolution with multiple kernels are not allowed." # TensorFlow convolution padding is inconsistent with PyTorch (e.g. k=3 s=2 'SAME' padding) @@ -96,7 +96,7 @@ class TFFocus(keras.layers.Layer): # Focus wh information into c-space def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None): # ch_in, ch_out, kernel, stride, padding, groups - super(TFFocus, self).__init__() + super().__init__() self.conv = TFConv(c1 * 4, c2, k, s, p, g, act, w.conv) def call(self, inputs): # x(b,w,h,c) -> y(b,w/2,h/2,4c) @@ -110,7 +110,7 @@ def call(self, inputs): # x(b,w,h,c) -> y(b,w/2,h/2,4c) class TFBottleneck(keras.layers.Layer): # Standard bottleneck def __init__(self, c1, c2, shortcut=True, g=1, e=0.5, w=None): # ch_in, ch_out, shortcut, groups, expansion - super(TFBottleneck, self).__init__() + super().__init__() c_ = int(c2 * e) # hidden channels self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) self.cv2 = TFConv(c_, c2, 3, 1, g=g, w=w.cv2) @@ -123,7 +123,7 @@ def call(self, inputs): class TFConv2d(keras.layers.Layer): # Substitution for PyTorch nn.Conv2D def __init__(self, c1, c2, k, s=1, g=1, bias=True, w=None): - super(TFConv2d, self).__init__() + super().__init__() assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument" self.conv = keras.layers.Conv2D( c2, k, s, 'VALID', use_bias=bias, @@ -138,7 +138,7 @@ class TFBottleneckCSP(keras.layers.Layer): # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None): # ch_in, ch_out, number, shortcut, groups, expansion - super(TFBottleneckCSP, self).__init__() + super().__init__() c_ = int(c2 * e) # hidden channels self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) self.cv2 = TFConv2d(c1, c_, 1, 1, bias=False, w=w.cv2) @@ -158,7 +158,7 @@ class TFC3(keras.layers.Layer): # CSP Bottleneck with 3 convolutions def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None): # ch_in, ch_out, number, shortcut, groups, expansion - super(TFC3, self).__init__() + super().__init__() c_ = int(c2 * e) # hidden channels self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) self.cv2 = TFConv(c1, c_, 1, 1, w=w.cv2) @@ -172,7 +172,7 @@ def call(self, inputs): class TFSPP(keras.layers.Layer): # Spatial pyramid pooling layer used in YOLOv3-SPP def __init__(self, c1, c2, k=(5, 9, 13), w=None): - super(TFSPP, self).__init__() + super().__init__() c_ = c1 // 2 # hidden channels self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) self.cv2 = TFConv(c_ * (len(k) + 1), c2, 1, 1, w=w.cv2) @@ -186,7 +186,7 @@ def call(self, inputs): class TFSPPF(keras.layers.Layer): # Spatial pyramid pooling-Fast layer def __init__(self, c1, c2, k=5, w=None): - super(TFSPPF, self).__init__() + super().__init__() c_ = c1 // 2 # hidden channels self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) self.cv2 = TFConv(c_ * 4, c2, 1, 1, w=w.cv2) @@ -201,7 +201,7 @@ def call(self, inputs): class TFDetect(keras.layers.Layer): def __init__(self, nc=80, anchors=(), ch=(), imgsz=(640, 640), w=None): # detection layer - super(TFDetect, self).__init__() + super().__init__() self.stride = tf.convert_to_tensor(w.stride.numpy(), dtype=tf.float32) self.nc = nc # number of classes self.no = nc + 5 # number of outputs per anchor @@ -249,7 +249,7 @@ 
def _make_grid(nx=20, ny=20): class TFUpsample(keras.layers.Layer): def __init__(self, size, scale_factor, mode, w=None): # warning: all arguments needed including 'w' - super(TFUpsample, self).__init__() + super().__init__() assert scale_factor == 2, "scale_factor must be 2" self.upsample = lambda x: tf.image.resize(x, (x.shape[1] * 2, x.shape[2] * 2), method=mode) # self.upsample = keras.layers.UpSampling2D(size=scale_factor, interpolation=mode) @@ -263,7 +263,7 @@ def call(self, inputs): class TFConcat(keras.layers.Layer): def __init__(self, dimension=1, w=None): - super(TFConcat, self).__init__() + super().__init__() assert dimension == 1, "convert only NCHW to NHWC concat" self.d = 3 @@ -272,7 +272,7 @@ def call(self, inputs): def parse_model(d, ch, model, imgsz): # model_dict, input_channels(3) - LOGGER.info('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments')) + LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}") anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'] na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors no = na * (nc + 5) # number of outputs = anchors * (classes + 5) @@ -299,7 +299,7 @@ def parse_model(d, ch, model, imgsz): # model_dict, input_channels(3) elif m is nn.BatchNorm2d: args = [ch[f]] elif m is Concat: - c2 = sum([ch[-1 if x == -1 else x + 1] for x in f]) + c2 = sum(ch[-1 if x == -1 else x + 1] for x in f) elif m is Detect: args.append([ch[x + 1] for x in f]) if isinstance(args[1], int): # number of anchors @@ -312,11 +312,11 @@ def parse_model(d, ch, model, imgsz): # model_dict, input_channels(3) m_ = keras.Sequential([tf_m(*args, w=model.model[i][j]) for j in range(n)]) if n > 1 \ else tf_m(*args, w=model.model[i]) # module - torch_m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args) # module + torch_m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args) # module t = str(m)[8:-2].replace('__main__.', '') # module type - np = sum([x.numel() for x in torch_m_.parameters()]) # number params + np = sum(x.numel() for x in torch_m_.parameters()) # number params m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params - LOGGER.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args)) # print + LOGGER.info(f'{i:>3}{str(f):>18}{str(n):>3}{np:>10} {t:<40}{str(args):<30}') # print save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist layers.append(m_) ch.append(c2) @@ -325,7 +325,7 @@ def parse_model(d, ch, model, imgsz): # model_dict, input_channels(3) class TFModel: def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, model=None, imgsz=(640, 640)): # model, channels, classes - super(TFModel, self).__init__() + super().__init__() if isinstance(cfg, dict): self.yaml = cfg # model dict else: # is *.yaml @@ -336,7 +336,7 @@ def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, model=None, imgsz=(640, 64 # Define model if nc and nc != self.yaml['nc']: - print('Overriding %s nc=%g with nc=%g' % (cfg, self.yaml['nc'], nc)) + print(f"Overriding {cfg} nc={self.yaml['nc']} with nc={nc}") self.yaml['nc'] = nc # override yaml value self.model, self.savelist = parse_model(deepcopy(self.yaml), ch=[ch], model=model, imgsz=imgsz) diff --git a/models/yolo.py b/models/yolo.py index 497a0e9c24e6..0fa2db91e82b 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -247,7 +247,7 @@ def _apply(self, fn): def parse_model(d, ch): # 
model_dict, input_channels(3) - LOGGER.info('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments')) + LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}") anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'] na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors no = na * (nc + 5) # number of outputs = anchors * (classes + 5) @@ -275,7 +275,7 @@ def parse_model(d, ch): # model_dict, input_channels(3) elif m is nn.BatchNorm2d: args = [ch[f]] elif m is Concat: - c2 = sum([ch[x] for x in f]) + c2 = sum(ch[x] for x in f) elif m is Detect: args.append([ch[x] for x in f]) if isinstance(args[1], int): # number of anchors @@ -287,11 +287,11 @@ def parse_model(d, ch): # model_dict, input_channels(3) else: c2 = ch[f] - m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args) # module + m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args) # module t = str(m)[8:-2].replace('__main__.', '') # module type - np = sum([x.numel() for x in m_.parameters()]) # number params + np = sum(x.numel() for x in m_.parameters()) # number params m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params - LOGGER.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n_, np, t, args)) # print + LOGGER.info(f'{i:>3}{str(f):>18}{n_:>3}{np:10.0f} {t:<40}{str(args):<30}') # print save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist layers.append(m_) if i == 0: diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 000000000000..7d25200cdb33 --- /dev/null +++ b/setup.cfg @@ -0,0 +1,45 @@ +# Project-wide configuration file, can be used for package metadata and other tool configurations +# Example usage: global configuration for PEP8 (via flake8) setting or default pytest arguments + +[metadata] +license_file = LICENSE +description-file = README.md + + +[tool:pytest] +norecursedirs = + .git + dist + build +addopts = + --doctest-modules + --durations=25 + --color=yes + + +[flake8] +max-line-length = 120 +exclude = .tox,*.egg,build,temp +select = E,W,F +doctests = True +verbose = 2 +# https://pep8.readthedocs.io/en/latest/intro.html#error-codes +format = pylint +# see: https://www.flake8rules.com/ +ignore = + E731 # Do not assign a lambda expression, use a def + F405 + E402 + F841 + E741 + F821 + E722 + F401 + W504 + E127 + W504 + E231 + E501 + F403 + E302 + F541 diff --git a/tutorial.ipynb b/tutorial.ipynb index 47c44251b5ab..115d767a70bf 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -1014,4 +1014,4 @@ "outputs": [] } ] -} \ No newline at end of file +} diff --git a/utils/datasets.py b/utils/datasets.py index 3997a5df6331..fce005bd597c 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -140,7 +140,7 @@ def __iter__(self): yield next(self.iterator) -class _RepeatSampler(object): +class _RepeatSampler: """ Sampler that repeats forever Args: @@ -287,7 +287,7 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True): self.stride = stride if os.path.isfile(sources): - with open(sources, 'r') as f: + with open(sources) as f: sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())] else: sources = [sources] @@ -398,14 +398,14 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r f += glob.glob(str(p / '**' / '*.*'), recursive=True) # f = list(p.rglob('*.*')) # pathlib elif p.is_file(): # file - with open(p,
'r') as t: + with open(p) as t: t = t.read().strip().splitlines() parent = str(p.parent) + os.sep f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path # f += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib) else: raise Exception(f'{prefix}{p} does not exist') - self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS]) + self.img_files = sorted(x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS) # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS]) # pathlib assert self.img_files, f'{prefix}No images found' except Exception as e: @@ -681,7 +681,7 @@ def load_mosaic(self, index): # YOLOv5 4-mosaic loader. Loads 1 image + 3 random images into a 4-image mosaic labels4, segments4 = [], [] s = self.img_size - yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y + yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border) # mosaic center x, y indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices random.shuffle(indices) for i, index in enumerate(indices): @@ -767,7 +767,7 @@ def load_mosaic9(self, index): c = s - w, s + h0 - hp - h, s, s + h0 - hp padx, pady = c[:2] - x1, y1, x2, y2 = [max(x, 0) for x in c] # allocate coords + x1, y1, x2, y2 = (max(x, 0) for x in c) # allocate coords # Labels labels, segments = self.labels[index].copy(), self.segments[index].copy() @@ -782,7 +782,7 @@ def load_mosaic9(self, index): hp, wp = h, w # height, width previous # Offset - yc, xc = [int(random.uniform(0, s)) for _ in self.mosaic_border] # mosaic center x, y + yc, xc = (int(random.uniform(0, s)) for _ in self.mosaic_border) # mosaic center x, y img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s] # Concat/clip labels @@ -838,7 +838,7 @@ def extract_boxes(path='../datasets/coco128'): # from utils.datasets import *; # labels lb_file = Path(img2label_paths([str(im_file)])[0]) if Path(lb_file).exists(): - with open(lb_file, 'r') as f: + with open(lb_file) as f: lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels for j, x in enumerate(lb): @@ -866,7 +866,7 @@ def autosplit(path='../datasets/coco128/images', weights=(0.9, 0.1, 0.0), annota annotated_only: Only use images with an annotated txt file """ path = Path(path) # images dir - files = sorted([x for x in path.rglob('*.*') if x.suffix[1:].lower() in IMG_FORMATS]) # image files only + files = sorted(x for x in path.rglob('*.*') if x.suffix[1:].lower() in IMG_FORMATS) # image files only n = len(files) # number of files random.seed(0) # for reproducibility indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split @@ -902,7 +902,7 @@ def verify_image_label(args): # verify labels if os.path.isfile(lb_file): nf = 1 # label found - with open(lb_file, 'r') as f: + with open(lb_file) as f: l = [x.split() for x in f.read().strip().splitlines() if len(x)] if any([len(x) > 8 for x in l]): # is segment classes = np.array([x[0] for x in l], dtype=np.float32) @@ -944,7 +944,7 @@ def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False, profil def round_labels(labels): # Update labels to integer class and 6 decimal place floats - return [[int(c), *[round(x, 4) for x in points]] for c, *points in labels] + return [[int(c), *(round(x, 4) for x in points)] for c, *points in labels] def unzip(path): # Unzip data.zip TODO: CONSTRAINT: path/to/abc.zip MUST 
unzip to 'path/to/abc/' @@ -1019,7 +1019,7 @@ def hub_ops(f, max_dim=1920): with open(file, 'w') as f: json.dump(stats, f) # save stats *.json t2 = time.time() - with open(file, 'r') as f: + with open(file) as f: x = json.load(f) # load hyps dict print(f'stats.json times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write') diff --git a/utils/general.py b/utils/general.py index 02bc741ca3ba..f22908907fd0 100755 --- a/utils/general.py +++ b/utils/general.py @@ -136,7 +136,7 @@ def is_writeable(dir, test=False): pass file.unlink() # remove file return True - except IOError: + except OSError: return False else: # method 2 return os.access(dir, os.R_OK) # possible issues on Windows @@ -355,7 +355,7 @@ def check_dataset(data, autodownload=True): assert 'nc' in data, "Dataset 'nc' key missing." if 'names' not in data: data['names'] = [f'class{i}' for i in range(data['nc'])] # assign class names if missing - train, val, test, s = [data.get(x) for x in ('train', 'val', 'test', 'download')] + train, val, test, s = (data.get(x) for x in ('train', 'val', 'test', 'download')) if val: val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path if not all(x.exists() for x in val): diff --git a/utils/google_app_engine/app.yaml b/utils/google_app_engine/app.yaml index ac29d104b144..5056b7c1186d 100644 --- a/utils/google_app_engine/app.yaml +++ b/utils/google_app_engine/app.yaml @@ -11,4 +11,4 @@ manual_scaling: resources: cpu: 1 memory_gb: 4 - disk_size_gb: 20 \ No newline at end of file + disk_size_gb: 20 diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 0b457df63c93..ae2d98bdc36d 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -135,7 +135,7 @@ def on_train_end(self, last, best, plots, epoch, results): # Callback runs on training end if plots: plot_results(file=self.save_dir / 'results.csv') # save results.png - files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]] + files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))] files = [(self.save_dir / f) for f in files if (self.save_dir / f).exists()] # filter if self.tb: diff --git a/utils/loggers/wandb/README.md b/utils/loggers/wandb/README.md index dd7dc1e46d45..d787fb7a5a0e 100644 --- a/utils/loggers/wandb/README.md +++ b/utils/loggers/wandb/README.md @@ -61,10 +61,10 @@ You can leverage W&B artifacts and Tables integration to easily visualize and ma
Usage Code $ python utils/loggers/wandb/log_dataset.py --project ... --name ... --data .. - + ![Screenshot (64)](https://user-images.githubusercontent.com/15766192/128486078-d8433890-98a3-4d12-8986-b6c0e3fc64b9.png)
- +

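Under the hood this is W&B's artifact versioning. Below is a minimal sketch of the same flow using plain `wandb` calls, assuming a local dataset folder; the project name, artifact name, and path are illustrative placeholders, not values taken from `log_dataset.py`.

```python
import wandb

# Upload a dataset directory as a versioned W&B artifact.
# 'yolov5-demo', 'coco128' and the path below are illustrative placeholders.
run = wandb.init(project="yolov5-demo", job_type="dataset-upload")
artifact = wandb.Artifact("coco128", type="dataset")
artifact.add_dir("../datasets/coco128")  # package the dataset files
run.log_artifact(artifact)  # upload; W&B assigns an incremented version
run.finish()
```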
2: Train and Log Evaluation simultaneously

This is an extension of the previous section, but it'll also start training after uploading the dataset. This also logs an Evaluation Table, which compares your predictions and ground truths across the validation set for each epoch. It uses the references to the already uploaded datasets, @@ -72,31 +72,31 @@ You can leverage W&B artifacts and Tables integration to easily visualize and ma
Usage Code $ python utils/loggers/wandb/log_dataset.py --data .. --upload_data - + ![Screenshot (72)](https://user-images.githubusercontent.com/15766192/128979739-4cf63aeb-a76f-483f-8861-1c0100b938a5.png)
- +

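Conceptually, the Evaluation Table is a `wandb.Table` with one row per validation image per epoch. A toy sketch of that idea follows; the column names and row values are assumed placeholders, not the logger's actual schema.

```python
import wandb

run = wandb.init(project="yolov5-demo", job_type="training")
table = wandb.Table(columns=["epoch", "image_id", "ground_truth", "prediction"])
for epoch in range(3):  # stand-in for the real training loop
    table.add_data(epoch, "000000000009.jpg", "gt boxes", "pred boxes")  # placeholders
run.log({"evaluation": table})  # the table becomes browsable in the W&B UI
run.finish()
```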
3: Train using dataset artifact

- When you upload a dataset as described in the first section, you get a new config file with an added `_wandb` to its name. This file contains the information that + When you upload a dataset as described in the first section, you get a new config file with an added `_wandb` to its name. This file contains the information that can be used to train a model directly from the dataset artifact. This also logs the evaluation tables.
Usage Code $ python utils/loggers/wandb/log_dataset.py --data {data}_wandb.yaml - + ![Screenshot (72)](https://user-images.githubusercontent.com/15766192/128979739-4cf63aeb-a76f-483f-8861-1c0100b938a5.png)
- +

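For reference, consuming an uploaded dataset artifact reduces to `use_artifact()` plus `download()`. The artifact reference below is a placeholder for whatever your `{data}_wandb.yaml` points at.

```python
import wandb

run = wandb.init(project="yolov5-demo", job_type="training")
dataset = run.use_artifact("coco128:latest")  # records the dependency on this run
data_dir = dataset.download()  # materialize the dataset files locally
print(f"dataset available at {data_dir}")
run.finish()
```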
4: Save model checkpoints as artifacts

- To enable saving and versioning checkpoints of your experiment, pass `--save_period n` with the base command, where `n` represents the checkpoint interval. + To enable saving and versioning checkpoints of your experiment, pass `--save_period n` with the base command, where `n` represents the checkpoint interval. You can also log both the dataset and model checkpoints simultaneously. If not passed, only the final model will be logged.
Usage Code $ python train.py --save_period 1 - + ![Screenshot (68)](https://user-images.githubusercontent.com/15766192/128726138-ec6c1f60-639d-437d-b4ee-3acd9de47ef3.png)
- +

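A rough sketch of the checkpoint loop that `--save_period n` enables; the checkpoint filename and aliases here are illustrative, and the real logger derives the artifact name from the run id.

```python
import wandb

run = wandb.init(project="yolov5-demo", job_type="training")
save_period = 1  # log a checkpoint artifact every `save_period` epochs
for epoch in range(3):
    # ... train one epoch, then write weights to 'last.pt' (omitted) ...
    if epoch % save_period == 0:
        ckpt = wandb.Artifact(f"run_{run.id}_model", type="model")
        ckpt.add_file("last.pt")  # assumes this checkpoint file exists on disk
        run.log_artifact(ckpt, aliases=["latest", f"epoch-{epoch}"])
run.finish()
```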
5: Resume runs from checkpoint artifacts.

@@ -105,28 +105,28 @@ Any run can be resumed using artifacts if the --resume argument sta
Usage Code $ python train.py --resume wandb-artifact://{run_path} - + ![Screenshot (70)](https://user-images.githubusercontent.com/15766192/128728988-4e84b355-6c87-41ae-a591-14aecf45343e.png)
- +

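The `wandb-artifact://` prefix encodes which run to resume from. A simplified sketch of how such a path can be split into its entity/project/run id components (the actual `get_run_info()` in `wandb_utils.py` may differ in detail):

```python
WANDB_ARTIFACT_PREFIX = "wandb-artifact://"

def parse_resume_path(resume: str):
    # Split 'wandb-artifact://entity/project/run_id' into its components
    assert resume.startswith(WANDB_ARTIFACT_PREFIX), "not a W&B artifact path"
    entity, project, run_id = resume[len(WANDB_ARTIFACT_PREFIX):].split("/")[:3]
    return entity, project, run_id

print(parse_resume_path("wandb-artifact://my-team/yolov5/abc123"))
# -> ('my-team', 'yolov5', 'abc123')
```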
6: Resume runs from dataset artifact & checkpoint artifacts.

Local dataset or model checkpoints are not required. This can be used to resume runs directly on a different device. - The syntax is the same as the previous section, but you'll need to log both the dataset and model checkpoints as artifacts, i.e., set both --upload_dataset or + The syntax is the same as the previous section, but you'll need to log both the dataset and model checkpoints as artifacts, i.e., set both --upload_dataset or train from the _wandb.yaml file and set --save_period
Usage Code $ python train.py --resume wandb-artifact://{run_path} - + ![Screenshot (70)](https://user-images.githubusercontent.com/15766192/128728988-4e84b355-6c87-41ae-a591-14aecf45343e.png)
- +

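Pulling both pieces onto a fresh machine is just two `use_artifact()` calls; every name below is a placeholder.

```python
import wandb

run = wandb.init(project="yolov5-demo", job_type="resume")
data_dir = run.use_artifact("coco128:latest").download()  # dataset artifact
ckpt_dir = run.use_artifact("run_abc123_model:latest").download()  # checkpoint artifact
print(f"data: {data_dir}\ncheckpoint: {ckpt_dir}")
run.finish()
```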
Reports

W&B Reports can be created from your saved runs for sharing online. Once a report is created, you will receive a link you can use to publicly share your results. Here is an example report created from the COCO128 tutorial trainings of all four YOLOv5 models ([link](https://wandb.ai/glenn-jocher/yolov5_tutorial/reports/YOLOv5-COCO128-Tutorial-Results--VmlldzozMDI5OTY)). - + Weights & Biases Reports diff --git a/utils/loggers/wandb/sweep.yaml b/utils/loggers/wandb/sweep.yaml index c3727de82d4a..c7790d75f6b2 100644 --- a/utils/loggers/wandb/sweep.yaml +++ b/utils/loggers/wandb/sweep.yaml @@ -1,17 +1,17 @@ # Hyperparameters for training -# To set range- +# To set range- # Provide min and max values as: # parameter: -# +# # min: scalar # max: scalar # OR # # Set a specific list of search space- -# parameter: +# parameter: # values: [scalar1, scalar2, scalar3...] -# -# You can use grid, bayesian and hyperopt search strategy +# +# You can use grid, bayesian and hyperopt search strategy # For more info on configuring sweeps visit - https://docs.wandb.ai/guides/sweeps/configuration program: utils/loggers/wandb/sweep.py diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index 7fb76b05e987..8546ec6c63cb 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -5,6 +5,7 @@ import sys from contextlib import contextmanager from pathlib import Path +from typing import Dict import pkg_resources as pkg import yaml @@ -25,7 +26,7 @@ assert hasattr(wandb, '__version__') # verify package import not local dir except (ImportError, AssertionError): wandb = None - + RANK = int(os.getenv('RANK', -1)) WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' @@ -127,7 +128,7 @@ def __init__(self, opt, run_id=None, job_type='Training'): arguments: opt (namespace) -- Commandline arguments for this run run_id (str) -- Run ID of W&B run to be resumed - job_type (str) -- To set the job_type for this run + job_type (str) -- To set the job_type for this run """ # Pre-training routine -- @@ -142,7 +143,8 @@ def __init__(self, opt, run_id=None, job_type='Training'): self.max_imgs_to_log = 16 self.wandb_artifact_data_dict = None self.data_dict = None - # It's more elegant to stick to 1 wandb.init call, but useful config data is overwritten in the WandbLogger's wandb.init call + # It's more elegant to stick to 1 wandb.init call, + # but useful config data is overwritten in the WandbLogger's wandb.init call if isinstance(opt.resume, str): # checks resume from artifact if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): entity, project, run_id, model_artifact_name = get_run_info(opt.resume) @@ -212,7 +214,7 @@ def setup_training(self, opt): Setup the necessary processes for training YOLO models: - Attempt to download model checkpoint and dataset artifacts if opt.resume starts with WANDB_ARTIFACT_PREFIX - Update data_dict, to contain info of previous run if resumed and the paths of dataset artifact if downloaded - - Setup log_dict, initialize bbox_interval + - Setup log_dict, initialize bbox_interval arguments: opt (namespace) -- commandline arguments for this run @@ -301,7 +303,7 @@ def log_model(self, path, opt, epoch, fitness_score, best_model=False): path (Path) -- Path of directory containing the checkpoints opt (namespace) -- Command line arguments for this run epoch (int) -- Current epoch number - fitness_score (float) -- fitness score for current epoch + fitness_score (float) -- fitness score for current epoch best_model (boolean) -- Boolean representing if the current
checkpoint is the best yet. """ model_artifact = wandb.Artifact('run_' + wandb.run.id + '_model', type='model', metadata={ @@ -325,7 +327,7 @@ def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config= data_file (str) -- the .yaml file with information about the dataset like - path, classes etc. single_class (boolean) -- train multi-class data as single-class project (str) -- project name. Used to construct the artifact path - overwrite_config (boolean) -- overwrites the data.yaml file if set to true otherwise creates a new + overwrite_config (boolean) -- overwrites the data.yaml file if set to true otherwise creates a new file with _wandb postfix. Eg -> data_wandb.yaml returns: @@ -371,14 +373,14 @@ def map_val_table_path(self): for i, data in enumerate(tqdm(self.val_table.data)): self.val_table_path_map[data[3]] = data[0] - def create_dataset_table(self, dataset, class_to_id, name='dataset'): + def create_dataset_table(self, dataset: LoadImagesAndLabels, class_to_id: Dict[int,str], name: str = 'dataset'): """ Create and return W&B artifact containing W&B Table of the dataset. arguments: - dataset (LoadImagesAndLabels) -- instance of LoadImagesAndLabels class used to iterate over the data to build Table - class_to_id (dict(int, str)) -- hash map that maps class ids to labels - name (str) -- name of the artifact + dataset -- instance of LoadImagesAndLabels class used to iterate over the data to build Table + class_to_id -- hash map that maps class ids to labels + name -- name of the artifact returns: dataset artifact to be logged or used @@ -419,7 +421,7 @@ def log_training_progress(self, predn, path, names): arguments: predn (list): list of predictions in the native space in the format - [xmin, ymin, xmax, ymax, confidence, class] - path (str): local path of the current evaluation image + path (str): local path of the current evaluation image names (dict(int, str)): hash map that maps class ids to labels """ class_set = wandb.Classes([{'id': id, 'name': name} for id, name in names.items()]) @@ -430,7 +432,7 @@ def log_training_progress(self, predn, path, names): box_data.append( {"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, "class_id": int(cls), - "box_caption": "%s %.3f" % (names[cls], conf), + "box_caption": f"{names[cls]} {conf:.3f}", "scores": {"class_score": conf}, "domain": "pixel"}) total_conf += conf @@ -450,7 +452,7 @@ def val_one_image(self, pred, predn, path, names, im): arguments: pred (list): list of scaled predictions in the format - [xmin, ymin, xmax, ymax, confidence, class] predn (list): list of predictions in the native space - [xmin, ymin, xmax, ymax, confidence, class] - path (str): local path of the current evaluation image + path (str): local path of the current evaluation image """ if self.val_table and self.result_table: # Log Table if Val dataset is uploaded as artifact self.log_training_progress(predn, path, names) @@ -459,7 +461,7 @@ def val_one_image(self, pred, predn, path, names, im): if self.current_epoch % self.bbox_interval == 0: box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, "class_id": int(cls), - "box_caption": "%s %.3f" % (names[cls], conf), + "box_caption": f"{names[cls]} {conf:.3f}", "scores": {"class_score": conf}, "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()] boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space diff --git a/utils/loss.py b/utils/loss.py index fac432d0edc3..e8ce42ad994a 100644 --- 
a/utils/loss.py +++ b/utils/loss.py @@ -18,7 +18,7 @@ def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#iss class BCEBlurWithLogitsLoss(nn.Module): # BCEwithLogitLoss() with reduced missing label effects. def __init__(self, alpha=0.05): - super(BCEBlurWithLogitsLoss, self).__init__() + super().__init__() self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none') # must be nn.BCEWithLogitsLoss() self.alpha = alpha @@ -35,7 +35,7 @@ def forward(self, pred, true): class FocalLoss(nn.Module): # Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): - super(FocalLoss, self).__init__() + super().__init__() self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() self.gamma = gamma self.alpha = alpha @@ -65,7 +65,7 @@ def forward(self, pred, true): class QFocalLoss(nn.Module): # Wraps Quality focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): - super(QFocalLoss, self).__init__() + super().__init__() self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() self.gamma = gamma self.alpha = alpha diff --git a/utils/plots.py b/utils/plots.py index 00b8f88811e2..00cda6d8d986 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -250,7 +250,7 @@ def plot_targets_txt(): # from utils.plots import *; plot_targets_txt() fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True) ax = ax.ravel() for i in range(4): - ax[i].hist(x[i], bins=100, label='%.3g +/- %.3g' % (x[i].mean(), x[i].std())) + ax[i].hist(x[i], bins=100, label=f'{x[i].mean():.3g} +/- {x[i].std():.3g}') ax[i].legend() ax[i].set_title(s[i]) plt.savefig('targets.jpg', dpi=200) @@ -363,7 +363,7 @@ def profile_idetection(start=0, stop=0, labels=(), save_dir=''): else: a.remove() except Exception as e: - print('Warning: Plotting error for %s; %s' % (f, e)) + print(f'Warning: Plotting error for {f}; {e}') ax[1].legend() plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200) @@ -384,10 +384,10 @@ def plot_evolve(evolve_csv='path/to/evolve.csv'): # from utils.plots import *; plt.subplot(6, 5, i + 1) plt.scatter(v, f, c=hist2d(v, f, 20), cmap='viridis', alpha=.8, edgecolors='none') plt.plot(mu, f.max(), 'k+', markersize=15) - plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9}) # limit to 40 characters + plt.title(f'{k} = {mu:.3g}', fontdict={'size': 9}) # limit to 40 characters if i % 5 != 0: plt.yticks([]) - print('%15s: %.3g' % (k, mu)) + print(f'{k:>15}: {mu:.3g}') f = evolve_csv.with_suffix('.png') # filename plt.savefig(f, dpi=200) plt.close() diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 6f52f9a3728d..e6d8ebd743bf 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -123,7 +123,7 @@ def profile(input, ops, n=10, device=None): y = m(x) t[1] = time_sync() try: - _ = (sum([yi.sum() for yi in y]) if isinstance(y, list) else y).sum().backward() + _ = (sum(yi.sum() for yi in y) if isinstance(y, list) else y).sum().backward() t[2] = time_sync() except Exception as e: # no backward method # print(e) # for debug @@ -223,7 +223,7 @@ def model_info(model, verbose=False, img_size=640): n_p = sum(x.numel() for x in model.parameters()) # number parameters n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients if verbose: - print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma')) + print(f"{'layer':>5} 
{'name':>40} {'gradient':>9} {'parameters':>12} {'shape':>20} {'mu':>10} {'sigma':>10}") for i, (name, p) in enumerate(model.named_parameters()): name = name.replace('module_list.', '') print('%5g %40s %9s %12g %20s %10.3g %10.3g' % @@ -270,7 +270,7 @@ def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416) s = (int(h * ratio), int(w * ratio)) # new size img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize if not same_shape: # pad/crop img - h, w = [math.ceil(x * ratio / gs) * gs for x in (h, w)] + h, w = (math.ceil(x * ratio / gs) * gs for x in (h, w)) return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean From 620b535f850728d63d81793aa9e4577f7b844078 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Fri, 29 Oct 2021 22:51:59 +0530 Subject: [PATCH 0667/1976] Update sweep.py (#5402) --- utils/loggers/wandb/sweep.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/utils/loggers/wandb/sweep.py b/utils/loggers/wandb/sweep.py index fdabec4eb63b..6029f6b8039d 100644 --- a/utils/loggers/wandb/sweep.py +++ b/utils/loggers/wandb/sweep.py @@ -26,6 +26,11 @@ def sweep(): opt.epochs = hyp_dict.get("epochs") opt.nosave = True opt.data = hyp_dict.get("data") + opt.weights = str(opt.weights) + opt.cfg = str(opt.cfg) + opt.data = str(opt.data) + opt.hyp = str(opt.hyp) + opt.project = str(opt.project) device = select_device(opt.device, batch_size=opt.batch_size) # train From 7f9bbf0268317ace43f59174efbfecff60023c84 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 29 Oct 2021 23:16:04 +0200 Subject: [PATCH 0668/1976] Update GitHub issues templates (#5404) * Update GitHub issues templates * pre-commit fixes Co-authored-by: pre-commit --- .github/ISSUE_TEMPLATE/bug-report.md | 59 --------------- .github/ISSUE_TEMPLATE/bug-report.yml | 83 ++++++++++++++++++++++ .github/ISSUE_TEMPLATE/config.yml | 8 +++ .github/ISSUE_TEMPLATE/feature-request.md | 29 -------- .github/ISSUE_TEMPLATE/feature-request.yml | 50 +++++++++++++ .github/ISSUE_TEMPLATE/question.md | 12 ---- .github/ISSUE_TEMPLATE/question.yml | 33 +++++++++ 7 files changed, 174 insertions(+), 100 deletions(-) delete mode 100644 .github/ISSUE_TEMPLATE/bug-report.md create mode 100644 .github/ISSUE_TEMPLATE/bug-report.yml create mode 100644 .github/ISSUE_TEMPLATE/config.yml delete mode 100644 .github/ISSUE_TEMPLATE/feature-request.md create mode 100644 .github/ISSUE_TEMPLATE/feature-request.yml delete mode 100644 .github/ISSUE_TEMPLATE/question.md create mode 100644 .github/ISSUE_TEMPLATE/question.yml diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md deleted file mode 100644 index 62a02a3a6948..000000000000 --- a/.github/ISSUE_TEMPLATE/bug-report.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -name: "🐛 Bug report" -about: Create a report to help us improve -title: '' -labels: bug -assignees: '' - ---- - -Before submitting a bug report, please be aware that your issue **must be reproducible** with all of the following, -otherwise it is non-actionable, and we can not help you: - -- **Current repo**: run `git fetch && git status -uno` to check and `git pull` to update repo -- **Common dataset**: coco.yaml or coco128.yaml -- **Common environment**: Colab, Google Cloud, or Docker image. See https://github.com/ultralytics/yolov5#environments - -If this is a custom dataset/training question you **must include** your `train*.jpg`, `val*.jpg` and `results.png` -figures, or we can not help you. You can generate these with `utils.plot_results()`. 
- -## 🐛 Bug - -A clear and concise description of what the bug is. - -## To Reproduce (REQUIRED) - -Input: - -``` -import torch - -a = torch.tensor([5]) -c = a / 0 -``` - -Output: - -``` -Traceback (most recent call last): - File "/Users/glennjocher/opt/anaconda3/envs/env1/lib/python3.7/site-packages/IPython/core/interactiveshell.py", line 3331, in run_code - exec(code_obj, self.user_global_ns, self.user_ns) - File "", line 5, in - c = a / 0 -RuntimeError: ZeroDivisionError -``` - -## Expected behavior - -A clear and concise description of what you expected to happen. - -## Environment - -If applicable, add screenshots to help explain your problem. - -- OS: [e.g. Ubuntu] -- GPU [e.g. 2080 Ti] - -## Additional context - -Add any other context about the problem here. diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml new file mode 100644 index 000000000000..a20f15c20c93 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug-report.yml @@ -0,0 +1,83 @@ +name: 🐛 Bug Report +# title: " " +description: Problems with YOLOv5 +labels: [bug, triage] +body: + - type: markdown + attributes: + value: | + Thank you for submitting a YOLOv5 🐛 Bug Report! + + - type: checkboxes + attributes: + label: Search before asking + description: > + Please search the [issues](https://github.com/ultralytics/yolov5/issues) to see if a similar bug report already exists. + options: + - label: > + I have searched the YOLOv5 [issues](https://github.com/ultralytics/yolov5/issues) and found no similar bug report. + required: true + + - type: dropdown + attributes: + label: YOLOv5 Component + description: | + Please select the part of YOLOv5 where you found the bug. + multiple: true + options: + - "Training" + - "Validation" + - "Detection" + - "Export" + - "PyTorch Hub" + - "Multi-GPU" + - "Evolution" + - "Integrations" + - "Other" + validations: + required: false + + - type: textarea + attributes: + label: Bug + description: Provide console output with error messages and/or screenshots of the bug. + placeholder: > + TIP: Include as much information as possible (screenshots, logs, tracebacks etc.) to receive the most helpful response. + validations: + required: true + + - type: textarea + attributes: + label: Environment + description: Please specify the software and hardware you used to produce the bug. + placeholder: | + - YOLO: YOLOv5 🚀 v6.0-37-g620b535 torch 1.9.0+cu111 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB) + - OS: Ubuntu 20.04 + - Python: 3.9.0 + validations: + required: false + + - type: textarea + attributes: + label: Minimal Reproducible Example + description: > + When asking a question, people will be better able to provide help if you provide code that they can easily understand and use to **reproduce** the problem. + This is referred to by community members as creating a [minimal reproducible example](https://stackoverflow.com/help/minimal-reproducible-example). + placeholder: | + # code to reproduce your issue here + validations: + required: false + + - type: textarea + attributes: + label: Additional + description: Anything else you would like to share? + + - type: checkboxes + attributes: + label: Are you willing to submit a PR? + description: > + (Optional) We encourage you to submit a [Pull Request](https://github.com/ultralytics/yolov5/pulls) (PR) to help improve YOLOv5 for everyone, especially if you have a good understanding of how to implement a fix or feature. 
+ See the YOLOv5 [Contributing Guide](https://github.com/ultralytics/yolov5/blob/master/CONTRIBUTING.md) to get started. + options: + - label: Yes I'd like to help by submitting a PR! diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 000000000000..f388d7bacf66 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,8 @@ +blank_issues_enabled: true +contact_links: + - name: Slack + url: https://join.slack.com/t/ultralytics/shared_invite/zt-w29ei8bp-jczz7QYUmDtgo6r6KcMIAg + about: Ask on Ultralytics Slack Forum + - name: Stack Overflow + url: https://stackoverflow.com/search?q=YOLOv5 + about: Ask on Stack Overflow with 'YOLOv5' tag diff --git a/.github/ISSUE_TEMPLATE/feature-request.md b/.github/ISSUE_TEMPLATE/feature-request.md deleted file mode 100644 index 994f506e0f09..000000000000 --- a/.github/ISSUE_TEMPLATE/feature-request.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -name: "🚀 Feature request" -about: Suggest an idea for this project -title: '' -labels: enhancement -assignees: '' - ---- - -## 🚀 Feature - - - -## Motivation - - - -## Pitch - - - -## Alternatives - - - -## Additional context - - diff --git a/.github/ISSUE_TEMPLATE/feature-request.yml b/.github/ISSUE_TEMPLATE/feature-request.yml new file mode 100644 index 000000000000..68ef985186ef --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature-request.yml @@ -0,0 +1,50 @@ +name: 🚀 Feature Request +description: Suggest a YOLOv5 idea +# title: " " +labels: [enhancement] +body: + - type: markdown + attributes: + value: | + Thank you for submitting a YOLOv5 🚀 Feature Request! + + - type: checkboxes + attributes: + label: Search before asking + description: > + Please search the [issues](https://github.com/ultralytics/yolov5/issues) to see if a similar feature request already exists. + options: + - label: > + I have searched the YOLOv5 [issues](https://github.com/ultralytics/yolov5/issues) and found no similar feature requests. + required: true + + - type: textarea + attributes: + label: Description + description: A short description of your feature. + placeholder: | + What new feature would you like to see in YOLOv5? + validations: + required: true + + - type: textarea + attributes: + label: Use case + description: | + Describe the use case of your feature request. It will help us understand and prioritize the feature request. + placeholder: | + How would this feature be used, and who would use it? + + - type: textarea + attributes: + label: Additional + description: Anything else you would like to share? + + - type: checkboxes + attributes: + label: Are you willing to submit a PR? + description: > + (Optional) We encourage you to submit a [Pull Request](https://github.com/ultralytics/yolov5/pulls) (PR) to help improve YOLOv5 for everyone, especially if you have a good understanding of how to implement a fix or feature. + See the YOLOv5 [Contributing Guide](https://github.com/ultralytics/yolov5/blob/master/CONTRIBUTING.md) to get started. + options: + - label: Yes I'd like to help by submitting a PR! 
diff --git a/.github/ISSUE_TEMPLATE/question.md b/.github/ISSUE_TEMPLATE/question.md deleted file mode 100644 index 2892cfe262fb..000000000000 --- a/.github/ISSUE_TEMPLATE/question.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -name: "❓Question" -about: Ask a general question -title: '' -labels: question -assignees: '' - ---- - -## ❔Question - -## Additional context diff --git a/.github/ISSUE_TEMPLATE/question.yml b/.github/ISSUE_TEMPLATE/question.yml new file mode 100644 index 000000000000..9ae5dd57c608 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/question.yml @@ -0,0 +1,33 @@ +name: ❓ Question +description: Ask a YOLOv5 question +# title: " " +labels: [question] +body: + - type: markdown + attributes: + value: | + Thank you for asking a YOLOv5 ❓ Question! + + - type: checkboxes + attributes: + label: Search before asking + description: > + Please search the [issues](https://github.com/ultralytics/yolov5/issues) and [discussions](https://github.com/ultralytics/yolov5/discussions) to see if a similar question already exists. + options: + - label: > + I have searched the YOLOv5 [issues](https://github.com/ultralytics/yolov5/issues) and [discussions](https://github.com/ultralytics/yolov5/discussions) and found no similar questions. + required: true + + - type: textarea + attributes: + label: Question + description: What is your question? + placeholder: > + TIP: Include as much information as possible (screenshots, links, reference etc.) to receive the most helpful response. + validations: + required: true + + - type: textarea + attributes: + label: Additional + description: Anything else you would like to share? From 5d4258fac5e6ceaa9c897f841cb737c56717a996 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 30 Oct 2021 13:38:51 +0200 Subject: [PATCH 0669/1976] Fix MixConv2d() remove shortcut + apply depthwise (#5410) --- models/common.py | 2 +- models/experimental.py | 21 +++++++++++---------- utils/torch_utils.py | 2 +- 3 files changed, 13 insertions(+), 12 deletions(-) diff --git a/models/common.py b/models/common.py index d0fb0e8596ed..8b70a6fea595 100644 --- a/models/common.py +++ b/models/common.py @@ -113,7 +113,7 @@ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, nu self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False) self.cv4 = Conv(2 * c_, c2, 1, 1) self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3) - self.act = nn.LeakyReLU(0.1, inplace=True) + self.act = nn.SiLU() self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n))) def forward(self, x): diff --git a/models/experimental.py b/models/experimental.py index adb86c81fc06..2e92ccb36faf 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -2,7 +2,7 @@ """ Experimental modules """ - +import math import numpy as np import torch import torch.nn as nn @@ -48,26 +48,27 @@ def forward(self, x): class MixConv2d(nn.Module): # Mixed Depth-wise Conv https://arxiv.org/abs/1907.09595 - def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True): + def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True): # ch_in, ch_out, kernel, stride, ch_strategy super().__init__() - groups = len(k) + n = len(k) # number of convolutions if equal_ch: # equal c_ per group - i = torch.linspace(0, groups - 1E-6, c2).floor() # c2 indices - c_ = [(i == g).sum() for g in range(groups)] # intermediate channels + i = torch.linspace(0, n - 1E-6, c2).floor() # c2 indices + c_ = [(i == g).sum() for g in range(n)] # intermediate channels else: # equal weight.numel() per group - b = [c2] + [0] * groups - a = 
np.eye(groups + 1, groups, k=-1) + b = [c2] + [0] * n + a = np.eye(n + 1, n, k=-1) a -= np.roll(a, 1, axis=1) a *= np.array(k) ** 2 a[0] = 1 c_ = np.linalg.lstsq(a, b, rcond=None)[0].round() # solve for equal weight indices, ax = b - self.m = nn.ModuleList([nn.Conv2d(c1, int(c_[g]), k[g], s, k[g] // 2, bias=False) for g in range(groups)]) + self.m = nn.ModuleList( + [nn.Conv2d(c1, int(c_), k, s, k // 2, groups=math.gcd(c1, int(c_)), bias=False) for k, c_ in zip(k, c_)]) self.bn = nn.BatchNorm2d(c2) - self.act = nn.LeakyReLU(0.1, inplace=True) + self.act = nn.SiLU() def forward(self, x): - return x + self.act(self.bn(torch.cat([m(x) for m in self.m], 1))) + return self.act(self.bn(torch.cat([m(x) for m in self.m], 1))) class Ensemble(nn.ModuleList): diff --git a/utils/torch_utils.py b/utils/torch_utils.py index e6d8ebd743bf..fc214147da72 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -166,7 +166,7 @@ def initialize_weights(model): elif t is nn.BatchNorm2d: m.eps = 1e-3 m.momentum = 0.03 - elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6]: + elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]: m.inplace = True From 8c326a1edfa5565c6ee81b6a1c669f7849875717 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 1 Nov 2021 14:33:08 +0100 Subject: [PATCH 0670/1976] Meshgrid `indexing='ij'` for PyTorch 1.10 (#5309) * Meshgrid `indexing='ij'` for PyTorch 1.10 Will not merge currently as breaks backwards compatibility. * Meshgrid `indexing='ij'` for PyTorch 1.10 Will not merge currently as breaks backwards compatibility. * Add check_version hard argument * Update comment --- models/yolo.py | 7 +++++-- utils/augmentations.py | 2 +- utils/general.py | 11 +++++++---- 3 files changed, 13 insertions(+), 7 deletions(-) diff --git a/models/yolo.py b/models/yolo.py index 0fa2db91e82b..80ff83e16085 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -20,7 +20,7 @@ from models.common import * from models.experimental import * from utils.autoanchor import check_anchor_order -from utils.general import check_yaml, make_divisible, print_args, set_logging +from utils.general import check_yaml, make_divisible, print_args, set_logging, check_version from utils.plots import feature_visualization from utils.torch_utils import copy_attr, fuse_conv_and_bn, initialize_weights, model_info, scale_img, \ select_device, time_sync @@ -74,7 +74,10 @@ def forward(self, x): def _make_grid(self, nx=20, ny=20, i=0): d = self.anchors[i].device - yv, xv = torch.meshgrid([torch.arange(ny).to(d), torch.arange(nx).to(d)]) + if check_version(torch.__version__, '1.10.0'): # torch>=1.10.0 meshgrid workaround for torch>=0.7 compatibility + yv, xv = torch.meshgrid([torch.arange(ny).to(d), torch.arange(nx).to(d)], indexing='ij') + else: + yv, xv = torch.meshgrid([torch.arange(ny).to(d), torch.arange(nx).to(d)]) grid = torch.stack((xv, yv), 2).expand((1, self.na, ny, nx, 2)).float() anchor_grid = (self.anchors[i].clone() * self.stride[i]) \ .view((1, self.na, 1, 1, 2)).expand((1, self.na, ny, nx, 2)).float() diff --git a/utils/augmentations.py b/utils/augmentations.py index 04192d1ec5cd..b3cbbf913b65 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -20,7 +20,7 @@ def __init__(self): self.transform = None try: import albumentations as A - check_version(A.__version__, '1.0.3') # version requirement + check_version(A.__version__, '1.0.3', hard=True) # version requirement self.transform = A.Compose([ A.Blur(p=0.01), diff --git a/utils/general.py b/utils/general.py index 
f22908907fd0..667af63e4044 100755 --- a/utils/general.py +++ b/utils/general.py @@ -220,14 +220,17 @@ def check_git_status(): def check_python(minimum='3.6.2'): # Check current python version vs. required python version - check_version(platform.python_version(), minimum, name='Python ') + check_version(platform.python_version(), minimum, name='Python ', hard=True) -def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False): +def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False): # Check version vs. required version current, minimum = (pkg.parse_version(x) for x in (current, minimum)) - result = (current == minimum) if pinned else (current >= minimum) - assert result, f'{name}{minimum} required by YOLOv5, but {name}{current} is currently installed' + result = (current == minimum) if pinned else (current >= minimum) # bool + if hard: # assert min requirements met + assert result, f'{name}{minimum} required by YOLOv5, but {name}{current} is currently installed' + else: + return result @try_except From 7b1f7aec4632d7aa0f04442ef21df0b31ec6390a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 1 Nov 2021 18:22:13 +0100 Subject: [PATCH 0671/1976] Update `get_loggers()` (#4854) * Update `set_logging()` * Update export.py * pre-commit fixes * Update LoadImages * Update LoadStreams * Update print_args * Single LOGGER definition * yolo.py fix Co-authored-by: pre-commit --- detect.py | 17 ++++++------- export.py | 63 +++++++++++++++++++++++------------------------ models/tf.py | 7 ++---- models/yolo.py | 5 +--- train.py | 12 ++++----- utils/datasets.py | 30 +++++++++++----------- utils/general.py | 18 ++++++++------ val.py | 23 +++++++++-------- 8 files changed, 84 insertions(+), 91 deletions(-) diff --git a/detect.py b/detect.py index 70c52dc5214b..c57edba67c6c 100644 --- a/detect.py +++ b/detect.py @@ -25,8 +25,7 @@ from models.experimental import attempt_load from utils.datasets import LoadImages, LoadStreams from utils.general import apply_classifier, check_img_size, check_imshow, check_requirements, check_suffix, colorstr, \ - increment_path, non_max_suppression, print_args, save_one_box, scale_coords, set_logging, \ - strip_optimizer, xyxy2xywh + increment_path, non_max_suppression, print_args, save_one_box, scale_coords, strip_optimizer, xyxy2xywh, LOGGER from utils.plots import Annotator, colors from utils.torch_utils import load_classifier, select_device, time_sync @@ -68,7 +67,6 @@ def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s) (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Initialize - set_logging() device = select_device(device) half &= device.type != 'cpu' # half precision only supported on CUDA @@ -132,7 +130,7 @@ def wrap_frozen_graph(gd, inputs, outputs): if pt and device.type != 'cpu': model(torch.zeros(1, 3, *imgsz).to(device).type_as(next(model.parameters()))) # run once dt, seen = [0.0, 0.0, 0.0], 0 - for path, img, im0s, vid_cap in dataset: + for path, img, im0s, vid_cap, s in dataset: t1 = time_sync() if onnx: img = img.astype('float32') @@ -191,9 +189,10 @@ def wrap_frozen_graph(gd, inputs, outputs): for i, det in enumerate(pred): # per image seen += 1 if webcam: # batch_size >= 1 - p, s, im0, frame = path[i], f'{i}: ', im0s[i].copy(), dataset.count + p, im0, frame = path[i], im0s[i].copy(), dataset.count + s += f'{i}: ' else: - p, s, im0, frame = path, '', im0s.copy(), getattr(dataset, 'frame', 0) + p, im0, frame = path, im0s.copy(), getattr(dataset, 
'frame', 0) p = Path(p) # to Path save_path = str(save_dir / p.name) # img.jpg @@ -227,7 +226,7 @@ def wrap_frozen_graph(gd, inputs, outputs): save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) # Print time (inference-only) - print(f'{s}Done. ({t3 - t2:.3f}s)') + LOGGER.info(f'{s}Done. ({t3 - t2:.3f}s)') # Stream results im0 = annotator.result() @@ -256,10 +255,10 @@ def wrap_frozen_graph(gd, inputs, outputs): # Print results t = tuple(x / seen * 1E3 for x in dt) # speeds per image - print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) + LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) if save_txt or save_img: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' - print(f"Results saved to {colorstr('bold', save_dir)}{s}") + LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") if update: strip_optimizer(weights) # update model (to fix SourceChangeWarning) diff --git a/export.py b/export.py index 2aca0f341dbd..47dbcab50144 100644 --- a/export.py +++ b/export.py @@ -42,23 +42,23 @@ from models.yolo import Detect from utils.activations import SiLU from utils.datasets import LoadImages -from utils.general import colorstr, check_dataset, check_img_size, check_requirements, file_size, print_args, \ - set_logging, url2file +from utils.general import check_dataset, check_img_size, check_requirements, colorstr, file_size, print_args, \ + url2file, LOGGER from utils.torch_utils import select_device def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:')): # YOLOv5 TorchScript model export try: - print(f'\n{prefix} starting export with torch {torch.__version__}...') + LOGGER.info(f'\n{prefix} starting export with torch {torch.__version__}...') f = file.with_suffix('.torchscript.pt') ts = torch.jit.trace(model, im, strict=False) (optimize_for_mobile(ts) if optimize else ts).save(f) - print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') except Exception as e: - print(f'{prefix} export failure: {e}') + LOGGER.info(f'{prefix} export failure: {e}') def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorstr('ONNX:')): @@ -67,7 +67,7 @@ def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorst check_requirements(('onnx',)) import onnx - print(f'\n{prefix} starting export with onnx {onnx.__version__}...') + LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...') f = file.with_suffix('.onnx') torch.onnx.export(model, im, f, verbose=False, opset_version=opset, @@ -82,7 +82,7 @@ def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorst # Checks model_onnx = onnx.load(f) # load onnx model onnx.checker.check_model(model_onnx) # check onnx model - # print(onnx.helper.printable_graph(model_onnx.graph)) # print + # LOGGER.info(onnx.helper.printable_graph(model_onnx.graph)) # print # Simplify if simplify: @@ -90,7 +90,7 @@ def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorst check_requirements(('onnx-simplifier',)) import onnxsim - print(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...') + LOGGER.info(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...') model_onnx, check = onnxsim.simplify( model_onnx, 
dynamic_input_shape=dynamic, @@ -98,11 +98,11 @@ def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorst assert check, 'assert check failed' onnx.save(model_onnx, f) except Exception as e: - print(f'{prefix} simplifier failure: {e}') - print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - print(f"{prefix} run --dynamic ONNX model inference with: 'python detect.py --weights {f}'") + LOGGER.info(f'{prefix} simplifier failure: {e}') + LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + LOGGER.info(f"{prefix} run --dynamic ONNX model inference with: 'python detect.py --weights {f}'") except Exception as e: - print(f'{prefix} export failure: {e}') + LOGGER.info(f'{prefix} export failure: {e}') def export_coreml(model, im, file, prefix=colorstr('CoreML:')): @@ -112,7 +112,7 @@ def export_coreml(model, im, file, prefix=colorstr('CoreML:')): check_requirements(('coremltools',)) import coremltools as ct - print(f'\n{prefix} starting export with coremltools {ct.__version__}...') + LOGGER.info(f'\n{prefix} starting export with coremltools {ct.__version__}...') f = file.with_suffix('.mlmodel') model.train() # CoreML exports should be placed in model.train() mode @@ -120,9 +120,9 @@ def export_coreml(model, im, file, prefix=colorstr('CoreML:')): ct_model = ct.convert(ts, inputs=[ct.ImageType('image', shape=im.shape, scale=1 / 255.0, bias=[0, 0, 0])]) ct_model.save(f) - print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') except Exception as e: - print(f'\n{prefix} export failure: {e}') + LOGGER.info(f'\n{prefix} export failure: {e}') return ct_model @@ -137,7 +137,7 @@ def export_saved_model(model, im, file, dynamic, from tensorflow import keras from models.tf import TFModel, TFDetect - print(f'\n{prefix} starting export with tensorflow {tf.__version__}...') + LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') f = str(file).replace('.pt', '_saved_model') batch_size, ch, *imgsz = list(im.shape) # BCHW @@ -151,9 +151,9 @@ def export_saved_model(model, im, file, dynamic, keras_model.summary() keras_model.save(f, save_format='tf') - print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') except Exception as e: - print(f'\n{prefix} export failure: {e}') + LOGGER.info(f'\n{prefix} export failure: {e}') return keras_model @@ -164,7 +164,7 @@ def export_pb(keras_model, im, file, prefix=colorstr('TensorFlow GraphDef:')): import tensorflow as tf from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 - print(f'\n{prefix} starting export with tensorflow {tf.__version__}...') + LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') f = file.with_suffix('.pb') m = tf.function(lambda x: keras_model(x)) # full model @@ -173,9 +173,9 @@ def export_pb(keras_model, im, file, prefix=colorstr('TensorFlow GraphDef:')): frozen_func.graph.as_graph_def() tf.io.write_graph(graph_or_graph_def=frozen_func.graph, logdir=str(f.parent), name=f.name, as_text=False) - print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') except Exception as e: - print(f'\n{prefix} export failure: {e}') + LOGGER.info(f'\n{prefix} export failure: {e}') def export_tflite(keras_model, im, file, int8, data, 
ncalib, prefix=colorstr('TensorFlow Lite:')): @@ -184,7 +184,7 @@ def export_tflite(keras_model, im, file, int8, data, ncalib, prefix=colorstr('Te import tensorflow as tf from models.tf import representative_dataset_gen - print(f'\n{prefix} starting export with tensorflow {tf.__version__}...') + LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') batch_size, ch, *imgsz = list(im.shape) # BCHW f = str(file).replace('.pt', '-fp16.tflite') @@ -204,10 +204,10 @@ def export_tflite(keras_model, im, file, int8, data, ncalib, prefix=colorstr('Te tflite_model = converter.convert() open(f, "wb").write(tflite_model) - print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') except Exception as e: - print(f'\n{prefix} export failure: {e}') + LOGGER.info(f'\n{prefix} export failure: {e}') def export_tfjs(keras_model, im, file, prefix=colorstr('TensorFlow.js:')): @@ -217,7 +217,7 @@ def export_tfjs(keras_model, im, file, prefix=colorstr('TensorFlow.js:')): import re import tensorflowjs as tfjs - print(f'\n{prefix} starting export with tensorflowjs {tfjs.__version__}...') + LOGGER.info(f'\n{prefix} starting export with tensorflowjs {tfjs.__version__}...') f = str(file).replace('.pt', '_web_model') # js dir f_pb = file.with_suffix('.pb') # *.pb path f_json = f + '/model.json' # *.json path @@ -240,9 +240,9 @@ def export_tfjs(keras_model, im, file, prefix=colorstr('TensorFlow.js:')): json) j.write(subst) - print(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') except Exception as e: - print(f'\n{prefix} export failure: {e}') + LOGGER.info(f'\n{prefix} export failure: {e}') @torch.no_grad() @@ -297,7 +297,7 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' for _ in range(2): y = model(im) # dry runs - print(f"\n{colorstr('PyTorch:')} starting from {file} ({file_size(file):.1f} MB)") + LOGGER.info(f"\n{colorstr('PyTorch:')} starting from {file} ({file_size(file):.1f} MB)") # Exports if 'torchscript' in include: @@ -322,9 +322,9 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' export_tfjs(model, im, file) # Finish - print(f'\nExport complete ({time.time() - t:.2f}s)' - f"\nResults saved to {colorstr('bold', file.parent.resolve())}" - f'\nVisualize with https://netron.app') + LOGGER.info(f'\nExport complete ({time.time() - t:.2f}s)' + f"\nResults saved to {colorstr('bold', file.parent.resolve())}" + f'\nVisualize with https://netron.app') def parse_opt(): @@ -355,7 +355,6 @@ def parse_opt(): def main(opt): - set_logging() run(**vars(opt)) diff --git a/models/tf.py b/models/tf.py index 5599ff5cce91..531c8cc5a29f 100644 --- a/models/tf.py +++ b/models/tf.py @@ -31,11 +31,9 @@ from models.common import Bottleneck, BottleneckCSP, Concat, Conv, C3, DWConv, Focus, SPP, SPPF, autopad from models.experimental import CrossConv, MixConv2d, attempt_load from models.yolo import Detect -from utils.general import make_divisible, print_args, set_logging +from utils.general import make_divisible, print_args, LOGGER from utils.activations import SiLU -LOGGER = logging.getLogger(__name__) - class TFBN(keras.layers.Layer): # TensorFlow BatchNormalization wrapper @@ -336,7 +334,7 @@ def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, model=None, imgsz=(640, 64 # Define model if nc and nc != self.yaml['nc']: - print(f"Overriding {cfg} nc={self.yaml['nc']} with nc={nc}") + 
LOGGER.info(f"Overriding {cfg} nc={self.yaml['nc']} with nc={nc}") self.yaml['nc'] = nc # override yaml value self.model, self.savelist = parse_model(deepcopy(self.yaml), ch=[ch], model=model, imgsz=imgsz) @@ -457,7 +455,6 @@ def parse_opt(): def main(opt): - set_logging() run(**vars(opt)) diff --git a/models/yolo.py b/models/yolo.py index 80ff83e16085..38a17d9e7ba4 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -20,7 +20,7 @@ from models.common import * from models.experimental import * from utils.autoanchor import check_anchor_order -from utils.general import check_yaml, make_divisible, print_args, set_logging, check_version +from utils.general import check_version, check_yaml, make_divisible, print_args, LOGGER from utils.plots import feature_visualization from utils.torch_utils import copy_attr, fuse_conv_and_bn, initialize_weights, model_info, scale_img, \ select_device, time_sync @@ -30,8 +30,6 @@ except ImportError: thop = None -LOGGER = logging.getLogger(__name__) - class Detect(nn.Module): stride = None # strides computed during build @@ -311,7 +309,6 @@ def parse_model(d, ch): # model_dict, input_channels(3) opt = parser.parse_args() opt.cfg = check_yaml(opt.cfg) # check YAML print_args(FILE.stem, opt) - set_logging() device = select_device(opt.device) # Create model diff --git a/train.py b/train.py index 292f2da965f0..4886034d811f 100644 --- a/train.py +++ b/train.py @@ -40,7 +40,7 @@ from utils.datasets import create_dataloader from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \ strip_optimizer, get_latest_run, check_dataset, check_git_status, check_img_size, check_requirements, \ - check_file, check_yaml, check_suffix, print_args, print_mutation, set_logging, one_cycle, colorstr, methods + check_file, check_yaml, check_suffix, print_args, print_mutation, one_cycle, colorstr, methods, LOGGER from utils.downloads import attempt_download from utils.loss import ComputeLoss from utils.plots import plot_labels, plot_evolve @@ -51,7 +51,6 @@ from utils.loggers import Loggers from utils.callbacks import Callbacks -LOGGER = logging.getLogger(__name__) LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html RANK = int(os.getenv('RANK', -1)) WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) @@ -129,7 +128,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary for k, v in model.named_parameters(): v.requires_grad = True # train all layers if any(x in k for x in freeze): - print(f'freezing {k}') + LOGGER.info(f'freezing {k}') v.requires_grad = False # Image size @@ -485,7 +484,6 @@ def parse_opt(known=False): def main(opt, callbacks=Callbacks()): # Checks - set_logging(RANK) if RANK in [-1, 0]: print_args(FILE.stem, opt) check_git_status() @@ -609,9 +607,9 @@ def main(opt, callbacks=Callbacks()): # Plot results plot_evolve(evolve_csv) - print(f'Hyperparameter evolution finished\n' - f"Results saved to {colorstr('bold', save_dir)}\n" - f'Use best hyperparameters example: $ python train.py --hyp {evolve_yaml}') + LOGGER.info(f'Hyperparameter evolution finished\n' + f"Results saved to {colorstr('bold', save_dir)}\n" + f'Use best hyperparameters example: $ python train.py --hyp {evolve_yaml}') def run(**kwargs): diff --git a/utils/datasets.py b/utils/datasets.py index fce005bd597c..7fce122942f7 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -28,7 +28,7 @@ from utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective from utils.general 
import check_dataset, check_requirements, check_yaml, clean_str, segments2boxes, \ - xywh2xyxy, xywhn2xyxy, xyxy2xywhn, xyn2xy + xywh2xyxy, xywhn2xyxy, xyxy2xywhn, xyn2xy, LOGGER from utils.torch_utils import torch_distributed_zero_first # Parameters @@ -210,14 +210,14 @@ def __next__(self): ret_val, img0 = self.cap.read() self.frame += 1 - print(f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: ', end='') + s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: ' else: # Read image self.count += 1 img0 = cv2.imread(path) # BGR - assert img0 is not None, 'Image Not Found ' + path - print(f'image {self.count}/{self.nf} {path}: ', end='') + assert img0 is not None, f'Image Not Found {path}' + s = f'image {self.count}/{self.nf} {path}: ' # Padded resize img = letterbox(img0, self.img_size, stride=self.stride, auto=self.auto)[0] @@ -226,7 +226,7 @@ def __next__(self): img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB img = np.ascontiguousarray(img) - return path, img, img0, self.cap + return path, img, img0, self.cap, s def new_video(self, path): self.frame = 0 @@ -264,7 +264,7 @@ def __next__(self): # Print assert ret_val, f'Camera Error {self.pipe}' img_path = 'webcam.jpg' - print(f'webcam {self.count}: ', end='') + s = f'webcam {self.count}: ' # Padded resize img = letterbox(img0, self.img_size, stride=self.stride)[0] @@ -273,7 +273,7 @@ def __next__(self): img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB img = np.ascontiguousarray(img) - return img_path, img, img0, None + return img_path, img, img0, None, s def __len__(self): return 0 @@ -298,14 +298,14 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True): self.auto = auto for i, s in enumerate(sources): # index, source # Start thread to read frames from video stream - print(f'{i + 1}/{n}: {s}... ', end='') + st = f'{i + 1}/{n}: {s}... ' if 'youtube.com/' in s or 'youtu.be/' in s: # if source is YouTube video check_requirements(('pafy', 'youtube_dl')) import pafy s = pafy.new(s).getbest(preftype="mp4").url # YouTube URL s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam cap = cv2.VideoCapture(s) - assert cap.isOpened(), f'Failed to open {s}' + assert cap.isOpened(), f'{st}Failed to open {s}' w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) self.fps[i] = max(cap.get(cv2.CAP_PROP_FPS) % 100, 0) or 30.0 # 30 FPS fallback @@ -313,15 +313,15 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True): _, self.imgs[i] = cap.read() # guarantee first frame self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True) - print(f" success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)") + LOGGER.info(f"{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)") self.threads[i].start() - print('') # newline + LOGGER.info('') # newline # check for common shapes s = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0].shape for x in self.imgs]) self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal if not self.rect: - print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.') + LOGGER.warning('WARNING: Stream shapes differ. 
For optimal performance supply similarly-shaped streams.') def update(self, i, cap, stream): # Read stream `i` frames in daemon thread @@ -335,7 +335,7 @@ def update(self, i, cap, stream): if success: self.imgs[i] = im else: - print('WARNING: Video stream unresponsive, please check your IP camera connection.') + LOGGER.warn('WARNING: Video stream unresponsive, please check your IP camera connection.') self.imgs[i] *= 0 cap.open(stream) # re-open stream if signal was lost time.sleep(1 / self.fps[i]) # wait time @@ -361,7 +361,7 @@ def __next__(self): img = img[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW img = np.ascontiguousarray(img) - return self.sources, img, img0, None + return self.sources, img, img0, None, '' def __len__(self): return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years @@ -666,7 +666,7 @@ def load_image(self, i): else: # read image path = self.img_files[i] im = cv2.imread(path) # BGR - assert im is not None, 'Image Not Found ' + path + assert im is not None, f'Image Not Found {path}' h0, w0 = im.shape[:2] # orig hw r = self.img_size / max(h0, w0) # ratio if r != 1: # if sizes are not equal diff --git a/utils/general.py b/utils/general.py index 667af63e4044..872d5ce57c81 100755 --- a/utils/general.py +++ b/utils/general.py @@ -42,6 +42,16 @@ ROOT = FILE.parents[1] # YOLOv5 root directory +def set_logging(name=None, verbose=True): + # Sets level and returns logger + rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings + logging.basicConfig(format="%(message)s", level=logging.INFO if (verbose and rank in (-1, 0)) else logging.WARN) + return logging.getLogger(name) + + +LOGGER = set_logging(__name__) # define globally (used in train.py, val.py, detect.py, etc.) + + class Profile(contextlib.ContextDecorator): # Usage: @Profile() decorator or 'with Profile():' context manager def __enter__(self): @@ -87,15 +97,9 @@ def methods(instance): return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith("__")] -def set_logging(rank=-1, verbose=True): - logging.basicConfig( - format="%(message)s", - level=logging.INFO if (verbose and rank in [-1, 0]) else logging.WARN) - - def print_args(name, opt): # Print argparser arguments - print(colorstr(f'{name}: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items())) + LOGGER.info(colorstr(f'{name}: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items())) def init_seeds(seed=0): diff --git a/val.py b/val.py index 2fc547322a0a..1fc98c71198b 100644 --- a/val.py +++ b/val.py @@ -25,9 +25,9 @@ from models.experimental import attempt_load from utils.datasets import create_dataloader -from utils.general import coco80_to_coco91_class, check_dataset, check_img_size, check_requirements, \ - check_suffix, check_yaml, box_iou, non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, set_logging, \ - increment_path, colorstr, print_args +from utils.general import box_iou, coco80_to_coco91_class, colorstr, check_dataset, check_img_size, \ + check_requirements, check_suffix, check_yaml, increment_path, non_max_suppression, print_args, scale_coords, \ + xyxy2xywh, xywh2xyxy, LOGGER from utils.metrics import ap_per_class, ConfusionMatrix from utils.plots import output_to_target, plot_images, plot_val_study from utils.torch_utils import select_device, time_sync @@ -242,18 +242,18 @@ def run(data, # Print results pf = '%20s' + '%11i' * 2 + '%11.3g' * 4 # print format - print(pf % ('all', seen, nt.sum(), mp, mr, map50, map)) + LOGGER.info(pf % ('all', seen, nt.sum(), mp, mr, 
map50, map)) # Print results per class if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats): for i, c in enumerate(ap_class): - print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i])) + LOGGER.info(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i])) # Print speeds t = tuple(x / seen * 1E3 for x in dt) # speeds per image if not training: shape = (batch_size, 3, imgsz, imgsz) - print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t) + LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t) # Plots if plots: @@ -265,7 +265,7 @@ def run(data, w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json') # annotations json pred_json = str(save_dir / f"{w}_predictions.json") # predictions json - print(f'\nEvaluating pycocotools mAP... saving {pred_json}...') + LOGGER.info(f'\nEvaluating pycocotools mAP... saving {pred_json}...') with open(pred_json, 'w') as f: json.dump(jdict, f) @@ -284,13 +284,13 @@ def run(data, eval.summarize() map, map50 = eval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5) except Exception as e: - print(f'pycocotools unable to run: {e}') + LOGGER.info(f'pycocotools unable to run: {e}') # Return results model.float() # for training if not training: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' - print(f"Results saved to {colorstr('bold', save_dir)}{s}") + LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") maps = np.zeros(nc) + map for i, c in enumerate(ap_class): maps[c] = ap[i] @@ -327,8 +327,7 @@ def parse_opt(): def main(opt): - set_logging() - check_requirements(exclude=('tensorboard', 'thop')) + check_requirements(requirements=ROOT / 'requirements.txt', exclude=('tensorboard', 'thop')) if opt.task in ('train', 'val', 'test'): # run normally run(**vars(opt)) @@ -346,7 +345,7 @@ def main(opt): f = f'study_{Path(opt.data).stem}_{Path(w).stem}.txt' # filename to save to y = [] # y axis for i in x: # img-size - print(f'\nRunning {f} point {i}...') + LOGGER.info(f'\nRunning {f} point {i}...') r, _, t = run(opt.data, weights=w, batch_size=opt.batch_size, imgsz=i, conf_thres=opt.conf_thres, iou_thres=opt.iou_thres, device=opt.device, save_json=opt.save_json, plots=False) y.append(r + t) # results and times From 4c0982a243aac3969345fc61e10eb7ea4d78e104 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 1 Nov 2021 18:28:14 +0100 Subject: [PATCH 0672/1976] Update README.md (#5438) 2-line update --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index d3fd7e9a92f5..3e2f5b656cde 100644 --- a/README.md +++ b/README.md @@ -193,7 +193,7 @@ Get started in seconds with our verified environments. 
Click each icon below for
 |Weights and Biases|Roboflow ⭐ NEW|
 |:-:|:-:|
-|Automatically track and visualize all your YOLOv5 training runs in the cloud with [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme)|Label and automatically export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) |
+|Automatically track and visualize all your YOLOv5 training runs in the cloud with [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme)|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) |

```python
def increment_path(path, exist_ok=False, sep='', mkdir=False):
    # Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc.
    path = Path(path)  # os-agnostic
    if path.exists() and not exist_ok:
        path, suffix = (path.with_suffix(''), path.suffix) if path.is_file() else (path, '')
        dirs = glob.glob(f"{path}{sep}*")  # similar paths
        matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs]
        i = [int(m.groups()[0]) for m in matches if m]  # indices
        n = max(i) + 1 if i else 2  # increment number
        path = Path(f"{path}{sep}{n}{suffix}")  # increment path
    if mkdir:
        path.mkdir(parents=True, exist_ok=True)  # make directory
    return path

print(increment_path('runs'))
print(increment_path('export.py'))
print(increment_path('abc.def.dir'))
print(increment_path('abc.def.file'))
```
---
 utils/general.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/utils/general.py b/utils/general.py
index adbf1bd48c5f..fc05c691afa2 100755
--- a/utils/general.py
+++ b/utils/general.py
@@ -830,13 +830,12 @@ def increment_path(path, exist_ok=False, sep='', mkdir=False):
     # Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc.
     path = Path(path)  # os-agnostic
     if path.exists() and not exist_ok:
-        suffix = path.suffix
-        path = path.with_suffix('')
+        path, suffix = (path.with_suffix(''), path.suffix) if path.is_file() else (path, '')
         dirs = glob.glob(f"{path}{sep}*")  # similar paths
         matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs]
         i = [int(m.groups()[0]) for m in matches if m]  # indices
         n = max(i) + 1 if i else 2  # increment number
-        path = Path(f"{path}{sep}{n}{suffix}")  # update path
+        path = Path(f"{path}{sep}{n}{suffix}")  # increment path
     if mkdir:
         path.mkdir(parents=True, exist_ok=True)  # make directory
     return path

From 5f603a9dbaeae3fa052b09e6fff7903a7355e8b5 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Fri, 5 Nov 2021 18:43:03 +0100
Subject: [PATCH 0692/1976] Fix detect.py URL inference (#5525)

* Fix detect.py URL inference

Allows detect.py to run inference on remote URL sources, i.e.:

```python
!python detect.py --weights yolov5s.pt --source https://ultralytics.com/assets/zidane.jpg  # image URL
!python detect.py --weights yolov5s.pt --source https://ultralytics.com/assets/decelera_landscape.mov  # video URL
```

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 detect.py | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/detect.py b/detect.py
index 46141ed4da3c..61044914e16b 100644
--- a/detect.py
+++ b/detect.py
@@ -24,10 +24,10 @@ ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative

 from models.experimental import attempt_load
-from utils.datasets import LoadImages, LoadStreams
-from utils.general import (LOGGER, apply_classifier, check_img_size, check_imshow, check_requirements, check_suffix,
-
colorstr, increment_path, non_max_suppression, print_args, save_one_box, scale_coords, - strip_optimizer, xyxy2xywh) +from utils.datasets import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams +from utils.general import (LOGGER, apply_classifier, check_file, check_img_size, check_imshow, check_requirements, + check_suffix, colorstr, increment_path, non_max_suppression, print_args, save_one_box, + scale_coords, strip_optimizer, xyxy2xywh) from utils.plots import Annotator, colors from utils.torch_utils import load_classifier, select_device, time_sync @@ -61,8 +61,11 @@ def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s) ): source = str(source) save_img = not nosave and not source.endswith('.txt') # save inference images - webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith( - ('rtsp://', 'rtmp://', 'http://', 'https://')) + is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) + is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) + webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file) + if is_url and is_file: + source = check_file(source) # download # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run From 32b8738735339207b00f79b43987f1c3755a9039 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 5 Nov 2021 19:22:47 +0100 Subject: [PATCH 0693/1976] Update `check_file()` avoid repeat URL downloads (#5526) --- utils/general.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/utils/general.py b/utils/general.py index fc05c691afa2..15b58257eabb 100755 --- a/utils/general.py +++ b/utils/general.py @@ -338,9 +338,12 @@ def check_file(file, suffix=''): elif file.startswith(('http:/', 'https:/')): # download url = str(Path(file)).replace(':/', '://') # Pathlib turns :// -> :/ file = Path(urllib.parse.unquote(file).split('?')[0]).name # '%2F' to '/', split https://url.com/file.txt?auth - print(f'Downloading {url} to {file}...') - torch.hub.download_url_to_file(url, file) - assert Path(file).exists() and Path(file).stat().st_size > 0, f'File download failed: {url}' # check + if Path(file).is_file(): + print(f'Found {url} locally at {file}') # file already exists + else: + print(f'Downloading {url} to {file}...') + torch.hub.download_url_to_file(url, file) + assert Path(file).exists() and Path(file).stat().st_size > 0, f'File download failed: {url}' # check return file else: # search files = [] From 98a3fd7e8fd82d63aa00200c08a9da40959f7217 Mon Sep 17 00:00:00 2001 From: nanmi <37356276+nanmi@users.noreply.github.com> Date: Sat, 6 Nov 2021 02:26:45 +0800 Subject: [PATCH 0694/1976] Update export.py (#5471) * fix export onnx bug * Update export.py * Update export.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update yolo.py Co-authored-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> From d895a7f70df7dcda4ce19e904e9d2b3a97705a3a Mon Sep 17 00:00:00 2001 From: Wonbeom Jang Date: Sat, 6 Nov 2021 03:28:53 +0900 Subject: [PATCH 0695/1976] Update train.py (#5451) * correct --resume True error * delete temp file * Update train.py Co-authored-by: Glenn Jocher From 336437998f4ff5facb94b4e36ef2d941456d2d8f Mon Sep 17 00:00:00 2001 From: Deep Patel <35742688+deepsworld@users.noreply.github.com> Date: Fri, 5 Nov 2021 14:31:53 -0400 Subject: [PATCH 0696/1976] Suppress ONNX export trace warning (#5437) Checking for 
`onnx_dynamic` first should suppress the warning: ```log TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! if self.grid[i].shape[2:4] != x[i].shape[2:4] or self.onnx_dynamic ``` --- models/yolo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/yolo.py b/models/yolo.py index 85c8d43258e3..510f8e58d9a3 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -55,7 +55,7 @@ def forward(self, x): x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() if not self.training: # inference - if self.grid[i].shape[2:4] != x[i].shape[2:4] or self.onnx_dynamic: + if self.onnx_dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]: self.grid[i], self.anchor_grid[i] = self._make_grid(nx, ny, i) y = x[i].sigmoid() From 60e42e16c2bc51b303e680afccd72351af7a7a69 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 6 Nov 2021 12:21:17 +0100 Subject: [PATCH 0697/1976] Update autobatch.py (#5536) --- utils/autobatch.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/autobatch.py b/utils/autobatch.py index 168b16f691ab..1632e9bc6a5a 100644 --- a/utils/autobatch.py +++ b/utils/autobatch.py @@ -52,5 +52,5 @@ def autobatch(model, imgsz=640, fraction=0.9, batch_size=16): batch_sizes = batch_sizes[:len(y)] p = np.polyfit(batch_sizes, y, deg=1) # first degree polynomial fit b = int((f * fraction - p[1]) / p[0]) # y intercept (optimal batch size) - print(f'{prefix}Using colorstr(batch-size {b}) for {d} {t * fraction:.3g}G/{t:.3g}G ({fraction * 100:.0f}%)') + print(f'{prefix}Using batch-size {b} for {d} {t * fraction:.3g}G/{t:.3g}G ({fraction * 100:.0f}%)') return b From cb18cac33d7161ed938c1c4056e17c653df69ad0 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 6 Nov 2021 13:49:00 +0100 Subject: [PATCH 0698/1976] Update autobatch.py (#5538) * Update autobatch.py * Update autobatch.py * Update autobatch.py --- utils/autobatch.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/utils/autobatch.py b/utils/autobatch.py index 1632e9bc6a5a..3f2b4d1a4c38 100644 --- a/utils/autobatch.py +++ b/utils/autobatch.py @@ -35,11 +35,12 @@ def autobatch(model, imgsz=640, fraction=0.9, batch_size=16): return batch_size d = str(device).upper() # 'CUDA:0' - t = torch.cuda.get_device_properties(device).total_memory / 1024 ** 3 # (GB) - r = torch.cuda.memory_reserved(device) / 1024 ** 3 # (GB) - a = torch.cuda.memory_allocated(device) / 1024 ** 3 # (GB) + properties = torch.cuda.get_device_properties(device) # device properties + t = properties.total_memory / 1024 ** 3 # (GiB) + r = torch.cuda.memory_reserved(device) / 1024 ** 3 # (GiB) + a = torch.cuda.memory_allocated(device) / 1024 ** 3 # (GiB) f = t - (r + a) # free inside reserved - print(f'{prefix}{d} {t:.3g}G total, {r:.3g}G reserved, {a:.3g}G allocated, {f:.3g}G free') + print(f'{prefix}{d} ({properties.name}) {t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free') batch_sizes = [1, 2, 4, 8, 16] try: @@ -52,5 +53,5 @@ def autobatch(model, imgsz=640, fraction=0.9, batch_size=16): batch_sizes = batch_sizes[:len(y)] p = np.polyfit(batch_sizes, y, deg=1) # first degree polynomial fit b = int((f * fraction - p[1]) / p[0]) # y intercept (optimal batch size) - print(f'{prefix}Using batch-size {b} for {d} {t * fraction:.3g}G/{t:.3g}G ({fraction * 100:.0f}%)') + 
print(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%)') return b From 76d90d899a80f52246143edb7a683129b4359396 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 6 Nov 2021 13:58:12 +0100 Subject: [PATCH 0699/1976] =?UTF-8?q?Update=20Issue=20Templates=20with=20?= =?UTF-8?q?=F0=9F=92=A1=20ProTip!=20(#5539)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Update bug-report.yml * Update question.yml * Update bug-report.yml --- .github/ISSUE_TEMPLATE/bug-report.yml | 10 ++++++---- .github/ISSUE_TEMPLATE/question.yml | 4 ++-- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml index a20f15c20c93..fcb64138b088 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.yml +++ b/.github/ISSUE_TEMPLATE/bug-report.yml @@ -41,8 +41,8 @@ body: attributes: label: Bug description: Provide console output with error messages and/or screenshots of the bug. - placeholder: > - TIP: Include as much information as possible (screenshots, logs, tracebacks etc.) to receive the most helpful response. + placeholder: | + 💡 ProTip! Include as much information as possible (screenshots, logs, tracebacks etc.) to receive the most helpful response. validations: required: true @@ -51,7 +51,7 @@ body: label: Environment description: Please specify the software and hardware you used to produce the bug. placeholder: | - - YOLO: YOLOv5 🚀 v6.0-37-g620b535 torch 1.9.0+cu111 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB) + - YOLO: YOLOv5 🚀 v6.0-67-g60e42e1 torch 1.9.0+cu111 CUDA:0 (A100-SXM4-40GB, 40536MiB) - OS: Ubuntu 20.04 - Python: 3.9.0 validations: @@ -64,7 +64,9 @@ body: When asking a question, people will be better able to provide help if you provide code that they can easily understand and use to **reproduce** the problem. This is referred to by community members as creating a [minimal reproducible example](https://stackoverflow.com/help/minimal-reproducible-example). placeholder: | - # code to reproduce your issue here + ``` + # Code to reproduce your issue here + ``` validations: required: false diff --git a/.github/ISSUE_TEMPLATE/question.yml b/.github/ISSUE_TEMPLATE/question.yml index 9ae5dd57c608..8e0993c68bab 100644 --- a/.github/ISSUE_TEMPLATE/question.yml +++ b/.github/ISSUE_TEMPLATE/question.yml @@ -22,8 +22,8 @@ body: attributes: label: Question description: What is your question? - placeholder: > - TIP: Include as much information as possible (screenshots, links, reference etc.) to receive the most helpful response. + placeholder: | + 💡 ProTip! Include as much information as possible (screenshots, logs, tracebacks etc.) to receive the most helpful response. 
validations: required: true From fa2344cdd8814ac7901b844f9e80d3db8bdc1c32 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 6 Nov 2021 15:07:45 +0100 Subject: [PATCH 0700/1976] Update `models/hub/*.yaml` files for v6.0n release (#5540) * Update model yamls for v6.0 * Add python models/yolo.py --test * Ghost fix --- models/hub/yolov5-bifpn.yaml | 14 +++++++------- models/hub/yolov5-fpn.yaml | 22 +++++++++++----------- models/hub/yolov5-p2.yaml | 14 +++++++------- models/hub/yolov5-p6.yaml | 16 ++++++++-------- models/hub/yolov5-p7.yaml | 12 ++++++------ models/hub/yolov5-panet.yaml | 24 ++++++++++++------------ models/hub/yolov5s-ghost.yaml | 12 ++++++------ models/hub/yolov5s-transformer.yaml | 12 ++++++------ models/yolo.py | 9 +++++++++ 9 files changed, 72 insertions(+), 63 deletions(-) diff --git a/models/hub/yolov5-bifpn.yaml b/models/hub/yolov5-bifpn.yaml index 2f2c82c70122..504815f5cfa0 100644 --- a/models/hub/yolov5-bifpn.yaml +++ b/models/hub/yolov5-bifpn.yaml @@ -9,22 +9,22 @@ anchors: - [30,61, 62,45, 59,119] # P4/16 - [116,90, 156,198, 373,326] # P5/32 -# YOLOv5 backbone +# YOLOv5 v6.0 backbone backbone: # [from, number, module, args] - [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 [-1, 3, C3, [128]], [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 - [-1, 9, C3, [256]], + [-1, 6, C3, [256]], [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 [-1, 9, C3, [512]], [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 - [-1, 1, SPP, [1024, [5, 9, 13]]], - [-1, 3, C3, [1024, False]], # 9 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 ] -# YOLOv5 BiFPN head +# YOLOv5 v6.0 BiFPN head head: [[-1, 1, Conv, [512, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], @@ -37,7 +37,7 @@ head: [-1, 3, C3, [256, False]], # 17 (P3/8-small) [-1, 1, Conv, [256, 3, 2]], - [[-1, 14, 6], 1, Concat, [1]], # cat P4 + [[-1, 14, 6], 1, Concat, [1]], # cat P4 <--- BiFPN change [-1, 3, C3, [512, False]], # 20 (P4/16-medium) [-1, 1, Conv, [512, 3, 2]], diff --git a/models/hub/yolov5-fpn.yaml b/models/hub/yolov5-fpn.yaml index 707b2136cee1..a23e9c6fbf9f 100644 --- a/models/hub/yolov5-fpn.yaml +++ b/models/hub/yolov5-fpn.yaml @@ -9,34 +9,34 @@ anchors: - [30,61, 62,45, 59,119] # P4/16 - [116,90, 156,198, 373,326] # P5/32 -# YOLOv5 backbone +# YOLOv5 v6.0 backbone backbone: # [from, number, module, args] - [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 - [-1, 3, Bottleneck, [128]], + [-1, 3, C3, [128]], [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 - [-1, 9, BottleneckCSP, [256]], + [-1, 6, C3, [256]], [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 - [-1, 9, BottleneckCSP, [512]], + [-1, 9, C3, [512]], [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 - [-1, 1, SPP, [1024, [5, 9, 13]]], - [-1, 6, BottleneckCSP, [1024]], # 9 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 ] -# YOLOv5 FPN head +# YOLOv5 v6.0 FPN head head: - [[-1, 3, BottleneckCSP, [1024, False]], # 10 (P5/32-large) + [[-1, 3, C3, [1024, False]], # 10 (P5/32-large) [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 6], 1, Concat, [1]], # cat backbone P4 [-1, 1, Conv, [512, 1, 1]], - [-1, 3, BottleneckCSP, [512, False]], # 14 (P4/16-medium) + [-1, 3, C3, [512, False]], # 14 (P4/16-medium) [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 4], 1, Concat, [1]], # cat backbone P3 [-1, 1, Conv, [256, 1, 1]], - [-1, 3, BottleneckCSP, [256, False]], # 18 (P3/8-small) + [-1, 3, C3, [256, False]], # 18 (P3/8-small) [[18, 14, 10], 1, Detect, [nc, anchors]], # Detect(P3, 
P4, P5) ] diff --git a/models/hub/yolov5-p2.yaml b/models/hub/yolov5-p2.yaml index 759e9f92fb29..ffe26ebad182 100644 --- a/models/hub/yolov5-p2.yaml +++ b/models/hub/yolov5-p2.yaml @@ -4,24 +4,24 @@ nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple -anchors: 3 +anchors: 3 # auto-anchor evolves 3 anchors per P output layer -# YOLOv5 backbone +# YOLOv5 v6.0 backbone backbone: # [from, number, module, args] - [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 [-1, 3, C3, [128]], [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 - [-1, 9, C3, [256]], + [-1, 6, C3, [256]], [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 [-1, 9, C3, [512]], [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 - [-1, 1, SPP, [1024, [5, 9, 13]]], - [-1, 3, C3, [1024, False]], # 9 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 ] -# YOLOv5 head +# YOLOv5 v6.0 head head: [[-1, 1, Conv, [512, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], diff --git a/models/hub/yolov5-p6.yaml b/models/hub/yolov5-p6.yaml index 85e142539ce3..28f3e439cccd 100644 --- a/models/hub/yolov5-p6.yaml +++ b/models/hub/yolov5-p6.yaml @@ -4,26 +4,26 @@ nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple -anchors: 3 +anchors: 3 # auto-anchor 3 anchors per P output layer -# YOLOv5 backbone +# YOLOv5 v6.0 backbone backbone: # [from, number, module, args] - [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 [-1, 3, C3, [128]], [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 - [-1, 9, C3, [256]], + [-1, 6, C3, [256]], [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 [-1, 9, C3, [512]], [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 [-1, 3, C3, [768]], [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 - [-1, 1, SPP, [1024, [3, 5, 7]]], - [-1, 3, C3, [1024, False]], # 11 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 11 ] -# YOLOv5 head +# YOLOv5 v6.0 head head: [[-1, 1, Conv, [768, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], @@ -50,7 +50,7 @@ head: [-1, 1, Conv, [768, 3, 2]], [[-1, 12], 1, Concat, [1]], # cat head P6 - [-1, 3, C3, [1024, False]], # 32 (P5/64-xlarge) + [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) ] diff --git a/models/hub/yolov5-p7.yaml b/models/hub/yolov5-p7.yaml index 88a7a95cbbd1..bd2f5845f884 100644 --- a/models/hub/yolov5-p7.yaml +++ b/models/hub/yolov5-p7.yaml @@ -4,16 +4,16 @@ nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple -anchors: 3 +anchors: 3 # auto-anchor 3 anchors per P output layer -# YOLOv5 backbone +# YOLOv5 v6.0 backbone backbone: # [from, number, module, args] - [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 [-1, 3, C3, [128]], [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 - [-1, 9, C3, [256]], + [-1, 6, C3, [256]], [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 [-1, 9, C3, [512]], [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 @@ -21,8 +21,8 @@ backbone: [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 [-1, 3, C3, [1024]], [-1, 1, Conv, [1280, 3, 2]], # 11-P7/128 - [-1, 1, SPP, [1280, [3, 5]]], - [-1, 3, C3, [1280, False]], # 13 + [-1, 3, C3, [1280]], + [-1, 1, SPPF, [1280, 5]], # 13 ] # YOLOv5 head diff --git a/models/hub/yolov5-panet.yaml b/models/hub/yolov5-panet.yaml index 76b9b7e74e33..ccfbf900691c 100644 --- a/models/hub/yolov5-panet.yaml +++ 
b/models/hub/yolov5-panet.yaml @@ -9,40 +9,40 @@ anchors: - [30,61, 62,45, 59,119] # P4/16 - [116,90, 156,198, 373,326] # P5/32 -# YOLOv5 backbone +# YOLOv5 v6.0 backbone backbone: # [from, number, module, args] - [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 - [-1, 3, BottleneckCSP, [128]], + [-1, 3, C3, [128]], [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 - [-1, 9, BottleneckCSP, [256]], + [-1, 6, C3, [256]], [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 - [-1, 9, BottleneckCSP, [512]], + [-1, 9, C3, [512]], [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 - [-1, 1, SPP, [1024, [5, 9, 13]]], - [-1, 3, BottleneckCSP, [1024, False]], # 9 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 ] -# YOLOv5 PANet head +# YOLOv5 v6.0 PANet head head: [[-1, 1, Conv, [512, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 6], 1, Concat, [1]], # cat backbone P4 - [-1, 3, BottleneckCSP, [512, False]], # 13 + [-1, 3, C3, [512, False]], # 13 [-1, 1, Conv, [256, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], [[-1, 4], 1, Concat, [1]], # cat backbone P3 - [-1, 3, BottleneckCSP, [256, False]], # 17 (P3/8-small) + [-1, 3, C3, [256, False]], # 17 (P3/8-small) [-1, 1, Conv, [256, 3, 2]], [[-1, 14], 1, Concat, [1]], # cat head P4 - [-1, 3, BottleneckCSP, [512, False]], # 20 (P4/16-medium) + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) [-1, 1, Conv, [512, 3, 2]], [[-1, 10], 1, Concat, [1]], # cat head P5 - [-1, 3, BottleneckCSP, [1024, False]], # 23 (P5/32-large) + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) ] diff --git a/models/hub/yolov5s-ghost.yaml b/models/hub/yolov5s-ghost.yaml index dbf2c8e03489..ff9519c3f1aa 100644 --- a/models/hub/yolov5s-ghost.yaml +++ b/models/hub/yolov5s-ghost.yaml @@ -9,22 +9,22 @@ anchors: - [30,61, 62,45, 59,119] # P4/16 - [116,90, 156,198, 373,326] # P5/32 -# YOLOv5 backbone +# YOLOv5 v6.0 backbone backbone: # [from, number, module, args] - [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 [-1, 1, GhostConv, [128, 3, 2]], # 1-P2/4 [-1, 3, C3Ghost, [128]], [-1, 1, GhostConv, [256, 3, 2]], # 3-P3/8 - [-1, 9, C3Ghost, [256]], + [-1, 6, C3Ghost, [256]], [-1, 1, GhostConv, [512, 3, 2]], # 5-P4/16 [-1, 9, C3Ghost, [512]], [-1, 1, GhostConv, [1024, 3, 2]], # 7-P5/32 - [-1, 1, SPP, [1024, [5, 9, 13]]], - [-1, 3, C3Ghost, [1024, False]], # 9 + [-1, 3, C3Ghost, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 ] -# YOLOv5 head +# YOLOv5 v6.0 head head: [[-1, 1, GhostConv, [512, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], diff --git a/models/hub/yolov5s-transformer.yaml b/models/hub/yolov5s-transformer.yaml index aeac1acb0582..100d7c447527 100644 --- a/models/hub/yolov5s-transformer.yaml +++ b/models/hub/yolov5s-transformer.yaml @@ -9,22 +9,22 @@ anchors: - [30,61, 62,45, 59,119] # P4/16 - [116,90, 156,198, 373,326] # P5/32 -# YOLOv5 backbone +# YOLOv5 v6.0 backbone backbone: # [from, number, module, args] - [[-1, 1, Focus, [64, 3]], # 0-P1/2 + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 [-1, 3, C3, [128]], [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 - [-1, 9, C3, [256]], + [-1, 6, C3, [256]], [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 [-1, 9, C3, [512]], [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 - [-1, 1, SPP, [1024, [5, 9, 13]]], - [-1, 3, C3TR, [1024, False]], # 9 <-------- C3TR() Transformer module + [-1, 3, C3TR, [1024]], # 9 <--- C3TR() Transformer module + [-1, 1, SPPF, [1024, 5]], # 9 ] -# YOLOv5 head +# YOLOv5 
v6.0 head head: [[-1, 1, Conv, [512, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], diff --git a/models/yolo.py b/models/yolo.py index 510f8e58d9a3..c196d46f9efa 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -306,6 +306,7 @@ def parse_model(d, ch): # model_dict, input_channels(3) parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml') parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') parser.add_argument('--profile', action='store_true', help='profile model speed') + parser.add_argument('--test', action='store_true', help='test all yolo*.yaml') opt = parser.parse_args() opt.cfg = check_yaml(opt.cfg) # check YAML print_args(FILE.stem, opt) @@ -320,6 +321,14 @@ def parse_model(d, ch): # model_dict, input_channels(3) img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 640, 640).to(device) y = model(img, profile=True) + # Test all models + if opt.test: + for cfg in Path(ROOT / 'models').rglob('yolo*.yaml'): + try: + _ = Model(cfg) + except Exception as e: + print(f'Error in {cfg}: {e}') + # Tensorboard (not working https://github.com/ultralytics/yolov5/issues/2898) # from torch.utils.tensorboard import SummaryWriter # tb_writer = SummaryWriter('.') From e189fa15eab4866a5f55c8b58d873dacebfb2f74 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 6 Nov 2021 15:41:17 +0100 Subject: [PATCH 0701/1976] `intersect_dicts()` in hubconf.py fix (#5542) --- hubconf.py | 5 ++--- train.py | 7 +++---- utils/general.py | 5 +++++ utils/torch_utils.py | 5 ----- 4 files changed, 10 insertions(+), 12 deletions(-) diff --git a/hubconf.py b/hubconf.py index 51f658a532ff..3488fef76ac5 100644 --- a/hubconf.py +++ b/hubconf.py @@ -30,7 +30,7 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo from models.experimental import attempt_load from models.yolo import Model from utils.downloads import attempt_download - from utils.general import check_requirements, set_logging + from utils.general import check_requirements, intersect_dicts, set_logging from utils.torch_utils import select_device file = Path(__file__).resolve() @@ -49,9 +49,8 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo model = Model(cfg, channels, classes) # create model if pretrained: ckpt = torch.load(attempt_download(path), map_location=device) # load - msd = model.state_dict() # model state_dict csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 - csd = {k: v for k, v in csd.items() if msd[k].shape == v.shape} # filter + csd = intersect_dicts(csd, model.state_dict(), exclude=['anchors']) # intersect model.load_state_dict(csd, strict=False) # load if len(ckpt['model'].names) == classes: model.names = ckpt['model'].names # set class names attribute diff --git a/train.py b/train.py index 75f3b7cb36a7..90abdc59db88 100644 --- a/train.py +++ b/train.py @@ -43,15 +43,14 @@ from utils.downloads import attempt_download from utils.general import (LOGGER, check_dataset, check_file, check_git_status, check_img_size, check_requirements, check_suffix, check_yaml, colorstr, get_latest_run, increment_path, init_seeds, - labels_to_class_weights, labels_to_image_weights, methods, one_cycle, print_args, - print_mutation, strip_optimizer) + intersect_dicts, labels_to_class_weights, labels_to_image_weights, methods, one_cycle, + print_args, print_mutation, strip_optimizer) from utils.loggers import Loggers from utils.loggers.wandb.wandb_utils import check_wandb_resume from utils.loss import 
ComputeLoss from utils.metrics import fitness from utils.plots import plot_evolve, plot_labels -from utils.torch_utils import (EarlyStopping, ModelEMA, de_parallel, intersect_dicts, select_device, - torch_distributed_zero_first) +from utils.torch_utils import EarlyStopping, ModelEMA, de_parallel, select_device, torch_distributed_zero_first LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html RANK = int(os.getenv('RANK', -1)) diff --git a/utils/general.py b/utils/general.py index 15b58257eabb..46cb1ddef983 100755 --- a/utils/general.py +++ b/utils/general.py @@ -125,6 +125,11 @@ def init_seeds(seed=0): cudnn.benchmark, cudnn.deterministic = (False, True) if seed == 0 else (True, False) +def intersect_dicts(da, db, exclude=()): + # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values + return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape} + + def get_latest_run(search_dir='.'): # Return path to most recent 'last.pt' in /runs (i.e. to --resume from) last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 793e8d8ffd3e..b36e98d0b656 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -153,11 +153,6 @@ def de_parallel(model): return model.module if is_parallel(model) else model -def intersect_dicts(da, db, exclude=()): - # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values - return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape} - - def initialize_weights(model): for m in model.modules(): t = type(m) From 60c8a4f6965cd16c22ee425f58f63eb903e40ee0 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 6 Nov 2021 16:03:14 +0100 Subject: [PATCH 0702/1976] Fix for *.yaml emojis on load (#5543) Fix for Colab hub error: ```python import yaml with open('yolov5s.yaml', errors='ignore') as f: d = yaml.safe_load(f) # model dict print(d) --------------------------------------------------------------------------- ReaderError Traceback (most recent call last) in () 2 3 with open('yolov5s.yaml', errors='ignore') as f: ----> 4 d = yaml.safe_load(f) # model dict 5 6 print(d) 6 frames /usr/local/lib/python3.7/dist-packages/yaml/reader.py in check_printable(self, data) 142 position = self.index+(len(self.buffer)-self.pointer)+match.start() 143 raise ReaderError(self.name, position, ord(character), --> 144 'unicode', "special characters are not allowed") 145 146 def update(self, length): ReaderError: unacceptable character #x1f680: special characters are not allowed in "yolov5s.yaml", position 9 ``` --- models/yolo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/yolo.py b/models/yolo.py index c196d46f9efa..305f0ca0cc88 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -90,7 +90,7 @@ def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, i else: # is *.yaml import yaml # for torch hub self.yaml_file = Path(cfg).name - with open(cfg, errors='ignore') as f: + with open(cfg, encoding='ascii', errors='ignore') as f: self.yaml = yaml.safe_load(f) # model dict # Define model From 3f64ad176068ba5f840eefe943cdafbcd9a7753b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 6 Nov 2021 19:28:03 +0100 Subject: [PATCH 0703/1976] Fix `save_one_box()` (#5545) * Fix `save_one_box()` * [pre-commit.ci] auto fixes from pre-commit.com hooks for more 
information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- detect.py | 6 +-- models/common.py | 5 +- utils/general.py | 15 ------ utils/plots.py | 126 ++++++++++++++++++++++++++--------------------- 4 files changed, 76 insertions(+), 76 deletions(-) diff --git a/detect.py b/detect.py index 61044914e16b..9527ae2b57f4 100644 --- a/detect.py +++ b/detect.py @@ -26,9 +26,9 @@ from models.experimental import attempt_load from utils.datasets import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams from utils.general import (LOGGER, apply_classifier, check_file, check_img_size, check_imshow, check_requirements, - check_suffix, colorstr, increment_path, non_max_suppression, print_args, save_one_box, - scale_coords, strip_optimizer, xyxy2xywh) -from utils.plots import Annotator, colors + check_suffix, colorstr, increment_path, non_max_suppression, print_args, scale_coords, + strip_optimizer, xyxy2xywh) +from utils.plots import Annotator, colors, save_one_box from utils.torch_utils import load_classifier, select_device, time_sync diff --git a/models/common.py b/models/common.py index 04aa2e4749f4..8035ef11a791 100644 --- a/models/common.py +++ b/models/common.py @@ -18,9 +18,8 @@ from torch.cuda import amp from utils.datasets import exif_transpose, letterbox -from utils.general import (colorstr, increment_path, make_divisible, non_max_suppression, save_one_box, scale_coords, - xyxy2xywh) -from utils.plots import Annotator, colors +from utils.general import colorstr, increment_path, make_divisible, non_max_suppression, scale_coords, xyxy2xywh +from utils.plots import Annotator, colors, save_one_box from utils.torch_utils import time_sync LOGGER = logging.getLogger(__name__) diff --git a/utils/general.py b/utils/general.py index 46cb1ddef983..0f45d72498fe 100755 --- a/utils/general.py +++ b/utils/general.py @@ -819,21 +819,6 @@ def apply_classifier(x, model, img, im0): return x -def save_one_box(xyxy, im, file='image.jpg', gain=1.02, pad=10, square=False, BGR=False, save=True): - # Save image crop as {file} with crop size multiple {gain} and {pad} pixels. Save and/or return crop - xyxy = torch.tensor(xyxy).view(-1, 4) - b = xyxy2xywh(xyxy) # boxes - if square: - b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # attempt rectangle to square - b[:, 2:] = b[:, 2:] * gain + pad # box wh * gain + pad - xyxy = xywh2xyxy(b).long() - clip_coords(xyxy, im.shape) - crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2]), ::(1 if BGR else -1)] - if save: - cv2.imwrite(str(increment_path(file, mkdir=True).with_suffix('.jpg')), crop) - return crop - - def increment_path(path, exist_ok=False, sep='', mkdir=False): # Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc. 
path = Path(path) # os-agnostic diff --git a/utils/plots.py b/utils/plots.py index 94487b4f5b85..b5e25d668d22 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -17,7 +17,7 @@ import torch from PIL import Image, ImageDraw, ImageFont -from utils.general import is_ascii, is_chinese, user_config_dir, xywh2xyxy, xyxy2xywh +from utils.general import clip_coords, increment_path, is_ascii, is_chinese, user_config_dir, xywh2xyxy, xyxy2xywh from utils.metrics import fitness # Settings @@ -117,6 +117,33 @@ def result(self): return np.asarray(self.im) +def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')): + """ + x: Features to be visualized + module_type: Module type + stage: Module stage within model + n: Maximum number of feature maps to plot + save_dir: Directory to save results + """ + if 'Detect' not in module_type: + batch, channels, height, width = x.shape # batch, channels, height, width + if height > 1 and width > 1: + f = f"stage{stage}_{module_type.split('.')[-1]}_features.png" # filename + + blocks = torch.chunk(x[0].cpu(), channels, dim=0) # select batch index 0, block by channels + n = min(n, channels) # number of plots + fig, ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True) # 8 rows x n/8 cols + ax = ax.ravel() + plt.subplots_adjust(wspace=0.05, hspace=0.05) + for i in range(n): + ax[i].imshow(blocks[i].squeeze()) # cmap='gray' + ax[i].axis('off') + + print(f'Saving {save_dir / f}... ({n}/{channels})') + plt.savefig(save_dir / f, dpi=300, bbox_inches='tight') + plt.close() + + def hist2d(x, y, n=100): # 2d histogram used in labels.png and evolve.png xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n) @@ -337,37 +364,6 @@ def plot_labels(labels, names=(), save_dir=Path('')): plt.close() -def profile_idetection(start=0, stop=0, labels=(), save_dir=''): - # Plot iDetection '*.txt' per-image logs. 
from utils.plots import *; profile_idetection() - ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel() - s = ['Images', 'Free Storage (GB)', 'RAM Usage (GB)', 'Battery', 'dt_raw (ms)', 'dt_smooth (ms)', 'real-world FPS'] - files = list(Path(save_dir).glob('frames*.txt')) - for fi, f in enumerate(files): - try: - results = np.loadtxt(f, ndmin=2).T[:, 90:-30] # clip first and last rows - n = results.shape[1] # number of rows - x = np.arange(start, min(stop, n) if stop else n) - results = results[:, x] - t = (results[0] - results[0].min()) # set t0=0s - results[0] = x - for i, a in enumerate(ax): - if i < len(results): - label = labels[fi] if len(labels) else f.stem.replace('frames_', '') - a.plot(t, results[i], marker='.', label=label, linewidth=1, markersize=5) - a.set_title(s[i]) - a.set_xlabel('time (s)') - # if fi == len(files) - 1: - # a.set_ylim(bottom=0) - for side in ['top', 'right']: - a.spines[side].set_visible(False) - else: - a.remove() - except Exception as e: - print(f'Warning: Plotting error for {f}; {e}') - ax[1].legend() - plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200) - - def plot_evolve(evolve_csv='path/to/evolve.csv'): # from utils.plots import *; plot_evolve() # Plot evolve.csv hyp evolution results evolve_csv = Path(evolve_csv) @@ -420,28 +416,48 @@ def plot_results(file='path/to/results.csv', dir=''): plt.close() -def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')): - """ - x: Features to be visualized - module_type: Module type - stage: Module stage within model - n: Maximum number of feature maps to plot - save_dir: Directory to save results - """ - if 'Detect' not in module_type: - batch, channels, height, width = x.shape # batch, channels, height, width - if height > 1 and width > 1: - f = f"stage{stage}_{module_type.split('.')[-1]}_features.png" # filename +def profile_idetection(start=0, stop=0, labels=(), save_dir=''): + # Plot iDetection '*.txt' per-image logs. from utils.plots import *; profile_idetection() + ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel() + s = ['Images', 'Free Storage (GB)', 'RAM Usage (GB)', 'Battery', 'dt_raw (ms)', 'dt_smooth (ms)', 'real-world FPS'] + files = list(Path(save_dir).glob('frames*.txt')) + for fi, f in enumerate(files): + try: + results = np.loadtxt(f, ndmin=2).T[:, 90:-30] # clip first and last rows + n = results.shape[1] # number of rows + x = np.arange(start, min(stop, n) if stop else n) + results = results[:, x] + t = (results[0] - results[0].min()) # set t0=0s + results[0] = x + for i, a in enumerate(ax): + if i < len(results): + label = labels[fi] if len(labels) else f.stem.replace('frames_', '') + a.plot(t, results[i], marker='.', label=label, linewidth=1, markersize=5) + a.set_title(s[i]) + a.set_xlabel('time (s)') + # if fi == len(files) - 1: + # a.set_ylim(bottom=0) + for side in ['top', 'right']: + a.spines[side].set_visible(False) + else: + a.remove() + except Exception as e: + print(f'Warning: Plotting error for {f}; {e}') + ax[1].legend() + plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200) - blocks = torch.chunk(x[0].cpu(), channels, dim=0) # select batch index 0, block by channels - n = min(n, channels) # number of plots - fig, ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True) # 8 rows x n/8 cols - ax = ax.ravel() - plt.subplots_adjust(wspace=0.05, hspace=0.05) - for i in range(n): - ax[i].imshow(blocks[i].squeeze()) # cmap='gray' - ax[i].axis('off') - print(f'Saving {save_dir / f}... 
({n}/{channels})') - plt.savefig(save_dir / f, dpi=300, bbox_inches='tight') - plt.close() +def save_one_box(xyxy, im, file='image.jpg', gain=1.02, pad=10, square=False, BGR=False, save=True): + # Save image crop as {file} with crop size multiple {gain} and {pad} pixels. Save and/or return crop + xyxy = torch.tensor(xyxy).view(-1, 4) + b = xyxy2xywh(xyxy) # boxes + if square: + b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # attempt rectangle to square + b[:, 2:] = b[:, 2:] * gain + pad # box wh * gain + pad + xyxy = xywh2xyxy(b).long() + clip_coords(xyxy, im.shape) + crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2]), ::(1 if BGR else -1)] + if save: + file.parent.mkdir(parents=True, exist_ok=True) # make directory + cv2.imwrite(str(increment_path(file).with_suffix('.jpg')), crop) + return crop From b8f979bafab6db020d86779b4b40619cd4d77d57 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 6 Nov 2021 20:34:54 +0100 Subject: [PATCH 0704/1976] Inside Ultralytics video https://youtu.be/Zgi9g1ksQHc (#5546) * Update detect.py Usage examples * Inside Ultralytics at https://youtu.be/Zgi9g1ksQHc --- README.md | 6 +++--- detect.py | 8 +++++++- tutorial.ipynb | 6 +++--- 3 files changed, 13 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 3e2f5b656cde..6e72d85da7ee 100644 --- a/README.md +++ b/README.md @@ -109,11 +109,11 @@ the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases) and ```bash $ python detect.py --source 0 # webcam - file.jpg # image - file.mp4 # video + img.jpg # image + vid.mp4 # video path/ # directory path/*.jpg # glob - 'https://youtu.be/NUsoVlDFqZg' # YouTube + 'https://youtu.be/Zgi9g1ksQHc' # YouTube 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream ``` diff --git a/detect.py b/detect.py index 9527ae2b57f4..661a0b86bc99 100644 --- a/detect.py +++ b/detect.py @@ -3,7 +3,13 @@ Run inference on images, videos, directories, streams, etc. Usage: - $ python path/to/detect.py --source path/to/img.jpg --weights yolov5s.pt --img 640 + $ python path/to/detect.py --weights yolov5s.pt --source 0 # webcam + img.jpg # image + vid.mp4 # video + path/ # directory + path/*.jpg # glob + 'https://youtu.be/Zgi9g1ksQHc' # YouTube + 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream """ import argparse diff --git a/tutorial.ipynb b/tutorial.ipynb index 9184a66d3f42..b013fe694ba4 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -438,11 +438,11 @@ "\n", "```shell\n", "python detect.py --source 0 # webcam\n", - " file.jpg # image \n", - " file.mp4 # video\n", + " img.jpg # image \n", + " vid.mp4 # video\n", " path/ # directory\n", " path/*.jpg # glob\n", - " 'https://youtu.be/NUsoVlDFqZg' # YouTube\n", + " 'https://youtu.be/Zgi9g1ksQHc' # YouTube\n", " 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream\n", "```" ] From 0de4a9c35d0ab2a204eeb1eab879106c799b28bd Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 8 Nov 2021 16:04:31 +0100 Subject: [PATCH 0705/1976] Add `--conf-thres` >> 0.001 warning (#5567) Partially addresses invalid mAPs at higher confidence threshold issue https://github.com/ultralytics/yolov5/issues/1466. 
--- val.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/val.py b/val.py index 2118ad400ac7..d2797f1189ec 100644 --- a/val.py +++ b/val.py @@ -330,6 +330,8 @@ def main(opt): check_requirements(requirements=ROOT / 'requirements.txt', exclude=('tensorboard', 'thop')) if opt.task in ('train', 'val', 'test'): # run normally + if opt.conf_thres > 0.001: # https://github.com/ultralytics/yolov5/issues/1466 + LOGGER.info(f'WARNING: confidence threshold {opt.conf_thres} >> 0.001 will produce invalid mAP values.') run(**vars(opt)) elif opt.task == 'speed': # speed benchmarks From 79bca2bf64da04e7e1e74a132eb54171f41638cc Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 8 Nov 2021 16:32:15 +0100 Subject: [PATCH 0706/1976] `LOGGER` consolidation (#5569) * Logger consolidation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- models/common.py | 5 +---- train.py | 5 ++--- utils/augmentations.py | 7 +++---- utils/datasets.py | 13 ++++++------- utils/general.py | 2 +- utils/torch_utils.py | 2 -- 6 files changed, 13 insertions(+), 21 deletions(-) diff --git a/models/common.py b/models/common.py index 8035ef11a791..f9e4fc69f006 100644 --- a/models/common.py +++ b/models/common.py @@ -3,7 +3,6 @@ Common modules """ -import logging import math import warnings from copy import copy @@ -18,12 +17,10 @@ from torch.cuda import amp from utils.datasets import exif_transpose, letterbox -from utils.general import colorstr, increment_path, make_divisible, non_max_suppression, scale_coords, xyxy2xywh +from utils.general import LOGGER, colorstr, increment_path, make_divisible, non_max_suppression, scale_coords, xyxy2xywh from utils.plots import Annotator, colors, save_one_box from utils.torch_utils import time_sync -LOGGER = logging.getLogger(__name__) - def autopad(k, p=None): # kernel, padding # Pad to 'same' diff --git a/train.py b/train.py index 90abdc59db88..fedc55d8be5c 100644 --- a/train.py +++ b/train.py @@ -7,7 +7,6 @@ """ import argparse -import logging import math import os import random @@ -201,8 +200,8 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # DP mode if cuda and RANK == -1 and torch.cuda.device_count() > 1: - logging.warning('DP not recommended, instead use torch.distributed.run for best DDP Multi-GPU results.\n' - 'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.') + LOGGER.warning('WARNING: DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n' + 'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.') model = torch.nn.DataParallel(model) # SyncBatchNorm diff --git a/utils/augmentations.py b/utils/augmentations.py index 1c3e66fb87ab..5dcfd49fdd05 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -3,14 +3,13 @@ Image augmentation functions """ -import logging import math import random import cv2 import numpy as np -from utils.general import check_version, colorstr, resample_segments, segment2box +from utils.general import LOGGER, check_version, colorstr, resample_segments, segment2box from utils.metrics import bbox_ioa @@ -32,11 +31,11 @@ def __init__(self): A.ImageCompression(quality_lower=75, p=0.0)], bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels'])) - logging.info(colorstr('albumentations: ') + ', '.join(f'{x}' for x in self.transform.transforms if x.p)) + 
LOGGER.info(colorstr('albumentations: ') + ', '.join(f'{x}' for x in self.transform.transforms if x.p)) except ImportError: # package not installed, skip pass except Exception as e: - logging.info(colorstr('albumentations: ') + f'{e}') + LOGGER.info(colorstr('albumentations: ') + f'{e}') def __call__(self, im, labels, p=1.0): if self.transform and random.random() < p: diff --git a/utils/datasets.py b/utils/datasets.py index 15fca1775849..94acaaa92cd7 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -6,7 +6,6 @@ import glob import hashlib import json -import logging import os import random import shutil @@ -335,7 +334,7 @@ def update(self, i, cap, stream): if success: self.imgs[i] = im else: - LOGGER.warn('WARNING: Video stream unresponsive, please check your IP camera connection.') + LOGGER.warning('WARNING: Video stream unresponsive, please check your IP camera connection.') self.imgs[i] *= 0 cap.open(stream) # re-open stream if signal was lost time.sleep(1 / self.fps[i]) # wait time @@ -427,7 +426,7 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted" tqdm(None, desc=prefix + d, total=n, initial=n) # display cache results if cache['msgs']: - logging.info('\n'.join(cache['msgs'])) # display warnings + LOGGER.info('\n'.join(cache['msgs'])) # display warnings assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {HELP_URL}' # Read cache @@ -525,9 +524,9 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): pbar.close() if msgs: - logging.info('\n'.join(msgs)) + LOGGER.info('\n'.join(msgs)) if nf == 0: - logging.info(f'{prefix}WARNING: No labels found in {path}. See {HELP_URL}') + LOGGER.warning(f'{prefix}WARNING: No labels found in {path}. 
See {HELP_URL}') x['hash'] = get_hash(self.label_files + self.img_files) x['results'] = nf, nm, ne, nc, len(self.img_files) x['msgs'] = msgs # warnings @@ -535,9 +534,9 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): try: np.save(path, x) # save cache for next time path.with_suffix('.cache.npy').rename(path) # remove .npy suffix - logging.info(f'{prefix}New cache created: {path}') + LOGGER.info(f'{prefix}New cache created: {path}') except Exception as e: - logging.info(f'{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}') # path not writeable + LOGGER.warning(f'{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}') # not writeable return x def __len__(self): diff --git a/utils/general.py b/utils/general.py index 0f45d72498fe..b0ea1527129a 100755 --- a/utils/general.py +++ b/utils/general.py @@ -45,7 +45,7 @@ def set_logging(name=None, verbose=True): # Sets level and returns logger rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings - logging.basicConfig(format="%(message)s", level=logging.INFO if (verbose and rank in (-1, 0)) else logging.WARN) + logging.basicConfig(format="%(message)s", level=logging.INFO if (verbose and rank in (-1, 0)) else logging.WARNING) return logging.getLogger(name) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index b36e98d0b656..73acec8e819c 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -4,7 +4,6 @@ """ import datetime -import logging import math import os import platform @@ -100,7 +99,6 @@ def profile(input, ops, n=10, device=None): # profile(input, [m1, m2], n=100) # profile over 100 iterations results = [] - logging.basicConfig(format="%(message)s", level=logging.INFO) device = device or select_device() print(f"{'Params':>12s}{'GFLOPs':>12s}{'GPU_mem (GB)':>14s}{'forward (ms)':>14s}{'backward (ms)':>14s}" f"{'input':>24s}{'output':>24s}") From 3883261143c56a7eca035f94f2bcb3e4023e72bc Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 9 Nov 2021 16:45:02 +0100 Subject: [PATCH 0707/1976] New `DetectMultiBackend()` class (#5549) * New `DetectMultiBackend()` class * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * pb to pt fix * Cleanup * explicit apply_classifier path * Cleanup2 * Cleanup3 * Cleanup4 * Cleanup5 * Cleanup6 * val.py MultiBackend inference * warmup fix * to device fix * pt fix * device fix * Val cleanup * COCO128 URL to assets * half fix * detect fix * detect fix 2 * remove half from DetectMultiBackend * training half handling * training half handling 2 * training half handling 3 * Cleanup * Fix CI error * Add torchscript _extra_files * Add TorchScript * Add CoreML * CoreML cleanup * New `DetectMultiBackend()` class * pb to pt fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Cleanup * explicit apply_classifier path * Cleanup2 * Cleanup3 * Cleanup4 * Cleanup5 * Cleanup6 * val.py MultiBackend inference * warmup fix * to device fix * pt fix * device fix * Val cleanup * COCO128 URL to assets * half fix * detect fix * detect fix 2 * remove half from DetectMultiBackend * training half handling * training half handling 2 * training half handling 3 * Cleanup * Fix CI error * Add torchscript _extra_files * Add TorchScript * Add CoreML * CoreML cleanup * revert default to pt * Add Usage examples * Cleanup val Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- data/coco128.yaml | 2 +- detect.py | 
133 +++++++++---------------------------------- export.py | 5 +- models/common.py | 128 ++++++++++++++++++++++++++++++++++++++++- utils/general.py | 3 +- utils/torch_utils.py | 20 ------- val.py | 74 ++++++++++++------------ 7 files changed, 200 insertions(+), 165 deletions(-) diff --git a/data/coco128.yaml b/data/coco128.yaml index b1dfb004afa1..84a91b18359d 100644 --- a/data/coco128.yaml +++ b/data/coco128.yaml @@ -27,4 +27,4 @@ names: ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 't # Download script/URL (optional) -download: https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip +download: https://ultralytics.com/assets/coco128.zip diff --git a/detect.py b/detect.py index 661a0b86bc99..108f8f138052 100644 --- a/detect.py +++ b/detect.py @@ -14,12 +14,10 @@ import argparse import os -import platform import sys from pathlib import Path import cv2 -import numpy as np import torch import torch.backends.cudnn as cudnn @@ -29,13 +27,12 @@ sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative -from models.experimental import attempt_load +from models.common import DetectMultiBackend from utils.datasets import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams -from utils.general import (LOGGER, apply_classifier, check_file, check_img_size, check_imshow, check_requirements, - check_suffix, colorstr, increment_path, non_max_suppression, print_args, scale_coords, - strip_optimizer, xyxy2xywh) +from utils.general import (LOGGER, check_file, check_img_size, check_imshow, check_requirements, colorstr, + increment_path, non_max_suppression, print_args, scale_coords, strip_optimizer, xyxy2xywh) from utils.plots import Annotator, colors, save_one_box -from utils.torch_utils import load_classifier, select_device, time_sync +from utils.torch_utils import select_device, time_sync @torch.no_grad() @@ -77,120 +74,45 @@ def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s) save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir - # Initialize + # Load model device = select_device(device) - half &= device.type != 'cpu' # half precision only supported on CUDA + model = DetectMultiBackend(weights, device=device, dnn=dnn) + stride, names, pt, jit, onnx = model.stride, model.names, model.pt, model.jit, model.onnx + imgsz = check_img_size(imgsz, s=stride) # check image size - # Load model - w = str(weights[0] if isinstance(weights, list) else weights) - classify, suffix, suffixes = False, Path(w).suffix.lower(), ['.pt', '.onnx', '.tflite', '.pb', ''] - check_suffix(w, suffixes) # check weights have acceptable suffix - pt, onnx, tflite, pb, saved_model = (suffix == x for x in suffixes) # backend booleans - stride, names = 64, [f'class{i}' for i in range(1000)] # assign defaults + # Half + half &= pt and device.type != 'cpu' # half precision only supported by PyTorch on CUDA if pt: - model = torch.jit.load(w) if 'torchscript' in w else attempt_load(weights, map_location=device) - stride = int(model.stride.max()) # model stride - names = model.module.names if hasattr(model, 'module') else model.names # get class names - if half: - model.half() # to FP16 - if classify: # second-stage classifier - modelc = load_classifier(name='resnet50', n=2) # initialize - modelc.load_state_dict(torch.load('resnet50.pt', map_location=device)['model']).to(device).eval() - elif onnx: - if dnn: - 
check_requirements(('opencv-python>=4.5.4',)) - net = cv2.dnn.readNetFromONNX(w) - else: - check_requirements(('onnx', 'onnxruntime-gpu' if torch.has_cuda else 'onnxruntime')) - import onnxruntime - session = onnxruntime.InferenceSession(w, None) - else: # TensorFlow models - import tensorflow as tf - if pb: # https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt - def wrap_frozen_graph(gd, inputs, outputs): - x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=""), []) # wrapped import - return x.prune(tf.nest.map_structure(x.graph.as_graph_element, inputs), - tf.nest.map_structure(x.graph.as_graph_element, outputs)) - - graph_def = tf.Graph().as_graph_def() - graph_def.ParseFromString(open(w, 'rb').read()) - frozen_func = wrap_frozen_graph(gd=graph_def, inputs="x:0", outputs="Identity:0") - elif saved_model: - model = tf.keras.models.load_model(w) - elif tflite: - if "edgetpu" in w: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python - import tflite_runtime.interpreter as tflri - delegate = {'Linux': 'libedgetpu.so.1', # install libedgetpu https://coral.ai/software/#edgetpu-runtime - 'Darwin': 'libedgetpu.1.dylib', - 'Windows': 'edgetpu.dll'}[platform.system()] - interpreter = tflri.Interpreter(model_path=w, experimental_delegates=[tflri.load_delegate(delegate)]) - else: - interpreter = tf.lite.Interpreter(model_path=w) # load TFLite model - interpreter.allocate_tensors() # allocate - input_details = interpreter.get_input_details() # inputs - output_details = interpreter.get_output_details() # outputs - int8 = input_details[0]['dtype'] == np.uint8 # is TFLite quantized uint8 model - imgsz = check_img_size(imgsz, s=stride) # check image size + model.model.half() if half else model.model.float() # Dataloader if webcam: view_img = check_imshow() cudnn.benchmark = True # set True to speed up constant image size inference - dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt) + dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt and not jit) bs = len(dataset) # batch_size else: - dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt) + dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt and not jit) bs = 1 # batch_size vid_path, vid_writer = [None] * bs, [None] * bs # Run inference if pt and device.type != 'cpu': - model(torch.zeros(1, 3, *imgsz).to(device).type_as(next(model.parameters()))) # run once + model(torch.zeros(1, 3, *imgsz).to(device).type_as(next(model.model.parameters()))) # warmup dt, seen = [0.0, 0.0, 0.0], 0 - for path, img, im0s, vid_cap, s in dataset: + for path, im, im0s, vid_cap, s in dataset: t1 = time_sync() - if onnx: - img = img.astype('float32') - else: - img = torch.from_numpy(img).to(device) - img = img.half() if half else img.float() # uint8 to fp16/32 - img /= 255 # 0 - 255 to 0.0 - 1.0 - if len(img.shape) == 3: - img = img[None] # expand for batch dim + im = torch.from_numpy(im).to(device) + im = im.half() if half else im.float() # uint8 to fp16/32 + im /= 255 # 0 - 255 to 0.0 - 1.0 + if len(im.shape) == 3: + im = im[None] # expand for batch dim t2 = time_sync() dt[0] += t2 - t1 # Inference - if pt: - visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False - pred = model(img, augment=augment, visualize=visualize)[0] - elif onnx: - if dnn: - net.setInput(img) - pred = torch.tensor(net.forward()) - else: - pred = torch.tensor(session.run([session.get_outputs()[0].name], {session.get_inputs()[0].name: 
img})) - else: # tensorflow model (tflite, pb, saved_model) - imn = img.permute(0, 2, 3, 1).cpu().numpy() # image in numpy - if pb: - pred = frozen_func(x=tf.constant(imn)).numpy() - elif saved_model: - pred = model(imn, training=False).numpy() - elif tflite: - if int8: - scale, zero_point = input_details[0]['quantization'] - imn = (imn / scale + zero_point).astype(np.uint8) # de-scale - interpreter.set_tensor(input_details[0]['index'], imn) - interpreter.invoke() - pred = interpreter.get_tensor(output_details[0]['index']) - if int8: - scale, zero_point = output_details[0]['quantization'] - pred = (pred.astype(np.float32) - zero_point) * scale # re-scale - pred[..., 0] *= imgsz[1] # x - pred[..., 1] *= imgsz[0] # y - pred[..., 2] *= imgsz[1] # w - pred[..., 3] *= imgsz[0] # h - pred = torch.tensor(pred) + visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False + pred = model(im, augment=augment, visualize=visualize) t3 = time_sync() dt[1] += t3 - t2 @@ -199,8 +121,7 @@ def wrap_frozen_graph(gd, inputs, outputs): dt[2] += time_sync() - t3 # Second-stage classifier (optional) - if classify: - pred = apply_classifier(pred, modelc, img, im0s) + # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s) # Process predictions for i, det in enumerate(pred): # per image @@ -212,15 +133,15 @@ def wrap_frozen_graph(gd, inputs, outputs): p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0) p = Path(p) # to Path - save_path = str(save_dir / p.name) # img.jpg - txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # img.txt - s += '%gx%g ' % img.shape[2:] # print string + save_path = str(save_dir / p.name) # im.jpg + txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt + s += '%gx%g ' % im.shape[2:] # print string gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh imc = im0.copy() if save_crop else im0 # for save_crop annotator = Annotator(im0, line_width=line_thickness, example=str(names)) if len(det): # Rescale boxes from img_size to im0 size - det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round() + det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round() # Print results for c in det[:, -1].unique(): diff --git a/export.py b/export.py index f5eb487045b0..4cf30e34fc7b 100644 --- a/export.py +++ b/export.py @@ -21,6 +21,7 @@ """ import argparse +import json import os import subprocess import sys @@ -54,7 +55,9 @@ def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:' f = file.with_suffix('.torchscript.pt') ts = torch.jit.trace(model, im, strict=False) - (optimize_for_mobile(ts) if optimize else ts).save(f) + d = {"shape": im.shape, "stride": int(max(model.stride)), "names": model.names} + extra_files = {'config.txt': json.dumps(d)} # torch._C.ExtraFilesMap() + (optimize_for_mobile(ts) if optimize else ts).save(f, _extra_files=extra_files) LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') except Exception as e: diff --git a/models/common.py b/models/common.py index f9e4fc69f006..3ea7ba5477a6 100644 --- a/models/common.py +++ b/models/common.py @@ -3,11 +3,14 @@ Common modules """ +import json import math +import platform import warnings from copy import copy from pathlib import Path +import cv2 import numpy as np import pandas as pd import requests @@ -17,7 +20,8 @@ from torch.cuda import amp from utils.datasets import exif_transpose, letterbox 
-from utils.general import LOGGER, colorstr, increment_path, make_divisible, non_max_suppression, scale_coords, xyxy2xywh +from utils.general import (LOGGER, check_requirements, check_suffix, colorstr, increment_path, make_divisible, + non_max_suppression, scale_coords, xywh2xyxy, xyxy2xywh) from utils.plots import Annotator, colors, save_one_box from utils.torch_utils import time_sync @@ -269,6 +273,128 @@ def forward(self, x): return torch.cat(x, self.d) +class DetectMultiBackend(nn.Module): + # YOLOv5 MultiBackend class for python inference on various backends + def __init__(self, weights='yolov5s.pt', device=None, dnn=True): + # Usage: + # PyTorch: weights = *.pt + # TorchScript: *.torchscript.pt + # CoreML: *.mlmodel + # TensorFlow: *_saved_model + # TensorFlow: *.pb + # TensorFlow Lite: *.tflite + # ONNX Runtime: *.onnx + # OpenCV DNN: *.onnx with dnn=True + super().__init__() + w = str(weights[0] if isinstance(weights, list) else weights) + suffix, suffixes = Path(w).suffix.lower(), ['.pt', '.onnx', '.tflite', '.pb', '', '.mlmodel'] + check_suffix(w, suffixes) # check weights have acceptable suffix + pt, onnx, tflite, pb, saved_model, coreml = (suffix == x for x in suffixes) # backend booleans + jit = pt and 'torchscript' in w.lower() + stride, names = 64, [f'class{i}' for i in range(1000)] # assign defaults + + if jit: # TorchScript + LOGGER.info(f'Loading {w} for TorchScript inference...') + extra_files = {'config.txt': ''} # model metadata + model = torch.jit.load(w, _extra_files=extra_files) + if extra_files['config.txt']: + d = json.loads(extra_files['config.txt']) # extra_files dict + stride, names = int(d['stride']), d['names'] + elif pt: # PyTorch + from models.experimental import attempt_load # scoped to avoid circular import + model = torch.jit.load(w) if 'torchscript' in w else attempt_load(weights, map_location=device) + stride = int(model.stride.max()) # model stride + names = model.module.names if hasattr(model, 'module') else model.names # get class names + elif coreml: # CoreML *.mlmodel + import coremltools as ct + model = ct.models.MLModel(w) + elif dnn: # ONNX OpenCV DNN + LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...') + check_requirements(('opencv-python>=4.5.4',)) + net = cv2.dnn.readNetFromONNX(w) + elif onnx: # ONNX Runtime + LOGGER.info(f'Loading {w} for ONNX Runtime inference...') + check_requirements(('onnx', 'onnxruntime-gpu' if torch.has_cuda else 'onnxruntime')) + import onnxruntime + session = onnxruntime.InferenceSession(w, None) + else: # TensorFlow model (TFLite, pb, saved_model) + import tensorflow as tf + if pb: # https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt + def wrap_frozen_graph(gd, inputs, outputs): + x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=""), []) # wrapped + return x.prune(tf.nest.map_structure(x.graph.as_graph_element, inputs), + tf.nest.map_structure(x.graph.as_graph_element, outputs)) + + LOGGER.info(f'Loading {w} for TensorFlow *.pb inference...') + graph_def = tf.Graph().as_graph_def() + graph_def.ParseFromString(open(w, 'rb').read()) + frozen_func = wrap_frozen_graph(gd=graph_def, inputs="x:0", outputs="Identity:0") + elif saved_model: + LOGGER.info(f'Loading {w} for TensorFlow saved_model inference...') + model = tf.keras.models.load_model(w) + elif tflite: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python + if 'edgetpu' in w.lower(): + LOGGER.info(f'Loading {w} for TensorFlow Edge TPU inference...') + import 
tflite_runtime.interpreter as tfli + delegate = {'Linux': 'libedgetpu.so.1', # install https://coral.ai/software/#edgetpu-runtime + 'Darwin': 'libedgetpu.1.dylib', + 'Windows': 'edgetpu.dll'}[platform.system()] + interpreter = tfli.Interpreter(model_path=w, experimental_delegates=[tfli.load_delegate(delegate)]) + else: + LOGGER.info(f'Loading {w} for TensorFlow Lite inference...') + interpreter = tf.lite.Interpreter(model_path=w) # load TFLite model + interpreter.allocate_tensors() # allocate + input_details = interpreter.get_input_details() # inputs + output_details = interpreter.get_output_details() # outputs + self.__dict__.update(locals()) # assign all variables to self + + def forward(self, im, augment=False, visualize=False, val=False): + # YOLOv5 MultiBackend inference + b, ch, h, w = im.shape # batch, channel, height, width + if self.pt: # PyTorch + y = self.model(im) if self.jit else self.model(im, augment=augment, visualize=visualize) + return y if val else y[0] + elif self.coreml: # CoreML *.mlmodel + im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3) + im = Image.fromarray((im[0] * 255).astype('uint8')) + # im = im.resize((192, 320), Image.ANTIALIAS) + y = self.model.predict({'image': im}) # coordinates are xywh normalized + box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]]) # xyxy pixels + conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float) + y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1) + elif self.onnx: # ONNX + im = im.cpu().numpy() # torch to numpy + if self.dnn: # ONNX OpenCV DNN + self.net.setInput(im) + y = self.net.forward() + else: # ONNX Runtime + y = self.session.run([self.session.get_outputs()[0].name], {self.session.get_inputs()[0].name: im})[0] + else: # TensorFlow model (TFLite, pb, saved_model) + im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3) + if self.pb: + y = self.frozen_func(x=self.tf.constant(im)).numpy() + elif self.saved_model: + y = self.model(im, training=False).numpy() + elif self.tflite: + input, output = self.input_details[0], self.output_details[0] + int8 = input['dtype'] == np.uint8 # is TFLite quantized uint8 model + if int8: + scale, zero_point = input['quantization'] + im = (im / scale + zero_point).astype(np.uint8) # de-scale + self.interpreter.set_tensor(input['index'], im) + self.interpreter.invoke() + y = self.interpreter.get_tensor(output['index']) + if int8: + scale, zero_point = output['quantization'] + y = (y.astype(np.float32) - zero_point) * scale # re-scale + y[..., 0] *= w # x + y[..., 1] *= h # y + y[..., 2] *= w # w + y[..., 3] *= h # h + y = torch.tensor(y) + return (y, []) if val else y + + class AutoShape(nn.Module): # YOLOv5 input-robust model wrapper for passing cv2/np/PIL/torch inputs. 
Includes preprocessing, inference and NMS conf = 0.25 # NMS confidence threshold diff --git a/utils/general.py b/utils/general.py index b0ea1527129a..a6fe603850c8 100755 --- a/utils/general.py +++ b/utils/general.py @@ -785,7 +785,8 @@ def print_mutation(results, hyp, save_dir, bucket): def apply_classifier(x, model, img, im0): - # Apply a second stage classifier to yolo outputs + # Apply a second stage classifier to YOLO outputs + # Example model = torchvision.models.__dict__['efficientnet_b0'](pretrained=True).to(device).eval() im0 = [im0] if isinstance(im0, np.ndarray) else im0 for i, d in enumerate(x): # per image if d is not None and len(d): diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 73acec8e819c..b65b69fe1559 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -17,7 +17,6 @@ import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F -import torchvision from utils.general import LOGGER @@ -235,25 +234,6 @@ def model_info(model, verbose=False, img_size=640): LOGGER.info(f"Model Summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}") -def load_classifier(name='resnet101', n=2): - # Loads a pretrained model reshaped to n-class output - model = torchvision.models.__dict__[name](pretrained=True) - - # ResNet model properties - # input_size = [3, 224, 224] - # input_space = 'RGB' - # input_range = [0, 1] - # mean = [0.485, 0.456, 0.406] - # std = [0.229, 0.224, 0.225] - - # Reshape output to n classes - filters = model.fc.weight.shape[1] - model.fc.bias = nn.Parameter(torch.zeros(n), requires_grad=True) - model.fc.weight = nn.Parameter(torch.zeros(n, filters), requires_grad=True) - model.fc.out_features = n - return model - - def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416) # scales img(bs,3,y,x) by ratio constrained to gs-multiple if ratio == 1.0: diff --git a/val.py b/val.py index d2797f1189ec..2bcbc582a500 100644 --- a/val.py +++ b/val.py @@ -23,10 +23,10 @@ sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative -from models.experimental import attempt_load +from models.common import DetectMultiBackend from utils.callbacks import Callbacks from utils.datasets import create_dataloader -from utils.general import (LOGGER, box_iou, check_dataset, check_img_size, check_requirements, check_suffix, check_yaml, +from utils.general import (LOGGER, box_iou, check_dataset, check_img_size, check_requirements, check_yaml, coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, print_args, scale_coords, xywh2xyxy, xyxy2xywh) from utils.metrics import ConfusionMatrix, ap_per_class @@ -100,6 +100,7 @@ def run(data, name='exp', # save to project/name exist_ok=False, # existing project/name ok, do not increment half=True, # use FP16 half-precision inference + dnn=False, # use OpenCV DNN for ONNX inference model=None, dataloader=None, save_dir=Path(''), @@ -110,8 +111,10 @@ def run(data, # Initialize/load model and set device training = model is not None if training: # called by train.py - device = next(model.parameters()).device # get model device + device, pt = next(model.parameters()).device, True # get model device, PyTorch model + half &= device.type != 'cpu' # half precision only supported on CUDA + model.half() if half else model.float() else: # called directly device = select_device(device, batch_size=batch_size) @@ -120,22 +123,21 @@ def run(data, (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, 
exist_ok=True) # make dir # Load model - check_suffix(weights, '.pt') - model = attempt_load(weights, map_location=device) # load FP32 model - gs = max(int(model.stride.max()), 32) # grid size (max stride) - imgsz = check_img_size(imgsz, s=gs) # check image size - - # Multi-GPU disabled, incompatible with .half() https://github.com/ultralytics/yolov5/issues/99 - # if device.type != 'cpu' and torch.cuda.device_count() > 1: - # model = nn.DataParallel(model) + model = DetectMultiBackend(weights, device=device, dnn=dnn) + stride, pt = model.stride, model.pt + imgsz = check_img_size(imgsz, s=stride) # check image size + half &= pt and device.type != 'cpu' # half precision only supported by PyTorch on CUDA + if pt: + model.model.half() if half else model.model.float() + else: + half = False + batch_size = 1 # export.py models default to batch-size 1 + device = torch.device('cpu') + LOGGER.info(f'Forcing --batch-size 1 square inference shape(1,3,{imgsz},{imgsz}) for non-PyTorch backends') # Data data = check_dataset(data) # check - # Half - half &= device.type != 'cpu' # half precision only supported on CUDA - model.half() if half else model.float() - # Configure model.eval() is_coco = isinstance(data.get('val'), str) and data['val'].endswith('coco/val2017.txt') # COCO dataset @@ -145,11 +147,11 @@ def run(data, # Dataloader if not training: - if device.type != 'cpu': - model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once + if pt and device.type != 'cpu': + model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.model.parameters()))) # warmup pad = 0.0 if task == 'speed' else 0.5 task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images - dataloader = create_dataloader(data[task], imgsz, batch_size, gs, single_cls, pad=pad, rect=True, + dataloader = create_dataloader(data[task], imgsz, batch_size, stride, single_cls, pad=pad, rect=pt, prefix=colorstr(f'{task}: '))[0] seen = 0 @@ -160,32 +162,33 @@ def run(data, dt, p, r, f1, mp, mr, map50, map = [0.0, 0.0, 0.0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 loss = torch.zeros(3, device=device) jdict, stats, ap, ap_class = [], [], [], [] - for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)): + for batch_i, (im, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)): t1 = time_sync() - img = img.to(device, non_blocking=True) - img = img.half() if half else img.float() # uint8 to fp16/32 - img /= 255 # 0 - 255 to 0.0 - 1.0 - targets = targets.to(device) - nb, _, height, width = img.shape # batch size, channels, height, width + if pt: + im = im.to(device, non_blocking=True) + targets = targets.to(device) + im = im.half() if half else im.float() # uint8 to fp16/32 + im /= 255 # 0 - 255 to 0.0 - 1.0 + nb, _, height, width = im.shape # batch size, channels, height, width t2 = time_sync() dt[0] += t2 - t1 - # Run model - out, train_out = model(img, augment=augment) # inference and training outputs + # Inference + out, train_out = model(im) if training else model(im, augment=augment, val=True) # inference, loss outputs dt[1] += time_sync() - t2 - # Compute loss + # Loss if compute_loss: loss += compute_loss([x.float() for x in train_out], targets)[1] # box, obj, cls - # Run NMS + # NMS targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling t3 = time_sync() out = non_max_suppression(out, conf_thres, iou_thres, 
labels=lb, multi_label=True, agnostic=single_cls) dt[2] += time_sync() - t3 - # Statistics per image + # Metrics for si, pred in enumerate(out): labels = targets[targets[:, 0] == si, 1:] nl = len(labels) @@ -202,12 +205,12 @@ def run(data, if single_cls: pred[:, 5] = 0 predn = pred.clone() - scale_coords(img[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred + scale_coords(im[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred # Evaluate if nl: tbox = xywh2xyxy(labels[:, 1:5]) # target boxes - scale_coords(img[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels + scale_coords(im[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels correct = process_batch(predn, labelsn, iouv) if plots: @@ -221,16 +224,16 @@ def run(data, save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / (path.stem + '.txt')) if save_json: save_one_json(predn, jdict, path, class_map) # append to COCO-JSON dictionary - callbacks.run('on_val_image_end', pred, predn, path, names, img[si]) + callbacks.run('on_val_image_end', pred, predn, path, names, im[si]) # Plot images if plots and batch_i < 3: f = save_dir / f'val_batch{batch_i}_labels.jpg' # labels - Thread(target=plot_images, args=(img, targets, paths, f, names), daemon=True).start() + Thread(target=plot_images, args=(im, targets, paths, f, names), daemon=True).start() f = save_dir / f'val_batch{batch_i}_pred.jpg' # predictions - Thread(target=plot_images, args=(img, output_to_target(out), paths, f, names), daemon=True).start() + Thread(target=plot_images, args=(im, output_to_target(out), paths, f, names), daemon=True).start() - # Compute statistics + # Compute metrics stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy if len(stats) and stats[0].any(): p, r, ap, f1, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names) @@ -318,6 +321,7 @@ def parse_opt(): parser.add_argument('--name', default='exp', help='save to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') + parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') opt = parser.parse_args() opt.data = check_yaml(opt.data) # check YAML opt.save_json |= opt.data.endswith('coco.yaml') From 7207fe95e5dc368e4402134148c5d0c35361ad88 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 9 Nov 2021 17:55:57 +0100 Subject: [PATCH 0708/1976] FROM nvcr.io/nvidia/pytorch:21.10-py3 (#5592) --- Dockerfile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Dockerfile b/Dockerfile index 0ee89b432b8f..fe1acb0a6540 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,7 +1,7 @@ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license # Start FROM Nvidia PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch -FROM nvcr.io/nvidia/pytorch:21.05-py3 +FROM nvcr.io/nvidia/pytorch:21.10-py3 # Install linux packages RUN apt update && apt install -y zip htop screen libgl1-mesa-glx @@ -11,8 +11,8 @@ COPY requirements.txt . 
RUN python -m pip install --upgrade pip RUN pip uninstall -y nvidia-tensorboard nvidia-tensorboard-plugin-dlprof RUN pip install --no-cache -r requirements.txt coremltools onnx gsutil notebook wandb>=0.12.2 -RUN pip install --no-cache -U torch torchvision numpy -# RUN pip install --no-cache torch==1.9.1+cu111 torchvision==0.10.1+cu111 -f https://download.pytorch.org/whl/torch_stable.html +RUN pip install --no-cache -U torch torchvision numpy Pillow +# RUN pip install --no-cache torch==1.10.0+cu113 torchvision==0.11.1+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html # Create working directory RUN mkdir -p /usr/src/app From 7ebb5e5da673350d4c168cf60d01986a5e0f00cc Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 9 Nov 2021 23:03:19 +0100 Subject: [PATCH 0709/1976] Add `notebook_init()` to utils/__init__.py (#5488) * Update __init__.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * notebook_init * notebook_init * notebook_init * notebook_init * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * notebook_init * Created using Colaboratory * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- tutorial.ipynb | 30 ++++++++++++++---------------- utils/__init__.py | 18 ++++++++++++++++++ utils/torch_utils.py | 4 +++- 3 files changed, 35 insertions(+), 17 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index b013fe694ba4..7763a26066e2 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -368,7 +368,7 @@ "colab_type": "text" }, "source": [ - "\"Open" + "\"Open" ] }, { @@ -402,26 +402,24 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "e2e839d5-d6fc-409c-e44c-0b0b6aa9319d" + "outputId": "3809e5a9-dd41-4577-fe62-5531abf7cca2" }, "source": [ - "!git clone https://github.com/ultralytics/yolov5 # clone repo\n", + "!git clone https://github.com/ultralytics/yolov5 # clone\n", "%cd yolov5\n", - "%pip install -qr requirements.txt # install dependencies\n", + "%pip install -qr requirements.txt # install\n", "\n", - "import torch\n", - "from IPython.display import Image, clear_output # to display images\n", - "\n", - "clear_output()\n", - "print(f\"Setup complete. Using torch {torch.__version__} ({torch.cuda.get_device_properties(0).name if torch.cuda.is_available() else 'CPU'})\")" + "from yolov5 import utils\n", + "display = utils.notebook_init() # checks" ], - "execution_count": 11, + "execution_count": 2, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ - "Setup complete. 
Using torch 1.10.0+cu102 (Tesla V100-SXM2-16GB)\n" + "YOLOv5 🚀 v6.0-48-g84a8099 torch 1.10.0+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", + "Setup complete ✅\n" ] } ] @@ -458,9 +456,9 @@ }, "source": [ "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images\n", - "Image(filename='runs/detect/exp/zidane.jpg', width=600)" + "display.Image(filename='runs/detect/exp/zidane.jpg', width=600)" ], - "execution_count": 17, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -537,7 +535,7 @@ "torch.hub.download_url_to_file('https://ultralytics.com/assets/coco2017val.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../datasets && rm tmp.zip" ], - "execution_count": 18, + "execution_count": null, "outputs": [ { "output_type": "display_data", @@ -568,7 +566,7 @@ "# Run YOLOv5x on COCO val\n", "!python val.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65 --half" ], - "execution_count": 19, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -726,7 +724,7 @@ "# Train YOLOv5s on COCO128 for 3 epochs\n", "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache" ], - "execution_count": 24, + "execution_count": null, "outputs": [ { "output_type": "stream", diff --git a/utils/__init__.py b/utils/__init__.py index e69de29bb2d1..2b0c896364a2 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -0,0 +1,18 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +utils/initialization +""" + + +def notebook_init(): + # For YOLOv5 notebooks + print('Checking setup...') + from IPython import display # to display images and clear console output + + from utils.general import emojis + from utils.torch_utils import select_device # YOLOv5 imports + + display.clear_output() + select_device(newline=False) + print(emojis('Setup complete ✅')) + return display diff --git a/utils/torch_utils.py b/utils/torch_utils.py index b65b69fe1559..16289104eb48 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -53,7 +53,7 @@ def git_describe(path=Path(__file__).parent): # path must be a directory return '' # not a git repository -def select_device(device='', batch_size=None): +def select_device(device='', batch_size=None, newline=True): # device = 'cpu' or '0' or '0,1,2,3' s = f'YOLOv5 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string device = str(device).strip().lower().replace('cuda:', '') # to string, 'cuda:0' to '0' @@ -77,6 +77,8 @@ def select_device(device='', batch_size=None): else: s += 'CPU\n' + if not newline: + s = s.rstrip() LOGGER.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe return torch.device('cuda:0' if cuda else 'cpu') From 27bf4282d3d5879f0f4f7492400675ba93a3db1b Mon Sep 17 00:00:00 2001 From: Ayman Saleh <30412615+ayman-saleh@users.noreply.github.com> Date: Wed, 10 Nov 2021 06:51:30 -0500 Subject: [PATCH 0710/1976] Fix `check_requirements()` resource warning allocation open file (#5602) * Fix to resource warning allocation; utilize file.open within a context manager * rename fh to f in keeping with naming convention Co-authored-by: Ayman Saleh Co-authored-by: Glenn Jocher --- utils/general.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index a6fe603850c8..8f59d487edfb 100755 --- a/utils/general.py +++ b/utils/general.py @@ -264,7 +264,8 @@ def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), insta if isinstance(requirements, (str, 
Path)): # requirements.txt file file = Path(requirements) assert file.exists(), f"{prefix} {file.resolve()} not found, check failed." - requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(file.open()) if x.name not in exclude] + with file.open() as f: + requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(f) if x.name not in exclude] else: # list or tuple of packages requirements = [x for x in requirements if x not in exclude] From 61c50199a234e950ee16fff199bba0915ab9951d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 10 Nov 2021 15:47:38 +0100 Subject: [PATCH 0711/1976] Update train, val `tqdm` to fixed width (#5367) * Update tqdm for fixed width * Update val.py * Update val.py * Try ncols= in train.py * NCOLS * NCOLS * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * bar_format * position 0 leave true * exp0 * auto * auto * Cleanup * Cleanup * Cleanup Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- train.py | 11 +++++------ utils/general.py | 5 +++++ val.py | 5 +++-- 3 files changed, 13 insertions(+), 8 deletions(-) diff --git a/train.py b/train.py index fedc55d8be5c..4193365d5a09 100644 --- a/train.py +++ b/train.py @@ -5,7 +5,6 @@ Usage: $ python path/to/train.py --data coco128.yaml --weights yolov5s.pt --img 640 """ - import argparse import math import os @@ -40,10 +39,10 @@ from utils.callbacks import Callbacks from utils.datasets import create_dataloader from utils.downloads import attempt_download -from utils.general import (LOGGER, check_dataset, check_file, check_git_status, check_img_size, check_requirements, - check_suffix, check_yaml, colorstr, get_latest_run, increment_path, init_seeds, - intersect_dicts, labels_to_class_weights, labels_to_image_weights, methods, one_cycle, - print_args, print_mutation, strip_optimizer) +from utils.general import (LOGGER, NCOLS, check_dataset, check_file, check_git_status, check_img_size, + check_requirements, check_suffix, check_yaml, colorstr, get_latest_run, increment_path, + init_seeds, intersect_dicts, labels_to_class_weights, labels_to_image_weights, methods, + one_cycle, print_args, print_mutation, strip_optimizer) from utils.loggers import Loggers from utils.loggers.wandb.wandb_utils import check_wandb_resume from utils.loss import ComputeLoss @@ -289,7 +288,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary pbar = enumerate(train_loader) LOGGER.info(('\n' + '%10s' * 7) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'labels', 'img_size')) if RANK in [-1, 0]: - pbar = tqdm(pbar, total=nb) # progress bar + pbar = tqdm(pbar, total=nb, ncols=NCOLS, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar optimizer.zero_grad() for i, (imgs, targets, paths, _) in pbar: # batch ------------------------------------------------------------- ni = i + nb * epoch # number integrated batches (since train start) diff --git a/utils/general.py b/utils/general.py index 8f59d487edfb..fa56ed49aba8 100755 --- a/utils/general.py +++ b/utils/general.py @@ -11,6 +11,7 @@ import platform import random import re +import shutil import signal import time import urllib @@ -834,3 +835,7 @@ def increment_path(path, exist_ok=False, sep='', mkdir=False): if mkdir: path.mkdir(parents=True, exist_ok=True) # make directory return path + + +# Variables +NCOLS = 0 if is_docker() else shutil.get_terminal_size().columns # terminal window size diff --git a/val.py b/val.py index 2bcbc582a500..62a30ac09d39 100644 --- 
a/val.py +++ b/val.py @@ -26,7 +26,7 @@ from models.common import DetectMultiBackend from utils.callbacks import Callbacks from utils.datasets import create_dataloader -from utils.general import (LOGGER, box_iou, check_dataset, check_img_size, check_requirements, check_yaml, +from utils.general import (LOGGER, NCOLS, box_iou, check_dataset, check_img_size, check_requirements, check_yaml, coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, print_args, scale_coords, xywh2xyxy, xyxy2xywh) from utils.metrics import ConfusionMatrix, ap_per_class @@ -162,7 +162,8 @@ def run(data, dt, p, r, f1, mp, mr, map50, map = [0.0, 0.0, 0.0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 loss = torch.zeros(3, device=device) jdict, stats, ap, ap_class = [], [], [], [] - for batch_i, (im, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)): + pbar = tqdm(dataloader, desc=s, ncols=NCOLS, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar + for batch_i, (im, targets, paths, shapes) in enumerate(pbar): t1 = time_sync() if pt: im = im.to(device, non_blocking=True) From 30bc089cbbe0c38bb09883f01b85ca31afca653b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 10 Nov 2021 16:48:38 +0100 Subject: [PATCH 0712/1976] Update val.py `speed` and `study` tasks (#5608) Accepts all arguments now by default resolving https://github.com/ultralytics/yolov5/issues/5600 --- val.py | 41 +++++++++++++++++++++-------------------- 1 file changed, 21 insertions(+), 20 deletions(-) diff --git a/val.py b/val.py index 62a30ac09d39..dfabb65b979c 100644 --- a/val.py +++ b/val.py @@ -339,26 +339,27 @@ def main(opt): LOGGER.info(f'WARNING: confidence threshold {opt.conf_thres} >> 0.001 will produce invalid mAP values.') run(**vars(opt)) - elif opt.task == 'speed': # speed benchmarks - # python val.py --task speed --data coco.yaml --batch 1 --weights yolov5n.pt yolov5s.pt... - for w in opt.weights if isinstance(opt.weights, list) else [opt.weights]: - run(opt.data, weights=w, batch_size=opt.batch_size, imgsz=opt.imgsz, conf_thres=.25, iou_thres=.45, - device=opt.device, save_json=False, plots=False) - - elif opt.task == 'study': # run over a range of settings and save/plot - # python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n.pt yolov5s.pt... - x = list(range(256, 1536 + 128, 128)) # x axis (image sizes) - for w in opt.weights if isinstance(opt.weights, list) else [opt.weights]: - f = f'study_{Path(opt.data).stem}_{Path(w).stem}.txt' # filename to save to - y = [] # y axis - for i in x: # img-size - LOGGER.info(f'\nRunning {f} point {i}...') - r, _, t = run(opt.data, weights=w, batch_size=opt.batch_size, imgsz=i, conf_thres=opt.conf_thres, - iou_thres=opt.iou_thres, device=opt.device, save_json=opt.save_json, plots=False) - y.append(r + t) # results and times - np.savetxt(f, y, fmt='%10.4g') # save - os.system('zip -r study.zip study_*.txt') - plot_val_study(x=x) # plot + else: + weights = opt.weights if isinstance(opt.weights, list) else [opt.weights] + opt.half = True # FP16 for fastest results + if opt.task == 'speed': # speed benchmarks + # python val.py --task speed --data coco.yaml --batch 1 --weights yolov5n.pt yolov5s.pt... + opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False + for opt.weights in weights: + run(**vars(opt), plots=False) + + elif opt.task == 'study': # speed vs mAP benchmarks + # python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n.pt yolov5s.pt... 
+ for opt.weights in weights: + f = f'study_{Path(opt.data).stem}_{Path(opt.weights).stem}.txt' # filename to save to + x, y = list(range(256, 1536 + 128, 128)), [] # x axis (image sizes), y axis + for opt.imgsz in x: # img-size + LOGGER.info(f'\nRunning {f} --imgsz {opt.imgsz}...') + r, _, t = run(**vars(opt), plots=False) + y.append(r + t) # results and times + np.savetxt(f, y, fmt='%10.4g') # save + os.system('zip -r study.zip study_*.txt') + plot_val_study(x=x) # plot if __name__ == "__main__": From 69032519bc575ef6c2033ab0f7d9bc1f9651b251 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 11 Nov 2021 00:15:17 +0100 Subject: [PATCH 0713/1976] `np.unique()` sort fix for segments (#5609) * `np.unique()` sort fix for segments * Update datasets.py --- utils/datasets.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index 94acaaa92cd7..1ecc7440119f 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -913,10 +913,12 @@ def verify_image_label(args): assert l.shape[1] == 5, f'labels require 5 columns, {l.shape[1]} columns detected' assert (l >= 0).all(), f'negative label values {l[l < 0]}' assert (l[:, 1:] <= 1).all(), f'non-normalized or out of bounds coordinates {l[:, 1:][l[:, 1:] > 1]}' - l = np.unique(l, axis=0) # remove duplicate rows - if len(l) < nl: - segments = np.unique(segments, axis=0) - msg = f'{prefix}WARNING: {im_file}: {nl - len(l)} duplicate labels removed' + _, i = np.unique(l, axis=0, return_index=True) + if len(i) < nl: # duplicate row check + l = l[i] # remove duplicates + if segments: + segments = segments[i] + msg = f'{prefix}WARNING: {im_file}: {nl - len(i)} duplicate labels removed' else: ne = 1 # label empty l = np.zeros((0, 5), dtype=np.float32) From def7a0fd19c1629903c3b073b4df265407719a07 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 11 Nov 2021 12:56:38 +0100 Subject: [PATCH 0714/1976] Improve plots.py robustness (#5616) * Improve plots.py robustness Addresses issues #5374, #5395, #5611 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/plots.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/utils/plots.py b/utils/plots.py index b5e25d668d22..a5b20803c7be 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -17,7 +17,8 @@ import torch from PIL import Image, ImageDraw, ImageFont -from utils.general import clip_coords, increment_path, is_ascii, is_chinese, user_config_dir, xywh2xyxy, xyxy2xywh +from utils.general import (Timeout, check_requirements, clip_coords, increment_path, is_ascii, is_chinese, try_except, + user_config_dir, xywh2xyxy, xyxy2xywh) from utils.metrics import fitness # Settings @@ -58,7 +59,10 @@ def check_font(font='Arial.ttf', size=10): url = "https://ultralytics.com/assets/" + font.name print(f'Downloading {url} to {font}...') torch.hub.download_url_to_file(url, str(font), progress=False) - return ImageFont.truetype(str(font), size) + try: + return ImageFont.truetype(str(font), size) + except TypeError: + check_requirements('Pillow>=8.4.0') # known issue https://github.com/ultralytics/yolov5/issues/5374 class Annotator: @@ -320,6 +324,8 @@ def plot_val_study(file='', dir='', x=None): # from utils.plots import *; plot_ plt.savefig(f, dpi=300) +@try_except # known issue https://github.com/ultralytics/yolov5/issues/5395 +@Timeout(30) # known issue 
https://github.com/ultralytics/yolov5/issues/5611 def plot_labels(labels, names=(), save_dir=Path('')): # plot dataset labels print('Plotting labels... ') From d5b21b1ecb66b35af937ac12364aa80733222bd1 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 12 Nov 2021 12:05:25 +0100 Subject: [PATCH 0715/1976] HUB dataset previews to JPEG (#5627) @kalenmike per our convo yesterday. --- utils/datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/datasets.py b/utils/datasets.py index 1ecc7440119f..2a6653bfc02c 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -965,7 +965,7 @@ def hub_ops(f, max_dim=1920): r = max_dim / max(im.height, im.width) # ratio if r < 1.0: # image too large im = im.resize((int(im.width * r), int(im.height * r))) - im.save(f_new, quality=75) # save + im.save(f_new, 'JPEG', quality=75, optimize=True) # save except Exception as e: # use OpenCV print(f'WARNING: HUB ops PIL failure {f}: {e}') im = cv2.imread(f) From 7473f0f95dbc9ef9dd1706274906c99eac2ee2f9 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 12 Nov 2021 14:48:55 +0100 Subject: [PATCH 0716/1976] DDP `WORLD_SIZE`-safe dataloader workers (#5631) * WORLD_SIZE-safe workers * Update with DDP comment --- train.py | 4 ++-- utils/datasets.py | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/train.py b/train.py index 4193365d5a09..96b3c2fdc516 100644 --- a/train.py +++ b/train.py @@ -266,7 +266,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary stopper = EarlyStopping(patience=opt.patience) compute_loss = ComputeLoss(model) # init loss class LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n' - f'Using {train_loader.num_workers} dataloader workers\n' + f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n' f"Logging results to {colorstr('bold', save_dir)}\n" f'Starting training for {epochs} epochs...') for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------ @@ -460,7 +460,7 @@ def parse_opt(known=False): parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class') parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer') parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode') - parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers') + parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') parser.add_argument('--project', default=ROOT / 'runs/train', help='save to project/name') parser.add_argument('--name', default='exp', help='save to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') diff --git a/utils/datasets.py b/utils/datasets.py index 2a6653bfc02c..f153db0d7104 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -34,6 +34,7 @@ HELP_URL = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' IMG_FORMATS = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo'] # acceptable image suffixes VID_FORMATS = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes +WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) # DDP NUM_THREADS = min(8, os.cpu_count()) # number of multiprocessing threads # Get orientation exif tag @@ -107,7 +108,7 @@ def create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=Non prefix=prefix)
batch_size = min(batch_size, len(dataset)) - nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, workers]) # number of workers + nw = min([os.cpu_count() // WORLD_SIZE, batch_size if batch_size > 1 else 0, workers]) # number of workers sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None loader = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader # Use torch.utils.data.DataLoader() if dataset.properties will update during training else InfiniteDataLoader() From 09d170381c67032f2daaeeb9defc5a67e59265aa Mon Sep 17 00:00:00 2001 From: Werner Duvaud <40442230+werner-duvaud@users.noreply.github.com> Date: Sat, 13 Nov 2021 12:07:32 +0000 Subject: [PATCH 0717/1976] Default DataLoader `shuffle=True` for training (#5623) * Fix shuffle DataLoader argument * Add shuffle argument * Disable shuffle when rect * Cleanup, add rect warning * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Cleanup2 * Cleanup3 Co-authored-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- train.py | 2 +- utils/datasets.py | 41 +++++++++++++++++++++-------------------- 2 files changed, 22 insertions(+), 21 deletions(-) diff --git a/train.py b/train.py index 96b3c2fdc516..91bcd1e1e2e8 100644 --- a/train.py +++ b/train.py @@ -212,7 +212,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary train_loader, dataset = create_dataloader(train_path, imgsz, batch_size // WORLD_SIZE, gs, single_cls, hyp=hyp, augment=True, cache=opt.cache, rect=opt.rect, rank=LOCAL_RANK, workers=workers, image_weights=opt.image_weights, quad=opt.quad, - prefix=colorstr('train: ')) + prefix=colorstr('train: '), shuffle=True) mlc = int(np.concatenate(dataset.labels, 0)[:, 0].max()) # max label class nb = len(train_loader) # number of batches assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. 
Possible class labels are 0-{nc - 1}' diff --git a/utils/datasets.py b/utils/datasets.py index f153db0d7104..3504998b125d 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -22,7 +22,7 @@ import torch.nn.functional as F import yaml from PIL import ExifTags, Image, ImageOps -from torch.utils.data import Dataset +from torch.utils.data import DataLoader, Dataset, dataloader, distributed from tqdm import tqdm from utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective @@ -93,13 +93,15 @@ def exif_transpose(image): def create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=None, augment=False, cache=False, pad=0.0, - rect=False, rank=-1, workers=8, image_weights=False, quad=False, prefix=''): - # Make sure only the first process in DDP process the dataset first, and the following others can use the cache - with torch_distributed_zero_first(rank): + rect=False, rank=-1, workers=8, image_weights=False, quad=False, prefix='', shuffle=False): + if rect and shuffle: + LOGGER.warning('WARNING: --rect is incompatible with DataLoader shuffle, setting shuffle=False') + shuffle = False + with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP dataset = LoadImagesAndLabels(path, imgsz, batch_size, - augment=augment, # augment images - hyp=hyp, # augmentation hyperparameters - rect=rect, # rectangular training + augment=augment, # augmentation + hyp=hyp, # hyperparameters + rect=rect, # rectangular batches cache_images=cache, single_cls=single_cls, stride=int(stride), @@ -109,19 +111,18 @@ def create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=Non batch_size = min(batch_size, len(dataset)) nw = min([os.cpu_count() // WORLD_SIZE, batch_size if batch_size > 1 else 0, workers]) # number of workers - sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None - loader = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader - # Use torch.utils.data.DataLoader() if dataset.properties will update during training else InfiniteDataLoader() - dataloader = loader(dataset, - batch_size=batch_size, - num_workers=nw, - sampler=sampler, - pin_memory=True, - collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn) - return dataloader, dataset - - -class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader): + sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) + loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates + return loader(dataset, + batch_size=batch_size, + shuffle=shuffle and sampler is None, + num_workers=nw, + sampler=sampler, + pin_memory=True, + collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn), dataset + + +class InfiniteDataLoader(dataloader.DataLoader): """ Dataloader that reuses workers Uses same syntax as vanilla DataLoader From 80cfaf40ef1923183820a2d88d33b7c3a6217c54 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 13 Nov 2021 15:40:18 +0100 Subject: [PATCH 0718/1976] AutoAnchor and AutoBatch `LOGGER` (#5635) * AutoBatch, AutoAnchor `LOGGER` * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update autoanchor.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/autoanchor.py | 50 +++++++++++++++++++++++---------------------- utils/autobatch.py | 14 ++++++------- 
utils/plots.py | 6 +++--- 3 files changed, 36 insertions(+), 34 deletions(-) diff --git a/utils/autoanchor.py b/utils/autoanchor.py index af0aa7de65ac..eef8f6499194 100644 --- a/utils/autoanchor.py +++ b/utils/autoanchor.py @@ -10,7 +10,9 @@ import yaml from tqdm import tqdm -from utils.general import colorstr +from utils.general import LOGGER, colorstr, emojis + +PREFIX = colorstr('AutoAnchor: ') def check_anchor_order(m): @@ -19,14 +21,12 @@ def check_anchor_order(m): da = a[-1] - a[0] # delta a ds = m.stride[-1] - m.stride[0] # delta s if da.sign() != ds.sign(): # same order - print('Reversing anchor order') + LOGGER.info(f'{PREFIX}Reversing anchor order') m.anchors[:] = m.anchors.flip(0) def check_anchors(dataset, model, thr=4.0, imgsz=640): # Check anchor fit to data, recompute if necessary - prefix = colorstr('autoanchor: ') - print(f'\n{prefix}Analyzing anchors... ', end='') m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect() shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True) scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1)) # augment scale @@ -42,23 +42,24 @@ def metric(k): # compute metric anchors = m.anchors.clone() * m.stride.to(m.anchors.device).view(-1, 1, 1) # current anchors bpr, aat = metric(anchors.cpu().view(-1, 2)) - print(f'anchors/target = {aat:.2f}, Best Possible Recall (BPR) = {bpr:.4f}', end='') - if bpr < 0.98: # threshold to recompute - print('. Attempting to improve anchors, please wait...') + s = f'\n{PREFIX}{aat:.2f} anchors/target, {bpr:.3f} Best Possible Recall (BPR). ' + if bpr > 0.98: # threshold to recompute + LOGGER.info(emojis(f'{s}Current anchors are a good fit to dataset ✅')) + else: + LOGGER.info(emojis(f'{s}Anchors are a poor fit to dataset ⚠️, attempting to improve...')) na = m.anchors.numel() // 2 # number of anchors try: anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False) except Exception as e: - print(f'{prefix}ERROR: {e}') + LOGGER.info(f'{PREFIX}ERROR: {e}') new_bpr = metric(anchors)[0] if new_bpr > bpr: # replace anchors anchors = torch.tensor(anchors, device=m.anchors.device).type_as(m.anchors) m.anchors[:] = anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1) # loss check_anchor_order(m) - print(f'{prefix}New anchors saved to model. Update model *.yaml to use these anchors in the future.') + LOGGER.info(f'{PREFIX}New anchors saved to model. Update model *.yaml to use these anchors in the future.') else: - print(f'{prefix}Original anchors better than new anchors. Proceeding with original anchors.') - print('') # newline + LOGGER.info(f'{PREFIX}Original anchors better than new anchors. 
Proceeding with original anchors.') def kmean_anchors(dataset='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True): @@ -81,7 +82,6 @@ def kmean_anchors(dataset='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen from scipy.cluster.vq import kmeans thr = 1 / thr - prefix = colorstr('autoanchor: ') def metric(k, wh): # compute metrics r = wh[:, None] / k[None] @@ -93,15 +93,17 @@ def anchor_fitness(k): # mutation fitness _, best = metric(torch.tensor(k, dtype=torch.float32), wh) return (best * (best > thr).float()).mean() # fitness - def print_results(k): + def print_results(k, verbose=True): k = k[np.argsort(k.prod(1))] # sort small to large x, best = metric(k, wh0) bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n # best possible recall, anch > thr - print(f'{prefix}thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr') - print(f'{prefix}n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, ' - f'past_thr={x[x > thr].mean():.3f}-mean: ', end='') + s = f'{PREFIX}thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr\n' \ + f'{PREFIX}n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, ' \ + f'past_thr={x[x > thr].mean():.3f}-mean: ' for i, x in enumerate(k): - print('%i,%i' % (round(x[0]), round(x[1])), end=', ' if i < len(k) - 1 else '\n') # use in *.cfg + s += '%i,%i, ' % (round(x[0]), round(x[1])) + if verbose: + LOGGER.info(s[:-2]) return k if isinstance(dataset, str): # *.yaml file @@ -117,19 +119,19 @@ def print_results(k): # Filter i = (wh0 < 3.0).any(1).sum() if i: - print(f'{prefix}WARNING: Extremely small objects found. {i} of {len(wh0)} labels are < 3 pixels in size.') + LOGGER.info(f'{PREFIX}WARNING: Extremely small objects found. 
{i} of {len(wh0)} labels are < 3 pixels in size.') wh = wh0[(wh0 >= 2.0).any(1)] # filter > 2 pixels # wh = wh * (np.random.rand(wh.shape[0], 1) * 0.9 + 0.1) # multiply by random scale 0-1 # Kmeans calculation - print(f'{prefix}Running kmeans for {n} anchors on {len(wh)} points...') + LOGGER.info(f'{PREFIX}Running kmeans for {n} anchors on {len(wh)} points...') s = wh.std(0) # sigmas for whitening k, dist = kmeans(wh / s, n, iter=30) # points, mean distance - assert len(k) == n, f'{prefix}ERROR: scipy.cluster.vq.kmeans requested {n} points but returned only {len(k)}' + assert len(k) == n, f'{PREFIX}ERROR: scipy.cluster.vq.kmeans requested {n} points but returned only {len(k)}' k *= s wh = torch.tensor(wh, dtype=torch.float32) # filtered wh0 = torch.tensor(wh0, dtype=torch.float32) # unfiltered - k = print_results(k) + k = print_results(k, verbose=False) # Plot # k, d = [None] * 20, [None] * 20 @@ -146,7 +148,7 @@ def print_results(k): # Evolve npr = np.random f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1 # fitness, generations, mutation prob, sigma - pbar = tqdm(range(gen), desc=f'{prefix}Evolving anchors with Genetic Algorithm:') # progress bar + pbar = tqdm(range(gen), desc=f'{PREFIX}Evolving anchors with Genetic Algorithm:') # progress bar for _ in pbar: v = np.ones(sh) while (v == 1).all(): # mutate until a change occurs (prevent duplicates) @@ -155,8 +157,8 @@ def print_results(k): fg = anchor_fitness(kg) if fg > f: f, k = fg, kg.copy() - pbar.desc = f'{prefix}Evolving anchors with Genetic Algorithm: fitness = {f:.4f}' + pbar.desc = f'{PREFIX}Evolving anchors with Genetic Algorithm: fitness = {f:.4f}' if verbose: - print_results(k) + print_results(k, verbose) return print_results(k) diff --git a/utils/autobatch.py b/utils/autobatch.py index 3f2b4d1a4c38..cb94f041e95d 100644 --- a/utils/autobatch.py +++ b/utils/autobatch.py @@ -9,7 +9,7 @@ import torch from torch.cuda import amp -from utils.general import colorstr +from utils.general import LOGGER, colorstr from utils.torch_utils import profile @@ -27,11 +27,11 @@ def autobatch(model, imgsz=640, fraction=0.9, batch_size=16): # model = torch.hub.load('ultralytics/yolov5', 'yolov5s', autoshape=False) # print(autobatch(model)) - prefix = colorstr('autobatch: ') - print(f'{prefix}Computing optimal batch size for --imgsz {imgsz}') + prefix = colorstr('AutoBatch: ') + LOGGER.info(f'{prefix}Computing optimal batch size for --imgsz {imgsz}') device = next(model.parameters()).device # get model device if device.type == 'cpu': - print(f'{prefix}CUDA not detected, using default CPU batch-size {batch_size}') + LOGGER.info(f'{prefix}CUDA not detected, using default CPU batch-size {batch_size}') return batch_size d = str(device).upper() # 'CUDA:0' @@ -40,18 +40,18 @@ def autobatch(model, imgsz=640, fraction=0.9, batch_size=16): r = torch.cuda.memory_reserved(device) / 1024 ** 3 # (GiB) a = torch.cuda.memory_allocated(device) / 1024 ** 3 # (GiB) f = t - (r + a) # free inside reserved - print(f'{prefix}{d} ({properties.name}) {t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free') + LOGGER.info(f'{prefix}{d} ({properties.name}) {t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free') batch_sizes = [1, 2, 4, 8, 16] try: img = [torch.zeros(b, 3, imgsz, imgsz) for b in batch_sizes] y = profile(img, model, n=3, device=device) except Exception as e: - print(f'{prefix}{e}') + LOGGER.warning(f'{prefix}{e}') y = [x[2] for x in y if x] # memory [2] batch_sizes = batch_sizes[:len(y)] p = np.polyfit(batch_sizes, y, deg=1) # 
first degree polynomial fit b = int((f * fraction - p[1]) / p[0]) # y intercept (optimal batch size) - print(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%)') + LOGGER.info(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%)') return b diff --git a/utils/plots.py b/utils/plots.py index a5b20803c7be..9919e4d9d88f 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -17,8 +17,8 @@ import torch from PIL import Image, ImageDraw, ImageFont -from utils.general import (Timeout, check_requirements, clip_coords, increment_path, is_ascii, is_chinese, try_except, - user_config_dir, xywh2xyxy, xyxy2xywh) +from utils.general import (LOGGER, Timeout, check_requirements, clip_coords, increment_path, is_ascii, is_chinese, + try_except, user_config_dir, xywh2xyxy, xyxy2xywh) from utils.metrics import fitness # Settings @@ -328,7 +328,7 @@ def plot_val_study(file='', dir='', x=None): # from utils.plots import *; plot_ @Timeout(30) # known issue https://github.com/ultralytics/yolov5/issues/5611 def plot_labels(labels, names=(), save_dir=Path('')): # plot dataset labels - print('Plotting labels... ') + LOGGER.info(f"Plotting labels to {save_dir / 'labels.jpg'}... ") c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes nc = int(c.max() + 1) # number of classes x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height']) From 540ef0dd30be9bcf6882c9625c49f61c5c764f52 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Sun, 14 Nov 2021 17:56:53 +0530 Subject: [PATCH 0719/1976] W&B refactor, handle exceptions, CI example (#5618) * handle exceptions| attempt CI * update * Pre-commit manual run * yaml one-liner * Update ci-testing.yml * Comment W&B CI Leave as example for future separate CI * Update ci-testing.yml Co-authored-by: Glenn Jocher --- .github/workflows/ci-testing.yml | 5 ++++- utils/loggers/wandb/log_dataset.py | 4 ++++ utils/loggers/wandb/wandb_utils.py | 16 +++++++++++----- 3 files changed, 19 insertions(+), 6 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index abfe21ef8726..5db6d41f4bcc 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -51,12 +51,15 @@ jobs: run: | python -m pip install --upgrade pip pip install -qr requirements.txt -f https://download.pytorch.org/whl/cpu/torch_stable.html - pip install -q onnx tensorflow-cpu keras==2.6.0 # for export + pip install -q onnx tensorflow-cpu keras==2.6.0 # wandb # extras python --version pip --version pip list shell: bash + # - name: W&B login + # run: wandb login 345011b3fb26dc8337fd9b20e53857c1d403f2aa + - name: Download data run: | # curl -L -o tmp.zip https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip diff --git a/utils/loggers/wandb/log_dataset.py b/utils/loggers/wandb/log_dataset.py index 8447272cdb48..06e81fb69307 100644 --- a/utils/loggers/wandb/log_dataset.py +++ b/utils/loggers/wandb/log_dataset.py @@ -2,11 +2,15 @@ from wandb_utils import WandbLogger +from utils.general import LOGGER + WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' def create_dataset_artifact(opt): logger = WandbLogger(opt, None, job_type='Dataset Creation') # TODO: return value unused + if not logger.wandb: + LOGGER.info("install wandb using `pip install wandb` to log the dataset") if __name__ == '__main__': diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index a71bc6ce96d2..47757dd1a74e 100644 --- a/utils/loggers/wandb/wandb_utils.py 
+++ b/utils/loggers/wandb/wandb_utils.py @@ -17,7 +17,7 @@ sys.path.append(str(ROOT)) # add ROOT to PATH from utils.datasets import LoadImagesAndLabels, img2label_paths -from utils.general import check_dataset, check_file +from utils.general import LOGGER, check_dataset, check_file try: import wandb @@ -203,7 +203,7 @@ def check_and_upload_dataset(self, opt): config_path = self.log_dataset_artifact(opt.data, opt.single_cls, 'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem) - print("Created dataset config file ", config_path) + LOGGER.info(f"Created dataset config file {config_path}") with open(config_path, errors='ignore') as f: wandb_data_dict = yaml.safe_load(f) return wandb_data_dict @@ -316,7 +316,7 @@ def log_model(self, path, opt, epoch, fitness_score, best_model=False): model_artifact.add_file(str(path / 'last.pt'), name='last.pt') wandb.log_artifact(model_artifact, aliases=['latest', 'last', 'epoch ' + str(self.current_epoch), 'best' if best_model else '']) - print("Saving model artifact on epoch ", epoch + 1) + LOGGER.info(f"Saving model artifact on epoch {epoch + 1}") def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False): """ @@ -368,7 +368,7 @@ def map_val_table_path(self): Useful for - referencing artifacts for evaluation. """ self.val_table_path_map = {} - print("Mapping dataset") + LOGGER.info("Mapping dataset") for i, data in enumerate(tqdm(self.val_table.data)): self.val_table_path_map[data[3]] = data[0] @@ -488,7 +488,13 @@ def end_epoch(self, best_result=False): with all_logging_disabled(): if self.bbox_media_panel_images: self.log_dict["BoundingBoxDebugger"] = self.bbox_media_panel_images - wandb.log(self.log_dict) + try: + wandb.log(self.log_dict) + except BaseException as e: + LOGGER.info(f"An error occurred in wandb logger. The training will proceed without interruption. More info\n{e}") + self.wandb_run.finish() + self.wandb_run = None + self.log_dict = {} self.bbox_media_panel_images = [] if self.result_artifact: From c2523be634a94da2b1b2a43c11b25827a0de990d Mon Sep 17 00:00:00 2001 From: Ding Yiwei <16083536+dingyiwei@users.noreply.github.com> Date: Mon, 15 Nov 2021 17:06:18 +0800 Subject: [PATCH 0720/1976] Replace 2 `transpose()` with 1 `permute` in TransformerBlock()` (#5645) --- models/common.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/models/common.py b/models/common.py index 3ea7ba5477a6..3930c8e7b2df 100644 --- a/models/common.py +++ b/models/common.py @@ -86,8 +86,8 @@ def forward(self, x): if self.conv is not None: x = self.conv(x) b, _, w, h = x.shape - p = x.flatten(2).unsqueeze(0).transpose(0, 3).squeeze(3) - return self.tr(p + self.linear(p)).unsqueeze(3).transpose(0, 3).reshape(b, self.c2, w, h) + p = x.flatten(2).permute(2, 0, 1) + return self.tr(p + self.linear(p)).permute(1, 2, 0).reshape(b, self.c2, w, h) class Bottleneck(nn.Module): From fb19561f9869714cd639c7ce58281ea0d5592dff Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 15 Nov 2021 20:10:29 +0100 Subject: [PATCH 0721/1976] Bump pip from 19.2 to 21.1 in /utils/google_app_engine (#5661) Bumps [pip](https://github.com/pypa/pip) from 19.2 to 21.1. - [Release notes](https://github.com/pypa/pip/releases) - [Changelog](https://github.com/pypa/pip/blob/main/NEWS.rst) - [Commits](https://github.com/pypa/pip/compare/19.2...21.1) --- updated-dependencies: - dependency-name: pip dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- utils/google_app_engine/additional_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/google_app_engine/additional_requirements.txt b/utils/google_app_engine/additional_requirements.txt index 2f81c8b40056..42d7ffc0eed8 100644 --- a/utils/google_app_engine/additional_requirements.txt +++ b/utils/google_app_engine/additional_requirements.txt @@ -1,4 +1,4 @@ # add these requirements in your app on top of the existing ones -pip==19.2 +pip==21.1 Flask==1.0.2 gunicorn==19.9.0 From e80a09bbfa1ddb1097fdc7164d84dedeb3d95388 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 15 Nov 2021 20:15:50 +0100 Subject: [PATCH 0722/1976] Update ci-testing.yml to Python 3.9 (#5660) --- .github/workflows/ci-testing.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 5db6d41f4bcc..b2bc040191e7 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -19,7 +19,7 @@ jobs: fail-fast: false matrix: os: [ ubuntu-latest, macos-latest, windows-latest ] - python-version: [ 3.8 ] + python-version: [ 3.9 ] model: [ 'yolov5n' ] # models to test # Timeout: https://stackoverflow.com/a/59076067/4521646 From 0453b758e7ff645528ae52aa85228f3672ff7594 Mon Sep 17 00:00:00 2001 From: Nrupatunga Date: Tue, 16 Nov 2021 17:06:00 +0530 Subject: [PATCH 0723/1976] TFDetect dynamic anchor count assignment fix (#5668) * fix tf.py when anchors not equal to 3 * revert the isort fix * update the fix to use anchor attribute available already --- models/tf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/tf.py b/models/tf.py index 6de0245cfe50..96482dd37bea 100644 --- a/models/tf.py +++ b/models/tf.py @@ -233,7 +233,7 @@ def call(self, inputs): xy /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32) wh /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32) y = tf.concat([xy, wh, y[..., 4:]], -1) - z.append(tf.reshape(y, [-1, 3 * ny * nx, self.no])) + z.append(tf.reshape(y, [-1, self.na * ny * nx, self.no])) return x if self.training else (tf.concat(z, 1), x) From 47fac9ff73aceedd267db1e734a98de122fc9430 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 16 Nov 2021 13:58:15 +0100 Subject: [PATCH 0724/1976] Update train.py comment to 'Model attributes' (#5670) --- train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/train.py b/train.py index 91bcd1e1e2e8..2838936d2d78 100644 --- a/train.py +++ b/train.py @@ -243,7 +243,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary if cuda and RANK != -1: model = DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK) - # Model parameters + # Model attributes nl = de_parallel(model).model[-1].nl # number of detection layers (to scale hyps) hyp['box'] *= 3 / nl # scale to layers hyp['cls'] *= nc / 80 * 3 / nl # scale to classes and layers From 562191f5756273aca54225903f5933f7683daade Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 17 Nov 2021 15:18:50 +0100 Subject: [PATCH 0725/1976] Update export.py docstring (#5689) --- export.py | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/export.py b/export.py index 4cf30e34fc7b..b3ab4df25ae3 100644 --- a/export.py +++ b/export.py @@ -1,14 +1,26 @@ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ -Export a YOLOv5 PyTorch model to TorchScript, ONNX, CoreML, 
TensorFlow (saved_model, pb, TFLite, TF.js,) formats -TensorFlow exports authored by https://github.com/zldrobit +Export a YOLOv5 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit + +Format | Example | Export `include=(...)` argument +--- | --- | --- +PyTorch | yolov5s.pt | - +TorchScript | yolov5s.torchscript.pt | 'torchscript' +ONNX | yolov5s.onnx | 'onnx' +CoreML | yolov5s.mlmodel | 'coreml' +TensorFlow SavedModel | yolov5s_saved_model/ | 'saved_model' +TensorFlow GraphDef | yolov5s.pb | 'pb' +TensorFlow Lite | yolov5s.tflite | 'tflite' +TensorFlow.js | yolov5s_web_model/ | 'tfjs' Usage: $ python path/to/export.py --weights yolov5s.pt --include torchscript onnx coreml saved_model pb tflite tfjs Inference: $ python path/to/detect.py --weights yolov5s.pt - yolov5s.onnx (must export with --dynamic) + yolov5s.torchscript.pt + yolov5s.onnx + yolov5s.mlmodel (under development) yolov5s_saved_model yolov5s.pb yolov5s.tflite From 8df64a912274ea3a82df2f96f0e3c3ab95713502 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 18 Nov 2021 14:53:42 +0100 Subject: [PATCH 0726/1976] `NUM_THREADS` leave at least 1 CPU free (#5706) Updated strategy leaves at least 1 cpu free to avoid system overloads. Partially addresses https://github.com/ultralytics/yolov5/issues/5685 --- utils/datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/datasets.py b/utils/datasets.py index 3504998b125d..68b1e634bebf 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -35,7 +35,7 @@ IMG_FORMATS = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo'] # acceptable image suffixes VID_FORMATS = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) # DPP -NUM_THREADS = min(8, os.cpu_count()) # number of multiprocessing threads +NUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of multiprocessing threads # Get orientation exif tag for orientation in ExifTags.TAGS.keys(): From eb51ffdcac466e553607c470b0e8f19d5a61da67 Mon Sep 17 00:00:00 2001 From: Jirka Borovec Date: Fri, 19 Nov 2021 13:32:53 +0100 Subject: [PATCH 0727/1976] Prune unused imports (#5711) * prune unused imports * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- models/tf.py | 1 - utils/loggers/wandb/wandb_utils.py | 1 - 2 files changed, 2 deletions(-) diff --git a/models/tf.py b/models/tf.py index 96482dd37bea..84359c445797 100644 --- a/models/tf.py +++ b/models/tf.py @@ -11,7 +11,6 @@ """ import argparse -import logging import sys from copy import deepcopy from pathlib import Path diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index 47757dd1a74e..a4cbaee240d5 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -7,7 +7,6 @@ from pathlib import Path from typing import Dict -import pkg_resources as pkg import yaml from tqdm import tqdm From 36d12a500eae4561d09d4955e1b50b12e57bf6c6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 20 Nov 2021 01:04:56 +0100 Subject: [PATCH 0728/1976] Explicitly compute TP, FP in val.py (#5727) --- utils/metrics.py | 21 +++++++++++++++------ val.py | 2 +- 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/utils/metrics.py b/utils/metrics.py index 2e0e0c65e63d..3f1dc559c75a 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ 
-18,7 +18,7 @@ def fitness(x): return (x[:, :4] * w).sum(1) -def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=()): +def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=(), eps=1e-16): """ Compute the average precision, given the recall and precision curves. Source: https://github.com/rafaelpadilla/Object-Detection-Metrics. # Arguments @@ -37,7 +37,7 @@ def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names tp, conf, pred_cls = tp[i], conf[i], pred_cls[i] # Find unique classes - unique_classes = np.unique(target_cls) + unique_classes, nt = np.unique(target_cls, return_counts=True) nc = unique_classes.shape[0] # number of classes, number of detections # Create Precision-Recall curve and compute AP for each class @@ -45,7 +45,7 @@ def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000)) for ci, c in enumerate(unique_classes): i = pred_cls == c - n_l = (target_cls == c).sum() # number of labels + n_l = nt[ci] # number of labels n_p = i.sum() # number of predictions if n_p == 0 or n_l == 0: @@ -56,7 +56,7 @@ def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names tpc = tp[i].cumsum(0) # Recall - recall = tpc / (n_l + 1e-16) # recall curve + recall = tpc / (n_l + eps) # recall curve r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0) # negative x, xp because xp decreases # Precision @@ -70,7 +70,7 @@ def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names py.append(np.interp(px, mrec, mpre)) # precision at mAP@0.5 # Compute F1 (harmonic mean of precision and recall) - f1 = 2 * p * r / (p + r + 1e-16) + f1 = 2 * p * r / (p + r + eps) names = [v for k, v in names.items() if k in unique_classes] # list: only classes that have data names = {i: v for i, v in enumerate(names)} # to dict if plot: @@ -80,7 +80,10 @@ def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names plot_mc_curve(px, r, Path(save_dir) / 'R_curve.png', names, ylabel='Recall') i = f1.mean(0).argmax() # max F1 index - return p[:, i], r[:, i], ap, f1[:, i], unique_classes.astype('int32') + p, r, f1 = p[:, i], r[:, i], f1[:, i] + tp = (r * nt).round() # true positives + fp = (tp / (p + eps) - tp).round() # false positives + return tp, fp, p, r, f1, ap, unique_classes.astype('int32') def compute_ap(recall, precision): @@ -162,6 +165,12 @@ def process_batch(self, detections, labels): def matrix(self): return self.matrix + def tp_fp(self): + tp = self.matrix.diagonal() # true positives + fp = self.matrix.sum(1) - tp # false positives + # fn = self.matrix.sum(0) - tp # false negatives (missed detections) + return tp[:-1], fp[:-1] # remove background class + def plot(self, normalize=True, save_dir='', names=()): try: import seaborn as sn diff --git a/val.py b/val.py index dfabb65b979c..cc6ff027b070 100644 --- a/val.py +++ b/val.py @@ -237,7 +237,7 @@ def run(data, # Compute metrics stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy if len(stats) and stats[0].any(): - p, r, ap, f1, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names) + tp, fp, p, r, f1, ap, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names) ap50, ap = ap[:, 0], ap.mean(1) # AP@0.5, AP@0.5:0.95 mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean() nt = np.bincount(stats[3].astype(np.int64), minlength=nc) # number of targets per class From 
46daa7b78d281f0bf5ab512d170654259e4009e4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 20 Nov 2021 01:11:36 +0100 Subject: [PATCH 0729/1976] Remove `.autoshape()` method (#5694) --- hubconf.py | 3 ++- models/common.py | 8 +++----- models/yolo.py | 9 +-------- 3 files changed, 6 insertions(+), 14 deletions(-) diff --git a/hubconf.py b/hubconf.py index 3488fef76ac5..03335f7906f0 100644 --- a/hubconf.py +++ b/hubconf.py @@ -27,6 +27,7 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo """ from pathlib import Path + from models.common import AutoShape from models.experimental import attempt_load from models.yolo import Model from utils.downloads import attempt_download @@ -55,7 +56,7 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo if len(ckpt['model'].names) == classes: model.names = ckpt['model'].names # set class names attribute if autoshape: - model = model.autoshape() # for file/URI/PIL/cv2/np inputs and NMS + model = AutoShape(model) # for file/URI/PIL/cv2/np inputs and NMS return model.to(device) except Exception as e: diff --git a/models/common.py b/models/common.py index 3930c8e7b2df..b9604f3c1cbd 100644 --- a/models/common.py +++ b/models/common.py @@ -23,7 +23,7 @@ from utils.general import (LOGGER, check_requirements, check_suffix, colorstr, increment_path, make_divisible, non_max_suppression, scale_coords, xywh2xyxy, xyxy2xywh) from utils.plots import Annotator, colors, save_one_box -from utils.torch_utils import time_sync +from utils.torch_utils import copy_attr, time_sync def autopad(k, p=None): # kernel, padding @@ -405,12 +405,10 @@ class AutoShape(nn.Module): def __init__(self, model): super().__init__() + LOGGER.info('Adding AutoShape... ') + copy_attr(self, model, include=('yaml', 'nc', 'hyp', 'names', 'stride', 'abc'), exclude=()) # copy attributes self.model = model.eval() - def autoshape(self): - LOGGER.info('AutoShape already enabled, skipping... ') # model already converted to model.autoshape() - return self - def _apply(self, fn): # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers self = super()._apply(fn) diff --git a/models/yolo.py b/models/yolo.py index 305f0ca0cc88..db3d711a81fa 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -22,8 +22,7 @@ from utils.autoanchor import check_anchor_order from utils.general import LOGGER, check_version, check_yaml, make_divisible, print_args from utils.plots import feature_visualization -from utils.torch_utils import (copy_attr, fuse_conv_and_bn, initialize_weights, model_info, scale_img, select_device, - time_sync) +from utils.torch_utils import fuse_conv_and_bn, initialize_weights, model_info, scale_img, select_device, time_sync try: import thop # for FLOPs computation @@ -226,12 +225,6 @@ def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers self.info() return self - def autoshape(self): # add AutoShape module - LOGGER.info('Adding AutoShape... 
') - m = AutoShape(self) # wrap model - copy_attr(m, self, include=('yaml', 'nc', 'hyp', 'names', 'stride'), exclude=()) # copy attributes - return m - def info(self, verbose=False, img_size=640): # print model information model_info(self, verbose, img_size) From 5185981993737861575adb07f2817a74fa4b2baa Mon Sep 17 00:00:00 2001 From: IL2006 <94582889+IL2006@users.noreply.github.com> Date: Sat, 20 Nov 2021 08:26:48 +0800 Subject: [PATCH 0730/1976] SECURITY.md (#5695) * SECURITY_1.md * Delete SECURITY.md Co-authored-by: Glenn Jocher From d6ae1c835a0ea61268a29cb4de3dbd061828d386 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 21 Nov 2021 19:26:07 +0100 Subject: [PATCH 0731/1976] Created using Colaboratory --- tutorial.ipynb | 27 ++++++++++++++++++++++++--- 1 file changed, 24 insertions(+), 3 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 7763a26066e2..9440ca8b1788 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -368,7 +368,7 @@ "colab_type": "text" }, "source": [ - "\"Open" + "\"Open" ] }, { @@ -412,7 +412,7 @@ "from yolov5 import utils\n", "display = utils.notebook_init() # checks" ], - "execution_count": 2, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -1081,6 +1081,27 @@ ], "execution_count": null, "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "VTRwsvA9u7ln" + }, + "source": [ + "# TensorRT \n", + "# https://developer.nvidia.com/nvidia-tensorrt-download\n", + "!lsb_release -a # check system\n", + "%ls /usr/local | grep cuda # check CUDA\n", + "!wget https://ultralytics.com/assets/TensorRT-8.2.0.6.Linux.x86_64-gnu.cuda-11.4.cudnn8.2.tar.gz # download\n", + "![ -d /content/TensorRT-8.2.0.6/ ] || tar -C /content/ -zxf ./TensorRT-8.2.0.6.Linux.x86_64-gnu.cuda-11.4.cudnn8.2.tar.gz # unzip\n", + "%pip list | grep tensorrt || pip install /content/TensorRT-8.2.0.6/python/tensorrt-8.2.0.6-cp37-none-linux_x86_64.whl # install\n", + "%env LD_LIBRARY_PATH=/usr/local/cuda-11.1/lib64:/content/cuda-11.1/lib64:/content/TensorRT-8.2.0.6/lib:/usr/local/nvidia/lib:/usr/local/nvidia/lib64 # add to path\n", + "\n", + "!python export.py --weights yolov5s.pt --include engine --imgsz 640 640 --device 0\n", + "!python detect.py --weights yolov5s.engine --imgsz 640 640 --device 0" + ], + "execution_count": null, + "outputs": [] } ] -} +} \ No newline at end of file From f17c86b7f0d2038288d7292cb82dec2433cc91e5 Mon Sep 17 00:00:00 2001 From: Zengyf-CVer <41098760+Zengyf-CVer@users.noreply.github.com> Date: Mon, 22 Nov 2021 03:21:44 +0800 Subject: [PATCH 0732/1976] Save *.npy features on detect.py `--visualize` (#5701) * Add feature map to save npy files Add feature map to save npy files,export npy files with 32 feature maps per layer. 
* [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update plots.py * Update plots.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update plots.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- tutorial.ipynb | 2 +- utils/plots.py | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 9440ca8b1788..4ce87c75aa64 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -1104,4 +1104,4 @@ "outputs": [] } ] -} \ No newline at end of file +} diff --git a/utils/plots.py b/utils/plots.py index 9919e4d9d88f..69037ee9af70 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -132,7 +132,7 @@ def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detec if 'Detect' not in module_type: batch, channels, height, width = x.shape # batch, channels, height, width if height > 1 and width > 1: - f = f"stage{stage}_{module_type.split('.')[-1]}_features.png" # filename + f = save_dir / f"stage{stage}_{module_type.split('.')[-1]}_features.png" # filename blocks = torch.chunk(x[0].cpu(), channels, dim=0) # select batch index 0, block by channels n = min(n, channels) # number of plots @@ -143,9 +143,10 @@ def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detec ax[i].imshow(blocks[i].squeeze()) # cmap='gray' ax[i].axis('off') - print(f'Saving {save_dir / f}... ({n}/{channels})') - plt.savefig(save_dir / f, dpi=300, bbox_inches='tight') + print(f'Saving {f}... ({n}/{channels})') + plt.savefig(f, dpi=300, bbox_inches='tight') plt.close() + np.save(str(f.with_suffix('.npy')), x[0].cpu().numpy()) # npy save def hist2d(x, y, n=100): From 7a39803476f8ae55fb25ed93a400a3bba998d5e7 Mon Sep 17 00:00:00 2001 From: imyhxy Date: Mon, 22 Nov 2021 21:58:07 +0800 Subject: [PATCH 0733/1976] Export, detect and validation with TensorRT engine file (#5699) * Export and detect with TensorRT engine file * Resolve `isort` * Make validation works with TensorRT engine * feat: update export docstring * feat: change suffix from *.trt to *.engine * feat: get rid of pycuda * feat: make compatiable with val.py * feat: support detect with fp16 engine * Add Lite to Edge TPU string * Remove *.trt comment * Revert to standard success logger.info string * Fix Deprecation Warning ``` export.py:310: DeprecationWarning: Use build_serialized_network instead. with builder.build_engine(network, config) as engine, open(f, 'wb') as t: ``` * Revert deprecation warning fix @imyhxy it seems we can't apply the deprecation warning fix because then export fails, so I'm reverting my previous change here. 
* Update export.py * Update export.py * Update common.py * export onnx to file before building TensorRT engine file * feat: triger ONNX export failed early * feat: load ONNX model from file Co-authored-by: Glenn Jocher --- detect.py | 4 ++-- export.py | 55 +++++++++++++++++++++++++++++++++++++++++++++++- models/common.py | 32 ++++++++++++++++++++++++---- val.py | 10 +++++---- 4 files changed, 90 insertions(+), 11 deletions(-) diff --git a/detect.py b/detect.py index 108f8f138052..29904f310200 100644 --- a/detect.py +++ b/detect.py @@ -77,11 +77,11 @@ def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s) # Load model device = select_device(device) model = DetectMultiBackend(weights, device=device, dnn=dnn) - stride, names, pt, jit, onnx = model.stride, model.names, model.pt, model.jit, model.onnx + stride, names, pt, jit, onnx, engine = model.stride, model.names, model.pt, model.jit, model.onnx, model.engine imgsz = check_img_size(imgsz, s=stride) # check image size # Half - half &= pt and device.type != 'cpu' # half precision only supported by PyTorch on CUDA + half &= (pt or engine) and device.type != 'cpu' # half precision only supported by PyTorch on CUDA if pt: model.model.half() if half else model.model.float() diff --git a/export.py b/export.py index b3ab4df25ae3..35875f1fb0d3 100644 --- a/export.py +++ b/export.py @@ -12,6 +12,7 @@ TensorFlow GraphDef | yolov5s.pb | 'pb' TensorFlow Lite | yolov5s.tflite | 'tflite' TensorFlow.js | yolov5s_web_model/ | 'tfjs' +TensorRT | yolov5s.engine | 'engine' Usage: $ python path/to/export.py --weights yolov5s.pt --include torchscript onnx coreml saved_model pb tflite tfjs @@ -24,6 +25,7 @@ yolov5s_saved_model yolov5s.pb yolov5s.tflite + yolov5s.engine TensorFlow.js: $ cd .. && git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example @@ -263,6 +265,51 @@ def export_tfjs(keras_model, im, file, prefix=colorstr('TensorFlow.js:')): LOGGER.info(f'\n{prefix} export failure: {e}') +def export_engine(model, im, file, train, half, simplify, workspace=4, verbose=False, prefix=colorstr('TensorRT:')): + try: + check_requirements(('tensorrt',)) + import tensorrt as trt + + opset = (12, 13)[trt.__version__[0] == '8'] # test on TensorRT 7.x and 8.x + export_onnx(model, im, file, opset, train, False, simplify) + onnx = file.with_suffix('.onnx') + assert onnx.exists(), f'failed to export ONNX file: {onnx}' + + LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...') + f = str(file).replace('.pt', '.engine') # TensorRT engine file + logger = trt.Logger(trt.Logger.INFO) + if verbose: + logger.min_severity = trt.Logger.Severity.VERBOSE + + builder = trt.Builder(logger) + config = builder.create_builder_config() + config.max_workspace_size = workspace * 1 << 30 + + flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)) + network = builder.create_network(flag) + parser = trt.OnnxParser(network, logger) + if not parser.parse_from_file(str(onnx)): + raise RuntimeError(f'failed to load ONNX file: {onnx}') + + inputs = [network.get_input(i) for i in range(network.num_inputs)] + outputs = [network.get_output(i) for i in range(network.num_outputs)] + LOGGER.info(f'{prefix} Network Description:') + for inp in inputs: + LOGGER.info(f'{prefix}\tinput "{inp.name}" with shape {inp.shape} and dtype {inp.dtype}') + for out in outputs: + LOGGER.info(f'{prefix}\toutput "{out.name}" with shape {out.shape} and dtype {out.dtype}') + + half &= builder.platform_has_fast_fp16 + LOGGER.info(f'{prefix} building FP{16 if half 
else 32} engine in {f}') + if half: + config.set_flag(trt.BuilderFlag.FP16) + with builder.build_engine(network, config) as engine, open(f, 'wb') as t: + t.write(engine.serialize()) + LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + + except Exception as e: + LOGGER.info(f'\n{prefix} export failure: {e}') + @torch.no_grad() def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' weights=ROOT / 'yolov5s.pt', # weights path @@ -278,6 +325,8 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' dynamic=False, # ONNX/TF: dynamic axes simplify=False, # ONNX: simplify model opset=12, # ONNX: opset version + verbose=False, # TensorRT: verbose log + workspace=4, # TensorRT: workspace size (GB) topk_per_class=100, # TF.js NMS: topk per class to keep topk_all=100, # TF.js NMS: topk for all classes to keep iou_thres=0.45, # TF.js NMS: IoU threshold @@ -322,6 +371,8 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' export_torchscript(model, im, file, optimize) if 'onnx' in include: export_onnx(model, im, file, opset, train, dynamic, simplify) + if 'engine' in include: + export_engine(model, im, file, train, half, simplify, workspace, verbose) if 'coreml' in include: export_coreml(model, im, file) @@ -360,13 +411,15 @@ def parse_opt(): parser.add_argument('--dynamic', action='store_true', help='ONNX/TF: dynamic axes') parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model') parser.add_argument('--opset', type=int, default=13, help='ONNX: opset version') + parser.add_argument('--verbose', action='store_true', help='TensorRT: verbose log') + parser.add_argument('--workspace', type=int, default=4, help='TensorRT: workspace size (GB)') parser.add_argument('--topk-per-class', type=int, default=100, help='TF.js NMS: topk per class to keep') parser.add_argument('--topk-all', type=int, default=100, help='TF.js NMS: topk for all classes to keep') parser.add_argument('--iou-thres', type=float, default=0.45, help='TF.js NMS: IoU threshold') parser.add_argument('--conf-thres', type=float, default=0.25, help='TF.js NMS: confidence threshold') parser.add_argument('--include', nargs='+', default=['torchscript', 'onnx'], - help='available formats are (torchscript, onnx, coreml, saved_model, pb, tflite, tfjs)') + help='available formats are (torchscript, onnx, engine, coreml, saved_model, pb, tflite, tfjs)') opt = parser.parse_args() print_args(FILE.stem, opt) return opt diff --git a/models/common.py b/models/common.py index b9604f3c1cbd..8836a655986a 100644 --- a/models/common.py +++ b/models/common.py @@ -7,6 +7,7 @@ import math import platform import warnings +from collections import namedtuple from copy import copy from pathlib import Path @@ -285,11 +286,12 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=True): # TensorFlow Lite: *.tflite # ONNX Runtime: *.onnx # OpenCV DNN: *.onnx with dnn=True + # TensorRT: *.engine super().__init__() w = str(weights[0] if isinstance(weights, list) else weights) - suffix, suffixes = Path(w).suffix.lower(), ['.pt', '.onnx', '.tflite', '.pb', '', '.mlmodel'] + suffix, suffixes = Path(w).suffix.lower(), ['.pt', '.onnx', '.engine', '.tflite', '.pb', '', '.mlmodel'] check_suffix(w, suffixes) # check weights have acceptable suffix - pt, onnx, tflite, pb, saved_model, coreml = (suffix == x for x in suffixes) # backend booleans + pt, onnx, engine, tflite, pb, saved_model, coreml = (suffix == x for x in suffixes) # backend booleans jit = pt and 'torchscript' in w.lower() stride, 
names = 64, [f'class{i}' for i in range(1000)] # assign defaults @@ -317,6 +319,23 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=True): check_requirements(('onnx', 'onnxruntime-gpu' if torch.has_cuda else 'onnxruntime')) import onnxruntime session = onnxruntime.InferenceSession(w, None) + elif engine: # TensorRT + LOGGER.info(f'Loading {w} for TensorRT inference...') + import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download + Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr')) + logger = trt.Logger(trt.Logger.INFO) + with open(w, 'rb') as f, trt.Runtime(logger) as runtime: + model = runtime.deserialize_cuda_engine(f.read()) + bindings = dict() + for index in range(model.num_bindings): + name = model.get_binding_name(index) + dtype = trt.nptype(model.get_binding_dtype(index)) + shape = tuple(model.get_binding_shape(index)) + data = torch.from_numpy(np.empty(shape, dtype=np.dtype(dtype))).to(device) + bindings[name] = Binding(name, dtype, shape, data, int(data.data_ptr())) + binding_addrs = {n: d.ptr for n, d in bindings.items()} + context = model.create_execution_context() + batch_size = bindings['images'].shape[0] else: # TensorFlow model (TFLite, pb, saved_model) import tensorflow as tf if pb: # https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt @@ -334,7 +353,7 @@ def wrap_frozen_graph(gd, inputs, outputs): model = tf.keras.models.load_model(w) elif tflite: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python if 'edgetpu' in w.lower(): - LOGGER.info(f'Loading {w} for TensorFlow Edge TPU inference...') + LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...') import tflite_runtime.interpreter as tfli delegate = {'Linux': 'libedgetpu.so.1', # install https://coral.ai/software/#edgetpu-runtime 'Darwin': 'libedgetpu.1.dylib', @@ -369,6 +388,11 @@ def forward(self, im, augment=False, visualize=False, val=False): y = self.net.forward() else: # ONNX Runtime y = self.session.run([self.session.get_outputs()[0].name], {self.session.get_inputs()[0].name: im})[0] + elif self.engine: # TensorRT + assert im.shape == self.bindings['images'].shape, (im.shape, self.bindings['images'].shape) + self.binding_addrs['images'] = int(im.data_ptr()) + self.context.execute_v2(list(self.binding_addrs.values())) + y = self.bindings['output'].data else: # TensorFlow model (TFLite, pb, saved_model) im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3) if self.pb: @@ -391,7 +415,7 @@ def forward(self, im, augment=False, visualize=False, val=False): y[..., 1] *= h # y y[..., 2] *= w # w y[..., 3] *= h # h - y = torch.tensor(y) + y = torch.tensor(y) if isinstance(y, np.ndarray) else y return (y, []) if val else y diff --git a/val.py b/val.py index cc6ff027b070..64a7e4dffeb0 100644 --- a/val.py +++ b/val.py @@ -111,7 +111,7 @@ def run(data, # Initialize/load model and set device training = model is not None if training: # called by train.py - device, pt = next(model.parameters()).device, True # get model device, PyTorch model + device, pt, engine = next(model.parameters()).device, True, False # get model device, PyTorch model half &= device.type != 'cpu' # half precision only supported on CUDA model.half() if half else model.float() @@ -124,11 +124,13 @@ def run(data, # Load model model = DetectMultiBackend(weights, device=device, dnn=dnn) - stride, pt = model.stride, model.pt + stride, pt, engine = model.stride, model.pt, model.engine imgsz = check_img_size(imgsz, 
s=stride) # check image size - half &= pt and device.type != 'cpu' # half precision only supported by PyTorch on CUDA + half &= (pt or engine) and device.type != 'cpu' # half precision only supported by PyTorch on CUDA if pt: model.model.half() if half else model.model.float() + elif engine: + batch_size = model.batch_size else: half = False batch_size = 1 # export.py models default to batch-size 1 @@ -165,7 +167,7 @@ def run(data, pbar = tqdm(dataloader, desc=s, ncols=NCOLS, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar for batch_i, (im, targets, paths, shapes) in enumerate(pbar): t1 = time_sync() - if pt: + if pt or engine: im = im.to(device, non_blocking=True) targets = targets.to(device) im = im.half() if half else im.float() # uint8 to fp16/32 From 4ca4aec46fa3ed89e5a16f09f6c85d40380ebb0f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 24 Nov 2021 23:25:51 +0100 Subject: [PATCH 0734/1976] Do not save hyp.yaml and opt.yaml on evolve (#5775) * Do not save hyp.yaml and opt.yaml on evolve * Update general.py --- train.py | 11 ++++++----- utils/general.py | 2 +- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/train.py b/train.py index 2838936d2d78..8d35f50afb11 100644 --- a/train.py +++ b/train.py @@ -76,13 +76,14 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary LOGGER.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items())) # Save run settings - with open(save_dir / 'hyp.yaml', 'w') as f: - yaml.safe_dump(hyp, f, sort_keys=False) - with open(save_dir / 'opt.yaml', 'w') as f: - yaml.safe_dump(vars(opt), f, sort_keys=False) - data_dict = None + if not evolve: + with open(save_dir / 'hyp.yaml', 'w') as f: + yaml.safe_dump(hyp, f, sort_keys=False) + with open(save_dir / 'opt.yaml', 'w') as f: + yaml.safe_dump(vars(opt), f, sort_keys=False) # Loggers + data_dict = None if RANK in [-1, 0]: loggers = Loggers(save_dir, weights, opt, hyp, LOGGER) # loggers instance if loggers.wandb: diff --git a/utils/general.py b/utils/general.py index fa56ed49aba8..5a2bcc3660f6 100755 --- a/utils/general.py +++ b/utils/general.py @@ -777,7 +777,7 @@ def print_mutation(results, hyp, save_dir, bucket): i = np.argmax(fitness(data.values[:, :7])) # f.write('# YOLOv5 Hyperparameter Evolution Results\n' + f'# Best generation: {i}\n' + - f'# Last generation: {len(data)}\n' + + f'# Last generation: {len(data) - 1}\n' + '# ' + ', '.join(f'{x.strip():>20s}' for x in keys[:7]) + '\n' + '# ' + ', '.join(f'{x:>20.5g}' for x in data.values[i, :7]) + '\n\n') yaml.safe_dump(hyp, f, sort_keys=False) From c55e674ac3ffa641cbbd465760701c64d3a3dcb9 Mon Sep 17 00:00:00 2001 From: rockstarr <41538890+miknyko@users.noreply.github.com> Date: Thu, 25 Nov 2021 15:42:15 +0800 Subject: [PATCH 0735/1976] fix the path error in export.py (#5778) * fix the path error in export.py * Update export.py Co-authored-by: Glenn Jocher From 4c7b2bdc30657354afcbc255385a163662e66c8b Mon Sep 17 00:00:00 2001 From: rockstarr <41538890+miknyko@users.noreply.github.com> Date: Thu, 25 Nov 2021 18:18:30 +0800 Subject: [PATCH 0736/1976] TorchScript `torch==1.7.0` Path support (#5781) * fix path error in export.py * Update export.py updated! 
* Update export.py oops forget something --- export.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/export.py b/export.py index 35875f1fb0d3..9d6d04967c80 100644 --- a/export.py +++ b/export.py @@ -71,7 +71,7 @@ def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:' ts = torch.jit.trace(model, im, strict=False) d = {"shape": im.shape, "stride": int(max(model.stride)), "names": model.names} extra_files = {'config.txt': json.dumps(d)} # torch._C.ExtraFilesMap() - (optimize_for_mobile(ts) if optimize else ts).save(f, _extra_files=extra_files) + (optimize_for_mobile(ts) if optimize else ts).save(str(f), _extra_files=extra_files) LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') except Exception as e: From f2ca30a407b00eb54999e9f350906e0c6eead906 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Thu, 25 Nov 2021 21:49:21 +0530 Subject: [PATCH 0737/1976] W&B: refactor W&B tables (#5737) * update * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * reformat * Single-line argparser argument * Update README.md * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update README.md * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- train.py | 2 +- utils/loggers/wandb/README.md | 33 +++++++++------ utils/loggers/wandb/wandb_utils.py | 68 +++++++++++++++++++++--------- 3 files changed, 68 insertions(+), 35 deletions(-) diff --git a/train.py b/train.py index 8d35f50afb11..9a5f402c3501 100644 --- a/train.py +++ b/train.py @@ -475,7 +475,7 @@ def parse_opt(known=False): # Weights & Biases arguments parser.add_argument('--entity', default=None, help='W&B: Entity') - parser.add_argument('--upload_dataset', action='store_true', help='W&B: Upload dataset as artifact table') + parser.add_argument('--upload_dataset', nargs='?', const=True, default=False, help='W&B: Upload data, "val" option') parser.add_argument('--bbox_interval', type=int, default=-1, help='W&B: Set bounding-box image logging interval') parser.add_argument('--artifact_alias', type=str, default='latest', help='W&B: Version of dataset artifact to use') diff --git a/utils/loggers/wandb/README.md b/utils/loggers/wandb/README.md index d787fb7a5a0e..63d999859e6d 100644 --- a/utils/loggers/wandb/README.md +++ b/utils/loggers/wandb/README.md @@ -2,6 +2,7 @@ * [About Weights & Biases](#about-weights-&-biases) * [First-Time Setup](#first-time-setup) * [Viewing runs](#viewing-runs) +* [Disabling wandb](#disabling-wandb) * [Advanced Usage: Dataset Versioning and Evaluation](#advanced-usage) * [Reports: Share your work with the world!](#reports) @@ -49,31 +50,36 @@ Run information streams from your environment to the W&B cloud console as you tr * Environment: OS and Python types, Git repository and state, **training command**

Weights & Biases dashboard

+ + ## Disabling wandb +* Running `wandb disabled` inside the project directory disables W&B logging, so training afterwards creates no wandb run ![Screenshot (84)](https://user-images.githubusercontent.com/15766192/143441777-c780bdd7-7cb4-4404-9559-b4316030a985.png) - +* To re-enable wandb, run `wandb online` ![Screenshot (85)](https://user-images.githubusercontent.com/15766192/143441866-7191b2cb-22f0-4e0f-ae64-2dc47dc13078.png) ## Advanced Usage You can leverage W&B artifacts and Tables integration to easily visualize and manage your datasets, models and training evaluations. Here are some quick examples to get you started.
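As a primer, a minimal self-contained sketch of the `wandb.Table` API these examples build on. The column layout mirrors the `result_table` built in `wandb_utils.py` in this PR; the project name, class names and row values are assumed for illustration:

```python
import wandb

# Offline mode so this sketch runs without a W&B account; drop it to sync to the cloud
run = wandb.init(project='yolov5-demo', mode='offline')  # hypothetical project name

# Fixed columns plus one column per class, as in setup_training()
columns = ['epoch', 'id', 'ground truth', 'prediction']
columns.extend(['person', 'car'])  # class names assumed for illustration
table = wandb.Table(columns=columns)
table.add_data(0, 'im_000001', 0.0, 0.92, 0.9, 0.1)  # one made-up row: 6 values for 6 columns
run.log({'result_table': table})
run.finish()
```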
-

1. Visualize and Version Datasets

- Log, visualize, dynamically query, and understand your data with W&B Tables. You can use the following command to log your dataset as a W&B Table. This will generate a {dataset}_wandb.yaml file which can be used to train from dataset artifact. -
+

1: Train and Log Evaluation simultaneously

+ This is an extension of the previous section, but it'll also train after uploading the dataset, and it logs an evaluation table. + The evaluation table compares your predictions and ground truths across the validation set for each epoch. It uses the references to the already uploaded datasets, + so no images will be uploaded from your system more than once. +
Usage - Code $ python utils/logger/wandb/log_dataset.py --project ... --name ... --data .. + Code $ python train.py --upload_data val - ![Screenshot (64)](https://user-images.githubusercontent.com/15766192/128486078-d8433890-98a3-4d12-8986-b6c0e3fc64b9.png) +![Screenshot from 2021-11-21 17-40-06](https://user-images.githubusercontent.com/15766192/142761183-c1696d8c-3f38-45ab-991a-bb0dfd98ae7d.png)
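The `--upload_dataset` flag behind this usage is the tri-state argument added to train.py in this PR (`nargs='?', const=True, default=False`). A small self-contained sketch of how the three states parse (the parser line is copied from the PR; the prints are for illustration):

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--upload_dataset', nargs='?', const=True, default=False,
                    help='W&B: Upload data, "val" option')

print(parser.parse_args([]).upload_dataset)                           # False -> no upload
print(parser.parse_args(['--upload_dataset']).upload_dataset)         # True  -> upload train and val
print(parser.parse_args(['--upload_dataset', 'val']).upload_dataset)  # 'val' -> upload val only
```

`log_dataset_artifact()` then sets `log_val_only = isinstance(upload_dataset, str) and upload_dataset == 'val'` to decide whether to skip the train split. Note that argparse accepts unambiguous option prefixes, which is why `--upload_data` in the usage above resolves to `--upload_dataset`.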
-

2: Train and Log Evaluation simultaneousy

- This is an extension of the previous section, but it'll also training after uploading the dataset. This also evaluation Table - Evaluation table compares your predictions and ground truths across the validation set for each epoch. It uses the references to the already uploaded datasets, - so no images will be uploaded from your system more than once. +

2. Visualize and Version Datasets

+ Log, visualize, dynamically query, and understand your data with W&B Tables. You can use the following command to log your dataset as a W&B Table. This will generate a {dataset}_wandb.yaml file which can be used to train from the dataset artifact.
Usage - Code $ python utils/logger/wandb/log_dataset.py --data .. --upload_data + Code $ python utils/logger/wandb/log_dataset.py --project ... --name ... --data .. -![Screenshot (72)](https://user-images.githubusercontent.com/15766192/128979739-4cf63aeb-a76f-483f-8861-1c0100b938a5.png) + ![Screenshot (64)](https://user-images.githubusercontent.com/15766192/128486078-d8433890-98a3-4d12-8986-b6c0e3fc64b9.png)
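The generated `{dataset}_wandb.yaml` is simply the original data dict with local paths swapped for artifact references. A sketch of that rewrite under assumed coco128-style inputs (the paths and project name here are hypothetical):

```python
import yaml

WANDB_ARTIFACT_PREFIX = 'wandb-artifact://'  # prefix the W&B logger uses for artifact paths

data = {'train': '../datasets/coco128/images/train2017',  # hypothetical local paths
        'val': '../datasets/coco128/images/train2017',
        'nc': 80}
project = 'YOLOv5'

for split in ('train', 'val'):
    if data.get(split):
        data[split] = WANDB_ARTIFACT_PREFIX + f'{project}/{split}'

print(yaml.safe_dump(data))  # roughly what lands in {dataset}_wandb.yaml
```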

3: Train using dataset artifact

@@ -81,7 +87,7 @@ You can leverage W&B artifacts and Tables integration to easily visualize and ma can be used to train a model directly from the dataset artifact. This also logs evaluation
Usage - Code $ python utils/logger/wandb/log_dataset.py --data {data}_wandb.yaml + Code $ python train.py --data {data}_wandb.yaml ![Screenshot (72)](https://user-images.githubusercontent.com/15766192/128979739-4cf63aeb-a76f-483f-8861-1c0100b938a5.png)
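Training from such a file works because the dataset loader can recognize the artifact prefix and download the referenced artifact instead of reading a local directory. A minimal sketch of that check (the helper name is illustrative, not a repo function):

```python
WANDB_ARTIFACT_PREFIX = 'wandb-artifact://'

def is_artifact_path(path):
    # True for values like 'wandb-artifact://YOLOv5/val', False for plain directories
    return isinstance(path, str) and path.startswith(WANDB_ARTIFACT_PREFIX)

assert is_artifact_path('wandb-artifact://YOLOv5/val')
assert not is_artifact_path('../datasets/coco128/images/train2017')
```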
@@ -123,7 +129,6 @@ Any run can be resumed using artifacts if the --resume argument sta
-

Reports

W&B Reports can be created from your saved runs for sharing online. Once a report is created, you will receive a link you can use to publicly share your results. Here is an example report created from the COCO128 tutorial trainings of all four YOLOv5 models ([link](https://wandb.ai/glenn-jocher/yolov5_tutorial/reports/YOLOv5-COCO128-Tutorial-Results--VmlldzozMDI5OTY)). diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index a4cbaee240d5..2d6133ab94c5 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -202,7 +202,6 @@ def check_and_upload_dataset(self, opt): config_path = self.log_dataset_artifact(opt.data, opt.single_cls, 'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem) - LOGGER.info(f"Created dataset config file {config_path}") with open(config_path, errors='ignore') as f: wandb_data_dict = yaml.safe_load(f) return wandb_data_dict @@ -244,7 +243,9 @@ def setup_training(self, opt): if self.val_artifact is not None: self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") - self.result_table = wandb.Table(["epoch", "id", "ground truth", "prediction", "avg_confidence"]) + columns = ["epoch", "id", "ground truth", "prediction"] + columns.extend(self.data_dict['names']) + self.result_table = wandb.Table(columns) self.val_table = self.val_artifact.get("val") if self.val_table_path_map is None: self.map_val_table_path() @@ -331,28 +332,41 @@ def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config= returns: the new .yaml file with artifact links. it can be used to start training directly from artifacts """ + upload_dataset = self.wandb_run.config.upload_dataset + log_val_only = isinstance(upload_dataset, str) and upload_dataset == 'val' self.data_dict = check_dataset(data_file) # parse and check data = dict(self.data_dict) nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names']) names = {k: v for k, v in enumerate(names)} # to index dictionary - self.train_artifact = self.create_dataset_table(LoadImagesAndLabels( - data['train'], rect=True, batch_size=1), names, name='train') if data.get('train') else None + + # log train set + if not log_val_only: + self.train_artifact = self.create_dataset_table(LoadImagesAndLabels( + data['train'], rect=True, batch_size=1), names, name='train') if data.get('train') else None + if data.get('train'): + data['train'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'train') + self.val_artifact = self.create_dataset_table(LoadImagesAndLabels( data['val'], rect=True, batch_size=1), names, name='val') if data.get('val') else None - if data.get('train'): - data['train'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'train') if data.get('val'): data['val'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'val') - path = Path(data_file).stem - path = (path if overwrite_config else path + '_wandb') + '.yaml' # updated data.yaml path - data.pop('download', None) - data.pop('path', None) - with open(path, 'w') as f: - yaml.safe_dump(data, f) + + path = Path(data_file) + # create a _wandb.yaml file with artifacts links if both train and test set are logged + if not log_val_only: + path = (path.stem if overwrite_config else path.stem + '_wandb') + '.yaml' # updated data.yaml path + path = Path('data') / path + data.pop('download', None) + data.pop('path', None) + with open(path, 'w') as f: + yaml.safe_dump(data, f) + LOGGER.info(f"Created dataset config file {path}") if self.job_type == 'Training': # builds
correct artifact pipeline graph + if not log_val_only: + self.wandb_run.log_artifact( + self.train_artifact) # calling use_artifact downloads the dataset. NOT NEEDED! self.wandb_run.use_artifact(self.val_artifact) - self.wandb_run.use_artifact(self.train_artifact) self.val_artifact.wait() self.val_table = self.val_artifact.get('val') self.map_val_table_path() @@ -371,7 +385,7 @@ def map_val_table_path(self): for i, data in enumerate(tqdm(self.val_table.data)): self.val_table_path_map[data[3]] = data[0] - def create_dataset_table(self, dataset: LoadImagesAndLabels, class_to_id: Dict[int,str], name: str = 'dataset'): + def create_dataset_table(self, dataset: LoadImagesAndLabels, class_to_id: Dict[int, str], name: str = 'dataset'): """ Create and return W&B artifact containing W&B Table of the dataset. @@ -424,23 +438,34 @@ def log_training_progress(self, predn, path, names): """ class_set = wandb.Classes([{'id': id, 'name': name} for id, name in names.items()]) box_data = [] - total_conf = 0 + avg_conf_per_class = [0] * len(self.data_dict['names']) + pred_class_count = {} for *xyxy, conf, cls in predn.tolist(): if conf >= 0.25: + cls = int(cls) box_data.append( {"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, - "class_id": int(cls), + "class_id": cls, "box_caption": f"{names[cls]} {conf:.3f}", "scores": {"class_score": conf}, "domain": "pixel"}) - total_conf += conf + avg_conf_per_class[cls] += conf + + if cls in pred_class_count: + pred_class_count[cls] += 1 + else: + pred_class_count[cls] = 1 + + for pred_class in pred_class_count.keys(): + avg_conf_per_class[pred_class] = avg_conf_per_class[pred_class] / pred_class_count[pred_class] + boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space id = self.val_table_path_map[Path(path).name] self.result_table.add_data(self.current_epoch, id, self.val_table.data[id][1], wandb.Image(self.val_table.data[id][1], boxes=boxes, classes=class_set), - total_conf / max(1, len(box_data)) + *avg_conf_per_class ) def val_one_image(self, pred, predn, path, names, im): @@ -490,7 +515,8 @@ def end_epoch(self, best_result=False): try: wandb.log(self.log_dict) except BaseException as e: - LOGGER.info(f"An error occurred in wandb logger. The training will proceed without interruption. More info\n{e}") + LOGGER.info( + f"An error occurred in wandb logger. The training will proceed without interruption. More info\n{e}") self.wandb_run.finish() self.wandb_run = None @@ -502,7 +528,9 @@ def end_epoch(self, best_result=False): ('best' if best_result else '')]) wandb.log({"evaluation": self.result_table}) - self.result_table = wandb.Table(["epoch", "id", "ground truth", "prediction", "avg_confidence"]) + columns = ["epoch", "id", "ground truth", "prediction"] + columns.extend(self.data_dict['names']) + self.result_table = wandb.Table(columns) self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") def finish_run(self): From 53349dac8e9fb447bb43319811699bb72d1c2470 Mon Sep 17 00:00:00 2001 From: Phil2020 <35833843+phodgers@users.noreply.github.com> Date: Thu, 25 Nov 2021 16:54:00 +0000 Subject: [PATCH 0738/1976] Scope TF imports in `DetectMultiBackend()` (#5792) * tensorflow or tflite exclusively as interpreter As per bug report https://github.com/ultralytics/yolov5/issues/5709 I think there should be only one attempt to assign interpreter, and it appears tflite is only ever needed for the case of edgetpu model. 
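The resulting pattern is a scoped (lazy) import: each backend branch imports the TF stack it needs only when that branch actually runs, so a TFLite-only environment never pays for, or fails on, a full `tensorflow` import. A simplified standalone sketch of the pattern, assuming the relevant packages are installed and omitting the Edge TPU delegate setup the real code performs:

```python
def load_interpreter(w):
    # Import the TF stack only inside the branch that needs it (scoped imports)
    if 'edgetpu' in w.lower():
        import tflite_runtime.interpreter as tfli  # lightweight runtime for Edge TPU
        return tfli.Interpreter(model_path=w)
    else:
        import tensorflow as tf                    # full TF only when actually required
        return tf.lite.Interpreter(model_path=w)
```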
* Scope imports * Nested definition line fix * Update common.py Co-authored-by: Glenn Jocher --- models/common.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/models/common.py b/models/common.py index 8836a655986a..284f03e6de20 100644 --- a/models/common.py +++ b/models/common.py @@ -337,19 +337,21 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=True): context = model.create_execution_context() batch_size = bindings['images'].shape[0] else: # TensorFlow model (TFLite, pb, saved_model) - import tensorflow as tf if pb: # https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt + LOGGER.info(f'Loading {w} for TensorFlow *.pb inference...') + import tensorflow as tf + def wrap_frozen_graph(gd, inputs, outputs): x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=""), []) # wrapped return x.prune(tf.nest.map_structure(x.graph.as_graph_element, inputs), tf.nest.map_structure(x.graph.as_graph_element, outputs)) - LOGGER.info(f'Loading {w} for TensorFlow *.pb inference...') graph_def = tf.Graph().as_graph_def() graph_def.ParseFromString(open(w, 'rb').read()) frozen_func = wrap_frozen_graph(gd=graph_def, inputs="x:0", outputs="Identity:0") elif saved_model: LOGGER.info(f'Loading {w} for TensorFlow saved_model inference...') + import tensorflow as tf model = tf.keras.models.load_model(w) elif tflite: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python if 'edgetpu' in w.lower(): @@ -361,6 +363,7 @@ def wrap_frozen_graph(gd, inputs, outputs): interpreter = tfli.Interpreter(model_path=w, experimental_delegates=[tfli.load_delegate(delegate)]) else: LOGGER.info(f'Loading {w} for TensorFlow Lite inference...') + import tensorflow as tf interpreter = tf.lite.Interpreter(model_path=w) # load TFLite model interpreter.allocate_tensors() # allocate input_details = interpreter.get_input_details() # inputs From 7c6bae0ae6711b470ace2587ff7cf313a90cfed0 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 26 Nov 2021 13:37:28 +0100 Subject: [PATCH 0739/1976] Remove NCOLS from tqdm (#5804) * Remove NCOLS from tqdm * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- train.py | 10 +++++----- utils/general.py | 2 +- val.py | 4 ++-- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/train.py b/train.py index 9a5f402c3501..8cb68fc0748e 100644 --- a/train.py +++ b/train.py @@ -39,10 +39,10 @@ from utils.callbacks import Callbacks from utils.datasets import create_dataloader from utils.downloads import attempt_download -from utils.general import (LOGGER, NCOLS, check_dataset, check_file, check_git_status, check_img_size, - check_requirements, check_suffix, check_yaml, colorstr, get_latest_run, increment_path, - init_seeds, intersect_dicts, labels_to_class_weights, labels_to_image_weights, methods, - one_cycle, print_args, print_mutation, strip_optimizer) +from utils.general import (LOGGER, check_dataset, check_file, check_git_status, check_img_size, check_requirements, + check_suffix, check_yaml, colorstr, get_latest_run, increment_path, init_seeds, + intersect_dicts, labels_to_class_weights, labels_to_image_weights, methods, one_cycle, + print_args, print_mutation, strip_optimizer) from utils.loggers import Loggers from utils.loggers.wandb.wandb_utils import check_wandb_resume from utils.loss import ComputeLoss @@ -289,7 +289,7 @@ def train(hyp, # 
path/to/hyp.yaml or hyp dictionary pbar = enumerate(train_loader) LOGGER.info(('\n' + '%10s' * 7) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'labels', 'img_size')) if RANK in [-1, 0]: - pbar = tqdm(pbar, total=nb, ncols=NCOLS, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar + pbar = tqdm(pbar, total=nb, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar optimizer.zero_grad() for i, (imgs, targets, paths, _) in pbar: # batch ------------------------------------------------------------- ni = i + nb * epoch # number integrated batches (since train start) diff --git a/utils/general.py b/utils/general.py index 5a2bcc3660f6..8aa76fbdb6ad 100755 --- a/utils/general.py +++ b/utils/general.py @@ -838,4 +838,4 @@ def increment_path(path, exist_ok=False, sep='', mkdir=False): # Variables -NCOLS = 0 if is_docker() else shutil.get_terminal_size().columns # terminal window size +NCOLS = 0 if is_docker() else shutil.get_terminal_size().columns # terminal window size for tqdm diff --git a/val.py b/val.py index 64a7e4dffeb0..165cab1d6259 100644 --- a/val.py +++ b/val.py @@ -26,7 +26,7 @@ from models.common import DetectMultiBackend from utils.callbacks import Callbacks from utils.datasets import create_dataloader -from utils.general import (LOGGER, NCOLS, box_iou, check_dataset, check_img_size, check_requirements, check_yaml, +from utils.general import (LOGGER, box_iou, check_dataset, check_img_size, check_requirements, check_yaml, coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, print_args, scale_coords, xywh2xyxy, xyxy2xywh) from utils.metrics import ConfusionMatrix, ap_per_class @@ -164,7 +164,7 @@ def run(data, dt, p, r, f1, mp, mr, map50, map = [0.0, 0.0, 0.0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 loss = torch.zeros(3, device=device) jdict, stats, ap, ap_class = [], [], [], [] - pbar = tqdm(dataloader, desc=s, ncols=NCOLS, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar + pbar = tqdm(dataloader, desc=s, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar for batch_i, (im, targets, paths, shapes) in enumerate(pbar): t1 = time_sync() if pt or engine: From fcd180d33697848ea7acb96d7485c58110704d5e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 27 Nov 2021 12:29:45 +0100 Subject: [PATCH 0740/1976] Refactor new `model.warmup()` method (#5810) * Refactor new `model.warmup()` method * Add half --- detect.py | 3 +-- models/common.py | 7 +++++++ val.py | 3 +-- 3 files changed, 9 insertions(+), 4 deletions(-) diff --git a/detect.py b/detect.py index 29904f310200..ecf868b5eaf4 100644 --- a/detect.py +++ b/detect.py @@ -97,8 +97,7 @@ def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s) vid_path, vid_writer = [None] * bs, [None] * bs # Run inference - if pt and device.type != 'cpu': - model(torch.zeros(1, 3, *imgsz).to(device).type_as(next(model.model.parameters()))) # warmup + model.warmup(imgsz=(1, 3, *imgsz), half=half) # warmup dt, seen = [0.0, 0.0, 0.0], 0 for path, im, im0s, vid_cap, s in dataset: t1 = time_sync() diff --git a/models/common.py b/models/common.py index 284f03e6de20..72549809c8c3 100644 --- a/models/common.py +++ b/models/common.py @@ -421,6 +421,13 @@ def forward(self, im, augment=False, visualize=False, val=False): y = torch.tensor(y) if isinstance(y, np.ndarray) else y return (y, []) if val else y + def warmup(self, imgsz=(1, 3, 640, 640), half=False): + # Warmup model by running inference once + if self.pt or self.engine or self.onnx: # warmup types + if isinstance(self.device, torch.device) and self.device.type 
!= 'cpu': # only warmup GPU models + im = torch.zeros(*imgsz).to(self.device).type(torch.half if half else torch.float) # input image + self.forward(im) # warmup + class AutoShape(nn.Module): # YOLOv5 input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS diff --git a/val.py b/val.py index 165cab1d6259..bd0ce9a7861d 100644 --- a/val.py +++ b/val.py @@ -149,8 +149,7 @@ def run(data, # Dataloader if not training: - if pt and device.type != 'cpu': - model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.model.parameters()))) # warmup + model.warmup(imgsz=(1, 3, imgsz, imgsz), half=half) # warmup pad = 0.0 if task == 'speed' else 0.5 task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images dataloader = create_dataloader(data[task], imgsz, batch_size, stride, single_cls, pad=pad, rect=pt, From 94d8fec6d846313fed5530f9d18d2f93f89e9e97 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 28 Nov 2021 18:12:46 +0100 Subject: [PATCH 0741/1976] GCP VM from Image example (#5814) --- Dockerfile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Dockerfile b/Dockerfile index fe1acb0a6540..9a55005a95c5 100644 --- a/Dockerfile +++ b/Dockerfile @@ -59,3 +59,6 @@ ADD https://ultralytics.com/assets/Arial.ttf /root/.config/Ultralytics/ # DDP test # python -m torch.distributed.run --nproc_per_node 2 --master_port 1 train.py --epochs 3 + +# GCP VM from Image +# docker.io/ultralytics/yolov5:latest From 8277033b65fe81d2b48178b335b2d91bcb41a98b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 29 Nov 2021 12:04:36 +0100 Subject: [PATCH 0742/1976] Bump actions/cache from 2.1.6 to 2.1.7 (#5816) Bumps [actions/cache](https://github.com/actions/cache) from 2.1.6 to 2.1.7. - [Release notes](https://github.com/actions/cache/releases) - [Commits](https://github.com/actions/cache/compare/v2.1.6...v2.1.7) --- updated-dependencies: - dependency-name: actions/cache dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci-testing.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index b2bc040191e7..9085b2b7e6dd 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -39,7 +39,7 @@ jobs: python -c "from pip._internal.locations import USER_CACHE_DIR; print('::set-output name=dir::' + USER_CACHE_DIR)" - name: Cache pip - uses: actions/cache@v2.1.6 + uses: actions/cache@v2.1.7 with: path: ${{ steps.pip-cache.outputs.dir }} key: ${{ runner.os }}-${{ matrix.python-version }}-pip-${{ hashFiles('requirements.txt') }} From 5ca5dd4c87fcc62491173b393fd51cf244805313 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 29 Nov 2021 13:15:36 +0100 Subject: [PATCH 0743/1976] Update `dataset_stats()` to `cv2.INTER_AREA` (#5821) --- utils/datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/datasets.py b/utils/datasets.py index 68b1e634bebf..ac81603c7d34 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -974,7 +974,7 @@ def hub_ops(f, max_dim=1920): im_height, im_width = im.shape[:2] r = max_dim / max(im_height, im_width) # ratio if r < 1.0: # image too large - im = cv2.resize(im, (int(im_width * r), int(im_height * r)), interpolation=cv2.INTER_LINEAR) + im = cv2.resize(im, (int(im_width * r), int(im_height * r)), interpolation=cv2.INTER_AREA) cv2.imwrite(str(f_new), im) zipped, data_dir, yaml_path = unzip(Path(path)) From a4207a202d6801df4586a8e044f60c496d94aeb4 Mon Sep 17 00:00:00 2001 From: imyhxy Date: Tue, 30 Nov 2021 20:52:22 +0800 Subject: [PATCH 0744/1976] Fix TensorRT potential unordered binding addresses (#5826) * feat: change file suffix in pythonic way * fix: enforce binding addresses order * fix: enforce binding addresses order --- export.py | 3 ++- models/common.py | 6 +++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/export.py b/export.py index 9d6d04967c80..b2f42142e16c 100644 --- a/export.py +++ b/export.py @@ -276,7 +276,7 @@ def export_engine(model, im, file, train, half, simplify, workspace=4, verbose=F assert onnx.exists(), f'failed to export ONNX file: {onnx}' LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...') - f = str(file).replace('.pt', '.engine') # TensorRT engine file + f = file.with_suffix('.engine') # TensorRT engine file logger = trt.Logger(trt.Logger.INFO) if verbose: logger.min_severity = trt.Logger.Severity.VERBOSE @@ -310,6 +310,7 @@ def export_engine(model, im, file, train, half, simplify, workspace=4, verbose=F except Exception as e: LOGGER.info(f'\n{prefix} export failure: {e}') + @torch.no_grad() def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' weights=ROOT / 'yolov5s.pt', # weights path diff --git a/models/common.py b/models/common.py index 72549809c8c3..cbd4ff479885 100644 --- a/models/common.py +++ b/models/common.py @@ -7,7 +7,7 @@ import math import platform import warnings -from collections import namedtuple +from collections import OrderedDict, namedtuple from copy import copy from pathlib import Path @@ -326,14 +326,14 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=True): logger = trt.Logger(trt.Logger.INFO) with open(w, 'rb') as f, trt.Runtime(logger) as runtime: model = runtime.deserialize_cuda_engine(f.read()) - bindings = dict() + bindings = OrderedDict() for index in range(model.num_bindings): name = 
model.get_binding_name(index) dtype = trt.nptype(model.get_binding_dtype(index)) shape = tuple(model.get_binding_shape(index)) data = torch.from_numpy(np.empty(shape, dtype=np.dtype(dtype))).to(device) bindings[name] = Binding(name, dtype, shape, data, int(data.data_ptr())) - binding_addrs = {n: d.ptr for n, d in bindings.items()} + binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items()) context = model.create_execution_context() batch_size = bindings['images'].shape[0] else: # TensorFlow model (TFLite, pb, saved_model) From bc484579d7be481ffb5fba95020c515afd89be9b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 1 Dec 2021 15:38:02 +0100 Subject: [PATCH 0745/1976] Handle non-TTY `wandb.errors.UsageError` (#5839) * `try: except (..., wandb.errors.UsageError)` * bug fix --- utils/loggers/__init__.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index ae2d98bdc36d..2a68d9785071 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -24,7 +24,10 @@ assert hasattr(wandb, '__version__') # verify package import not local dir if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.2') and RANK in [0, -1]: - wandb_login_success = wandb.login(timeout=30) + try: + wandb_login_success = wandb.login(timeout=30) + except wandb.errors.UsageError: # known non-TTY terminal issue + wandb_login_success = False if not wandb_login_success: wandb = None except (ImportError, AssertionError): From e8f8f2b9039e7879262675c56d21148398bf9aae Mon Sep 17 00:00:00 2001 From: Yu Zhang Date: Thu, 2 Dec 2021 17:51:19 +0800 Subject: [PATCH 0746/1976] Avoid inplace modifying `imgs` in `LoadStreams` (#5850) When OpenCV fails to retrieve an image, the original code would modify the source images **in place**, which may result in plotting bounding boxes on a black image. That is, before inference `im0s[i]` is OK, but after inference and before `Process predictions`, `im0s[i]` may have been changed. --- utils/datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/datasets.py b/utils/datasets.py index ac81603c7d34..f3abfb1f6f90 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -337,7 +337,7 @@ def update(self, i, cap, stream): self.imgs[i] = im else: LOGGER.warning('WARNING: Video stream unresponsive, please check your IP camera connection.') - self.imgs[i] *= 0 + self.imgs[i] = np.zeros_like(self.imgs[i]) cap.open(stream) # re-open stream if signal was lost time.sleep(1 / self.fps[i]) # wait time From 1679aacdc7f08b55df0ebf985688e01ec1f2d9b6 Mon Sep 17 00:00:00 2001 From: gggmt <1065504814@qq.com> Date: Thu, 2 Dec 2021 17:57:39 +0800 Subject: [PATCH 0747/1976] Update `LoadImages` `ret_val=False` handling (#5852) Video errors may occur.
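The one-word change below (`if` to `while`) lets the loader keep advancing past unreadable frames and roll over to the next file instead of returning a dead frame. A condensed, self-contained sketch of the resulting control flow (the function itself is illustrative; variable names follow the repo):

```python
import cv2

def read_next(files, cap, count):
    ret_val, img0 = cap.read()
    while not ret_val:               # skip failed reads instead of stopping at the first one
        count += 1
        cap.release()
        if count == len(files):      # last video exhausted
            raise StopIteration
        cap = cv2.VideoCapture(files[count])
        ret_val, img0 = cap.read()
    return img0, cap, count
```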
--- utils/datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/datasets.py b/utils/datasets.py index f3abfb1f6f90..6ce7a81b69e7 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -200,7 +200,7 @@ def __next__(self): # Read video self.mode = 'video' ret_val, img0 = self.cap.read() - if not ret_val: + while not ret_val: self.count += 1 self.cap.release() if self.count == self.nf: # last video From 30db14fea8646aa3cbd2381b72e1bd45731e1d24 Mon Sep 17 00:00:00 2001 From: Vishnu Pradeep <61411495+pradeep-vishnu@users.noreply.github.com> Date: Thu, 2 Dec 2021 13:20:58 +0100 Subject: [PATCH 0748/1976] Update val.py (#5838) * Update val.py Solving Non-ASCII character '\xf0' error during runtime * Update val.py Co-authored-by: Glenn Jocher From 00e308f7be3b4152fa8c90efc38d8df3a9f0d4c2 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 2 Dec 2021 16:06:45 +0100 Subject: [PATCH 0749/1976] Update TorchScript suffix to `*.torchscript` (#5856) --- detect.py | 8 ++++---- export.py | 6 +++--- models/common.py | 12 ++++++------ utils/activations.py | 4 ++-- val.py | 10 +++++----- 5 files changed, 20 insertions(+), 20 deletions(-) diff --git a/detect.py b/detect.py index ecf868b5eaf4..0b6875e5564c 100644 --- a/detect.py +++ b/detect.py @@ -81,18 +81,18 @@ def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s) imgsz = check_img_size(imgsz, s=stride) # check image size # Half - half &= (pt or engine) and device.type != 'cpu' # half precision only supported by PyTorch on CUDA - if pt: + half &= (pt or jit or engine) and device.type != 'cpu' # half precision only supported by PyTorch on CUDA + if pt or jit: model.model.half() if half else model.model.float() # Dataloader if webcam: view_img = check_imshow() cudnn.benchmark = True # set True to speed up constant image size inference - dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt and not jit) + dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt) bs = len(dataset) # batch_size else: - dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt and not jit) + dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt) bs = 1 # batch_size vid_path, vid_writer = [None] * bs, [None] * bs diff --git a/export.py b/export.py index b2f42142e16c..437616a9890d 100644 --- a/export.py +++ b/export.py @@ -5,7 +5,7 @@ Format | Example | Export `include=(...)` argument --- | --- | --- PyTorch | yolov5s.pt | - -TorchScript | yolov5s.torchscript.pt | 'torchscript' +TorchScript | yolov5s.torchscript | 'torchscript' ONNX | yolov5s.onnx | 'onnx' CoreML | yolov5s.mlmodel | 'coreml' TensorFlow SavedModel | yolov5s_saved_model/ | 'saved_model' @@ -19,7 +19,7 @@ Inference: $ python path/to/detect.py --weights yolov5s.pt - yolov5s.torchscript.pt + yolov5s.torchscript yolov5s.onnx yolov5s.mlmodel (under development) yolov5s_saved_model @@ -66,7 +66,7 @@ def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:' # YOLOv5 TorchScript model export try: LOGGER.info(f'\n{prefix} starting export with torch {torch.__version__}...') - f = file.with_suffix('.torchscript.pt') + f = file.with_suffix('.torchscript') ts = torch.jit.trace(model, im, strict=False) d = {"shape": im.shape, "stride": int(max(model.stride)), "names": model.names} diff --git a/models/common.py b/models/common.py index cbd4ff479885..73f21729fa85 100644 --- a/models/common.py +++ b/models/common.py @@ -279,7 +279,7 @@ class DetectMultiBackend(nn.Module): def __init__(self, weights='yolov5s.pt', device=None, 
dnn=True): # Usage: # PyTorch: weights = *.pt - # TorchScript: *.torchscript.pt + # TorchScript: *.torchscript # CoreML: *.mlmodel # TensorFlow: *_saved_model # TensorFlow: *.pb @@ -289,10 +289,10 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=True): # TensorRT: *.engine super().__init__() w = str(weights[0] if isinstance(weights, list) else weights) - suffix, suffixes = Path(w).suffix.lower(), ['.pt', '.onnx', '.engine', '.tflite', '.pb', '', '.mlmodel'] + suffix = Path(w).suffix.lower() + suffixes = ['.pt', '.torchscript', '.onnx', '.engine', '.tflite', '.pb', '', '.mlmodel'] check_suffix(w, suffixes) # check weights have acceptable suffix - pt, onnx, engine, tflite, pb, saved_model, coreml = (suffix == x for x in suffixes) # backend booleans - jit = pt and 'torchscript' in w.lower() + pt, jit, onnx, engine, tflite, pb, saved_model, coreml = (suffix == x for x in suffixes) # backend booleans stride, names = 64, [f'class{i}' for i in range(1000)] # assign defaults if jit: # TorchScript @@ -304,10 +304,10 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=True): stride, names = int(d['stride']), d['names'] elif pt: # PyTorch from models.experimental import attempt_load # scoped to avoid circular import - model = torch.jit.load(w) if 'torchscript' in w else attempt_load(weights, map_location=device) + model = attempt_load(weights, map_location=device) stride = int(model.stride.max()) # model stride names = model.module.names if hasattr(model, 'module') else model.names # get class names - elif coreml: # CoreML *.mlmodel + elif coreml: # CoreML import coremltools as ct model = ct.models.MLModel(w) elif dnn: # ONNX OpenCV DNN diff --git a/utils/activations.py b/utils/activations.py index 4c7d46c32104..a4ff789cf336 100644 --- a/utils/activations.py +++ b/utils/activations.py @@ -18,8 +18,8 @@ def forward(x): class Hardswish(nn.Module): # export-friendly version of nn.Hardswish() @staticmethod def forward(x): - # return x * F.hardsigmoid(x) # for torchscript and CoreML - return x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0 # for torchscript, CoreML and ONNX + # return x * F.hardsigmoid(x) # for TorchScript and CoreML + return x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0 # for TorchScript, CoreML and ONNX # Mish https://github.com/digantamisra98/Mish -------------------------------------------------------------------------- diff --git a/val.py b/val.py index bd0ce9a7861d..27edd158a2f6 100644 --- a/val.py +++ b/val.py @@ -111,7 +111,7 @@ def run(data, # Initialize/load model and set device training = model is not None if training: # called by train.py - device, pt, engine = next(model.parameters()).device, True, False # get model device, PyTorch model + device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model half &= device.type != 'cpu' # half precision only supported on CUDA model.half() if half else model.float() @@ -124,10 +124,10 @@ def run(data, # Load model model = DetectMultiBackend(weights, device=device, dnn=dnn) - stride, pt, engine = model.stride, model.pt, model.engine + stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine imgsz = check_img_size(imgsz, s=stride) # check image size - half &= (pt or engine) and device.type != 'cpu' # half precision only supported by PyTorch on CUDA - if pt: + half &= (pt or jit or engine) and device.type != 'cpu' # half precision only supported by PyTorch on CUDA + if pt or jit: model.model.half() if half else model.model.float() elif engine: batch_size = 
model.batch_size @@ -166,7 +166,7 @@ def run(data, pbar = tqdm(dataloader, desc=s, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar for batch_i, (im, targets, paths, shapes) in enumerate(pbar): t1 = time_sync() - if pt or engine: + if pt or jit or engine: im = im.to(device, non_blocking=True) targets = targets.to(device) im = im.half() if half else im.float() # uint8 to fp16/32 From 92a7391039110b93e0028eeda5e370f2ec5a2f74 Mon Sep 17 00:00:00 2001 From: iumyx2612 <69593462+iumyx2612@users.noreply.github.com> Date: Thu, 2 Dec 2021 22:49:50 +0700 Subject: [PATCH 0750/1976] Add `--workers 8` argument to val.py (#5857) * Update val.py Add an option to choose number of workers if not called by train.py * Update comment * 120 char line width Co-authored-by: Glenn Jocher --- val.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/val.py b/val.py index 27edd158a2f6..4eec499d3029 100644 --- a/val.py +++ b/val.py @@ -89,6 +89,7 @@ def run(data, iou_thres=0.6, # NMS IoU threshold task='val', # train, val, test, speed or study device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu + workers=8, # max dataloader workers (per RANK in DDP mode) single_cls=False, # treat as single-class dataset augment=False, # augmented inference verbose=False, # verbose output @@ -153,7 +154,7 @@ def run(data, pad = 0.0 if task == 'speed' else 0.5 task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images dataloader = create_dataloader(data[task], imgsz, batch_size, stride, single_cls, pad=pad, rect=pt, - prefix=colorstr(f'{task}: '))[0] + workers=workers, prefix=colorstr(f'{task}: '))[0] seen = 0 confusion_matrix = ConfusionMatrix(nc=nc) @@ -312,6 +313,7 @@ def parse_opt(): parser.add_argument('--iou-thres', type=float, default=0.6, help='NMS IoU threshold') parser.add_argument('--task', default='val', help='train, val, test, speed or study') parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') + parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset') parser.add_argument('--augment', action='store_true', help='augmented inference') parser.add_argument('--verbose', action='store_true', help='report mAP by class') From 360eec69101ad9ffada78326b715b724b9b7eb0d Mon Sep 17 00:00:00 2001 From: Can Date: Fri, 3 Dec 2021 20:37:45 +0800 Subject: [PATCH 0751/1976] Update `plot_lr_scheduler()` (#5864) shallow copy modify originals --- utils/plots.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/plots.py b/utils/plots.py index 69037ee9af70..5742d050fdf5 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -5,7 +5,7 @@ import math import os -from copy import copy +from copy import deepcopy from pathlib import Path import cv2 @@ -243,7 +243,7 @@ def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''): # Plot LR simulating training for full epochs - optimizer, scheduler = copy(optimizer), copy(scheduler) # do not modify originals + optimizer, scheduler = deepcopy(optimizer), deepcopy(scheduler) # do not modify originals y = [] for _ in range(epochs): scheduler.step() From d885799c713e578082704c103c3a0b3796f7d10a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 3 Dec 2021 15:28:14 +0100 Subject: [PATCH 0752/1976] Update `nl` after `cutout()` (#5873) --- utils/datasets.py | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/datasets.py b/utils/datasets.py index 6ce7a81b69e7..5a3b2110b2e0 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -611,6 +611,7 @@ def __getitem__(self, index): # Cutouts # labels = cutout(img, labels, p=0.5) + # nl = len(labels) # update after cutout labels_out = torch.zeros((nl, 6)) if nl: From 7bf04d9bbfffa6d88b018e11f431b971db2a7034 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 4 Dec 2021 15:00:07 +0100 Subject: [PATCH 0753/1976] `AutoShape()` models as `DetectMultiBackend()` instances (#5845) * Update AutoShape() * autodownload ONNX * Cleanup * Finish updates * Add Usage * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * Update * fix device * Update hubconf.py * Update common.py * smart param selection * autodownload all formats * autopad only pytorch models * new_shape edits * stride tensor fix * Cleanup --- export.py | 2 +- hubconf.py | 14 +++++++------- models/common.py | 40 ++++++++++++++++++++++++---------------- utils/general.py | 4 +++- 4 files changed, 35 insertions(+), 25 deletions(-) diff --git a/export.py b/export.py index 437616a9890d..21c83c697b4d 100644 --- a/export.py +++ b/export.py @@ -411,7 +411,7 @@ def parse_opt(): parser.add_argument('--int8', action='store_true', help='CoreML/TF INT8 quantization') parser.add_argument('--dynamic', action='store_true', help='ONNX/TF: dynamic axes') parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model') - parser.add_argument('--opset', type=int, default=13, help='ONNX: opset version') + parser.add_argument('--opset', type=int, default=14, help='ONNX: opset version') parser.add_argument('--verbose', action='store_true', help='TensorRT: verbose log') parser.add_argument('--workspace', type=int, default=4, help='TensorRT: workspace size (GB)') parser.add_argument('--topk-per-class', 
type=int, default=100, help='TF.js NMS: topk per class to keep') diff --git a/hubconf.py b/hubconf.py index 03335f7906f0..e407677b3233 100644 --- a/hubconf.py +++ b/hubconf.py @@ -5,6 +5,7 @@ Usage: import torch model = torch.hub.load('ultralytics/yolov5', 'yolov5s') + model = torch.hub.load('ultralytics/yolov5:master', 'custom', 'path/to/yolov5s.onnx') # file from branch """ import torch @@ -27,26 +28,25 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo """ from pathlib import Path - from models.common import AutoShape - from models.experimental import attempt_load + from models.common import AutoShape, DetectMultiBackend from models.yolo import Model from utils.downloads import attempt_download from utils.general import check_requirements, intersect_dicts, set_logging from utils.torch_utils import select_device - file = Path(__file__).resolve() check_requirements(exclude=('tensorboard', 'thop', 'opencv-python')) set_logging(verbose=verbose) - save_dir = Path('') if str(name).endswith('.pt') else file.parent - path = (save_dir / name).with_suffix('.pt') # checkpoint path + name = Path(name) + path = name.with_suffix('.pt') if name.suffix == '' else name # checkpoint path try: device = select_device(('0' if torch.cuda.is_available() else 'cpu') if device is None else device) if pretrained and channels == 3 and classes == 80: - model = attempt_load(path, map_location=device) # download/load FP32 model + model = DetectMultiBackend(path, device=device) # download/load FP32 model + # model = models.experimental.attempt_load(path, map_location=device) # download/load FP32 model else: - cfg = list((Path(__file__).parent / 'models').rglob(f'{name}.yaml'))[0] # model.yaml path + cfg = list((Path(__file__).parent / 'models').rglob(f'{path.name}.yaml'))[0] # model.yaml path model = Model(cfg, channels, classes) # create model if pretrained: ckpt = torch.load(attempt_download(path), map_location=device) # load diff --git a/models/common.py b/models/common.py index 73f21729fa85..6a5303ba8c42 100644 --- a/models/common.py +++ b/models/common.py @@ -276,7 +276,7 @@ def forward(self, x): class DetectMultiBackend(nn.Module): # YOLOv5 MultiBackend class for python inference on various backends - def __init__(self, weights='yolov5s.pt', device=None, dnn=True): + def __init__(self, weights='yolov5s.pt', device=None, dnn=False): # Usage: # PyTorch: weights = *.pt # TorchScript: *.torchscript @@ -287,6 +287,8 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=True): # ONNX Runtime: *.onnx # OpenCV DNN: *.onnx with dnn=True # TensorRT: *.engine + from models.experimental import attempt_download, attempt_load # scoped to avoid circular import + super().__init__() w = str(weights[0] if isinstance(weights, list) else weights) suffix = Path(w).suffix.lower() @@ -294,6 +296,7 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=True): check_suffix(w, suffixes) # check weights have acceptable suffix pt, jit, onnx, engine, tflite, pb, saved_model, coreml = (suffix == x for x in suffixes) # backend booleans stride, names = 64, [f'class{i}' for i in range(1000)] # assign defaults + attempt_download(w) # download if not local if jit: # TorchScript LOGGER.info(f'Loading {w} for TorchScript inference...') @@ -303,11 +306,12 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=True): d = json.loads(extra_files['config.txt']) # extra_files dict stride, names = int(d['stride']), d['names'] elif pt: # PyTorch - from models.experimental import attempt_load # scoped to 
avoid circular import model = attempt_load(weights, map_location=device) stride = int(model.stride.max()) # model stride names = model.module.names if hasattr(model, 'module') else model.names # get class names + self.model = model # explicitly assign for to(), cpu(), cuda(), half() elif coreml: # CoreML + LOGGER.info(f'Loading {w} for CoreML inference...') import coremltools as ct model = ct.models.MLModel(w) elif dnn: # ONNX OpenCV DNN @@ -316,7 +320,7 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=True): net = cv2.dnn.readNetFromONNX(w) elif onnx: # ONNX Runtime LOGGER.info(f'Loading {w} for ONNX Runtime inference...') - check_requirements(('onnx', 'onnxruntime-gpu' if torch.has_cuda else 'onnxruntime')) + check_requirements(('onnx', 'onnxruntime-gpu' if torch.cuda.is_available() else 'onnxruntime')) import onnxruntime session = onnxruntime.InferenceSession(w, None) elif engine: # TensorRT @@ -376,7 +380,7 @@ def forward(self, im, augment=False, visualize=False, val=False): if self.pt: # PyTorch y = self.model(im) if self.jit else self.model(im, augment=augment, visualize=visualize) return y if val else y[0] - elif self.coreml: # CoreML *.mlmodel + elif self.coreml: # CoreML im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3) im = Image.fromarray((im[0] * 255).astype('uint8')) # im = im.resize((192, 320), Image.ANTIALIAS) @@ -433,24 +437,28 @@ class AutoShape(nn.Module): # YOLOv5 input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS conf = 0.25 # NMS confidence threshold iou = 0.45 # NMS IoU threshold - classes = None # (optional list) filter by class, i.e. = [0, 15, 16] for COCO persons, cats and dogs + agnostic = False # NMS class-agnostic multi_label = False # NMS multiple labels per box + classes = None # (optional list) filter by class, i.e. = [0, 15, 16] for COCO persons, cats and dogs max_det = 1000 # maximum number of detections per image def __init__(self, model): super().__init__() LOGGER.info('Adding AutoShape... ') copy_attr(self, model, include=('yaml', 'nc', 'hyp', 'names', 'stride', 'abc'), exclude=()) # copy attributes + self.dmb = isinstance(model, DetectMultiBackend) # DetectMultiBackend() instance + self.pt = not self.dmb or model.pt # PyTorch model self.model = model.eval() def _apply(self, fn): # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers self = super()._apply(fn) - m = self.model.model[-1] # Detect() - m.stride = fn(m.stride) - m.grid = list(map(fn, m.grid)) - if isinstance(m.anchor_grid, list): - m.anchor_grid = list(map(fn, m.anchor_grid)) + if self.pt: + m = self.model.model.model[-1] if self.dmb else self.model.model[-1] # Detect() + m.stride = fn(m.stride) + m.grid = list(map(fn, m.grid)) + if isinstance(m.anchor_grid, list): + m.anchor_grid = list(map(fn, m.anchor_grid)) return self @torch.no_grad() @@ -465,7 +473,7 @@ def forward(self, imgs, size=640, augment=False, profile=False): # multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] 
# list of images t = [time_sync()] - p = next(self.model.parameters()) # for device and type + p = next(self.model.parameters()) if self.pt else torch.zeros(1) # for device and type if isinstance(imgs, torch.Tensor): # torch with amp.autocast(enabled=p.device.type != 'cpu'): return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference @@ -489,8 +497,8 @@ def forward(self, imgs, size=640, augment=False, profile=False): g = (size / max(s)) # gain shape1.append([y * g for y in s]) imgs[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update - shape1 = [make_divisible(x, int(self.stride.max())) for x in np.stack(shape1, 0).max(0)] # inference shape - x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs] # pad + shape1 = [make_divisible(x, self.stride) for x in np.stack(shape1, 0).max(0)] # inference shape + x = [letterbox(im, new_shape=shape1 if self.pt else size, auto=False)[0] for im in imgs] # pad x = np.stack(x, 0) if n > 1 else x[0][None] # stack x = np.ascontiguousarray(x.transpose((0, 3, 1, 2))) # BHWC to BCHW x = torch.from_numpy(x).to(p.device).type_as(p) / 255 # uint8 to fp16/32 @@ -498,12 +506,12 @@ def forward(self, imgs, size=640, augment=False, profile=False): with amp.autocast(enabled=p.device.type != 'cpu'): # Inference - y = self.model(x, augment, profile)[0] # forward + y = self.model(x, augment, profile) # forward t.append(time_sync()) # Post-process - y = non_max_suppression(y, self.conf, iou_thres=self.iou, classes=self.classes, - multi_label=self.multi_label, max_det=self.max_det) # NMS + y = non_max_suppression(y if self.dmb else y[0], self.conf, iou_thres=self.iou, classes=self.classes, + agnostic=self.agnostic, multi_label=self.multi_label, max_det=self.max_det) # NMS for i in range(n): scale_coords(shape1, y[i][:, :4], shape0[i]) diff --git a/utils/general.py b/utils/general.py index 8aa76fbdb6ad..bbb9054a7235 100755 --- a/utils/general.py +++ b/utils/general.py @@ -455,7 +455,9 @@ def download_one(url, dir): def make_divisible(x, divisor): - # Returns x evenly divisible by divisor + # Returns nearest x divisible by divisor + if isinstance(divisor, torch.Tensor): + divisor = int(divisor.max()) # to int return math.ceil(x / divisor) * divisor From 1075488d893f2167737d89549c3f675b0713aa5a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 4 Dec 2021 16:28:40 +0100 Subject: [PATCH 0754/1976] Single-command multiple-model export (#5882) * Export multiple models in series Export multiple models in series by adding additional `*.pt` files to the `--weights` argument, i.e.: ```bash python export.py --include tflite --weights yolov5n.pt # export 1 model python export.py --include tflite --weights yolov5n.pt yolov5s.pt yolov5m.pt yolov5l.pt yolov5x.pt # export 5 models ``` * Update export.py * Update README.md --- README.md | 2 +- export.py | 23 ++++++++++++----------- 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index 6e72d85da7ee..3074330e5505 100644 --- a/README.md +++ b/README.md @@ -148,7 +148,7 @@ $ python train.py --data coco.yaml --cfg yolov5s.yaml --weights '' --batch-size * [Roboflow for Datasets, Labeling, and Active Learning](https://github.com/ultralytics/yolov5/issues/4975)  🌟 NEW * [Multi-GPU Training](https://github.com/ultralytics/yolov5/issues/475) * [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36)  ⭐ NEW -* [TorchScript, ONNX, CoreML Export](https://github.com/ultralytics/yolov5/issues/251) 🚀 +* [TFLite, ONNX, CoreML, TensorRT 
Export](https://github.com/ultralytics/yolov5/issues/251) 🚀 * [Test-Time Augmentation (TTA)](https://github.com/ultralytics/yolov5/issues/303) * [Model Ensembling](https://github.com/ultralytics/yolov5/issues/318) * [Model Pruning/Sparsity](https://github.com/ultralytics/yolov5/issues/304) diff --git a/export.py b/export.py index 21c83c697b4d..88d03a2c9475 100644 --- a/export.py +++ b/export.py @@ -2,17 +2,17 @@ """ Export a YOLOv5 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit -Format | Example | Export `include=(...)` argument +Format | Example | `--include ...` argument --- | --- | --- PyTorch | yolov5s.pt | - -TorchScript | yolov5s.torchscript | 'torchscript' -ONNX | yolov5s.onnx | 'onnx' -CoreML | yolov5s.mlmodel | 'coreml' -TensorFlow SavedModel | yolov5s_saved_model/ | 'saved_model' -TensorFlow GraphDef | yolov5s.pb | 'pb' -TensorFlow Lite | yolov5s.tflite | 'tflite' -TensorFlow.js | yolov5s_web_model/ | 'tfjs' -TensorRT | yolov5s.engine | 'engine' +TorchScript | yolov5s.torchscript | `torchscript` +ONNX | yolov5s.onnx | `onnx` +CoreML | yolov5s.mlmodel | `coreml` +TensorFlow SavedModel | yolov5s_saved_model/ | `saved_model` +TensorFlow GraphDef | yolov5s.pb | `pb` +TensorFlow Lite | yolov5s.tflite | `tflite` +TensorFlow.js | yolov5s_web_model/ | `tfjs` +TensorRT | yolov5s.engine | `engine` Usage: $ python path/to/export.py --weights yolov5s.pt --include torchscript onnx coreml saved_model pb tflite tfjs @@ -400,7 +400,7 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' def parse_opt(): parser = argparse.ArgumentParser() parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') - parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='weights path') + parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model.pt path(s)') parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640, 640], help='image (h, w)') parser.add_argument('--batch-size', type=int, default=1, help='batch size') parser.add_argument('--device', default='cpu', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') @@ -427,7 +427,8 @@ def parse_opt(): def main(opt): - run(**vars(opt)) + for opt.weights in (opt.weights if isinstance(opt.weights, list) else [opt.weights]): + run(**vars(opt)) if __name__ == "__main__": From fa05f8c97798b228b79a61ae5d8d5251bbe34758 Mon Sep 17 00:00:00 2001 From: Li Zeng Date: Tue, 7 Dec 2021 16:01:41 +0100 Subject: [PATCH 0755/1976] `Detections().tolist()` explicit argument fix (#5907) debugged for missigned Detections attributes --- models/common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/common.py b/models/common.py index 6a5303ba8c42..ec5fbfaec4ca 100644 --- a/models/common.py +++ b/models/common.py @@ -608,7 +608,7 @@ def pandas(self): def tolist(self): # return a list of Detections objects, i.e. 
'for result in results.tolist():' - x = [Detections([self.imgs[i]], [self.pred[i]], self.names, self.s) for i in range(self.n)] + x = [Detections([self.imgs[i]], [self.pred[i]], names=self.names, shape=self.s) for i in range(self.n)] for d in x: for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']: setattr(d, k, getattr(d, k)[0]) # pop out of list From 3f152e58074514b2531cb43fb57db380e085cd09 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Tue, 7 Dec 2021 20:39:11 +0530 Subject: [PATCH 0756/1976] Update wandb_utils.py (#5908) --- utils/loggers/wandb/wandb_utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index 2d6133ab94c5..221d3c88c56e 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -186,6 +186,7 @@ def __init__(self, opt, run_id=None, job_type='Training'): self.setup_training(opt) if self.job_type == 'Dataset Creation': + self.wandb_run.config.update({"upload_dataset": True}) self.data_dict = self.check_and_upload_dataset(opt) def check_and_upload_dataset(self, opt): From 554f782537b9af336c02c013468b78fe16ce092d Mon Sep 17 00:00:00 2001 From: greg2451 <51173502+greg2451@users.noreply.github.com> Date: Tue, 7 Dec 2021 18:20:16 +0100 Subject: [PATCH 0757/1976] Add *.engine (TensorRT extensions) to .gitignore (#5911) * Add *.engine (TensorRT extensions) to .gitignore * Update .dockerignore Co-authored-by: Glenn Jocher --- .dockerignore | 1 + .gitignore | 1 + 2 files changed, 2 insertions(+) diff --git a/.dockerignore b/.dockerignore index 6c2f2b9b7725..4be8d4108e78 100644 --- a/.dockerignore +++ b/.dockerignore @@ -15,6 +15,7 @@ data/samples/* **/*.pt **/*.pth **/*.onnx +**/*.engine **/*.mlmodel **/*.torchscript **/*.torchscript.pt diff --git a/.gitignore b/.gitignore index 5f8cab550021..8bb082b0355a 100755 --- a/.gitignore +++ b/.gitignore @@ -48,6 +48,7 @@ VOC/ *.pt *.pb *.onnx +*.engine *.mlmodel *.torchscript *.tflite From 581dc301a70bef6d3e768adfe1a87b85e50e6268 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 8 Dec 2021 13:37:33 +0100 Subject: [PATCH 0758/1976] Add ONNX inference providers (#5918) * Add ONNX inference providers Fix for https://github.com/ultralytics/yolov5/issues/5916 * Update common.py --- models/common.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/models/common.py b/models/common.py index ec5fbfaec4ca..c269cfef9a6c 100644 --- a/models/common.py +++ b/models/common.py @@ -320,9 +320,11 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=False): net = cv2.dnn.readNetFromONNX(w) elif onnx: # ONNX Runtime LOGGER.info(f'Loading {w} for ONNX Runtime inference...') - check_requirements(('onnx', 'onnxruntime-gpu' if torch.cuda.is_available() else 'onnxruntime')) + cuda = torch.cuda.is_available() + check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime')) import onnxruntime - session = onnxruntime.InferenceSession(w, None) + providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider'] + session = onnxruntime.InferenceSession(w, providers=providers) elif engine: # TensorRT LOGGER.info(f'Loading {w} for TensorRT inference...') import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download From 7d56d451241e94cd9dbe4fcb9bfba0e92c6e0e23 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 8 Dec 2021 14:57:03 +0100 Subject: [PATCH 0759/1976] Add hardware checks to `notebook_init()` (#5919) * Update notebook * Update notebook * 
update string * update string * Updates * Updates * Updates * check both ipython and psutil * remove sample_data if is_colab * cleanup * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- tutorial.ipynb | 3 ++- utils/__init__.py | 31 +++++++++++++++++++++++++------ 2 files changed, 27 insertions(+), 7 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 4ce87c75aa64..45b27b7ab2cc 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -409,6 +409,7 @@ "%cd yolov5\n", "%pip install -qr requirements.txt # install\n", "\n", + "import torch\n", "from yolov5 import utils\n", "display = utils.notebook_init() # checks" ], @@ -983,7 +984,7 @@ "source": [ "# Reproduce\n", "for x in 'yolov5s', 'yolov5m', 'yolov5l', 'yolov5x':\n", - " !python val.py --weights {x}.pt --data coco.yaml --img 640 --conf 0.25 --iou 0.45 # speed\n", + " !python val.py --weights {x}.pt --data coco.yaml --img 640 --task speed # speed\n", " !python val.py --weights {x}.pt --data coco.yaml --img 640 --conf 0.001 --iou 0.65 # mAP" ], "execution_count": null, diff --git a/utils/__init__.py b/utils/__init__.py index 2b0c896364a2..ff93fd760059 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -4,15 +4,34 @@ """ -def notebook_init(): - # For YOLOv5 notebooks +def notebook_init(verbose=True): + # Check system software and hardware print('Checking setup...') + + import os + import shutil + + from utils.general import check_requirements, emojis, is_colab + from utils.torch_utils import select_device # imports + + check_requirements(('psutil', 'IPython')) + import psutil from IPython import display # to display images and clear console output - from utils.general import emojis - from utils.torch_utils import select_device # YOLOv5 imports + if is_colab(): + shutil.rmtree('sample_data', ignore_errors=True) # remove colab /sample_data directory + + if verbose: + # System info + # gb = 1 / 1000 ** 3 # bytes to GB + gib = 1 / 1024 ** 3 # bytes to GiB + ram = psutil.virtual_memory().total + total, used, free = shutil.disk_usage("/") + display.clear_output() + s = f'({os.cpu_count()} CPUs, {ram * gib:.1f} GB RAM, {(total - free) * gib:.1f}/{total * gib:.1f} GB disk)' + else: + s = '' - display.clear_output() select_device(newline=False) - print(emojis('Setup complete ✅')) + print(emojis(f'Setup complete ✅ {s}')) return display From a3d5f1d3e36d8e023806da0f0c744eef02591c9b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 8 Dec 2021 16:46:24 +0100 Subject: [PATCH 0760/1976] Revert "Update `plot_lr_scheduler()` (#5864)" (#5920) This reverts commit 360eec69101ad9ffada78326b715b724b9b7eb0d. 
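For reference, the same LR-schedule preview can be produced without copying the live optimizer or scheduler at all, by simulating on a disposable optimizer. A sketch under that assumption (an alternative illustration, not the repo's approach):

```python
import torch

params = [torch.zeros(1, requires_grad=True)]     # dummy parameter group
opt = torch.optim.SGD(params, lr=0.01)
lf = lambda e: (1 - e / 300) * 0.99 + 0.01        # YOLOv5-style linear LR lambda
sched = torch.optim.lr_scheduler.LambdaLR(opt, lr_lambda=lf)

lrs = []
for _ in range(300):                              # simulate a full training run
    sched.step()
    lrs.append(opt.param_groups[0]['lr'])
print(f'LR range: {lrs[0]:.5f} -> {lrs[-1]:.5f}')
```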
--- utils/plots.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/plots.py b/utils/plots.py index 5742d050fdf5..69037ee9af70 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -5,7 +5,7 @@ import math import os -from copy import deepcopy +from copy import copy from pathlib import Path import cv2 @@ -243,7 +243,7 @@ def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''): # Plot LR simulating training for full epochs - optimizer, scheduler = deepcopy(optimizer), deepcopy(scheduler) # do not modify originals + optimizer, scheduler = copy(optimizer), copy(scheduler) # do not modify originals y = [] for _ in range(epochs): scheduler.step() From c77a5a84e3c6083d4e707d2e252c1499e294495c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 8 Dec 2021 17:31:31 +0100 Subject: [PATCH 0761/1976] Absolute '/content/sample_data' (#5922) --- utils/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/__init__.py b/utils/__init__.py index ff93fd760059..4658ed6473cd 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -19,7 +19,7 @@ def notebook_init(verbose=True): from IPython import display # to display images and clear console output if is_colab(): - shutil.rmtree('sample_data', ignore_errors=True) # remove colab /sample_data directory + shutil.rmtree('/content/sample_data', ignore_errors=True) # remove colab /sample_data directory if verbose: # System info From 5bdb28ed1083c63a7837dfc2ef7bf00402a02dd1 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 8 Dec 2021 23:15:14 +0100 Subject: [PATCH 0762/1976] Default PyTorch Hub to `autocast(False)` (#5926) --- models/common.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/models/common.py b/models/common.py index c269cfef9a6c..b39017378577 100644 --- a/models/common.py +++ b/models/common.py @@ -443,6 +443,7 @@ class AutoShape(nn.Module): multi_label = False # NMS multiple labels per box classes = None # (optional list) filter by class, i.e. 
= [0, 15, 16] for COCO persons, cats and dogs max_det = 1000 # maximum number of detections per image + amp = False # Automatic Mixed Precision (AMP) inference def __init__(self, model): super().__init__() @@ -476,8 +477,9 @@ def forward(self, imgs, size=640, augment=False, profile=False): t = [time_sync()] p = next(self.model.parameters()) if self.pt else torch.zeros(1) # for device and type + autocast = self.amp and (p.device.type != 'cpu') # Automatic Mixed Precision (AMP) inference if isinstance(imgs, torch.Tensor): # torch - with amp.autocast(enabled=p.device.type != 'cpu'): + with amp.autocast(enabled=autocast): return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference # Pre-process @@ -506,7 +508,7 @@ def forward(self, imgs, size=640, augment=False, profile=False): x = torch.from_numpy(x).to(p.device).type_as(p) / 255 # uint8 to fp16/32 t.append(time_sync()) - with amp.autocast(enabled=p.device.type != 'cpu'): + with amp.autocast(enabled=autocast): # Inference y = self.model(x, augment, profile) # forward t.append(time_sync()) From 4fb6dd4b26a4d1c39a1e2565999be62f53fb7c71 Mon Sep 17 00:00:00 2001 From: Diego Montes <54745152+d57montes@users.noreply.github.com> Date: Thu, 9 Dec 2021 17:10:16 -0500 Subject: [PATCH 0763/1976] Fix ONNX opset inconsistency with parseargs and run args (#5937) --- export.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/export.py b/export.py index 88d03a2c9475..4f83c75c89a0 100644 --- a/export.py +++ b/export.py @@ -325,7 +325,7 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' int8=False, # CoreML/TF INT8 quantization dynamic=False, # ONNX/TF: dynamic axes simplify=False, # ONNX: simplify model - opset=12, # ONNX: opset version + opset=14, # ONNX: opset version verbose=False, # TensorRT: verbose log workspace=4, # TensorRT: workspace size (GB) topk_per_class=100, # TF.js NMS: topk per class to keep From c45f9f678d7a17d37aaac50c324a82509d9c3cde Mon Sep 17 00:00:00 2001 From: Felix You <35478566+youyuxiansen@users.noreply.github.com> Date: Fri, 10 Dec 2021 22:27:20 +0800 Subject: [PATCH 0764/1976] Make `select_device()` robust to `batch_size=-1` (#5940) * Found a bug when setting batch_size = -1 to use AutoBatch. Reproduce: * Fix type conflict Co-authored-by: Glenn Jocher --- utils/torch_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 16289104eb48..cddb173948fb 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -53,7 +53,7 @@ def git_describe(path=Path(__file__).parent): # path must be a directory return '' # not a git repository -def select_device(device='', batch_size=None, newline=True): +def select_device(device='', batch_size=0, newline=True): # device = 'cpu' or '0' or '0,1,2,3' s = f'YOLOv5 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string device = str(device).strip().lower().replace('cuda:', '') # to string, 'cuda:0' to '0' @@ -68,7 +68,7 @@ def select_device(device='', batch_size=None, newline=True): if cuda: devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e.
0,1,6,7 n = len(devices) # device count - if n > 1 and batch_size: # check batch_size is divisible by device_count + if n > 1 and batch_size > 0: # check batch_size is divisible by device_count assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}' space = ' ' * (len(s) + 1) for i, d in enumerate(devices): From 922fbd82094e051553400cd4de8b63b9d202eee2 Mon Sep 17 00:00:00 2001 From: Pascal Maillard Date: Fri, 10 Dec 2021 17:32:09 +0100 Subject: [PATCH 0765/1976] fix .gitignore not tracking existing folders (#5946) * fix .gitignore not tracking existing folders fix .gitignore so that the files that are in the repository are actually being tracked. Everything in the data/ folder is ignored, which also means the subdirectories are ignored. Fix so that the subdirectories and their contents are still tracked. * Remove data/trainings Co-authored-by: Glenn Jocher --- .gitignore | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 8bb082b0355a..327dc8566681 100755 --- a/.gitignore +++ b/.gitignore @@ -26,7 +26,11 @@ storage.googleapis.com runs/* data/* -!data/hyps/* +data/images/* +!data/*.yaml +!data/hyps +!data/scripts +!data/images !data/images/zidane.jpg !data/images/bus.jpg !data/*.sh From a42af30d8a011fb84dde6aaba89723e2c5d7a3d3 Mon Sep 17 00:00:00 2001 From: iumyx2612 <69593462+iumyx2612@users.noreply.github.com> Date: Sat, 11 Dec 2021 00:06:27 +0700 Subject: [PATCH 0766/1976] Update `strip_optimizer()` (#5949) Replace 'training_result' with 'best_fitness' in strip_optimizer() to match key with ckpt from train.py --- utils/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index bbb9054a7235..6cc658cc3150 100755 --- a/utils/general.py +++ b/utils/general.py @@ -738,7 +738,7 @@ def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_op x = torch.load(f, map_location=torch.device('cpu')) if x.get('ema'): x['model'] = x['ema'] # replace model with ema - for k in 'optimizer', 'training_results', 'wandb_id', 'ema', 'updates': # keys + for k in 'optimizer', 'best_fitness', 'wandb_id', 'ema', 'updates': # keys x[k] = None x['epoch'] = -1 x['model'].half() # to FP16 From 2c6317547a46a2dfc414fe1a5886fb9f63c14bf4 Mon Sep 17 00:00:00 2001 From: Diego Montes <54745152+d57montes@users.noreply.github.com> Date: Fri, 10 Dec 2021 12:24:32 -0500 Subject: [PATCH 0767/1976] Add nms and agnostic nms to export.py (#5938) * add nms and agnostic nms to export.py * fix agnostic implies nms * reorder args to group TF args * PEP8 120 char Co-authored-by: Glenn Jocher --- export.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/export.py b/export.py index 4f83c75c89a0..7feb525711e8 100644 --- a/export.py +++ b/export.py @@ -328,6 +328,8 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' opset=14, # ONNX: opset version verbose=False, # TensorRT: verbose log workspace=4, # TensorRT: workspace size (GB) + nms=False, # TF: add NMS to model + agnostic_nms=False, # TF: add agnostic NMS to model topk_per_class=100, # TF.js NMS: topk per class to keep topk_all=100, # TF.js NMS: topk for all classes to keep iou_thres=0.45, # TF.js NMS: IoU threshold @@ -381,9 +383,9 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' if any(tf_exports): pb, tflite, tfjs = tf_exports[1:] assert not (tflite and tfjs), 'TFLite and TF.js models must be exported separately, please pass only one type.' 
- model = export_saved_model(model, im, file, dynamic, tf_nms=tfjs, agnostic_nms=tfjs, - topk_per_class=topk_per_class, topk_all=topk_all, conf_thres=conf_thres, - iou_thres=iou_thres) # keras model + model = export_saved_model(model, im, file, dynamic, tf_nms=nms or agnostic_nms or tfjs, + agnostic_nms=agnostic_nms or tfjs, topk_per_class=topk_per_class, topk_all=topk_all, + conf_thres=conf_thres, iou_thres=iou_thres) # keras model if pb or tfjs: # pb prerequisite to tfjs export_pb(model, im, file) if tflite: @@ -414,6 +416,8 @@ def parse_opt(): parser.add_argument('--opset', type=int, default=14, help='ONNX: opset version') parser.add_argument('--verbose', action='store_true', help='TensorRT: verbose log') parser.add_argument('--workspace', type=int, default=4, help='TensorRT: workspace size (GB)') + parser.add_argument('--nms', action='store_true', help='TF: add NMS to model') + parser.add_argument('--agnostic-nms', action='store_true', help='TF: add agnostic NMS to model') parser.add_argument('--topk-per-class', type=int, default=100, help='TF.js NMS: topk per class to keep') parser.add_argument('--topk-all', type=int, default=100, help='TF.js NMS: topk for all classes to keep') parser.add_argument('--iou-thres', type=float, default=0.45, help='TF.js NMS: IoU threshold') From 8f875d93a258d2b8b27a19499058f755af8aec4f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 11 Dec 2021 14:32:18 +0100 Subject: [PATCH 0768/1976] Refactor NUM_THREADS (#5954) --- utils/datasets.py | 5 ++--- utils/general.py | 9 +++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index 5a3b2110b2e0..79b871c9294b 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -26,8 +26,8 @@ from tqdm import tqdm from utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective -from utils.general import (LOGGER, check_dataset, check_requirements, check_yaml, clean_str, segments2boxes, xyn2xy, - xywh2xyxy, xywhn2xyxy, xyxy2xywhn) +from utils.general import (LOGGER, NUM_THREADS, check_dataset, check_requirements, check_yaml, clean_str, + segments2boxes, xyn2xy, xywh2xyxy, xywhn2xyxy, xyxy2xywhn) from utils.torch_utils import torch_distributed_zero_first # Parameters @@ -35,7 +35,6 @@ IMG_FORMATS = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo'] # acceptable image suffixes VID_FORMATS = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) # DPP -NUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of multiprocessing threads # Get orientation exif tag for orientation in ExifTags.TAGS.keys(): diff --git a/utils/general.py b/utils/general.py index 6cc658cc3150..1da8a147510e 100755 --- a/utils/general.py +++ b/utils/general.py @@ -33,14 +33,15 @@ from utils.metrics import box_iou, fitness # Settings +FILE = Path(__file__).resolve() +ROOT = FILE.parents[1] # YOLOv5 root directory +NUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads + torch.set_printoptions(linewidth=320, precision=5, profile='long') np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5 pd.options.display.max_columns = 10 cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader) -os.environ['NUMEXPR_MAX_THREADS'] = str(min(os.cpu_count(), 8)) # NumExpr max threads - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[1] # 
YOLOv5 root directory +os.environ['NUMEXPR_MAX_THREADS'] = str(NUM_THREADS) # NumExpr max threads def set_logging(name=None, verbose=True): From 8f354362cd94c70908bf6168951b07bd32715ebe Mon Sep 17 00:00:00 2001 From: Yono Mittlefehldt Date: Sat, 11 Dec 2021 18:40:37 +0100 Subject: [PATCH 0769/1976] Fix Detections class `tolist()` method (#5945) * Fix tolist() to add the file for each Detection * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix PEP8 requirement for 2 spaces before an inline comment * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Cleanup Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- models/common.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/models/common.py b/models/common.py index b39017378577..c2edff4d3021 100644 --- a/models/common.py +++ b/models/common.py @@ -525,7 +525,7 @@ def forward(self, imgs, size=640, augment=False, profile=False): class Detections: # YOLOv5 detections class for inference results - def __init__(self, imgs, pred, files, times=None, names=None, shape=None): + def __init__(self, imgs, pred, files, times=(0, 0, 0, 0), names=None, shape=None): super().__init__() d = pred[0].device # device gn = [torch.tensor([*(im.shape[i] for i in [1, 0, 1, 0]), 1, 1], device=d) for im in imgs] # normalizations @@ -533,6 +533,7 @@ def __init__(self, imgs, pred, files, times=None, names=None, shape=None): self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls) self.names = names # class names self.files = files # image filenames + self.times = times # profiling times self.xyxy = pred # xyxy pixels self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized @@ -612,10 +613,11 @@ def pandas(self): def tolist(self): # return a list of Detections objects, i.e. 
'for result in results.tolist():' - x = [Detections([self.imgs[i]], [self.pred[i]], names=self.names, shape=self.s) for i in range(self.n)] - for d in x: - for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']: - setattr(d, k, getattr(d, k)[0]) # pop out of list + r = range(self.n) # iterable + x = [Detections([self.imgs[i]], [self.pred[i]], [self.files[i]], self.times, self.names, self.s) for i in r] + # for d in x: + # for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']: + # setattr(d, k, getattr(d, k)[0]) # pop out of list return x def __len__(self): From 19c56e60b100cf8ff9af65b4347de69e0cff76ae Mon Sep 17 00:00:00 2001 From: Diego Montes <54745152+d57montes@users.noreply.github.com> Date: Sun, 12 Dec 2021 17:39:14 -0500 Subject: [PATCH 0770/1976] Fix `imgsz` bug (#5948) * fix imgsz bug * Update detect.py Co-authored-by: Glenn Jocher --- detect.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/detect.py b/detect.py index 0b6875e5564c..14cdf96ca9db 100644 --- a/detect.py +++ b/detect.py @@ -38,7 +38,7 @@ @torch.no_grad() def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s) source=ROOT / 'data/images', # file/dir/URL/glob, 0 for webcam - imgsz=640, # inference size (pixels) + imgsz=(640, 640), # inference size (height, width) conf_thres=0.25, # confidence threshold iou_thres=0.45, # NMS IOU threshold max_det=1000, # maximum detections per image From e8ef8fb1ca34436577cf6d1f3933b0c30e19992c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 13 Dec 2021 13:32:27 +0100 Subject: [PATCH 0771/1976] `pretrained=False` fix (#5966) * `pretrained=False` fix Fix for https://github.com/ultralytics/yolov5/issues/5964 * CI speed improvement --- hubconf.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hubconf.py b/hubconf.py index e407677b3233..6bf4b0b0265f 100644 --- a/hubconf.py +++ b/hubconf.py @@ -46,7 +46,7 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo model = DetectMultiBackend(path, device=device) # download/load FP32 model # model = models.experimental.attempt_load(path, map_location=device) # download/load FP32 model else: - cfg = list((Path(__file__).parent / 'models').rglob(f'{path.name}.yaml'))[0] # model.yaml path + cfg = list((Path(__file__).parent / 'models').rglob(f'{path.stem}.yaml'))[0] # model.yaml path model = Model(cfg, channels, classes) # create model if pretrained: ckpt = torch.load(attempt_download(path), map_location=device) # load @@ -138,6 +138,6 @@ def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=Tr Image.open('data/images/bus.jpg'), # PIL np.zeros((320, 640, 3))] # numpy - results = model(imgs) # batched inference + results = model(imgs, size=320) # batched inference results.print() results.save() From 2d0c6afbfea3c844ebdd30388ae668efa3243f7f Mon Sep 17 00:00:00 2001 From: jinmc Date: Tue, 14 Dec 2021 19:18:34 +0900 Subject: [PATCH 0772/1976] make parameter ignore epochs (#5972) * make parameter ignore epochs Add ignore-epochs functionality to prevent spikes at the beginning of training, when fitness spikes and then decreases.
Discussed at https://github.com/ultralytics/yolov5/issues/5971 * Update train.py Co-authored-by: Glenn Jocher From d699c21c752a9d9ca26232fabb91d55e4daea5f8 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 14 Dec 2021 11:24:39 +0100 Subject: [PATCH 0773/1976] YOLOv5s6 params and FLOPs fix (#5977) --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 3074330e5505..fa0645d4fd2c 100644 --- a/README.md +++ b/README.md @@ -236,9 +236,9 @@ We are super excited about our first-ever Ultralytics YOLOv5 🚀 EXPORT Competi |[YOLOv5x][assets] |640 |50.7 |68.9 |766 |12.1 |4.8 |86.7 |205.7 | | | | | | | | | |[YOLOv5n6][assets] |1280 |34.0 |50.7 |153 |8.1 |2.1 |3.2 |4.6 -|[YOLOv5s6][assets] |1280 |44.5 |63.0 |385 |8.2 |3.6 |16.8 |12.6 +|[YOLOv5s6][assets] |1280 |44.5 |63.0 |385 |8.2 |3.6 |12.6 |16.8 |[YOLOv5m6][assets] |1280 |51.0 |69.0 |887 |11.1 |6.8 |35.7 |50.0 -|[YOLOv5l6][assets] |1280 |53.6 |71.6 |1784 |15.8 |10.5 |76.8 |111.4 +|[YOLOv5l6][assets] |1280 |53.6 |71.6 |1784 |15.8 |10.5 |76.7 |111.4 |[YOLOv5x6][assets]<br>+ [TTA][TTA]|1280<br>1536 |54.7<br>**55.4** |**72.4**<br>72.3 |3136<br>- |26.2<br>- |19.4<br>- |140.7<br>- |209.8<br>-
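Note on the 'make parameter ignore epochs' change (#5972) above: its train.py diff is not reproduced in this excerpt, so the following is only a hypothetical sketch of the idea — skip the first few epochs when tracking best fitness, so a transient early spike cannot lock in best.pt. The `ignore_epochs` name and value are invented for illustration:

```python
import numpy as np

def fitness(x):  # simplified stand-in for utils.metrics.fitness()
    w = np.array([0.0, 0.0, 0.1, 0.9])  # weights for [P, R, mAP@0.5, mAP@0.5:0.95]
    return float((x * w).sum())

ignore_epochs = 5  # hypothetical: ignore fitness from the first 5 epochs
best_fitness = 0.0
history = np.random.rand(20, 4)  # fake per-epoch [P, R, mAP@0.5, mAP@0.5:0.95] results
for epoch, results in enumerate(history):
    fi = fitness(results)
    if fi > best_fitness and epoch >= ignore_epochs:  # skip the ignore window
        best_fitness = fi  # train.py would save best.pt here
```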
From c9a46a60e09ab94009754ca71bde23e91aab33fe Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 14 Dec 2021 15:47:49 +0100 Subject: [PATCH 0774/1976] Update callbacks.py with `__init__()` (#5979) Add __init__() function. --- utils/callbacks.py | 43 ++++++++++++++++++++++--------------------- 1 file changed, 22 insertions(+), 21 deletions(-) diff --git a/utils/callbacks.py b/utils/callbacks.py index 327b8639b60c..c9d936ef082d 100644 --- a/utils/callbacks.py +++ b/utils/callbacks.py @@ -9,31 +9,32 @@ class Callbacks: Handles all registered callbacks for YOLOv5 Hooks """ - # Define the available callbacks - _callbacks = { - 'on_pretrain_routine_start': [], - 'on_pretrain_routine_end': [], + def __init__(self): + # Define the available callbacks + self._callbacks = { + 'on_pretrain_routine_start': [], + 'on_pretrain_routine_end': [], - 'on_train_start': [], - 'on_train_epoch_start': [], - 'on_train_batch_start': [], - 'optimizer_step': [], - 'on_before_zero_grad': [], - 'on_train_batch_end': [], - 'on_train_epoch_end': [], + 'on_train_start': [], + 'on_train_epoch_start': [], + 'on_train_batch_start': [], + 'optimizer_step': [], + 'on_before_zero_grad': [], + 'on_train_batch_end': [], + 'on_train_epoch_end': [], - 'on_val_start': [], - 'on_val_batch_start': [], - 'on_val_image_end': [], - 'on_val_batch_end': [], - 'on_val_end': [], + 'on_val_start': [], + 'on_val_batch_start': [], + 'on_val_image_end': [], + 'on_val_batch_end': [], + 'on_val_end': [], - 'on_fit_epoch_end': [], # fit = train + val - 'on_model_save': [], - 'on_train_end': [], + 'on_fit_epoch_end': [], # fit = train + val + 'on_model_save': [], + 'on_train_end': [], - 'teardown': [], - } + 'teardown': [], + } def register_action(self, hook, name='', callback=None): """ From b7d18f363665791bfaf58cd110dc162ebb5df3b5 Mon Sep 17 00:00:00 2001 From: Mrinal Jain Date: Wed, 15 Dec 2021 19:12:23 +0530 Subject: [PATCH 0775/1976] Increase `ar_thr` from 20 to 100 for better detection on slender (high aspect ratio) objects (#5556) * Making `ar_thr` available as a hyperparameter * Disabling ar_thr as hyperparameter and computing from the dataset instead * Fixing bug in ar_thr computation * Fix `ar_thr` to 100 --- utils/augmentations.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/augmentations.py b/utils/augmentations.py index 5dcfd49fdd05..0311b97b63db 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -269,7 +269,7 @@ def mixup(im, labels, im2, labels2): return im, labels -def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n) +def box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n) # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio w1, h1 = box1[2] - box1[0], box1[3] - box1[1] w2, h2 = box2[2] - box2[0], box2[3] - box2[1] From da9a1b719ba7d10e209ff89efe28b074fb9a5f16 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 15 Dec 2021 15:27:08 +0100 Subject: [PATCH 0776/1976] Allow `--weights URL` (#5991) --- models/common.py | 4 ++-- utils/downloads.py | 9 ++++++--- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/models/common.py b/models/common.py index c2edff4d3021..4f1afa13396c 100644 --- a/models/common.py +++ b/models/common.py @@ -296,7 +296,7 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=False): check_suffix(w, suffixes) # check weights have acceptable suffix pt, jit, onnx, engine, tflite, pb, 
saved_model, coreml = (suffix == x for x in suffixes) # backend booleans stride, names = 64, [f'class{i}' for i in range(1000)] # assign defaults - attempt_download(w) # download if not local + w = attempt_download(w) # download if not local if jit: # TorchScript LOGGER.info(f'Loading {w} for TorchScript inference...') @@ -306,7 +306,7 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=False): d = json.loads(extra_files['config.txt']) # extra_files dict stride, names = int(d['stride']), d['names'] elif pt: # PyTorch - model = attempt_load(weights, map_location=device) + model = attempt_load(weights if isinstance(weights, list) else w, map_location=device) stride = int(model.stride.max()) # model stride names = model.module.names if hasattr(model, 'module') else model.names # get class names self.model = model # explicitly assign for to(), cpu(), cuda(), half() diff --git a/utils/downloads.py b/utils/downloads.py index 998a7a582a33..a8bacae4420f 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -49,9 +49,12 @@ def attempt_download(file, repo='ultralytics/yolov5'): # from utils.downloads i name = Path(urllib.parse.unquote(str(file))).name # decode '%2F' to '/' etc. if str(file).startswith(('http:/', 'https:/')): # download url = str(file).replace(':/', '://') # Pathlib turns :// -> :/ - name = name.split('?')[0] # parse authentication https://url.com/file.txt?auth... - safe_download(file=name, url=url, min_bytes=1E5) - return name + file = name.split('?')[0] # parse authentication https://url.com/file.txt?auth... + if Path(file).is_file(): + print(f'Found {url} locally at {file}') # file already exists + else: + safe_download(file=file, url=url, min_bytes=1E5) + return file # GitHub assets file.parent.mkdir(parents=True, exist_ok=True) # make parent dir (if required) From 628817dfae670302a69f83a7c44431877f90eb3f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 15 Dec 2021 17:19:19 +0100 Subject: [PATCH 0777/1976] Recommend `jar xf file.zip` for zips (#5993) --- data/xView.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/xView.yaml b/data/xView.yaml index fabcdb0bdd13..5fcb7479d0af 100644 --- a/data/xView.yaml +++ b/data/xView.yaml @@ -1,6 +1,6 @@ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license # xView 2018 dataset https://challenge.xviewdataset.org -# -------- DOWNLOAD DATA MANUALLY from URL above and unzip to 'datasets/xView' before running train command! -------- +# -------- DOWNLOAD DATA MANUALLY and jar xf val_images.zip to 'datasets/xView' before running train command! 
-------- # Example usage: python train.py --data xView.yaml # parent # ├── yolov5 From c1249a47c7fe19e2067cb25ed8347e67d26ff1f1 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 16 Dec 2021 14:10:54 +0100 Subject: [PATCH 0778/1976] *.torchscript inference `self.jit` fix (#6007) --- models/common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/common.py b/models/common.py index 4f1afa13396c..cfecb20d2141 100644 --- a/models/common.py +++ b/models/common.py @@ -379,7 +379,7 @@ def wrap_frozen_graph(gd, inputs, outputs): def forward(self, im, augment=False, visualize=False, val=False): # YOLOv5 MultiBackend inference b, ch, h, w = im.shape # batch, channel, height, width - if self.pt: # PyTorch + if self.pt or self.jit: # PyTorch y = self.model(im) if self.jit else self.model(im, augment=augment, visualize=visualize) return y if val else y[0] elif self.coreml: # CoreML From 407a9057478d3deea0a9984af42162d21afa2bd2 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 17 Dec 2021 14:59:46 +0100 Subject: [PATCH 0779/1976] Check TensorRT>=8.0.0 version (#6021) * Check TensorRT>=8.0.0 version * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- models/common.py | 5 +++-- utils/general.py | 12 +++++++----- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/models/common.py b/models/common.py index cfecb20d2141..4fd608f4b3e2 100644 --- a/models/common.py +++ b/models/common.py @@ -21,8 +21,8 @@ from torch.cuda import amp from utils.datasets import exif_transpose, letterbox -from utils.general import (LOGGER, check_requirements, check_suffix, colorstr, increment_path, make_divisible, - non_max_suppression, scale_coords, xywh2xyxy, xyxy2xywh) +from utils.general import (LOGGER, check_requirements, check_suffix, check_version, colorstr, increment_path, + make_divisible, non_max_suppression, scale_coords, xywh2xyxy, xyxy2xywh) from utils.plots import Annotator, colors, save_one_box from utils.torch_utils import copy_attr, time_sync @@ -328,6 +328,7 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=False): elif engine: # TensorRT LOGGER.info(f'Loading {w} for TensorRT inference...') import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download + check_version(trt.__version__, '8.0.0', verbose=True) # version requirement Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr')) logger = trt.Logger(trt.Logger.INFO) with open(w, 'rb') as f, trt.Runtime(logger) as runtime: diff --git a/utils/general.py b/utils/general.py index 1da8a147510e..7ff397fb4caa 100755 --- a/utils/general.py +++ b/utils/general.py @@ -248,14 +248,16 @@ def check_python(minimum='3.6.2'): check_version(platform.python_version(), minimum, name='Python ', hard=True) -def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False): +def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False, verbose=False): # Check version vs. 
required version current, minimum = (pkg.parse_version(x) for x in (current, minimum)) result = (current == minimum) if pinned else (current >= minimum) # bool - if hard: # assert min requirements met - assert result, f'{name}{minimum} required by YOLOv5, but {name}{current} is currently installed' - else: - return result + s = f'{name}{minimum} required by YOLOv5, but {name}{current} is currently installed' # string + if hard: + assert result, s # assert min requirements met + if verbose and not result: + LOGGER.warning(s) + return result @try_except From 361705d9be532cfff592b8a89db40c8218ed1df2 Mon Sep 17 00:00:00 2001 From: Felix You <35478566+youyuxiansen@users.noreply.github.com> Date: Fri, 17 Dec 2021 22:42:26 +0800 Subject: [PATCH 0780/1976] Multi-layer capable `--freeze` argument (#6019) * support specifying multiple frozen layers * fix bug * Cleanup Freeze section * Cleanup argument Co-authored-by: Glenn Jocher --- train.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/train.py b/train.py index 8cb68fc0748e..ae19c1851d62 100644 --- a/train.py +++ b/train.py @@ -124,7 +124,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary model = Model(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create # Freeze - freeze = [f'model.{x}.' for x in range(freeze)] # layers to freeze + freeze = [f'model.{x}.' for x in (freeze if isinstance(freeze, list) else range(freeze))] # layers to freeze for k, v in model.named_parameters(): v.requires_grad = True # train all layers if any(x in k for x in freeze): @@ -469,7 +469,7 @@ def parse_opt(known=False): parser.add_argument('--linear-lr', action='store_true', help='linear LR') parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon') parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)') - parser.add_argument('--freeze', type=int, default=0, help='Number of layers to freeze. backbone=10, all=24') + parser.add_argument('--freeze', nargs='+', type=int, default=0, help='Freeze layers: backbone=10, first3=0 1 2') parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)') parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify') From abbdd4802ea1f01c9e8c723d3792e0e0a92d604e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 17 Dec 2021 16:43:00 +0100 Subject: [PATCH 0781/1976] train -> val comment fix (#6024) --- data/coco.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/coco.yaml b/data/coco.yaml index 2ccc6478b620..348a3d48c412 100644 --- a/data/coco.yaml +++ b/data/coco.yaml @@ -10,7 +10,7 @@ # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
path: ../datasets/coco # dataset root dir train: train2017.txt # train images (relative to 'path') 118287 images -val: val2017.txt # train images (relative to 'path') 5000 images +val: val2017.txt # val images (relative to 'path') 5000 images test: test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794 # Classes From 26f0415287b7fa333f559a8300cedc2274943ab6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 19 Dec 2021 15:19:04 +0100 Subject: [PATCH 0782/1976] Add dataset source citations (#6032) --- data/Argoverse.yaml | 2 +- data/GlobalWheat2020.yaml | 2 +- data/Objects365.yaml | 2 +- data/SKU-110K.yaml | 2 +- data/VOC.yaml | 2 +- data/VisDrone.yaml | 2 +- data/coco.yaml | 2 +- data/coco128.yaml | 2 +- data/xView.yaml | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/data/Argoverse.yaml b/data/Argoverse.yaml index 1625dd1b9d2b..312791b33a2d 100644 --- a/data/Argoverse.yaml +++ b/data/Argoverse.yaml @@ -1,5 +1,5 @@ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license -# Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ +# Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ by Argo AI # Example usage: python train.py --data Argoverse.yaml # parent # ├── yolov5 diff --git a/data/GlobalWheat2020.yaml b/data/GlobalWheat2020.yaml index 75b3bfdff43e..869dace0be2b 100644 --- a/data/GlobalWheat2020.yaml +++ b/data/GlobalWheat2020.yaml @@ -1,5 +1,5 @@ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license -# Global Wheat 2020 dataset http://www.global-wheat.com/ +# Global Wheat 2020 dataset http://www.global-wheat.com/ by University of Saskatchewan # Example usage: python train.py --data GlobalWheat2020.yaml # parent # ├── yolov5 diff --git a/data/Objects365.yaml b/data/Objects365.yaml index b10c28e764c1..4c7cf3fdb2c8 100644 --- a/data/Objects365.yaml +++ b/data/Objects365.yaml @@ -1,5 +1,5 @@ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license -# Objects365 dataset https://www.objects365.org/ +# Objects365 dataset https://www.objects365.org/ by Megvii # Example usage: python train.py --data Objects365.yaml # parent # ├── yolov5 diff --git a/data/SKU-110K.yaml b/data/SKU-110K.yaml index 653485e2079a..9481b7a04aee 100644 --- a/data/SKU-110K.yaml +++ b/data/SKU-110K.yaml @@ -1,5 +1,5 @@ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license -# SKU-110K retail items dataset https://github.com/eg4000/SKU110K_CVPR19 +# SKU-110K retail items dataset https://github.com/eg4000/SKU110K_CVPR19 by Trax Retail # Example usage: python train.py --data SKU-110K.yaml # parent # ├── yolov5 diff --git a/data/VOC.yaml b/data/VOC.yaml index 8dbaacf9c290..975d56466de1 100644 --- a/data/VOC.yaml +++ b/data/VOC.yaml @@ -1,5 +1,5 @@ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license -# PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC +# PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC by University of Oxford # Example usage: python train.py --data VOC.yaml # parent # ├── yolov5 diff --git a/data/VisDrone.yaml b/data/VisDrone.yaml index 7753da98269e..83a5c7d55e06 100644 --- a/data/VisDrone.yaml +++ b/data/VisDrone.yaml @@ -1,5 +1,5 @@ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license -# VisDrone2019-DET dataset https://github.com/VisDrone/VisDrone-Dataset +# VisDrone2019-DET dataset https://github.com/VisDrone/VisDrone-Dataset by Tianjin University # Example usage: python train.py --data VisDrone.yaml # parent # ├── yolov5 diff --git a/data/coco.yaml b/data/coco.yaml index 348a3d48c412..3ed7e48a2185 
100644 --- a/data/coco.yaml +++ b/data/coco.yaml @@ -1,5 +1,5 @@ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license -# COCO 2017 dataset http://cocodataset.org +# COCO 2017 dataset http://cocodataset.org by Microsoft # Example usage: python train.py --data coco.yaml # parent # ├── yolov5 diff --git a/data/coco128.yaml b/data/coco128.yaml index 84a91b18359d..d07c704407a1 100644 --- a/data/coco128.yaml +++ b/data/coco128.yaml @@ -1,5 +1,5 @@ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license -# COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) +# COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics # Example usage: python train.py --data coco128.yaml # parent # ├── yolov5 diff --git a/data/xView.yaml b/data/xView.yaml index 5fcb7479d0af..fd82828dcb8c 100644 --- a/data/xView.yaml +++ b/data/xView.yaml @@ -1,5 +1,5 @@ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license -# xView 2018 dataset https://challenge.xviewdataset.org +# DIUx xView 2018 Challenge https://challenge.xviewdataset.org by U.S. National Geospatial-Intelligence Agency (NGA) # -------- DOWNLOAD DATA MANUALLY and jar xf val_images.zip to 'datasets/xView' before running train command! -------- # Example usage: python train.py --data xView.yaml # parent From 0db9d5b6a217e3603622884b906dcd4c8008685c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 20 Dec 2021 17:30:46 +0100 Subject: [PATCH 0783/1976] Kaggle `LOGGER` fix (#6041) --- utils/general.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/utils/general.py b/utils/general.py index 7ff397fb4caa..e63ac3e20b62 100755 --- a/utils/general.py +++ b/utils/general.py @@ -46,6 +46,8 @@ def set_logging(name=None, verbose=True): # Sets level and returns logger + for h in logging.root.handlers[:]: + logging.root.removeHandler(h) # remove all handlers associated with the root logger object rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings logging.basicConfig(format="%(message)s", level=logging.INFO if (verbose and rank in (-1, 0)) else logging.WARNING) return logging.getLogger(name) From b8a4babd603d09f518581c589fc8607993a5e192 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 20 Dec 2021 17:42:52 +0100 Subject: [PATCH 0784/1976] Simplify `set_logging()` indexing (#6042) --- utils/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index e63ac3e20b62..ef27eb570ffa 100755 --- a/utils/general.py +++ b/utils/general.py @@ -46,7 +46,7 @@ def set_logging(name=None, verbose=True): # Sets level and returns logger - for h in logging.root.handlers[:]: + for h in logging.root.handlers: logging.root.removeHandler(h) # remove all handlers associated with the root logger object rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings logging.basicConfig(format="%(message)s", level=logging.INFO if (verbose and rank in (-1, 0)) else logging.WARNING) From dc54ed5763720ced4f6784552c47534af5413d45 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 20 Dec 2021 18:24:07 +0100 Subject: [PATCH 0785/1976] `--freeze` fix (#6044) Fix for https://github.com/ultralytics/yolov5/issues/6038 --- train.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/train.py b/train.py index ae19c1851d62..17e816c06ede 100644 --- a/train.py +++ b/train.py @@ -60,7 +60,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary device, callbacks ): - save_dir, epochs, batch_size, weights, single_cls, evolve, data, 
cfg, resume, noval, nosave, workers, freeze, = \ + save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze = \ Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \ opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze @@ -124,7 +124,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary model = Model(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create # Freeze - freeze = [f'model.{x}.' for x in (freeze if isinstance(freeze, list) else range(freeze))] # layers to freeze + freeze = [f'model.{x}.' for x in (freeze if len(freeze) > 1 else range(freeze[0]))] # layers to freeze for k, v in model.named_parameters(): v.requires_grad = True # train all layers if any(x in k for x in freeze): @@ -469,7 +469,7 @@ def parse_opt(known=False): parser.add_argument('--linear-lr', action='store_true', help='linear LR') parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon') parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)') - parser.add_argument('--freeze', nargs='+', type=int, default=0, help='Freeze layers: backbone=10, first3=0 1 2') + parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2') parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)') parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify') From 95c7bc25d3eabc61b12bcfd95c866d9014d97714 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 22 Dec 2021 20:29:48 +0100 Subject: [PATCH 0786/1976] OpenVINO Export (#6057) * OpenVINO export * Remove timeout * Add 3 files * str * Constrain opset to 12 * Default ONNX opset to 12 * Make dir * Make dir * Cleanup * Cleanup * check_requirements(('openvino-dev',)) --- export.py | 36 ++++++++++++++++++++++++++++++------ requirements.txt | 1 + 2 files changed, 31 insertions(+), 6 deletions(-) diff --git a/export.py b/export.py index 7feb525711e8..600e0c318f33 100644 --- a/export.py +++ b/export.py @@ -8,6 +8,7 @@ TorchScript | yolov5s.torchscript | `torchscript` ONNX | yolov5s.onnx | `onnx` CoreML | yolov5s.mlmodel | `coreml` +OpenVINO | yolov5s_openvino_model/ | `openvino` TensorFlow SavedModel | yolov5s_saved_model/ | `saved_model` TensorFlow GraphDef | yolov5s.pb | `pb` TensorFlow Lite | yolov5s.tflite | `tflite` @@ -15,13 +16,14 @@ TensorRT | yolov5s.engine | `engine` Usage: - $ python path/to/export.py --weights yolov5s.pt --include torchscript onnx coreml saved_model pb tflite tfjs + $ python path/to/export.py --weights yolov5s.pt --include torchscript onnx coreml openvino saved_model tflite tfjs Inference: $ python path/to/detect.py --weights yolov5s.pt yolov5s.torchscript yolov5s.onnx yolov5s.mlmodel (under development) + yolov5s_openvino_model (under development) yolov5s_saved_model yolov5s.pb yolov5s.tflite @@ -144,6 +146,23 @@ def export_coreml(model, im, file, prefix=colorstr('CoreML:')): return ct_model +def export_openvino(model, im, file, prefix=colorstr('OpenVINO:')): + # YOLOv5 OpenVINO export + try: + check_requirements(('openvino-dev',)) # requires openvino-dev: https://pypi.org/project/openvino-dev/ + import openvino.inference_engine as ie + + LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...') + f = str(file).replace('.pt', '_openvino_model' + os.sep) + + cmd = f"mo 
--input_model {file.with_suffix('.onnx')} --output_dir {f}" + subprocess.check_output(cmd, shell=True) + + LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + except Exception as e: + LOGGER.info(f'\n{prefix} export failure: {e}') + + def export_saved_model(model, im, file, dynamic, tf_nms=False, agnostic_nms=False, topk_per_class=100, topk_all=100, iou_thres=0.45, conf_thres=0.25, prefix=colorstr('TensorFlow saved_model:')): @@ -317,7 +336,7 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' imgsz=(640, 640), # image (height, width) batch_size=1, # batch size device='cpu', # cuda device, i.e. 0 or 0,1,2,3 or cpu - include=('torchscript', 'onnx', 'coreml'), # include formats + include=('torchscript', 'onnx'), # include formats half=False, # FP16 half-precision export inplace=False, # set YOLOv5 Detect() inplace=True train=False, # model.train() mode @@ -325,7 +344,7 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' int8=False, # CoreML/TF INT8 quantization dynamic=False, # ONNX/TF: dynamic axes simplify=False, # ONNX: simplify model - opset=14, # ONNX: opset version + opset=12, # ONNX: opset version verbose=False, # TensorRT: verbose log workspace=4, # TensorRT: workspace size (GB) nms=False, # TF: add NMS to model @@ -338,9 +357,12 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' t = time.time() include = [x.lower() for x in include] tf_exports = list(x in include for x in ('saved_model', 'pb', 'tflite', 'tfjs')) # TensorFlow exports - imgsz *= 2 if len(imgsz) == 1 else 1 # expand file = Path(url2file(weights) if str(weights).startswith(('http:/', 'https:/')) else weights) + # Checks + imgsz *= 2 if len(imgsz) == 1 else 1 # expand + opset = 12 if ('openvino' in include) else opset # OpenVINO requires opset <= 12 + # Load PyTorch model device = select_device(device) assert not (device.type == 'cpu' and half), '--half only compatible with GPU export, i.e. 
use --device 0' @@ -372,12 +394,14 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' # Exports if 'torchscript' in include: export_torchscript(model, im, file, optimize) - if 'onnx' in include: + if ('onnx' in include) or ('openvino' in include): # OpenVINO requires ONNX export_onnx(model, im, file, opset, train, dynamic, simplify) if 'engine' in include: export_engine(model, im, file, train, half, simplify, workspace, verbose) if 'coreml' in include: export_coreml(model, im, file) + if 'openvino' in include: + export_openvino(model, im, file) # TensorFlow Exports if any(tf_exports): @@ -413,7 +437,7 @@ def parse_opt(): parser.add_argument('--int8', action='store_true', help='CoreML/TF INT8 quantization') parser.add_argument('--dynamic', action='store_true', help='ONNX/TF: dynamic axes') parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model') - parser.add_argument('--opset', type=int, default=14, help='ONNX: opset version') + parser.add_argument('--opset', type=int, default=12, help='ONNX: opset version') parser.add_argument('--verbose', action='store_true', help='TensorRT: verbose log') parser.add_argument('--workspace', type=int, default=4, help='TensorRT: workspace size (GB)') diff --git a/requirements.txt b/requirements.txt index 22b51fc490e3..96fc9d1a1f32 100755 --- a/requirements.txt +++ b/requirements.txt @@ -27,6 +27,7 @@ seaborn>=0.11.0 # scikit-learn==0.19.2 # CoreML quantization # tensorflow>=2.4.1 # TFLite export # tensorflowjs>=3.9.0 # TF.js export +# openvino-dev # OpenVINO export # Extras -------------------------------------- # albumentations>=1.0.3 From afa5cfb0f872bbd467a3e37bc041a1c908c18bba Mon Sep 17 00:00:00 2001 From: JieLi <32835610+jedi007@users.noreply.github.com> Date: Thu, 23 Dec 2021 18:53:00 +0800 Subject: [PATCH 0787/1976] Reduce G/D/CIoU logic operations (#6074) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Considering that the default value is CIoU, adjusting the order of the checks reduces the number of comparisons. And “elif CIoU:” didn't need the 'if'.
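For reference, the three variants these branches compute, written out from the metrics.py code in the diff that follows (C is the smallest box enclosing both boxes, ρ the distance between box centers, c the diagonal of C; the code also adds a small eps to α's denominator for stability):

$$\mathrm{GIoU} = \mathrm{IoU} - \frac{|C| - |B \cup B^{gt}|}{|C|}, \qquad \mathrm{DIoU} = \mathrm{IoU} - \frac{\rho^2(b, b^{gt})}{c^2}$$

$$\mathrm{CIoU} = \mathrm{IoU} - \left(\frac{\rho^2(b, b^{gt})}{c^2} + \alpha v\right), \quad v = \frac{4}{\pi^2}\left(\arctan\frac{w^{gt}}{h^{gt}} - \arctan\frac{w}{h}\right)^2, \quad \alpha = \frac{v}{(1 - \mathrm{IoU}) + v}$$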
Co-authored-by: 李杰 <360751194@qq.comqq.com> --- utils/metrics.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/utils/metrics.py b/utils/metrics.py index 3f1dc559c75a..e03e1bd7460b 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -222,20 +222,20 @@ def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps= union = w1 * h1 + w2 * h2 - inter + eps iou = inter / union - if GIoU or DIoU or CIoU: + if CIoU or DIoU or GIoU: cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1 c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center distance squared - if DIoU: - return iou - rho2 / c2 # DIoU - elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 + if CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2) with torch.no_grad(): alpha = v / (v - iou + (1 + eps)) return iou - (rho2 / c2 + v * alpha) # CIoU + else: + return iou - rho2 / c2 # DIoU else: # GIoU https://arxiv.org/pdf/1902.09630.pdf c_area = cw * ch + eps # convex area return iou - (c_area - union) / c_area # GIoU From c72270c076e1f087d3eb0b1ef3fb7ab55fe794ba Mon Sep 17 00:00:00 2001 From: Deep Patel <35742688+deepsworld@users.noreply.github.com> Date: Thu, 23 Dec 2021 07:49:00 -0500 Subject: [PATCH 0788/1976] Init tensor directly on device (#6068) Slightly more efficient than .to(device) --- models/yolo.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/models/yolo.py b/models/yolo.py index db3d711a81fa..f659a04545b9 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -72,9 +72,9 @@ def forward(self, x): def _make_grid(self, nx=20, ny=20, i=0): d = self.anchors[i].device if check_version(torch.__version__, '1.10.0'): # torch>=1.10.0 meshgrid workaround for torch>=0.7 compatibility - yv, xv = torch.meshgrid([torch.arange(ny).to(d), torch.arange(nx).to(d)], indexing='ij') + yv, xv = torch.meshgrid([torch.arange(ny, device=d), torch.arange(nx, device=d)], indexing='ij') else: - yv, xv = torch.meshgrid([torch.arange(ny).to(d), torch.arange(nx).to(d)]) + yv, xv = torch.meshgrid([torch.arange(ny, device=d), torch.arange(nx, device=d)]) grid = torch.stack((xv, yv), 2).expand((1, self.na, ny, nx, 2)).float() anchor_grid = (self.anchors[i].clone() * self.stride[i]) \ .view((1, self.na, 1, 1, 2)).expand((1, self.na, ny, nx, 2)).float() From db6ec66a602a0b64a7db1711acd064eda5daf2b3 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Thu, 23 Dec 2021 05:23:50 -0800 Subject: [PATCH 0789/1976] W&B: track batch size after autobatch (#6039) * track batch size after autobatch * remove redundant import * Update __init__.py * Update __init__.py Co-authored-by: Glenn Jocher --- train.py | 1 + utils/callbacks.py | 2 +- utils/loggers/__init__.py | 6 ++++++ 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/train.py b/train.py index 17e816c06ede..e2cd5ec85c09 100644 --- a/train.py +++ b/train.py @@ -138,6 +138,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Batch size if RANK == -1 and batch_size == -1: # single-GPU only, estimate best batch size batch_size = check_train_batch_size(model, imgsz) + 
loggers.on_params_update({"batch_size": batch_size}) # Optimizer nbs = 64 # nominal batch size diff --git a/utils/callbacks.py b/utils/callbacks.py index c9d936ef082d..13d82ebc2e41 100644 --- a/utils/callbacks.py +++ b/utils/callbacks.py @@ -32,7 +32,7 @@ def __init__(self): 'on_fit_epoch_end': [], # fit = train + val 'on_model_save': [], 'on_train_end': [], - + 'on_params_update': [], 'teardown': [], } diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 2a68d9785071..7a1df2a45ea7 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -157,3 +157,9 @@ def on_train_end(self, last, best, plots, epoch, results): else: self.wandb.finish_run() self.wandb = WandbLogger(self.opt) + + def on_params_update(self, params): + # Update hyperparams or configs of the experiment + # params: A dict containing {param: value} pairs + if self.wandb: + self.wandb.wandb_run.config.update(params, allow_val_change=True) From 9155eb86419cecd43e542ca69923d26fc2fd9902 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Fri, 31 Dec 2021 01:13:19 +0530 Subject: [PATCH 0790/1976] W&B: Log best results after training ends (#6120) * log best.pt metrics at train end * update * Update __init__.py Co-authored-by: Glenn Jocher --- utils/loggers/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 7a1df2a45ea7..8af5c402d5ee 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -147,6 +147,7 @@ def on_train_end(self, last, best, plots, epoch, results): self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats='HWC') if self.wandb: + self.wandb.log({k: v for k, v in zip(self.keys[3:10], results)}) # log best.pt val results self.wandb.log({"Results": [wandb.Image(str(f), caption=f.name) for f in files]}) # Calling wandb.log. TODO: Refactor this into WandbLogger.log_model if not self.opt.evolve: From 7b6938d5b54f562e6883eb294b4904066bef8188 Mon Sep 17 00:00:00 2001 From: Awsaf Date: Fri, 31 Dec 2021 03:47:53 +0600 Subject: [PATCH 0791/1976] Log best results (#6085) * log best result in summary * comment added * add space for `flake8` * log `best/epoch` * fix `dimension` for epoch ValueError: all the input arrays must have same number of dimensions * log `best/` in `utils.logger.__init__` * fix pre-commit 1. missing whitespace around operator 2. 
over-indented --- utils/loggers/__init__.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 8af5c402d5ee..7679ee70f176 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -47,6 +47,7 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, 'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', # metrics 'val/box_loss', 'val/obj_loss', 'val/cls_loss', # val loss 'x/lr0', 'x/lr1', 'x/lr2'] # params + self.best_keys = ['best/epoch', 'best/precision', 'best/recall', 'best/mAP_0.5', 'best/mAP_0.5:0.95',] for k in LOGGERS: setattr(self, k, None) # init empty logger dictionary self.csv = True # always log to csv @@ -125,6 +126,10 @@ def on_fit_epoch_end(self, vals, epoch, best_fitness, fi): self.tb.add_scalar(k, v, epoch) if self.wandb: + if best_fitness == fi: + best_results = [epoch] + vals[3:7] + for i, name in enumerate(self.best_keys): + self.wandb.wandb_run.summary[name] = best_results[i] # log best results in the summary self.wandb.log(x) self.wandb.end_epoch(best_result=best_fitness == fi) From affa284352fa6d094d32fe2be69dbffe36bd20f8 Mon Sep 17 00:00:00 2001 From: Chen Gen <4850090@qq.com> Date: Fri, 31 Dec 2021 05:59:29 +0800 Subject: [PATCH 0792/1976] Refactor/reduce G/C/D/IoU `if: else` statements (#6087) * Refactor the code to reduce else * Update metrics.py * Cleanup Co-authored-by: Cmos Co-authored-by: Glenn Jocher --- utils/metrics.py | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/utils/metrics.py b/utils/metrics.py index e03e1bd7460b..83defa7fd186 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -234,14 +234,10 @@ def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps= with torch.no_grad(): alpha = v / (v - iou + (1 + eps)) return iou - (rho2 / c2 + v * alpha) # CIoU - else: - return iou - rho2 / c2 # DIoU - else: # GIoU https://arxiv.org/pdf/1902.09630.pdf - c_area = cw * ch + eps # convex area - return iou - (c_area - union) / c_area # GIoU - else: - return iou # IoU - + return iou - rho2 / c2 # DIoU + c_area = cw * ch + eps # convex area + return iou - (c_area - union) / c_area # GIoU https://arxiv.org/pdf/1902.09630.pdf + return iou # IoU def box_iou(box1, box2): # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py From d95978a562bec74eed1d42e370235937ab4e1d7a Mon Sep 17 00:00:00 2001 From: Jiacong Fang Date: Sat, 1 Jan 2022 01:47:52 +0800 Subject: [PATCH 0793/1976] Add EdgeTPU support (#3630) * Add models/tf.py for TensorFlow and TFLite export * Set auto=False for int8 calibration * Update requirements.txt for TensorFlow and TFLite export * Read anchors directly from PyTorch weights * Add --tf-nms to append NMS in TensorFlow SavedModel and GraphDef export * Remove check_anchor_order, check_file, set_logging from import * Reformat code and optimize imports * Autodownload model and check cfg * update --source path, img-size to 320, single output * Adjust representative_dataset * Put representative dataset in tfl_int8 block * detect.py TF inference * weights to string * weights to string * cleanup tf.py * Add --dynamic-batch-size * Add xywh normalization to reduce calibration error * Update requirements.txt TensorFlow 2.3.1 -> 2.4.0 to avoid int8 quantization error * Fix imports Move C3 from models.experimental to models.common * Add models/tf.py for TensorFlow and TFLite export * Set auto=False for int8 calibration * Update requirements.txt for
TensorFlow and TFLite export * Read anchors directly from PyTorch weights * Add --tf-nms to append NMS in TensorFlow SavedModel and GraphDef export * Remove check_anchor_order, check_file, set_logging from import * Reformat code and optimize imports * Autodownload model and check cfg * update --source path, img-size to 320, single output * Adjust representative_dataset * detect.py TF inference * Put representative dataset in tfl_int8 block * weights to string * weights to string * cleanup tf.py * Add --dynamic-batch-size * Add xywh normalization to reduce calibration error * Update requirements.txt TensorFlow 2.3.1 -> 2.4.0 to avoid int8 quantization error * Fix imports Move C3 from models.experimental to models.common * implement C3() and SiLU() * Add TensorFlow and TFLite Detection * Add --tfl-detect for TFLite Detection * Add int8 quantized TFLite inference in detect.py * Add --edgetpu for Edge TPU detection * Fix --img-size to add rectangle TensorFlow and TFLite input * Add --no-tf-nms to detect objects using models combined with TensorFlow NMS * Fix --img-size list type input * Update README.md * Add Android project for TFLite inference * Upgrade TensorFlow v2.3.1 -> v2.4.0 * Disable normalization of xywh * Rewrite names init in detect.py * Change input resolution 640 -> 320 on Android * Disable NNAPI * Update README.me --img 640 -> 320 * Update README.me for Edge TPU * Update README.md * Fix reshape dim to support dynamic batching * Fix reshape dim to support dynamic batching * Add epsilon argument in tf_BN, which is different between TF and PT * Set stride to None if not using PyTorch, and do not warmup without PyTorch * Add list support in check_img_size() * Add list input support in detect.py * sys.path.append('./') to run from yolov5/ * Add int8 quantization support for TensorFlow 2.5 * Add get_coco128.sh * Remove --no-tfl-detect in models/tf.py (Use tf-android-tfl-detect branch for EdgeTPU) * Update requirements.txt * Replace torch.load() with attempt_load() * Update requirements.txt * Add --tf-raw-resize to set half_pixel_centers=False * Remove android directory * Update README.md * Update README.md * Add multiple OS support for EdgeTPU detection * Fix export and detect * Export 3 YOLO heads with Edge TPU models * Remove xywh denormalization with Edge TPU models in detect.py * Fix saved_model and pb detect error * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix pre-commit.ci failure * Add edgetpu in export.py docstring * Fix Edge TPU model detection exported by TF 2.7 * Add class names for TF/TFLite in DetectMultibackend * Fix assignment with nl in TFLite Detection * Add check when getting Edge TPU compiler version * Add UTF-8 encoding in opening --data file for Windows * Remove redundant TensorFlow import * Add Edge TPU in export.py's docstring * Add the detect layer in Edge TPU model conversion * Default `dnn=False` * Cleanup data.yaml loading * Update detect.py * Update val.py * Comments and generalize data.yaml names Co-authored-by: Glenn Jocher Co-authored-by: unknown Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- detect.py | 4 +++- export.py | 29 +++++++++++++++++++++++++---- models/common.py | 10 ++++++++-- val.py | 2 +- 4 files changed, 37 insertions(+), 8 deletions(-) diff --git a/detect.py b/detect.py index 14cdf96ca9db..e6e74ea7dfeb 100644 --- a/detect.py +++ b/detect.py @@ -38,6 +38,7 @@ @torch.no_grad() def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s) 
source=ROOT / 'data/images', # file/dir/URL/glob, 0 for webcam + data=ROOT / 'data/coco128.yaml', # dataset.yaml path imgsz=(640, 640), # inference size (height, width) conf_thres=0.25, # confidence threshold iou_thres=0.45, # NMS IOU threshold @@ -76,7 +77,7 @@ def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s) # Load model device = select_device(device) - model = DetectMultiBackend(weights, device=device, dnn=dnn) + model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data) stride, names, pt, jit, onnx, engine = model.stride, model.names, model.pt, model.jit, model.onnx, model.engine imgsz = check_img_size(imgsz, s=stride) # check image size @@ -204,6 +205,7 @@ def parse_opt(): parser = argparse.ArgumentParser() parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path(s)') parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob, 0 for webcam') + parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path') parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w') parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold') parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold') diff --git a/export.py b/export.py index 600e0c318f33..a0758010e816 100644 --- a/export.py +++ b/export.py @@ -248,6 +248,24 @@ def export_tflite(keras_model, im, file, int8, data, ncalib, prefix=colorstr('Te LOGGER.info(f'\n{prefix} export failure: {e}') +def export_edgetpu(keras_model, im, file, prefix=colorstr('Edge TPU:')): + # YOLOv5 Edge TPU export https://coral.ai/docs/edgetpu/models-intro/ + try: + cmd = 'edgetpu_compiler --version' + out = subprocess.run(cmd, shell=True, capture_output=True, check=True) + ver = out.stdout.decode().split()[-1] + LOGGER.info(f'\n{prefix} starting export with Edge TPU compiler {ver}...') + f = str(file).replace('.pt', '-int8_edgetpu.tflite') + f_tfl = str(file).replace('.pt', '-int8.tflite') # TFLite model + + cmd = f"edgetpu_compiler -s {f_tfl}" + subprocess.run(cmd, shell=True, check=True) + + LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + except Exception as e: + LOGGER.info(f'\n{prefix} export failure: {e}') + + def export_tfjs(keras_model, im, file, prefix=colorstr('TensorFlow.js:')): # YOLOv5 TensorFlow.js export try: @@ -285,6 +303,7 @@ def export_tfjs(keras_model, im, file, prefix=colorstr('TensorFlow.js:')): def export_engine(model, im, file, train, half, simplify, workspace=4, verbose=False, prefix=colorstr('TensorRT:')): + # YOLOv5 TensorRT export https://developer.nvidia.com/tensorrt try: check_requirements(('tensorrt',)) import tensorrt as trt @@ -356,7 +375,7 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' ): t = time.time() include = [x.lower() for x in include] - tf_exports = list(x in include for x in ('saved_model', 'pb', 'tflite', 'tfjs')) # TensorFlow exports + tf_exports = list(x in include for x in ('saved_model', 'pb', 'tflite', 'edgetpu', 'tfjs')) # TensorFlow exports file = Path(url2file(weights) if str(weights).startswith(('http:/', 'https:/')) else weights) # Checks @@ -405,15 +424,17 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' # TensorFlow Exports if any(tf_exports): - pb, tflite, tfjs = tf_exports[1:] + pb, tflite, edgetpu, tfjs = tf_exports[1:] assert not (tflite and tfjs), 'TFLite and TF.js models must be 
exported separately, please pass only one type.' model = export_saved_model(model, im, file, dynamic, tf_nms=nms or agnostic_nms or tfjs, agnostic_nms=agnostic_nms or tfjs, topk_per_class=topk_per_class, topk_all=topk_all, conf_thres=conf_thres, iou_thres=iou_thres) # keras model if pb or tfjs: # pb prerequisite to tfjs export_pb(model, im, file) - if tflite: - export_tflite(model, im, file, int8=int8, data=data, ncalib=100) + if tflite or edgetpu: + export_tflite(model, im, file, int8=int8 or edgetpu, data=data, ncalib=100) + if edgetpu: + export_edgetpu(model, im, file) if tfjs: export_tfjs(model, im, file) diff --git a/models/common.py b/models/common.py index 4fd608f4b3e2..b53de7001454 100644 --- a/models/common.py +++ b/models/common.py @@ -17,6 +17,7 @@ import requests import torch import torch.nn as nn +import yaml from PIL import Image from torch.cuda import amp @@ -276,7 +277,7 @@ def forward(self, x): class DetectMultiBackend(nn.Module): # YOLOv5 MultiBackend class for python inference on various backends - def __init__(self, weights='yolov5s.pt', device=None, dnn=False): + def __init__(self, weights='yolov5s.pt', device=None, dnn=False, data=None): # Usage: # PyTorch: weights = *.pt # TorchScript: *.torchscript @@ -284,6 +285,7 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=False): # TensorFlow: *_saved_model # TensorFlow: *.pb # TensorFlow Lite: *.tflite + # TensorFlow Edge TPU: *_edgetpu.tflite # ONNX Runtime: *.onnx # OpenCV DNN: *.onnx with dnn=True # TensorRT: *.engine @@ -297,6 +299,9 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=False): pt, jit, onnx, engine, tflite, pb, saved_model, coreml = (suffix == x for x in suffixes) # backend booleans stride, names = 64, [f'class{i}' for i in range(1000)] # assign defaults w = attempt_download(w) # download if not local + if data: # data.yaml path (optional) + with open(data, errors='ignore') as f: + names = yaml.safe_load(f)['names'] # class names if jit: # TorchScript LOGGER.info(f'Loading {w} for TorchScript inference...') @@ -343,7 +348,7 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=False): binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items()) context = model.create_execution_context() batch_size = bindings['images'].shape[0] - else: # TensorFlow model (TFLite, pb, saved_model) + else: # TensorFlow (TFLite, pb, saved_model) if pb: # https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt LOGGER.info(f'Loading {w} for TensorFlow *.pb inference...') import tensorflow as tf @@ -425,6 +430,7 @@ def forward(self, im, augment=False, visualize=False, val=False): y[..., 1] *= h # y y[..., 2] *= w # w y[..., 3] *= h # h + y = torch.tensor(y) if isinstance(y, np.ndarray) else y return (y, []) if val else y diff --git a/val.py b/val.py index 4eec499d3029..c1fcf61b468c 100644 --- a/val.py +++ b/val.py @@ -124,7 +124,7 @@ def run(data, (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model - model = DetectMultiBackend(weights, device=device, dnn=dnn) + model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data) stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine imgsz = check_img_size(imgsz, s=stride) # check image size half &= (pt or jit or engine) and device.type != 'cpu' # half precision only supported by PyTorch on CUDA From e1dc8943647941378620b9b230aec2862a913fe3 Mon Sep 17 00:00:00 2001 From: bilzard <36561962+bilzard@users.noreply.github.com> Date: Mon, 3 Jan 2022 06:10:19 +0900 
Subject: [PATCH 0794/1976] Enable AdamW optimizer (#6152) --- train.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/train.py b/train.py index e2cd5ec85c09..304c001b6547 100644 --- a/train.py +++ b/train.py @@ -22,7 +22,7 @@ import yaml from torch.cuda import amp from torch.nn.parallel import DistributedDataParallel as DDP -from torch.optim import SGD, Adam, lr_scheduler +from torch.optim import SGD, Adam, AdamW, lr_scheduler from tqdm import tqdm FILE = Path(__file__).resolve() @@ -155,8 +155,10 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter): # weight (with decay) g1.append(v.weight) - if opt.adam: + if opt.optimizer == 'Adam': optimizer = Adam(g0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum + elif opt.optimizer == 'AdamW': + optimizer = AdamW(g0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum else: optimizer = SGD(g0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True) @@ -460,7 +462,7 @@ def parse_opt(known=False): parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%') parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class') - parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer') + parser.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW'], default='SGD', help='optimizer') parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode') parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') parser.add_argument('--project', default=ROOT / 'runs/train', help='save to project/name') From ec4b6dd2a31604fd9963b96ee472f78651bc1caa Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 2 Jan 2022 16:09:45 -0800 Subject: [PATCH 0795/1976] Update export format docstrings (#6151) * Update export documentation * Cleanup * Update export.py * Update README.md * Update README.md * Update README.md * Update README.md * Update README.md * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update README.md * Update README.md * Update README.md * Update train.py * Update train.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- README.md | 58 +++++++++++++++++++++++++++++-------------------------- detect.py | 24 +++++++++++++++++------ export.py | 26 +++++++++++++------------ train.py | 11 +++++++++-- val.py | 14 +++++++++++++- 5 files changed, 85 insertions(+), 48 deletions(-) diff --git a/README.md b/README.md index fa0645d4fd2c..59abd084572c 100644 --- a/README.md +++ b/README.md @@ -62,15 +62,14 @@ See the [YOLOv5 Docs](https://docs.ultralytics.com) for full documentation on tr
Install -[**Python>=3.6.0**](https://www.python.org/) is required with all -[requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) installed including -[**PyTorch>=1.7**](https://pytorch.org/get-started/locally/): - +Clone repo and install [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) in a +[**Python>=3.6.0**](https://www.python.org/) environment, including +[**PyTorch>=1.7**](https://pytorch.org/get-started/locally/). ```bash -$ git clone https://github.com/ultralytics/yolov5 -$ cd yolov5 -$ pip install -r requirements.txt +git clone https://github.com/ultralytics/yolov5 # clone +cd yolov5 +pip install -r requirements.txt # install ```
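As a quick post-install sanity check, the version floors above can be verified in a few lines; this is a minimal sketch assuming only that the clone and pip install completed:
```python
# Verify the Python>=3.6.0 and PyTorch>=1.7 floors from the install section above
import sys

import torch

assert sys.version_info >= (3, 6), 'Python>=3.6.0 is required'
major, minor = (int(x) for x in torch.__version__.split('+')[0].split('.')[:2])
assert (major, minor) >= (1, 7), 'PyTorch>=1.7 is required'
print(f'Python {sys.version.split()[0]}, PyTorch {torch.__version__}, CUDA: {torch.cuda.is_available()}')
```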
@@ -78,8 +77,9 @@ $ pip install -r requirements.txt
Inference -Inference with YOLOv5 and [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36). Models automatically download -from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). +Inference with YOLOv5 and [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36) +. [Models](https://github.com/ultralytics/yolov5/tree/master/models) download automatically from the latest +YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). ```python import torch @@ -104,17 +104,17 @@ results.print() # or .show(), .save(), .crop(), .pandas(), etc.
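The hunk above only quotes the edges of the README's PyTorch Hub snippet; the elided middle follows the standard Hub pattern. A minimal sketch of the full workflow, assuming internet access for the first-run download (the `yolov5s` variant and image URL are the README's own illustrative choices):
```python
import torch

# Model downloads automatically from the latest YOLOv5 release on first use
model = torch.hub.load('ultralytics/yolov5', 'yolov5s')

# Image: a file path, URL, PIL image, OpenCV/numpy array, or a list of these
img = 'https://ultralytics.com/images/zidane.jpg'

results = model(img)  # inference
results.print()  # or .show(), .save(), .crop(), .pandas(), etc.
```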
Inference with detect.py -`detect.py` runs inference on a variety of sources, downloading models automatically from -the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases) and saving results to `runs/detect`. +`detect.py` runs inference on a variety of sources, downloading [models](https://github.com/ultralytics/yolov5/tree/master/models) automatically from +the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) and saving results to `runs/detect`. ```bash -$ python detect.py --source 0 # webcam - img.jpg # image - vid.mp4 # video - path/ # directory - path/*.jpg # glob - 'https://youtu.be/Zgi9g1ksQHc' # YouTube - 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream +python detect.py --source 0 # webcam + img.jpg # image + vid.mp4 # video + path/ # directory + path/*.jpg # glob + 'https://youtu.be/Zgi9g1ksQHc' # YouTube + 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream ```
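Because detect.py exposes its options through the run() function shown in the detect.py diff earlier in this patch, the same inference can also be driven from Python; a minimal sketch, assuming it is executed from the repository root:
```python
# Programmatic equivalent of the CLI above; keyword names mirror the run()
# signature from the detect.py diff earlier in this patch
import detect

detect.run(weights='yolov5s.pt',   # model path
           source='data/images',  # file/dir/URL/glob, 0 for webcam
           imgsz=(640, 640),      # inference size (height, width)
           conf_thres=0.25,       # confidence threshold
           iou_thres=0.45)        # NMS IoU threshold
```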
@@ -122,16 +122,20 @@ $ python detect.py --source 0 # webcam
Training -Run commands below to reproduce results -on [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) dataset (dataset auto-downloads on -first use). Training times for YOLOv5s/m/l/x are 2/4/6/8 days on a single V100 (multi-GPU times faster). Use the -largest `--batch-size` your GPU allows (batch sizes shown for 16 GB devices). +The commands below reproduce YOLOv5 [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) +results. [Models](https://github.com/ultralytics/yolov5/tree/master/models) +and [datasets](https://github.com/ultralytics/yolov5/tree/master/data) download automatically from the latest +YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). Training times for YOLOv5n/s/m/l/x are +1/2/4/6/8 days on a V100 GPU ([Multi-GPU](https://github.com/ultralytics/yolov5/issues/475) times faster). Use the +largest `--batch-size` possible, or pass `--batch-size -1` for +YOLOv5 [AutoBatch](https://github.com/ultralytics/yolov5/pull/5092). Batch sizes shown for V100-16GB. ```bash -$ python train.py --data coco.yaml --cfg yolov5s.yaml --weights '' --batch-size 64 - yolov5m 40 - yolov5l 24 - yolov5x 16 +python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 128 + yolov5s 64 + yolov5m 40 + yolov5l 24 + yolov5x 16 ``` @@ -225,6 +229,7 @@ We are super excited about our first-ever Ultralytics YOLOv5 🚀 EXPORT Competi ### Pretrained Checkpoints [assets]: https://github.com/ultralytics/yolov5/releases + [TTA]: https://github.com/ultralytics/yolov5/issues/303 |Model |size
(pixels) |mAPval 0.5:0.95 |mAPval 0.5 |Speed CPU b1 (ms) |Speed V100 b1 (ms) |Speed V100 b32 (ms) |params (M) |FLOPs @640 (B) @@ -257,7 +262,6 @@ We love your input! We want to make contributing to YOLOv5 as easy and transparent - ## Contact
For YOLOv5 bugs and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues). For business inquiries or diff --git a/detect.py b/detect.py index e6e74ea7dfeb..1393f79746f6 100644 --- a/detect.py +++ b/detect.py @@ -2,14 +2,26 @@ """ Run inference on images, videos, directories, streams, etc. -Usage: - $ python path/to/detect.py --weights yolov5s.pt --source 0 # webcam - img.jpg # image - vid.mp4 # video - path/ # directory - path/*.jpg # glob +Usage - sources: + $ python path/to/detect.py --weights yolov5s.pt --source 0 # webcam + img.jpg # image + vid.mp4 # video + path/ # directory + path/*.jpg # glob 'https://youtu.be/Zgi9g1ksQHc' # YouTube 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream + +Usage - formats: + $ python path/to/detect.py --weights yolov5s.pt # PyTorch + yolov5s.torchscript # TorchScript + yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn + yolov5s.mlmodel # CoreML (under development) + yolov5s_openvino_model # OpenVINO (under development) + yolov5s_saved_model # TensorFlow SavedModel + yolov5s.pb # TensorFlow protobuf + yolov5s.tflite # TensorFlow Lite + yolov5s_edgetpu.tflite # TensorFlow Edge TPU + yolov5s.engine # TensorRT """ import argparse diff --git a/export.py b/export.py index a0758010e816..67e32305ded1 100644 --- a/export.py +++ b/export.py @@ -2,18 +2,19 @@ """ Export a YOLOv5 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit -Format | Example | `--include ...` argument ---- | --- | --- -PyTorch | yolov5s.pt | - -TorchScript | yolov5s.torchscript | `torchscript` -ONNX | yolov5s.onnx | `onnx` -CoreML | yolov5s.mlmodel | `coreml` -OpenVINO | yolov5s_openvino_model/ | `openvino` -TensorFlow SavedModel | yolov5s_saved_model/ | `saved_model` -TensorFlow GraphDef | yolov5s.pb | `pb` -TensorFlow Lite | yolov5s.tflite | `tflite` -TensorFlow.js | yolov5s_web_model/ | `tfjs` -TensorRT | yolov5s.engine | `engine` +Format | Example | `--include ...` argument +--- | --- | --- +PyTorch | yolov5s.pt | - +TorchScript | yolov5s.torchscript | `torchscript` +ONNX | yolov5s.onnx | `onnx` +CoreML | yolov5s.mlmodel | `coreml` +OpenVINO | yolov5s_openvino_model/ | `openvino` +TensorFlow SavedModel | yolov5s_saved_model/ | `saved_model` +TensorFlow GraphDef | yolov5s.pb | `pb` +TensorFlow Lite | yolov5s.tflite | `tflite` +TensorFlow Edge TPU | yolov5s_edgetpu.tflite | `edgetpu` +TensorFlow.js | yolov5s_web_model/ | `tfjs` +TensorRT | yolov5s.engine | `engine` Usage: $ python path/to/export.py --weights yolov5s.pt --include torchscript onnx coreml openvino saved_model tflite tfjs @@ -27,6 +28,7 @@ yolov5s_saved_model yolov5s.pb yolov5s.tflite + yolov5s_edgetpu.tflite yolov5s.engine TensorFlow.js: diff --git a/train.py b/train.py index 304c001b6547..bd2fb5898cb9 100644 --- a/train.py +++ b/train.py @@ -1,10 +1,17 @@ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ -Train a YOLOv5 model on a custom dataset +Train a YOLOv5 model on a custom dataset. + +Models and datasets download automatically from the latest YOLOv5 release. 
+Models: https://github.com/ultralytics/yolov5/tree/master/models +Datasets: https://github.com/ultralytics/yolov5/tree/master/data +Tutorial: https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data Usage: - $ python path/to/train.py --data coco128.yaml --weights yolov5s.pt --img 640 + $ python path/to/train.py --data coco128.yaml --weights yolov5s.pt --img 640 # from pretrained (RECOMMENDED) + $ python path/to/train.py --data coco128.yaml --weights '' --cfg yolov5s.yaml --img 640 # from scratch """ + import argparse import math import os diff --git a/val.py b/val.py index c1fcf61b468c..f7c9ef5e60d2 100644 --- a/val.py +++ b/val.py @@ -3,7 +3,19 @@ Validate a trained YOLOv5 model accuracy on a custom dataset Usage: - $ python path/to/val.py --data coco128.yaml --weights yolov5s.pt --img 640 + $ python path/to/val.py --weights yolov5s.pt --data coco128.yaml --img 640 + +Usage - formats: + $ python path/to/val.py --weights yolov5s.pt # PyTorch + yolov5s.torchscript # TorchScript + yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn + yolov5s.mlmodel # CoreML (under development) + yolov5s_openvino_model # OpenVINO (under development) + yolov5s_saved_model # TensorFlow SavedModel + yolov5s.pb # TensorFlow protobuf + yolov5s.tflite # TensorFlow Lite + yolov5s_edgetpu.tflite # TensorFlow Edge TPU + yolov5s.engine # TensorRT """ import argparse From 968e30065aa1ccbebc42d1a19fd48f2aebc5cf52 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 2 Jan 2022 19:47:03 -0800 Subject: [PATCH 0796/1976] Update greetings.yml (#6165) --- .github/workflows/greetings.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index 0daf9514d3c5..6ced1132264a 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -37,9 +37,9 @@ jobs: [**Python>=3.6.0**](https://www.python.org/) with all [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) installed including [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/). 
To get started: ```bash - $ git clone https://github.com/ultralytics/yolov5 - $ cd yolov5 - $ pip install -r requirements.txt + git clone https://github.com/ultralytics/yolov5 # clone + cd yolov5 + pip install -r requirements.txt # install ``` ## Environments From 5344e54da6aa5f40fc20115c672f29c96d7827cd Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 3 Jan 2022 10:42:50 -0800 Subject: [PATCH 0797/1976] [pre-commit.ci] pre-commit suggestions (#6177) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/pre-commit/pre-commit-hooks: v4.0.1 → v4.1.0](https://github.com/pre-commit/pre-commit-hooks/compare/v4.0.1...v4.1.0) - [github.com/asottile/pyupgrade: v2.23.1 → v2.31.0](https://github.com/asottile/pyupgrade/compare/v2.23.1...v2.31.0) - [github.com/PyCQA/isort: 5.9.3 → 5.10.1](https://github.com/PyCQA/isort/compare/5.9.3...5.10.1) - [github.com/PyCQA/flake8: 3.9.2 → 4.0.1](https://github.com/PyCQA/flake8/compare/3.9.2...4.0.1) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 48e752f448f1..526a5609fdd7 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -13,7 +13,7 @@ ci: repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.0.1 + rev: v4.1.0 hooks: - id: end-of-file-fixer - id: trailing-whitespace @@ -24,14 +24,14 @@ repos: - id: check-docstring-first - repo: https://github.com/asottile/pyupgrade - rev: v2.23.1 + rev: v2.31.0 hooks: - id: pyupgrade args: [--py36-plus] name: Upgrade code - repo: https://github.com/PyCQA/isort - rev: 5.9.3 + rev: 5.10.1 hooks: - id: isort name: Sort imports @@ -60,7 +60,7 @@ repos: # - id: yesqa - repo: https://github.com/PyCQA/flake8 - rev: 3.9.2 + rev: 4.0.1 hooks: - id: flake8 name: PEP8 From b4a29b5a8d63a8c2d4a8929942b44e8969c5dddd Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 3 Jan 2022 10:54:52 -0800 Subject: [PATCH 0798/1976] Update NMS `max_wh=7680` for 8k images (#6178) --- utils/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index ef27eb570ffa..470e6d81d250 100755 --- a/utils/general.py +++ b/utils/general.py @@ -660,7 +660,7 @@ def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=Non assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0' # Settings - min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height + min_wh, max_wh = 2, 7680 # (pixels) minimum and maximum box width and height max_nms = 30000 # maximum number of boxes into torchvision.ops.nms() time_limit = 10.0 # seconds to quit after redundant = True # require redundant detections From 63a4d862aae72935a010e1efd20bd9be5984f105 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 3 Jan 2022 15:41:26 -0800 Subject: [PATCH 0799/1976] Add OpenVINO inference (#6179) --- detect.py | 2 +- export.py | 20 ++++++++++---------- models/common.py | 31 +++++++++++++++++++++++-------- val.py | 2 +- 4 files changed, 35 insertions(+), 20 deletions(-) diff --git a/detect.py b/detect.py index 1393f79746f6..6aa5b825da48 100644 --- a/detect.py +++ b/detect.py @@ -16,7 +16,7 @@ yolov5s.torchscript # TorchScript yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn yolov5s.mlmodel # CoreML (under 
development) - yolov5s_openvino_model # OpenVINO (under development) + yolov5s.xml # OpenVINO yolov5s_saved_model # TensorFlow SavedModel yolov5s.pb # TensorFlow protobuf yolov5s.tflite # TensorFlow Lite diff --git a/export.py b/export.py index 67e32305ded1..fa40864ac378 100644 --- a/export.py +++ b/export.py @@ -20,16 +20,16 @@ $ python path/to/export.py --weights yolov5s.pt --include torchscript onnx coreml openvino saved_model tflite tfjs Inference: - $ python path/to/detect.py --weights yolov5s.pt - yolov5s.torchscript - yolov5s.onnx - yolov5s.mlmodel (under development) - yolov5s_openvino_model (under development) - yolov5s_saved_model - yolov5s.pb - yolov5s.tflite - yolov5s_edgetpu.tflite - yolov5s.engine + $ python path/to/detect.py --weights yolov5s.pt # PyTorch + yolov5s.torchscript # TorchScript + yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn + yolov5s.mlmodel # CoreML (under development) + yolov5s.xml # OpenVINO + yolov5s_saved_model # TensorFlow SavedModel + yolov5s.pb # TensorFlow protobuf + yolov5s.tflite # TensorFlow Lite + yolov5s_edgetpu.tflite # TensorFlow Edge TPU + yolov5s.engine # TensorRT TensorFlow.js: $ cd .. && git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example diff --git a/models/common.py b/models/common.py index b53de7001454..519ce611d7ef 100644 --- a/models/common.py +++ b/models/common.py @@ -282,6 +282,7 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=False, data=None): # PyTorch: weights = *.pt # TorchScript: *.torchscript # CoreML: *.mlmodel + # OpenVINO: *.xml # TensorFlow: *_saved_model # TensorFlow: *.pb # TensorFlow Lite: *.tflite @@ -294,31 +295,38 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=False, data=None): super().__init__() w = str(weights[0] if isinstance(weights, list) else weights) suffix = Path(w).suffix.lower() - suffixes = ['.pt', '.torchscript', '.onnx', '.engine', '.tflite', '.pb', '', '.mlmodel'] + suffixes = ['.pt', '.torchscript', '.onnx', '.engine', '.tflite', '.pb', '', '.mlmodel', '.xml'] check_suffix(w, suffixes) # check weights have acceptable suffix - pt, jit, onnx, engine, tflite, pb, saved_model, coreml = (suffix == x for x in suffixes) # backend booleans + pt, jit, onnx, engine, tflite, pb, saved_model, coreml, xml = (suffix == x for x in suffixes) # backends stride, names = 64, [f'class{i}' for i in range(1000)] # assign defaults w = attempt_download(w) # download if not local if data: # data.yaml path (optional) with open(data, errors='ignore') as f: names = yaml.safe_load(f)['names'] # class names - if jit: # TorchScript + if pt: # PyTorch + model = attempt_load(weights if isinstance(weights, list) else w, map_location=device) + stride = int(model.stride.max()) # model stride + names = model.module.names if hasattr(model, 'module') else model.names # get class names + self.model = model # explicitly assign for to(), cpu(), cuda(), half() + elif jit: # TorchScript LOGGER.info(f'Loading {w} for TorchScript inference...') extra_files = {'config.txt': ''} # model metadata model = torch.jit.load(w, _extra_files=extra_files) if extra_files['config.txt']: d = json.loads(extra_files['config.txt']) # extra_files dict stride, names = int(d['stride']), d['names'] - elif pt: # PyTorch - model = attempt_load(weights if isinstance(weights, list) else w, map_location=device) - stride = int(model.stride.max()) # model stride - names = model.module.names if hasattr(model, 'module') else model.names # get class names - self.model = model # explicitly assign for to(), 
cpu(), cuda(), half() elif coreml: # CoreML LOGGER.info(f'Loading {w} for CoreML inference...') import coremltools as ct model = ct.models.MLModel(w) + elif xml: # OpenVINO + LOGGER.info(f'Loading {w} for OpenVINO inference...') + check_requirements(('openvino-dev',)) # requires openvino-dev: https://pypi.org/project/openvino-dev/ + import openvino.inference_engine as ie + core = ie.IECore() + network = core.read_network(model=w, weights=Path(w).with_suffix('.bin')) # *.xml, *.bin paths + executable_network = core.load_network(network, device_name='CPU', num_requests=1) elif dnn: # ONNX OpenCV DNN LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...') check_requirements(('opencv-python>=4.5.4',)) @@ -403,6 +411,13 @@ def forward(self, im, augment=False, visualize=False, val=False): y = self.net.forward() else: # ONNX Runtime y = self.session.run([self.session.get_outputs()[0].name], {self.session.get_inputs()[0].name: im})[0] + elif self.xml: # OpenVINO + im = im.cpu().numpy() # FP32 + desc = self.ie.TensorDesc(precision='FP32', dims=im.shape, layout='NCHW') # Tensor Description + request = self.executable_network.requests[0] # inference request + request.set_blob(blob_name='images', blob=self.ie.Blob(desc, im)) # name=next(iter(request.input_blobs)) + request.infer() + y = request.output_blobs['output'].buffer # name=next(iter(request.output_blobs)) elif self.engine: # TensorRT assert im.shape == self.bindings['images'].shape, (im.shape, self.bindings['images'].shape) self.binding_addrs['images'] = int(im.data_ptr()) diff --git a/val.py b/val.py index f7c9ef5e60d2..704a7a46eb38 100644 --- a/val.py +++ b/val.py @@ -10,7 +10,7 @@ yolov5s.torchscript # TorchScript yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn yolov5s.mlmodel # CoreML (under development) - yolov5s_openvino_model # OpenVINO (under development) + yolov5s.xml # OpenVINO yolov5s_saved_model # TensorFlow SavedModel yolov5s.pb # TensorFlow protobuf yolov5s.tflite # TensorFlow Lite From 7cad6597bb617b02d36b062039fb237f49efdaae Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 3 Jan 2022 18:43:21 -0800 Subject: [PATCH 0800/1976] Ignore `*_openvino_model/` dir (#6180) --- .dockerignore | 1 + .gitignore | 1 + 2 files changed, 2 insertions(+) diff --git a/.dockerignore b/.dockerignore index 4be8d4108e78..af51ccc3d8df 100644 --- a/.dockerignore +++ b/.dockerignore @@ -24,6 +24,7 @@ data/samples/* **/*.pb *_saved_model/ *_web_model/ +*_openvino_model/ # Below Copied From .gitignore ----------------------------------------------------------------------------------------- # Below Copied From .gitignore ----------------------------------------------------------------------------------------- diff --git a/.gitignore b/.gitignore index 327dc8566681..69a00843ea42 100755 --- a/.gitignore +++ b/.gitignore @@ -59,6 +59,7 @@ VOC/ *.h5 *_saved_model/ *_web_model/ +*_openvino_model/ darknet53.conv.74 yolov3-tiny.conv.15 From 5bd6a97b18285120c389f7c59d605322702d1f5e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 3 Jan 2022 20:08:15 -0800 Subject: [PATCH 0801/1976] Global export format sort (#6182) * Global export sort * Cleanup --- detect.py | 6 +- export.py | 148 +++++++++++++++++++++++------------------------ models/common.py | 80 ++++++++++++------------- val.py | 6 +- 4 files changed, 120 insertions(+), 120 deletions(-) diff --git a/detect.py b/detect.py index 6aa5b825da48..a4a1fb69b42e 100644 --- a/detect.py +++ b/detect.py @@ -15,13 +15,13 @@ $ python path/to/detect.py --weights yolov5s.pt # PyTorch yolov5s.torchscript 
# TorchScript yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn - yolov5s.mlmodel # CoreML (under development) yolov5s.xml # OpenVINO + yolov5s.engine # TensorRT + yolov5s.mlmodel # CoreML (under development) yolov5s_saved_model # TensorFlow SavedModel - yolov5s.pb # TensorFlow protobuf + yolov5s.pb # TensorFlow GraphDef yolov5s.tflite # TensorFlow Lite yolov5s_edgetpu.tflite # TensorFlow Edge TPU - yolov5s.engine # TensorRT """ import argparse diff --git a/export.py b/export.py index fa40864ac378..3b677d2ca144 100644 --- a/export.py +++ b/export.py @@ -2,19 +2,19 @@ """ Export a YOLOv5 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit -Format | Example | `--include ...` argument +Format | `export.py --include` | Model --- | --- | --- -PyTorch | yolov5s.pt | - -TorchScript | yolov5s.torchscript | `torchscript` -ONNX | yolov5s.onnx | `onnx` -CoreML | yolov5s.mlmodel | `coreml` -OpenVINO | yolov5s_openvino_model/ | `openvino` -TensorFlow SavedModel | yolov5s_saved_model/ | `saved_model` -TensorFlow GraphDef | yolov5s.pb | `pb` -TensorFlow Lite | yolov5s.tflite | `tflite` -TensorFlow Edge TPU | yolov5s_edgetpu.tflite | `edgetpu` -TensorFlow.js | yolov5s_web_model/ | `tfjs` -TensorRT | yolov5s.engine | `engine` +PyTorch | - | yolov5s.pt +TorchScript | `torchscript` | yolov5s.torchscript +ONNX | `onnx` | yolov5s.onnx +OpenVINO | `openvino` | yolov5s_openvino_model/ +TensorRT | `engine` | yolov5s.engine +CoreML | `coreml` | yolov5s.mlmodel +TensorFlow SavedModel | `saved_model` | yolov5s_saved_model/ +TensorFlow GraphDef | `pb` | yolov5s.pb +TensorFlow Lite | `tflite` | yolov5s.tflite +TensorFlow Edge TPU | `edgetpu` | yolov5s_edgetpu.tflite +TensorFlow.js | `tfjs` | yolov5s_web_model/ Usage: $ python path/to/export.py --weights yolov5s.pt --include torchscript onnx coreml openvino saved_model tflite tfjs @@ -23,13 +23,13 @@ $ python path/to/detect.py --weights yolov5s.pt # PyTorch yolov5s.torchscript # TorchScript yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn - yolov5s.mlmodel # CoreML (under development) yolov5s.xml # OpenVINO + yolov5s.engine # TensorRT + yolov5s.mlmodel # CoreML (under development) yolov5s_saved_model # TensorFlow SavedModel - yolov5s.pb # TensorFlow protobuf + yolov5s.pb # TensorFlow GraphDef yolov5s.tflite # TensorFlow Lite yolov5s_edgetpu.tflite # TensorFlow Edge TPU - yolov5s.engine # TensorRT TensorFlow.js: $ cd .. 
&& git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example @@ -126,6 +126,23 @@ def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorst LOGGER.info(f'{prefix} export failure: {e}') +def export_openvino(model, im, file, prefix=colorstr('OpenVINO:')): + # YOLOv5 OpenVINO export + try: + check_requirements(('openvino-dev',)) # requires openvino-dev: https://pypi.org/project/openvino-dev/ + import openvino.inference_engine as ie + + LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...') + f = str(file).replace('.pt', '_openvino_model' + os.sep) + + cmd = f"mo --input_model {file.with_suffix('.onnx')} --output_dir {f}" + subprocess.check_output(cmd, shell=True) + + LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + except Exception as e: + LOGGER.info(f'\n{prefix} export failure: {e}') + + def export_coreml(model, im, file, prefix=colorstr('CoreML:')): # YOLOv5 CoreML export ct_model = None @@ -148,27 +165,57 @@ def export_coreml(model, im, file, prefix=colorstr('CoreML:')): return ct_model -def export_openvino(model, im, file, prefix=colorstr('OpenVINO:')): - # YOLOv5 OpenVINO export +def export_engine(model, im, file, train, half, simplify, workspace=4, verbose=False, prefix=colorstr('TensorRT:')): + # YOLOv5 TensorRT export https://developer.nvidia.com/tensorrt try: - check_requirements(('openvino-dev',)) # requires openvino-dev: https://pypi.org/project/openvino-dev/ - import openvino.inference_engine as ie + check_requirements(('tensorrt',)) + import tensorrt as trt - LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...') - f = str(file).replace('.pt', '_openvino_model' + os.sep) + opset = (12, 13)[trt.__version__[0] == '8'] # test on TensorRT 7.x and 8.x + export_onnx(model, im, file, opset, train, False, simplify) + onnx = file.with_suffix('.onnx') + assert onnx.exists(), f'failed to export ONNX file: {onnx}' - cmd = f"mo --input_model {file.with_suffix('.onnx')} --output_dir {f}" - subprocess.check_output(cmd, shell=True) + LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...') + f = file.with_suffix('.engine') # TensorRT engine file + logger = trt.Logger(trt.Logger.INFO) + if verbose: + logger.min_severity = trt.Logger.Severity.VERBOSE + + builder = trt.Builder(logger) + config = builder.create_builder_config() + config.max_workspace_size = workspace * 1 << 30 + + flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)) + network = builder.create_network(flag) + parser = trt.OnnxParser(network, logger) + if not parser.parse_from_file(str(onnx)): + raise RuntimeError(f'failed to load ONNX file: {onnx}') + + inputs = [network.get_input(i) for i in range(network.num_inputs)] + outputs = [network.get_output(i) for i in range(network.num_outputs)] + LOGGER.info(f'{prefix} Network Description:') + for inp in inputs: + LOGGER.info(f'{prefix}\tinput "{inp.name}" with shape {inp.shape} and dtype {inp.dtype}') + for out in outputs: + LOGGER.info(f'{prefix}\toutput "{out.name}" with shape {out.shape} and dtype {out.dtype}') + half &= builder.platform_has_fast_fp16 + LOGGER.info(f'{prefix} building FP{16 if half else 32} engine in {f}') + if half: + config.set_flag(trt.BuilderFlag.FP16) + with builder.build_engine(network, config) as engine, open(f, 'wb') as t: + t.write(engine.serialize()) LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + except Exception as e: LOGGER.info(f'\n{prefix} export failure: 
{e}') def export_saved_model(model, im, file, dynamic, tf_nms=False, agnostic_nms=False, topk_per_class=100, topk_all=100, iou_thres=0.45, - conf_thres=0.25, prefix=colorstr('TensorFlow saved_model:')): - # YOLOv5 TensorFlow saved_model export + conf_thres=0.25, prefix=colorstr('TensorFlow SavedModel:')): + # YOLOv5 TensorFlow SavedModel export keras_model = None try: import tensorflow as tf @@ -304,53 +351,6 @@ def export_tfjs(keras_model, im, file, prefix=colorstr('TensorFlow.js:')): LOGGER.info(f'\n{prefix} export failure: {e}') -def export_engine(model, im, file, train, half, simplify, workspace=4, verbose=False, prefix=colorstr('TensorRT:')): - # YOLOv5 TensorRT export https://developer.nvidia.com/tensorrt - try: - check_requirements(('tensorrt',)) - import tensorrt as trt - - opset = (12, 13)[trt.__version__[0] == '8'] # test on TensorRT 7.x and 8.x - export_onnx(model, im, file, opset, train, False, simplify) - onnx = file.with_suffix('.onnx') - assert onnx.exists(), f'failed to export ONNX file: {onnx}' - - LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...') - f = file.with_suffix('.engine') # TensorRT engine file - logger = trt.Logger(trt.Logger.INFO) - if verbose: - logger.min_severity = trt.Logger.Severity.VERBOSE - - builder = trt.Builder(logger) - config = builder.create_builder_config() - config.max_workspace_size = workspace * 1 << 30 - - flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)) - network = builder.create_network(flag) - parser = trt.OnnxParser(network, logger) - if not parser.parse_from_file(str(onnx)): - raise RuntimeError(f'failed to load ONNX file: {onnx}') - - inputs = [network.get_input(i) for i in range(network.num_inputs)] - outputs = [network.get_output(i) for i in range(network.num_outputs)] - LOGGER.info(f'{prefix} Network Description:') - for inp in inputs: - LOGGER.info(f'{prefix}\tinput "{inp.name}" with shape {inp.shape} and dtype {inp.dtype}') - for out in outputs: - LOGGER.info(f'{prefix}\toutput "{out.name}" with shape {out.shape} and dtype {out.dtype}') - - half &= builder.platform_has_fast_fp16 - LOGGER.info(f'{prefix} building FP{16 if half else 32} engine in {f}') - if half: - config.set_flag(trt.BuilderFlag.FP16) - with builder.build_engine(network, config) as engine, open(f, 'wb') as t: - t.write(engine.serialize()) - LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - - except Exception as e: - LOGGER.info(f'\n{prefix} export failure: {e}') - - @torch.no_grad() def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' weights=ROOT / 'yolov5s.pt', # weights path @@ -417,12 +417,12 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' export_torchscript(model, im, file, optimize) if ('onnx' in include) or ('openvino' in include): # OpenVINO requires ONNX export_onnx(model, im, file, opset, train, dynamic, simplify) + if 'openvino' in include: + export_openvino(model, im, file) if 'engine' in include: export_engine(model, im, file, train, half, simplify, workspace, verbose) if 'coreml' in include: export_coreml(model, im, file) - if 'openvino' in include: - export_openvino(model, im, file) # TensorFlow Exports if any(tf_exports): diff --git a/models/common.py b/models/common.py index 519ce611d7ef..284dd2bb3af0 100644 --- a/models/common.py +++ b/models/common.py @@ -316,17 +316,6 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=False, data=None): if extra_files['config.txt']: d = json.loads(extra_files['config.txt']) # extra_files dict stride, 
names = int(d['stride']), d['names'] - elif coreml: # CoreML - LOGGER.info(f'Loading {w} for CoreML inference...') - import coremltools as ct - model = ct.models.MLModel(w) - elif xml: # OpenVINO - LOGGER.info(f'Loading {w} for OpenVINO inference...') - check_requirements(('openvino-dev',)) # requires openvino-dev: https://pypi.org/project/openvino-dev/ - import openvino.inference_engine as ie - core = ie.IECore() - network = core.read_network(model=w, weights=Path(w).with_suffix('.bin')) # *.xml, *.bin paths - executable_network = core.load_network(network, device_name='CPU', num_requests=1) elif dnn: # ONNX OpenCV DNN LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...') check_requirements(('opencv-python>=4.5.4',)) @@ -338,6 +327,13 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=False, data=None): import onnxruntime providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider'] session = onnxruntime.InferenceSession(w, providers=providers) + elif xml: # OpenVINO + LOGGER.info(f'Loading {w} for OpenVINO inference...') + check_requirements(('openvino-dev',)) # requires openvino-dev: https://pypi.org/project/openvino-dev/ + import openvino.inference_engine as ie + core = ie.IECore() + network = core.read_network(model=w, weights=Path(w).with_suffix('.bin')) # *.xml, *.bin paths + executable_network = core.load_network(network, device_name='CPU', num_requests=1) elif engine: # TensorRT LOGGER.info(f'Loading {w} for TensorRT inference...') import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download @@ -356,9 +352,17 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=False, data=None): binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items()) context = model.create_execution_context() batch_size = bindings['images'].shape[0] - else: # TensorFlow (TFLite, pb, saved_model) - if pb: # https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt - LOGGER.info(f'Loading {w} for TensorFlow *.pb inference...') + elif coreml: # CoreML + LOGGER.info(f'Loading {w} for CoreML inference...') + import coremltools as ct + model = ct.models.MLModel(w) + else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU) + if saved_model: # SavedModel + LOGGER.info(f'Loading {w} for TensorFlow SavedModel inference...') + import tensorflow as tf + model = tf.keras.models.load_model(w) + elif pb: # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt + LOGGER.info(f'Loading {w} for TensorFlow GraphDef inference...') import tensorflow as tf def wrap_frozen_graph(gd, inputs, outputs): @@ -369,19 +373,15 @@ def wrap_frozen_graph(gd, inputs, outputs): graph_def = tf.Graph().as_graph_def() graph_def.ParseFromString(open(w, 'rb').read()) frozen_func = wrap_frozen_graph(gd=graph_def, inputs="x:0", outputs="Identity:0") - elif saved_model: - LOGGER.info(f'Loading {w} for TensorFlow saved_model inference...') - import tensorflow as tf - model = tf.keras.models.load_model(w) elif tflite: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python - if 'edgetpu' in w.lower(): + if 'edgetpu' in w.lower(): # Edge TPU LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...') import tflite_runtime.interpreter as tfli delegate = {'Linux': 'libedgetpu.so.1', # install https://coral.ai/software/#edgetpu-runtime 'Darwin': 'libedgetpu.1.dylib', 'Windows': 'edgetpu.dll'}[platform.system()] interpreter = tfli.Interpreter(model_path=w, 
experimental_delegates=[tfli.load_delegate(delegate)]) - else: + else: # Lite LOGGER.info(f'Loading {w} for TensorFlow Lite inference...') import tensorflow as tf interpreter = tf.lite.Interpreter(model_path=w) # load TFLite model @@ -396,21 +396,13 @@ def forward(self, im, augment=False, visualize=False, val=False): if self.pt or self.jit: # PyTorch y = self.model(im) if self.jit else self.model(im, augment=augment, visualize=visualize) return y if val else y[0] - elif self.coreml: # CoreML - im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3) - im = Image.fromarray((im[0] * 255).astype('uint8')) - # im = im.resize((192, 320), Image.ANTIALIAS) - y = self.model.predict({'image': im}) # coordinates are xywh normalized - box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]]) # xyxy pixels - conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float) - y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1) - elif self.onnx: # ONNX + elif self.dnn: # ONNX OpenCV DNN im = im.cpu().numpy() # torch to numpy - if self.dnn: # ONNX OpenCV DNN - self.net.setInput(im) - y = self.net.forward() - else: # ONNX Runtime - y = self.session.run([self.session.get_outputs()[0].name], {self.session.get_inputs()[0].name: im})[0] + self.net.setInput(im) + y = self.net.forward() + elif self.onnx: # ONNX Runtime + im = im.cpu().numpy() # torch to numpy + y = self.session.run([self.session.get_outputs()[0].name], {self.session.get_inputs()[0].name: im})[0] elif self.xml: # OpenVINO im = im.cpu().numpy() # FP32 desc = self.ie.TensorDesc(precision='FP32', dims=im.shape, layout='NCHW') # Tensor Description @@ -423,13 +415,21 @@ def forward(self, im, augment=False, visualize=False, val=False): self.binding_addrs['images'] = int(im.data_ptr()) self.context.execute_v2(list(self.binding_addrs.values())) y = self.bindings['output'].data - else: # TensorFlow model (TFLite, pb, saved_model) + elif self.coreml: # CoreML im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3) - if self.pb: - y = self.frozen_func(x=self.tf.constant(im)).numpy() - elif self.saved_model: + im = Image.fromarray((im[0] * 255).astype('uint8')) + # im = im.resize((192, 320), Image.ANTIALIAS) + y = self.model.predict({'image': im}) # coordinates are xywh normalized + box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]]) # xyxy pixels + conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float) + y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1) + else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU) + im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3) + if self.saved_model: # SavedModel y = self.model(im, training=False).numpy() - elif self.tflite: + elif self.pb: # GraphDef + y = self.frozen_func(x=self.tf.constant(im)).numpy() + elif self.tflite: # Lite input, output = self.input_details[0], self.output_details[0] int8 = input['dtype'] == np.uint8 # is TFLite quantized uint8 model if int8: @@ -451,7 +451,7 @@ def forward(self, im, augment=False, visualize=False, val=False): def warmup(self, imgsz=(1, 3, 640, 640), half=False): # Warmup model by running inference once - if self.pt or self.engine or self.onnx: # warmup types + if self.pt or self.jit or self.onnx or self.engine: # warmup types if isinstance(self.device, torch.device) and self.device.type != 'cpu': # only warmup GPU models im = torch.zeros(*imgsz).to(self.device).type(torch.half if half else torch.float) # input 
image self.forward(im) # warmup diff --git a/val.py b/val.py index 704a7a46eb38..4d707f62bffa 100644 --- a/val.py +++ b/val.py @@ -9,13 +9,13 @@ $ python path/to/val.py --weights yolov5s.pt # PyTorch yolov5s.torchscript # TorchScript yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn - yolov5s.mlmodel # CoreML (under development) yolov5s.xml # OpenVINO + yolov5s.engine # TensorRT + yolov5s.mlmodel # CoreML (under development) yolov5s_saved_model # TensorFlow SavedModel - yolov5s.pb # TensorFlow protobuf + yolov5s.pb # TensorFlow GraphDef yolov5s.tflite # TensorFlow Lite yolov5s_edgetpu.tflite # TensorFlow Edge TPU - yolov5s.engine # TensorRT """ import argparse From fb839298a16fca4143dc70619a084f9ed085ac07 Mon Sep 17 00:00:00 2001 From: Yin Rong Date: Tue, 4 Jan 2022 12:25:48 +0800 Subject: [PATCH 0802/1976] Fix TorchScript on mobile export (#6183) * fix export of TorchScript on mobile * Cleanup Co-authored-by: yinrong Co-authored-by: Glenn Jocher --- export.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/export.py b/export.py index 3b677d2ca144..d169aa0de5f5 100644 --- a/export.py +++ b/export.py @@ -75,7 +75,10 @@ def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:' ts = torch.jit.trace(model, im, strict=False) d = {"shape": im.shape, "stride": int(max(model.stride)), "names": model.names} extra_files = {'config.txt': json.dumps(d)} # torch._C.ExtraFilesMap() - (optimize_for_mobile(ts) if optimize else ts).save(str(f), _extra_files=extra_files) + if optimize: # https://pytorch.org/tutorials/recipes/mobile_interpreter.html + optimize_for_mobile(ts)._save_for_lite_interpreter(str(f), _extra_files=extra_files) + else: + ts.save(str(f), _extra_files=extra_files) LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') except Exception as e: From a2f4a1799ba6dabea4cd74a3b1e292c102918670 Mon Sep 17 00:00:00 2001 From: imyhxy Date: Wed, 5 Jan 2022 03:09:25 +0800 Subject: [PATCH 0803/1976] TensorRT 7 `anchor_grid` compatibility fix (#6185) * fix: TensorRT 7 incompatiable * Add comment * Add if: else and comment Co-authored-by: Glenn Jocher --- export.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/export.py b/export.py index d169aa0de5f5..6cf1db2c45b8 100644 --- a/export.py +++ b/export.py @@ -175,7 +175,13 @@ def export_engine(model, im, file, train, half, simplify, workspace=4, verbose=F import tensorrt as trt opset = (12, 13)[trt.__version__[0] == '8'] # test on TensorRT 7.x and 8.x - export_onnx(model, im, file, opset, train, False, simplify) + if opset == 12: # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012 + grid = model.model[-1].anchor_grid + model.model[-1].anchor_grid = [a[..., :1, :1, :] for a in grid] + export_onnx(model, im, file, opset, train, False, simplify) + model.model[-1].anchor_grid = grid + else: # TensorRT >= 8 + export_onnx(model, im, file, opset, train, False, simplify) onnx = file.with_suffix('.onnx') assert onnx.exists(), f'failed to export ONNX file: {onnx}' @@ -418,12 +424,12 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' # Exports if 'torchscript' in include: export_torchscript(model, im, file, optimize) + if 'engine' in include: # TensorRT required before ONNX + export_engine(model, im, file, train, half, simplify, workspace, verbose) if ('onnx' in include) or ('openvino' in include): # OpenVINO requires ONNX export_onnx(model, im, file, opset, train, dynamic, simplify) if 'openvino' in include: 
export_openvino(model, im, file) - if 'engine' in include: - export_engine(model, im, file, train, half, simplify, workspace, verbose) if 'coreml' in include: export_coreml(model, im, file) From 7b31a531b45f9c8b9ec543cdfbf5c9d0c9aa920d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 4 Jan 2022 13:39:13 -0800 Subject: [PATCH 0804/1976] Add `tensorrt>=7.0.0` checks (#6193) * Add `tensorrt>=7.0.0` checks * Update export.py * Update common.py * Update export.py --- export.py | 12 ++++++------ models/common.py | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/export.py b/export.py index 6cf1db2c45b8..a0cb5fdc5678 100644 --- a/export.py +++ b/export.py @@ -61,8 +61,8 @@ from models.yolo import Detect from utils.activations import SiLU from utils.datasets import LoadImages -from utils.general import (LOGGER, check_dataset, check_img_size, check_requirements, colorstr, file_size, print_args, - url2file) +from utils.general import (LOGGER, check_dataset, check_img_size, check_requirements, check_version, colorstr, + file_size, print_args, url2file) from utils.torch_utils import select_device @@ -174,14 +174,14 @@ def export_engine(model, im, file, train, half, simplify, workspace=4, verbose=F check_requirements(('tensorrt',)) import tensorrt as trt - opset = (12, 13)[trt.__version__[0] == '8'] # test on TensorRT 7.x and 8.x - if opset == 12: # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012 + if trt.__version__[0] == '7': # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012 grid = model.model[-1].anchor_grid model.model[-1].anchor_grid = [a[..., :1, :1, :] for a in grid] - export_onnx(model, im, file, opset, train, False, simplify) + export_onnx(model, im, file, 12, train, False, simplify) # opset 12 model.model[-1].anchor_grid = grid else: # TensorRT >= 8 - export_onnx(model, im, file, opset, train, False, simplify) + check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=8.0.0 + export_onnx(model, im, file, 13, train, False, simplify) # opset 13 onnx = file.with_suffix('.onnx') assert onnx.exists(), f'failed to export ONNX file: {onnx}' diff --git a/models/common.py b/models/common.py index 284dd2bb3af0..836314568f67 100644 --- a/models/common.py +++ b/models/common.py @@ -337,7 +337,7 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=False, data=None): elif engine: # TensorRT LOGGER.info(f'Loading {w} for TensorRT inference...') import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download - check_version(trt.__version__, '8.0.0', verbose=True) # version requirement + check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=7.0.0 Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr')) logger = trt.Logger(trt.Logger.INFO) with open(w, 'rb') as f, trt.Runtime(logger) as runtime: From b5b56a3c887e5cc93770f54233f3ba4b2cc214ec Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 4 Jan 2022 17:49:09 -0800 Subject: [PATCH 0805/1976] Add CoreML inference (#6195) * Add Apple CoreML inference * Cleanup --- detect.py | 2 +- export.py | 3 +-- models/common.py | 9 ++++++--- val.py | 2 +- 4 files changed, 9 insertions(+), 7 deletions(-) diff --git a/detect.py b/detect.py index a4a1fb69b42e..2d1963ad6f86 100644 --- a/detect.py +++ b/detect.py @@ -17,7 +17,7 @@ yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn yolov5s.xml # OpenVINO yolov5s.engine # TensorRT - yolov5s.mlmodel # CoreML (under development) + yolov5s.mlmodel # CoreML (MacOS-only) yolov5s_saved_model #
TensorFlow SavedModel yolov5s.pb # TensorFlow GraphDef yolov5s.tflite # TensorFlow Lite diff --git a/export.py b/export.py index a0cb5fdc5678..3447fc6ed1ab 100644 --- a/export.py +++ b/export.py @@ -25,7 +25,7 @@ yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn yolov5s.xml # OpenVINO yolov5s.engine # TensorRT - yolov5s.mlmodel # CoreML (under development) + yolov5s.mlmodel # CoreML (MacOS-only) yolov5s_saved_model # TensorFlow SavedModel yolov5s.pb # TensorFlow GraphDef yolov5s.tflite # TensorFlow Lite @@ -156,7 +156,6 @@ def export_coreml(model, im, file, prefix=colorstr('CoreML:')): LOGGER.info(f'\n{prefix} starting export with coremltools {ct.__version__}...') f = file.with_suffix('.mlmodel') - model.train() # CoreML exports should be placed in model.train() mode ts = torch.jit.trace(model, im, strict=False) # TorchScript model ct_model = ct.convert(ts, inputs=[ct.ImageType('image', shape=im.shape, scale=1 / 255, bias=[0, 0, 0])]) ct_model.save(f) diff --git a/models/common.py b/models/common.py index 836314568f67..d8d5423a16e0 100644 --- a/models/common.py +++ b/models/common.py @@ -420,9 +420,12 @@ def forward(self, im, augment=False, visualize=False, val=False): im = Image.fromarray((im[0] * 255).astype('uint8')) # im = im.resize((192, 320), Image.ANTIALIAS) y = self.model.predict({'image': im}) # coordinates are xywh normalized - box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]]) # xyxy pixels - conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float) - y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1) + if 'confidence' in y: + box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]]) # xyxy pixels + conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float) + y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1) + else: + y = y[list(y)[-1]] # last output else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU) im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3) if self.saved_model: # SavedModel diff --git a/val.py b/val.py index 4d707f62bffa..4709f67511bb 100644 --- a/val.py +++ b/val.py @@ -11,7 +11,7 @@ yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn yolov5s.xml # OpenVINO yolov5s.engine # TensorRT - yolov5s.mlmodel # CoreML (under development) + yolov5s.mlmodel # CoreML (MacOS-only) yolov5s_saved_model # TensorFlow SavedModel yolov5s.pb # TensorFlow GraphDef yolov5s.tflite # TensorFlow Lite From 9e9219fe17070ae38b50d29c11d460ed0ec8b1db Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 4 Jan 2022 19:32:42 -0800 Subject: [PATCH 0806/1976] Fix `nan`-robust stream FPS (#6198) Fix for Webcam stop working suddenly (Issue #6197) --- utils/datasets.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/utils/datasets.py b/utils/datasets.py index 79b871c9294b..6584342a621d 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -6,6 +6,7 @@ import glob import hashlib import json +import math import os import random import shutil @@ -308,8 +309,9 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True): assert cap.isOpened(), f'{st}Failed to open {s}' w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) - self.fps[i] = max(cap.get(cv2.CAP_PROP_FPS) % 100, 0) or 30.0 # 30 FPS fallback + fps = cap.get(cv2.CAP_PROP_FPS) # warning: may return 0 or nan self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback + self.fps[i] = max((fps if 
math.isfinite(fps) else 0) % 100, 0) or 30 # 30 FPS fallback _, self.imgs[i] = cap.read() # guarantee first frame self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True) From 5402753a53b0fe0b5fe70af5fcf4498ad138c99b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 4 Jan 2022 19:36:12 -0800 Subject: [PATCH 0807/1976] Edge TPU compiler comment (#6196) * Edge TPU compiler comment * 7 to 8 fix --- export.py | 6 +++--- models/common.py | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/export.py b/export.py index 3447fc6ed1ab..c56a0a99a635 100644 --- a/export.py +++ b/export.py @@ -17,7 +17,7 @@ TensorFlow.js | `tfjs` | yolov5s_web_model/ Usage: - $ python path/to/export.py --weights yolov5s.pt --include torchscript onnx coreml openvino saved_model tflite tfjs + $ python path/to/export.py --weights yolov5s.pt --include torchscript onnx openvino engine coreml tflite ... Inference: $ python path/to/detect.py --weights yolov5s.pt # PyTorch @@ -179,7 +179,7 @@ def export_engine(model, im, file, train, half, simplify, workspace=4, verbose=F export_onnx(model, im, file, 12, train, False, simplify) # opset 12 model.model[-1].anchor_grid = grid else: # TensorRT >= 8 - check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=8.0.0 + check_version(trt.__version__, '8.0.0', hard=True) # require tensorrt>=8.0.0 export_onnx(model, im, file, 13, train, False, simplify) # opset 13 onnx = file.with_suffix('.onnx') assert onnx.exists(), f'failed to export ONNX file: {onnx}' @@ -308,7 +308,7 @@ def export_tflite(keras_model, im, file, int8, data, ncalib, prefix=colorstr('Te def export_edgetpu(keras_model, im, file, prefix=colorstr('Edge TPU:')): # YOLOv5 Edge TPU export https://coral.ai/docs/edgetpu/models-intro/ try: - cmd = 'edgetpu_compiler --version' + cmd = 'edgetpu_compiler --version' # install https://coral.ai/docs/edgetpu/compiler/ out = subprocess.run(cmd, shell=True, capture_output=True, check=True) ver = out.stdout.decode().split()[-1] LOGGER.info(f'\n{prefix} starting export with Edge TPU compiler {ver}...') diff --git a/models/common.py b/models/common.py index d8d5423a16e0..b055cb68a439 100644 --- a/models/common.py +++ b/models/common.py @@ -376,8 +376,8 @@ def wrap_frozen_graph(gd, inputs, outputs): elif tflite: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python if 'edgetpu' in w.lower(): # Edge TPU LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...') - import tflite_runtime.interpreter as tfli - delegate = {'Linux': 'libedgetpu.so.1', # install https://coral.ai/software/#edgetpu-runtime + import tflite_runtime.interpreter as tfli # install https://coral.ai/software/#edgetpu-runtime + delegate = {'Linux': 'libedgetpu.so.1', 'Darwin': 'libedgetpu.1.dylib', 'Windows': 'edgetpu.dll'}[platform.system()] interpreter = tfli.Interpreter(model_path=w, experimental_delegates=[tfli.load_delegate(delegate)]) From 8125ec5d4230441611c49f1064bbfae15a487fac Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 5 Jan 2022 13:01:21 -0800 Subject: [PATCH 0808/1976] TFLite `--int8` 'flatbuffers==1.12' fix (#6216) * TFLite `--int8` 'flatbuffers==1.12' fix Temporary workaround for TFLite INT8 export. 
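In practice the workaround, finalized by the follow-up fix in the next patch, amounts to pinning flatbuffers before TensorFlow is first imported; a minimal sketch of that ordering, assuming it runs inside the repository:
```python
# The pin must happen before the first `import tensorflow`, otherwise the
# already-loaded flatbuffers version wins for the rest of the process
from utils.general import check_requirements

check_requirements(('flatbuffers==1.12',))  # https://github.com/ultralytics/yolov5/issues/5707
import tensorflow as tf  # noqa: E402

print(tf.__version__)  # TFLite INT8 conversion can now proceed safely
```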
* Update export.py * Update export.py --- export.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/export.py b/export.py index c56a0a99a635..0e8e4242f487 100644 --- a/export.py +++ b/export.py @@ -277,8 +277,6 @@ def export_tflite(keras_model, im, file, int8, data, ncalib, prefix=colorstr('Te try: import tensorflow as tf - from models.tf import representative_dataset_gen - LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') batch_size, ch, *imgsz = list(im.shape) # BCHW f = str(file).replace('.pt', '-fp16.tflite') @@ -288,6 +286,8 @@ def export_tflite(keras_model, im, file, int8, data, ncalib, prefix=colorstr('Te converter.target_spec.supported_types = [tf.float16] converter.optimizations = [tf.lite.Optimize.DEFAULT] if int8: + from models.tf import representative_dataset_gen + check_requirements(('flatbuffers==1.12',)) # https://github.com/ultralytics/yolov5/issues/5707 dataset = LoadImages(check_dataset(data)['train'], img_size=imgsz, auto=False) # representative data converter.representative_dataset = lambda: representative_dataset_gen(dataset, ncalib) converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] From 00d7b978690d16729f411393bbd56f9dbd6a840c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 5 Jan 2022 13:34:36 -0800 Subject: [PATCH 0809/1976] TFLite `--int8` 'flatbuffers==1.12' fix 2 (#6217) * TFLite `--int8` 'flatbuffers==1.12' fix 2 Reorganizes #6216 fix to update before `tensorflow` import so no restart required. * Update export.py --- export.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/export.py b/export.py index 0e8e4242f487..2466d2538ee8 100644 --- a/export.py +++ b/export.py @@ -287,7 +287,6 @@ def export_tflite(keras_model, im, file, int8, data, ncalib, prefix=colorstr('Te converter.optimizations = [tf.lite.Optimize.DEFAULT] if int8: from models.tf import representative_dataset_gen - check_requirements(('flatbuffers==1.12',)) # https://github.com/ultralytics/yolov5/issues/5707 dataset = LoadImages(check_dataset(data)['train'], img_size=imgsz, auto=False) # representative data converter.representative_dataset = lambda: representative_dataset_gen(dataset, ncalib) converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] @@ -435,6 +434,8 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' # TensorFlow Exports if any(tf_exports): pb, tflite, edgetpu, tfjs = tf_exports[1:] + if (tflite or edgetpu) and int8: # TFLite --int8 bug https://github.com/ultralytics/yolov5/issues/5707 + check_requirements(('flatbuffers==1.12',)) # required before `import tensorflow` assert not (tflite and tfjs), 'TFLite and TF.js models must be exported separately, please pass only one type.' 
model = export_saved_model(model, im, file, dynamic, tf_nms=nms or agnostic_nms or tfjs, agnostic_nms=agnostic_nms or tfjs, topk_per_class=topk_per_class, topk_all=topk_all, From b4ac3df6ffd6efb02200b45083b5527af41740ec Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 5 Jan 2022 14:55:04 -0800 Subject: [PATCH 0810/1976] Add `edgetpu_compiler` checks (#6218) * Add `edgetpu_compiler` checks * Update export.py * Update export.py * Update export.py * Update export.py * Update export.py * Update export.py --- export.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/export.py b/export.py index 2466d2538ee8..21aa67ff0a48 100644 --- a/export.py +++ b/export.py @@ -41,6 +41,7 @@ import argparse import json import os +import platform import subprocess import sys import time @@ -307,11 +308,15 @@ def export_tflite(keras_model, im, file, int8, data, ncalib, prefix=colorstr('Te def export_edgetpu(keras_model, im, file, prefix=colorstr('Edge TPU:')): # YOLOv5 Edge TPU export https://coral.ai/docs/edgetpu/models-intro/ try: - cmd = 'edgetpu_compiler --version' # install https://coral.ai/docs/edgetpu/compiler/ + cmd = 'edgetpu_compiler --version' + help = 'See https://coral.ai/docs/edgetpu/compiler/' + assert platform.system() == 'Linux', f'export only supported on Linux. {help}' + assert subprocess.run(cmd, shell=True).returncode == 0, f'export requires edgetpu-compiler. {help}' out = subprocess.run(cmd, shell=True, capture_output=True, check=True) ver = out.stdout.decode().split()[-1] + LOGGER.info(f'\n{prefix} starting export with Edge TPU compiler {ver}...') - f = str(file).replace('.pt', '-int8_edgetpu.tflite') + f = str(file).replace('.pt', '-int8_edgetpu.tflite') # Edge TPU model f_tfl = str(file).replace('.pt', '-int8.tflite') # TFLite model cmd = f"edgetpu_compiler -s {f_tfl}" @@ -434,7 +439,7 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' # TensorFlow Exports if any(tf_exports): pb, tflite, edgetpu, tfjs = tf_exports[1:] - if (tflite or edgetpu) and int8: # TFLite --int8 bug https://github.com/ultralytics/yolov5/issues/5707 + if int8 or edgetpu: # TFLite --int8 bug https://github.com/ultralytics/yolov5/issues/5707 check_requirements(('flatbuffers==1.12',)) # required before `import tensorflow` assert not (tflite and tfjs), 'TFLite and TF.js models must be exported separately, please pass only one type.' model = export_saved_model(model, im, file, dynamic, tf_nms=nms or agnostic_nms or tfjs, From f80c463010101e463670440537662f671cbaaa04 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 5 Jan 2022 20:57:20 -0800 Subject: [PATCH 0811/1976] Attempt `edgetpu-compiler` autoinstall (#6223) * Attempt `edgetpu-compiler` autoinstall Attempt to install edgetpu-compiler dependency if missing on Linux. * Update export.py * Update export.py --- export.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/export.py b/export.py index 21aa67ff0a48..6d15b21d031d 100644 --- a/export.py +++ b/export.py @@ -309,11 +309,16 @@ def export_edgetpu(keras_model, im, file, prefix=colorstr('Edge TPU:')): # YOLOv5 Edge TPU export https://coral.ai/docs/edgetpu/models-intro/ try: cmd = 'edgetpu_compiler --version' - help = 'See https://coral.ai/docs/edgetpu/compiler/' - assert platform.system() == 'Linux', f'export only supported on Linux. {help}' - assert subprocess.run(cmd, shell=True).returncode == 0, f'export requires edgetpu-compiler. 
{help}' - out = subprocess.run(cmd, shell=True, capture_output=True, check=True) - ver = out.stdout.decode().split()[-1] + help_url = 'https://coral.ai/docs/edgetpu/compiler/' + assert platform.system() == 'Linux', f'export only supported on Linux. See {help_url}' + if subprocess.run(cmd, shell=True).returncode != 0: + LOGGER.info(f'\n{prefix} export requires Edge TPU compiler. Attempting install from {help_url}') + for c in ['curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -', + 'echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list', + 'sudo apt-get update', + 'sudo apt-get install edgetpu-compiler']: + subprocess.run(c, shell=True, check=True) + ver = subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1] LOGGER.info(f'\n{prefix} starting export with Edge TPU compiler {ver}...') f = str(file).replace('.pt', '-int8_edgetpu.tflite') # Edge TPU model From ad565e31d2f0bf515d94a95690a3e13a83ce3b30 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 6 Jan 2022 09:55:31 -0800 Subject: [PATCH 0812/1976] Update README speed reproduction command (#6228) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 59abd084572c..5f45eb407fc5 100644 --- a/README.md +++ b/README.md @@ -251,7 +251,7 @@ We are super excited about our first-ever Ultralytics YOLOv5 🚀 EXPORT Competi * All checkpoints are trained to 300 epochs with default settings and hyperparameters. * **mAPval** values are for single-model single-scale on [COCO val2017](http://cocodataset.org) dataset.
Reproduce by `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` -* **Speed** averaged over COCO val images using a [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) instance. NMS times (~1 ms/img) not included.
Reproduce by `python val.py --data coco.yaml --img 640 --conf 0.25 --iou 0.45` +* **Speed** averaged over COCO val images using an [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) instance. NMS times (~1 ms/img) not included.<br>
Reproduce by `python val.py --data coco.yaml --img 640 --task speed --batch 1` * **TTA** [Test Time Augmentation](https://github.com/ultralytics/yolov5/issues/303) includes reflection and scale augmentations.
Reproduce by `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`
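These README benchmarks can also plausibly be driven from Python through val.py's `run()` entry point; the sketch below assumes the keyword arguments mirror the CLI flags quoted above (names may differ between versions, so treat them as illustrative rather than authoritative):

```python
# Sketch: programmatic equivalent of the README speed-reproduction command
import val  # yolov5/val.py

results = val.run(data='coco.yaml',      # dataset config
                  weights='yolov5s.pt',  # checkpoint to benchmark
                  imgsz=640,             # inference image size
                  task='speed',          # timing-only run, as in the README
                  batch_size=1)          # batch 1 measures per-image latency
```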
From 33a67b4918aa3f2e572f115781e615a91fd543a4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 6 Jan 2022 11:08:09 -0800 Subject: [PATCH 0813/1976] Update P2-P7 `models/hub` variants (#6230) * Update p2-p7 `models/hub` variants * Update common.py * AutoAnchor camelcase corrections --- models/common.py | 2 +- models/hub/yolov5-p2.yaml | 4 ++-- models/hub/yolov5-p34.yaml | 41 ++++++++++++++++++++++++++++++++++++++ models/hub/yolov5-p6.yaml | 4 ++-- models/hub/yolov5-p7.yaml | 4 ++-- train.py | 2 +- tutorial.ipynb | 2 +- 7 files changed, 50 insertions(+), 9 deletions(-) create mode 100644 models/hub/yolov5-p34.yaml diff --git a/models/common.py b/models/common.py index b055cb68a439..e375507a5a7e 100644 --- a/models/common.py +++ b/models/common.py @@ -306,7 +306,7 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=False, data=None): if pt: # PyTorch model = attempt_load(weights if isinstance(weights, list) else w, map_location=device) - stride = int(model.stride.max()) # model stride + stride = max(int(model.stride.max()), 32) # model stride names = model.module.names if hasattr(model, 'module') else model.names # get class names self.model = model # explicitly assign for to(), cpu(), cuda(), half() elif jit: # TorchScript diff --git a/models/hub/yolov5-p2.yaml b/models/hub/yolov5-p2.yaml index ffe26ebad182..554117dda59a 100644 --- a/models/hub/yolov5-p2.yaml +++ b/models/hub/yolov5-p2.yaml @@ -4,7 +4,7 @@ nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple -anchors: 3 # auto-anchor evolves 3 anchors per P output layer +anchors: 3 # AutoAnchor evolves 3 anchors per P output layer # YOLOv5 v6.0 backbone backbone: @@ -21,7 +21,7 @@ backbone: [-1, 1, SPPF, [1024, 5]], # 9 ] -# YOLOv5 v6.0 head +# YOLOv5 v6.0 head with (P2, P3, P4, P5) outputs head: [[-1, 1, Conv, [512, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], diff --git a/models/hub/yolov5-p34.yaml b/models/hub/yolov5-p34.yaml new file mode 100644 index 000000000000..dbf0f850083e --- /dev/null +++ b/models/hub/yolov5-p34.yaml @@ -0,0 +1,41 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 0.33 # model depth multiple +width_multiple: 0.50 # layer channel multiple +anchors: 3 # AutoAnchor evolves 3 anchors per P output layer + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [ [ -1, 1, Conv, [ 64, 6, 2, 2 ] ], # 0-P1/2 + [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 + [ -1, 3, C3, [ 128 ] ], + [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 + [ -1, 6, C3, [ 256 ] ], + [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 + [ -1, 9, C3, [ 512 ] ], + [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 7-P5/32 + [ -1, 3, C3, [ 1024 ] ], + [ -1, 1, SPPF, [ 1024, 5 ] ], # 9 + ] + +# YOLOv5 v6.0 head with (P3, P4) outputs +head: + [ [ -1, 1, Conv, [ 512, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 + [ -1, 3, C3, [ 512, False ] ], # 13 + + [ -1, 1, Conv, [ 256, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 + [ -1, 3, C3, [ 256, False ] ], # 17 (P3/8-small) + + [ -1, 1, Conv, [ 256, 3, 2 ] ], + [ [ -1, 14 ], 1, Concat, [ 1 ] ], # cat head P4 + [ -1, 3, C3, [ 512, False ] ], # 20 (P4/16-medium) + + [ [ 17, 20 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4) + ] diff --git a/models/hub/yolov5-p6.yaml b/models/hub/yolov5-p6.yaml index 28f3e439cccd..a17202f22044 100644 --- 
a/models/hub/yolov5-p6.yaml +++ b/models/hub/yolov5-p6.yaml @@ -4,7 +4,7 @@ nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple -anchors: 3 # auto-anchor 3 anchors per P output layer +anchors: 3 # AutoAnchor evolves 3 anchors per P output layer # YOLOv5 v6.0 backbone backbone: @@ -23,7 +23,7 @@ backbone: [-1, 1, SPPF, [1024, 5]], # 11 ] -# YOLOv5 v6.0 head +# YOLOv5 v6.0 head with (P3, P4, P5, P6) outputs head: [[-1, 1, Conv, [768, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], diff --git a/models/hub/yolov5-p7.yaml b/models/hub/yolov5-p7.yaml index bd2f5845f884..edd7d13a34a6 100644 --- a/models/hub/yolov5-p7.yaml +++ b/models/hub/yolov5-p7.yaml @@ -4,7 +4,7 @@ nc: 80 # number of classes depth_multiple: 1.0 # model depth multiple width_multiple: 1.0 # layer channel multiple -anchors: 3 # auto-anchor 3 anchors per P output layer +anchors: 3 # AutoAnchor evolves 3 anchors per P output layer # YOLOv5 v6.0 backbone backbone: @@ -25,7 +25,7 @@ backbone: [-1, 1, SPPF, [1280, 5]], # 13 ] -# YOLOv5 head +# YOLOv5 v6.0 head with (P3, P4, P5, P6, P7) outputs head: [[-1, 1, Conv, [1024, 1, 1]], [-1, 1, nn.Upsample, [None, 2, 'nearest']], diff --git a/train.py b/train.py index bd2fb5898cb9..410f16fed3bf 100644 --- a/train.py +++ b/train.py @@ -461,7 +461,7 @@ def parse_opt(known=False): parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training') parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') parser.add_argument('--noval', action='store_true', help='only validate final epoch') - parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check') + parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor') parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations') parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"') diff --git a/tutorial.ipynb b/tutorial.ipynb index 45b27b7ab2cc..fb808cf6e10b 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -777,7 +777,7 @@ "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:01<00:00, 121.58it/s]\n", "Plotting labels... \n", "\n", - "\u001b[34m\u001b[1mautoanchor: \u001b[0mAnalyzing anchors... anchors/target = 4.27, Best Possible Recall (BPR) = 0.9935\n", + "\u001b[34m\u001b[1mAutoAnchor: \u001b[0mAnalyzing anchors... 
anchors/target = 4.27, Best Possible Recall (BPR) = 0.9935\n", "Image sizes 640 train, 640 val\n", "Using 2 dataloader workers\n", "Logging results to \u001b[1mruns/train/exp\u001b[0m\n", From 6865d19a92d8c160c7fc3c92256627dadce1cd1e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 7 Jan 2022 09:31:17 -0800 Subject: [PATCH 0814/1976] TensorRT 7 export fix (#6235) --- export.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/export.py b/export.py index 6d15b21d031d..6adcf72e9e66 100644 --- a/export.py +++ b/export.py @@ -174,7 +174,7 @@ def export_engine(model, im, file, train, half, simplify, workspace=4, verbose=F check_requirements(('tensorrt',)) import tensorrt as trt - if trt.__version__[0] == 7: # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012 + if trt.__version__[0] == '7': # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012 grid = model.model[-1].anchor_grid model.model[-1].anchor_grid = [a[..., :1, :1, :] for a in grid] export_onnx(model, im, file, 12, train, False, simplify) # opset 12 From 9b13a594e953e4e2688ff6ac0190a2247733e4ca Mon Sep 17 00:00:00 2001 From: Jinwoong Yoo Date: Mon, 10 Jan 2022 15:40:47 +0900 Subject: [PATCH 0815/1976] Fix `cmd` string on `tfjs` export (#6243) * Fix cmd string on tfjs export * Cleanup Co-authored-by: Glenn Jocher --- export.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/export.py b/export.py index 6adcf72e9e66..0236872c2d94 100644 --- a/export.py +++ b/export.py @@ -345,8 +345,8 @@ def export_tfjs(keras_model, im, file, prefix=colorstr('TensorFlow.js:')): f_pb = file.with_suffix('.pb') # *.pb path f_json = f + '/model.json' # *.json path - cmd = f"tensorflowjs_converter --input_format=tf_frozen_model " \ - f"--output_node_names='Identity,Identity_1,Identity_2,Identity_3' {f_pb} {f}" + cmd = f'tensorflowjs_converter --input_format=tf_frozen_model ' \ + f'--output_node_names="Identity,Identity_1,Identity_2,Identity_3" {f_pb} {f}' subprocess.run(cmd, shell=True) json = open(f_json).read() From b3eaf5008bb8a34de481a6ef7ac8ba520d97b70e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 10 Jan 2022 16:49:10 -1000 Subject: [PATCH 0816/1976] TensorRT pip install --- tutorial.ipynb | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index fb808cf6e10b..97f9074f9fd0 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -1090,19 +1090,13 @@ }, "source": [ "# TensorRT \n", - "# https://developer.nvidia.com/nvidia-tensorrt-download\n", - "!lsb_release -a # check system\n", - "%ls /usr/local | grep cuda # check CUDA\n", - "!wget https://ultralytics.com/assets/TensorRT-8.2.0.6.Linux.x86_64-gnu.cuda-11.4.cudnn8.2.tar.gz # download\n", - "![ -d /content/TensorRT-8.2.0.6/ ] || tar -C /content/ -zxf ./TensorRT-8.2.0.6.Linux.x86_64-gnu.cuda-11.4.cudnn8.2.tar.gz # unzip\n", - "%pip list | grep tensorrt || pip install /content/TensorRT-8.2.0.6/python/tensorrt-8.2.0.6-cp37-none-linux_x86_64.whl # install\n", - "%env LD_LIBRARY_PATH=/usr/local/cuda-11.1/lib64:/content/cuda-11.1/lib64:/content/TensorRT-8.2.0.6/lib:/usr/local/nvidia/lib:/usr/local/nvidia/lib64 # add to path\n", - "\n", - "!python export.py --weights yolov5s.pt --include engine --imgsz 640 640 --device 0\n", - "!python detect.py --weights yolov5s.engine --imgsz 640 640 --device 0" + "# https://docs.nvidia.com/deeplearning/tensorrt/install-guide/index.html#installing-pip\n", + "!pip install -U nvidia-tensorrt --index-url https://pypi.ngc.nvidia.com # 
install\n", "!python export.py --weights yolov5s.pt --include engine --imgsz 640 640 --device 0 # export\n", "!python detect.py --weights yolov5s.engine --imgsz 640 640 --device 0 # inference" ], "execution_count": null, "outputs": [] } ] -} +} \ No newline at end of file From f3085accd3f768a5ffeb6be268d2eac1720f764d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 11 Jan 2022 10:13:17 -1000 Subject: [PATCH 0817/1976] Enable ONNX `--half` FP16 inference (#6268) * Enable ONNX `--half` FP16 inference * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- detect.py | 2 +- tutorial.ipynb | 2 +- val.py | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/detect.py b/detect.py index 2d1963ad6f86..41c364c05d00 100644 --- a/detect.py +++ b/detect.py @@ -94,7 +94,7 @@ def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s) imgsz = check_img_size(imgsz, s=stride) # check image size # Half - half &= (pt or jit or engine) and device.type != 'cpu' # half precision only supported by PyTorch on CUDA + half &= (pt or jit or onnx or engine) and device.type != 'cpu' # FP16 supported on limited backends with CUDA if pt or jit: model.model.half() if half else model.model.float() diff --git a/tutorial.ipynb b/tutorial.ipynb index 97f9074f9fd0..251c18d97815 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -1099,4 +1099,4 @@ "outputs": [] } ] -} \ No newline at end of file +} diff --git a/val.py b/val.py index 4709f67511bb..843943b5ff7e 100644 --- a/val.py +++ b/val.py @@ -137,9 +137,9 @@ def run(data, # Load model model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data) - stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine + stride, pt, jit, onnx, engine = model.stride, model.pt, model.jit, model.onnx, model.engine imgsz = check_img_size(imgsz, s=stride) # check image size - half &= (pt or jit or engine) and device.type != 'cpu' # half precision only supported by PyTorch on CUDA + half &= (pt or jit or onnx or engine) and device.type != 'cpu' # FP16 supported on limited backends with CUDA if pt or jit: model.model.half() if half else model.model.float() elif engine: From 80473a65511859698aa36778c30997ba80943945 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 12 Jan 2022 15:48:40 -1000 Subject: [PATCH 0818/1976] Update `export.py` with Detect, Validate usages (#6280) --- export.py | 53 ++++++++++++++++++++++++++++++----------------------- 1 file changed, 30 insertions(+), 23 deletions(-) diff --git a/export.py b/export.py index 0236872c2d94..bca2564a7333 100644 --- a/export.py +++ b/export.py @@ -82,6 +82,7 @@ def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:' ts.save(str(f), _extra_files=extra_files) LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + return f except Exception as e: LOGGER.info(f'{prefix} export failure: {e}') @@ -125,7 +126,7 @@ def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorst except Exception as e: LOGGER.info(f'{prefix} simplifier failure: {e}') LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - LOGGER.info(f"{prefix} run --dynamic ONNX model inference with: 'python detect.py --weights {f}'") + return f except Exception as e: LOGGER.info(f'{prefix} export failure: {e}') @@ -143,13 +144,13 @@ def export_openvino(model, im, file, prefix=colorstr('OpenVINO:')):
subprocess.check_output(cmd, shell=True) LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + return f except Exception as e: LOGGER.info(f'\n{prefix} export failure: {e}') def export_coreml(model, im, file, prefix=colorstr('CoreML:')): # YOLOv5 CoreML export - ct_model = None try: check_requirements(('coremltools',)) import coremltools as ct @@ -162,10 +163,10 @@ def export_coreml(model, im, file, prefix=colorstr('CoreML:')): ct_model.save(f) LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + return ct_model, f except Exception as e: LOGGER.info(f'\n{prefix} export failure: {e}') - - return ct_model + return None, None def export_engine(model, im, file, train, half, simplify, workspace=4, verbose=False, prefix=colorstr('TensorRT:')): @@ -216,7 +217,7 @@ def export_engine(model, im, file, train, half, simplify, workspace=4, verbose=F with builder.build_engine(network, config) as engine, open(f, 'wb') as t: t.write(engine.serialize()) LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - + return f except Exception as e: LOGGER.info(f'\n{prefix} export failure: {e}') @@ -225,7 +226,6 @@ def export_saved_model(model, im, file, dynamic, tf_nms=False, agnostic_nms=False, topk_per_class=100, topk_all=100, iou_thres=0.45, conf_thres=0.25, prefix=colorstr('TensorFlow SavedModel:')): # YOLOv5 TensorFlow SavedModel export - keras_model = None try: import tensorflow as tf from tensorflow import keras @@ -247,10 +247,10 @@ def export_saved_model(model, im, file, dynamic, keras_model.save(f, save_format='tf') LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + return keras_model, f except Exception as e: LOGGER.info(f'\n{prefix} export failure: {e}') - - return keras_model + return None, None def export_pb(keras_model, im, file, prefix=colorstr('TensorFlow GraphDef:')): @@ -269,6 +269,7 @@ def export_pb(keras_model, im, file, prefix=colorstr('TensorFlow GraphDef:')): tf.io.write_graph(graph_or_graph_def=frozen_func.graph, logdir=str(f.parent), name=f.name, as_text=False) LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + return f except Exception as e: LOGGER.info(f'\n{prefix} export failure: {e}') @@ -300,7 +301,7 @@ def export_tflite(keras_model, im, file, int8, data, ncalib, prefix=colorstr('Te tflite_model = converter.convert() open(f, "wb").write(tflite_model) LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - + return f except Exception as e: LOGGER.info(f'\n{prefix} export failure: {e}') @@ -328,6 +329,7 @@ def export_edgetpu(keras_model, im, file, prefix=colorstr('Edge TPU:')): subprocess.run(cmd, shell=True, check=True) LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + return f except Exception as e: LOGGER.info(f'\n{prefix} export failure: {e}') @@ -364,6 +366,7 @@ def export_tfjs(keras_model, im, file, prefix=colorstr('TensorFlow.js:')): j.write(subst) LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') + return f except Exception as e: LOGGER.info(f'\n{prefix} export failure: {e}') @@ -431,15 +434,15 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' # Exports if 'torchscript' in include: - export_torchscript(model, im, file, optimize) + f = export_torchscript(model, im, file, optimize) if 'engine' in include: # TensorRT required before ONNX - export_engine(model, im, file, train, half, simplify, workspace, verbose) + f = export_engine(model, im, file, 
train, half, simplify, workspace, verbose) if ('onnx' in include) or ('openvino' in include): # OpenVINO requires ONNX - export_onnx(model, im, file, opset, train, dynamic, simplify) + f = export_onnx(model, im, file, opset, train, dynamic, simplify) if 'openvino' in include: - export_openvino(model, im, file) + f = export_openvino(model, im, file) if 'coreml' in include: - export_coreml(model, im, file) + _, f = export_coreml(model, im, file) # TensorFlow Exports if any(tf_exports): @@ -447,22 +450,26 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' if int8 or edgetpu: # TFLite --int8 bug https://github.com/ultralytics/yolov5/issues/5707 check_requirements(('flatbuffers==1.12',)) # required before `import tensorflow` assert not (tflite and tfjs), 'TFLite and TF.js models must be exported separately, please pass only one type.' - model = export_saved_model(model, im, file, dynamic, tf_nms=nms or agnostic_nms or tfjs, - agnostic_nms=agnostic_nms or tfjs, topk_per_class=topk_per_class, topk_all=topk_all, - conf_thres=conf_thres, iou_thres=iou_thres) # keras model + model, f = export_saved_model(model, im, file, dynamic, tf_nms=nms or agnostic_nms or tfjs, + agnostic_nms=agnostic_nms or tfjs, topk_per_class=topk_per_class, + topk_all=topk_all, + conf_thres=conf_thres, iou_thres=iou_thres) # keras model if pb or tfjs: # pb prerequisite to tfjs - export_pb(model, im, file) + f = export_pb(model, im, file) if tflite or edgetpu: - export_tflite(model, im, file, int8=int8 or edgetpu, data=data, ncalib=100) + f = export_tflite(model, im, file, int8=int8 or edgetpu, data=data, ncalib=100) if edgetpu: - export_edgetpu(model, im, file) + f = export_edgetpu(model, im, file) if tfjs: - export_tfjs(model, im, file) + f = export_tfjs(model, im, file) # Finish LOGGER.info(f'\nExport complete ({time.time() - t:.2f}s)' f"\nResults saved to {colorstr('bold', file.parent.resolve())}" - f'\nVisualize with https://netron.app') + f"\nVisualize with https://netron.app" + f"\nDetect with `python detect.py --weights {f}`" + f" or `model = torch.hub.load('ultralytics/yolov5', 'custom', '{f}')" + f"\nValidate with `python val.py --weights {f}`") def parse_opt(): @@ -490,7 +497,7 @@ def parse_opt(): parser.add_argument('--conf-thres', type=float, default=0.25, help='TF.js NMS: confidence threshold') parser.add_argument('--include', nargs='+', default=['torchscript', 'onnx'], - help='available formats are (torchscript, onnx, engine, coreml, saved_model, pb, tflite, tfjs)') + help='torchscript, onnx, openvino, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs') opt = parser.parse_args() print_args(FILE.stem, opt) return opt From af001349e46048f151b091b2ff593cdcd65a863b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 13 Jan 2022 12:39:42 -1000 Subject: [PATCH 0819/1976] Add `is_kaggle()` function (#6285) * Add `is_kaggle()` function Return True if environment is Kaggle Notebook. * Remove root loggers only if is_kaggle() == True * Update general.py --- utils/general.py | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/utils/general.py b/utils/general.py index 470e6d81d250..bce2a1763e2a 100755 --- a/utils/general.py +++ b/utils/general.py @@ -44,10 +44,21 @@ os.environ['NUMEXPR_MAX_THREADS'] = str(NUM_THREADS) # NumExpr max threads +def is_kaggle(): + # Is environment a Kaggle Notebook? 
+ try: + assert os.environ.get('PWD') == '/kaggle/working' + assert os.environ.get('KAGGLE_URL_BASE') == 'https://www.kaggle.com' + return True + except AssertionError: + return False + + def set_logging(name=None, verbose=True): # Sets level and returns logger - for h in logging.root.handlers: - logging.root.removeHandler(h) # remove all handlers associated with the root logger object + if is_kaggle(): + for h in logging.root.handlers: + logging.root.removeHandler(h) # remove all handlers associated with the root logger object rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings logging.basicConfig(format="%(message)s", level=logging.INFO if (verbose and rank in (-1, 0)) else logging.WARNING) return logging.getLogger(name) From e7bf38277f57086bf37486201909e9c04acfaa48 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 13 Jan 2022 21:23:03 -1000 Subject: [PATCH 0820/1976] Fix `device` count check (#6290) * Fix device count check() * Update torch_utils.py * Update torch_utils.py * Update hubconf.py --- hubconf.py | 2 +- utils/torch_utils.py | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/hubconf.py b/hubconf.py index 6bf4b0b0265f..55d15abe2ac5 100644 --- a/hubconf.py +++ b/hubconf.py @@ -61,7 +61,7 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo except Exception as e: help_url = 'https://github.com/ultralytics/yolov5/issues/36' - s = 'Cache may be out of date, try `force_reload=True`. See %s for help.' % help_url + s = f'{e}. Cache may be out of date, try `force_reload=True` or see {help_url} for help.' raise Exception(s) from e diff --git a/utils/torch_utils.py b/utils/torch_utils.py index cddb173948fb..060768e8251b 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -61,8 +61,9 @@ def select_device(device='', batch_size=0, newline=True): if cpu: os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False elif device: # non-cpu device requested - os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable - assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested' # check availability + assert torch.cuda.is_available(), 'CUDA unavailable' # check CUDA is available + assert torch.cuda.device_count() > int(device), f'invalid CUDA device {device} requested' # check index + os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable (must be after asserts) cuda = not cpu and torch.cuda.is_available() if cuda: From a1a9c6884c5cfda4c972f4087ad4d4b9c3da6518 Mon Sep 17 00:00:00 2001 From: Henry Date: Fri, 14 Jan 2022 21:11:06 +0100 Subject: [PATCH 0821/1976] Fix multi-GPU training bug (#6299) * Fix multi-GPU training bug This solves the following issue: https://github.com/ultralytics/yolov5/issues/6297#issue-1103853348 * Update torch_utils.py for pep8 --- utils/torch_utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 060768e8251b..451bcdd29b7c 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -62,7 +62,8 @@ def select_device(device='', batch_size=0, newline=True): os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False elif device: # non-cpu device requested assert torch.cuda.is_available(), 'CUDA unavailable' # check CUDA is available - assert torch.cuda.device_count() > int(device), f'invalid CUDA device {device} requested' # check index + device_list = [int(val) for val in device.replace(',', '')] + assert
all([torch.cuda.device_count() > element for element in device_list]), f'invalid CUDA device {device} requested' # check index os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable (must be after asserts) cuda = not cpu and torch.cuda.is_available() From 436ffc417ac2312de18287ddc4f87bdc2f7f5734 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 14 Jan 2022 15:48:15 -1000 Subject: [PATCH 0822/1976] `select_device()` cleanup (#6302) * `select_device()` cleanup * Update torch_utils.py * Update torch_utils.py * Update torch_utils.py * Update torch_utils.py * Update torch_utils.py --- utils/torch_utils.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 451bcdd29b7c..7e464190f9ba 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -61,9 +61,9 @@ def select_device(device='', batch_size=0, newline=True): if cpu: os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False elif device: # non-cpu device requested - assert torch.cuda.is_available(), 'CUDA unavailable' # check CUDA is available - device_list = [int(val) for val in device.replace(',', '')] - assert all([torch.cuda.device_count() > element for element in device_list]), f'invalid CUDA device {device} requested' # check index + nd = torch.cuda.device_count() # number of CUDA devices + assert torch.cuda.is_available(), 'CUDA is not available, use `--device cpu` or do not pass a --device' + assert nd > int(max(device.split(','))), f'Invalid `--device {device}` request, valid devices are 0 - {nd - 1}' os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable (must be after asserts) cuda = not cpu and torch.cuda.is_available() From db1f83be6312a3d68c817fd25251194e5e7e5b5d Mon Sep 17 00:00:00 2001 From: Otfot Date: Mon, 17 Jan 2022 14:58:34 +0800 Subject: [PATCH 0823/1976] Fix `train.py` parameter groups desc error (#6318) * Fix `train.py` parameter groups desc error * Cleanup Co-authored-by: Glenn Jocher --- train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/train.py b/train.py index 410f16fed3bf..ebe6c2e8f5f9 100644 --- a/train.py +++ b/train.py @@ -172,7 +172,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary optimizer.add_param_group({'params': g1, 'weight_decay': hyp['weight_decay']}) # add g1 with weight_decay optimizer.add_param_group({'params': g2}) # add g2 (biases) LOGGER.info(f"{colorstr('optimizer:')} {type(optimizer).__name__} with parameter groups " - f"{len(g0)} weight, {len(g1)} weight (no decay), {len(g2)} bias") + f"{len(g0)} weight (no decay), {len(g1)} weight, {len(g2)} bias") del g0, g1, g2 # Scheduler From 3119b2f27c198c6b9c02fb57d3b00b61a7bd2356 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 18 Jan 2022 10:04:49 -1000 Subject: [PATCH 0824/1976] Remove `dataset_stats()` autodownload capability (#6303) * Remove `dataset_stats()` autodownload capability @kalenmike security update per Slack convo * Update datasets.py --- utils/datasets.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index 6584342a621d..a8f453aa1904 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -936,11 +936,10 @@ def verify_image_label(args): return [None, None, None, None, nm, nf, ne, nc, msg] -def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False, profile=False, hub=False): +def dataset_stats(path='coco128.yaml', verbose=False, profile=False, hub=False): """ Return dataset statistics 
dictionary with images and instances counts per split per class To run in parent directory: export PYTHONPATH="$PWD/yolov5" - Usage1: from utils.datasets import *; dataset_stats('coco128.yaml', autodownload=True) - Usage2: from utils.datasets import *; dataset_stats('../datasets/coco128_with_yaml.zip') + Usage: from utils.datasets import *; dataset_stats('../datasets/coco128_with_yaml.zip') Arguments path: Path to data.yaml or data.zip (with data.yaml inside data.zip) autodownload: Attempt to download dataset if not found locally @@ -984,7 +983,7 @@ def hub_ops(f, max_dim=1920): data = yaml.safe_load(f) # data dict if zipped: data['path'] = data_dir # TODO: should this be dir.resolve()? - check_dataset(data, autodownload) # download dataset if missing + check_dataset(data, autodownload=False) hub_dir = Path(data['path'] + ('-hub' if hub else '')) stats = {'nc': data['nc'], 'names': data['names']} # statistics dictionary for split in 'train', 'val', 'test': From fd55271c04e9be68ea5299f7fe2aafcf4dc1984d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 18 Jan 2022 10:49:26 -1000 Subject: [PATCH 0825/1976] Console corrupted -> corrupt (#6338) * Console corrupted -> corrupt Minor style changes. * Update export.py --- export.py | 3 +-- utils/datasets.py | 6 +++--- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/export.py b/export.py index bca2564a7333..11fead4a9e1d 100644 --- a/export.py +++ b/export.py @@ -452,8 +452,7 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' assert not (tflite and tfjs), 'TFLite and TF.js models must be exported separately, please pass only one type.' model, f = export_saved_model(model, im, file, dynamic, tf_nms=nms or agnostic_nms or tfjs, agnostic_nms=agnostic_nms or tfjs, topk_per_class=topk_per_class, - topk_all=topk_all, - conf_thres=conf_thres, iou_thres=iou_thres) # keras model + topk_all=topk_all, conf_thres=conf_thres, iou_thres=iou_thres) # keras model if pb or tfjs: # pb prerequisite to tfjs f = export_pb(model, im, file) if tflite or edgetpu: diff --git a/utils/datasets.py b/utils/datasets.py index a8f453aa1904..8159c3dcf264 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -424,9 +424,9 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r cache, exists = self.cache_labels(cache_path, prefix), False # cache # Display cache - nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupted, total + nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupt, total if exists: - d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted" + d = f"Scanning '{cache_path}' images and labels... 
{nf} found, {nm} missing, {ne} empty, {nc} corrupt" tqdm(None, desc=prefix + d, total=n, initial=n) # display cache results if cache['msgs']: LOGGER.info('\n'.join(cache['msgs'])) # display warnings @@ -523,7 +523,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): x[im_file] = [l, shape, segments] if msg: msgs.append(msg) - pbar.desc = f"{desc}{nf} found, {nm} missing, {ne} empty, {nc} corrupted" + pbar.desc = f"{desc}{nf} found, {nm} missing, {ne} empty, {nc} corrupt" pbar.close() if msgs: From e2e95b2d8e8b6be216f4a7c11955d622aff7d043 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 18 Jan 2022 13:52:25 -1000 Subject: [PATCH 0826/1976] TensorRT `assert im.device.type != 'cpu'` on export (#6340) * TensorRT `assert im.device.type != 'cpu'` on export * Update export.py --- export.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/export.py b/export.py index 11fead4a9e1d..2e90b0a1b24c 100644 --- a/export.py +++ b/export.py @@ -184,9 +184,10 @@ def export_engine(model, im, file, train, half, simplify, workspace=4, verbose=F check_version(trt.__version__, '8.0.0', hard=True) # require tensorrt>=8.0.0 export_onnx(model, im, file, 13, train, False, simplify) # opset 13 onnx = file.with_suffix('.onnx') - assert onnx.exists(), f'failed to export ONNX file: {onnx}' LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...') + assert im.device.type != 'cpu', 'export running on CPU but must be on GPU, i.e. `python export.py --device 0`' + assert onnx.exists(), f'failed to export ONNX file: {onnx}' f = file.with_suffix('.engine') # TensorRT engine file logger = trt.Logger(trt.Logger.INFO) if verbose: From 0cf932bf6346909189c53b375ef97551bb0c2326 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 18 Jan 2022 15:18:23 -1000 Subject: [PATCH 0827/1976] `export.py` return exported files/dirs (#6343) * `export.py` return exported files/dirs * Path to str --- export.py | 33 ++++++++++++++++++--------------- 1 file changed, 18 insertions(+), 15 deletions(-) diff --git a/export.py b/export.py index 2e90b0a1b24c..a7a79b46b8bb 100644 --- a/export.py +++ b/export.py @@ -434,16 +434,17 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' LOGGER.info(f"\n{colorstr('PyTorch:')} starting from {file} ({file_size(file):.1f} MB)") # Exports + f = [''] * 10 # exported filenames if 'torchscript' in include: - f = export_torchscript(model, im, file, optimize) + f[0] = export_torchscript(model, im, file, optimize) if 'engine' in include: # TensorRT required before ONNX - f = export_engine(model, im, file, train, half, simplify, workspace, verbose) + f[1] = export_engine(model, im, file, train, half, simplify, workspace, verbose) if ('onnx' in include) or ('openvino' in include): # OpenVINO requires ONNX - f = export_onnx(model, im, file, opset, train, dynamic, simplify) + f[2] = export_onnx(model, im, file, opset, train, dynamic, simplify) if 'openvino' in include: - f = export_openvino(model, im, file) + f[3] = export_openvino(model, im, file) if 'coreml' in include: - _, f = export_coreml(model, im, file) + _, f[4] = export_coreml(model, im, file) # TensorFlow Exports if any(tf_exports): @@ -451,25 +452,27 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' if int8 or edgetpu: # TFLite --int8 bug https://github.com/ultralytics/yolov5/issues/5707 check_requirements(('flatbuffers==1.12',)) # required before `import tensorflow` assert not (tflite and tfjs), 'TFLite and TF.js models must be exported separately, please pass only one 
type.' - model, f = export_saved_model(model, im, file, dynamic, tf_nms=nms or agnostic_nms or tfjs, - agnostic_nms=agnostic_nms or tfjs, topk_per_class=topk_per_class, - topk_all=topk_all, conf_thres=conf_thres, iou_thres=iou_thres) # keras model + model, f[5] = export_saved_model(model, im, file, dynamic, tf_nms=nms or agnostic_nms or tfjs, + agnostic_nms=agnostic_nms or tfjs, topk_per_class=topk_per_class, + topk_all=topk_all, conf_thres=conf_thres, iou_thres=iou_thres) # keras model if pb or tfjs: # pb prerequisite to tfjs - f = export_pb(model, im, file) + f[6] = export_pb(model, im, file) if tflite or edgetpu: - f = export_tflite(model, im, file, int8=int8 or edgetpu, data=data, ncalib=100) + f[7] = export_tflite(model, im, file, int8=int8 or edgetpu, data=data, ncalib=100) if edgetpu: - f = export_edgetpu(model, im, file) + f[8] = export_edgetpu(model, im, file) if tfjs: - f = export_tfjs(model, im, file) + f[9] = export_tfjs(model, im, file) # Finish + f = [str(x) for x in f if x] # filter out '' and None LOGGER.info(f'\nExport complete ({time.time() - t:.2f}s)' f"\nResults saved to {colorstr('bold', file.parent.resolve())}" f"\nVisualize with https://netron.app" - f"\nDetect with `python detect.py --weights {f}`" - f" or `model = torch.hub.load('ultralytics/yolov5', 'custom', '{f}')" - f"\nValidate with `python val.py --weights {f}`") + f"\nDetect with `python detect.py --weights {f[-1]}`" + f" or `model = torch.hub.load('ultralytics/yolov5', 'custom', '{f[-1]}')" + f"\nValidate with `python val.py --weights {f[-1]}`") + return f # return list of exported files/dirs def parse_opt(): From e5219099cd6e76e4f75a4c8b376531af2791d358 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 19 Jan 2022 10:18:29 -1000 Subject: [PATCH 0828/1976] Created using Colaboratory --- tutorial.ipynb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 251c18d97815..b160e75adb58 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -1077,7 +1077,7 @@ }, "source": [ "# VOC\n", - "for b, m in zip([64, 48, 32, 16], ['yolov5s', 'yolov5m', 'yolov5l', 'yolov5x']): # zip(batch_size, model)\n", + "for b, m in zip([64, 64, 32, 16], ['yolov5s', 'yolov5m', 'yolov5l', 'yolov5x']): # zip(batch_size, model)\n", " !python train.py --batch {b} --weights {m}.pt --data VOC.yaml --epochs 50 --cache --img 512 --nosave --hyp hyp.finetune.yaml --project VOC --name {m}" ], "execution_count": null, @@ -1099,4 +1099,4 @@ "outputs": [] } ] -} +} \ No newline at end of file From 750c42e43eb38bf23659fcee50576156acd86c77 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 19 Jan 2022 12:24:40 -1000 Subject: [PATCH 0829/1976] `export.py` automatic `forward_export` (#6352) * `export.py` automatic `forward_export` * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- export.py | 3 ++- tutorial.ipynb | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/export.py b/export.py index a7a79b46b8bb..589b381e035a 100644 --- a/export.py +++ b/export.py @@ -427,7 +427,8 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' elif isinstance(m, Detect): m.inplace = inplace m.onnx_dynamic = dynamic - # m.forward = m.forward_export # assign forward (optional) + if hasattr(m, 'forward_export'): + m.forward = m.forward_export # assign custom forward (optional) for _ in range(2): y = model(im) # dry runs diff --git 
a/tutorial.ipynb b/tutorial.ipynb index b160e75adb58..6ff20dc36c40 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -1099,4 +1099,4 @@ "outputs": [] } ] -} \ No newline at end of file +} From 9708cf56eaead29bce789a07cfe73ecc7d7d4838 Mon Sep 17 00:00:00 2001 From: johnk2hawaii <64561921+johnk2hawaii@users.noreply.github.com> Date: Wed, 19 Jan 2022 14:32:19 -1000 Subject: [PATCH 0830/1976] New environment variable `VERBOSE` (#6353) New environment variable `VERBOSE` --- utils/general.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index bce2a1763e2a..41f47785bd16 100755 --- a/utils/general.py +++ b/utils/general.py @@ -36,6 +36,7 @@ FILE = Path(__file__).resolve() ROOT = FILE.parents[1] # YOLOv5 root directory NUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads +VERBOSE = str(os.getenv('VERBOSE', True)).lower() == 'true' # global verbose mode torch.set_printoptions(linewidth=320, precision=5, profile='long') np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5 @@ -54,7 +55,7 @@ def is_kaggle(): return False -def set_logging(name=None, verbose=True): +def set_logging(name=None, verbose=VERBOSE): # Sets level and returns logger if is_kaggle(): for h in logging.root.handlers: From 4e841b9b16aa60a39cf7c11be58f55fa2fdc34f2 Mon Sep 17 00:00:00 2001 From: imyhxy Date: Fri, 21 Jan 2022 04:50:17 +0800 Subject: [PATCH 0831/1976] Reuse `de_parallel()` rather than `is_parallel()` (#6354) --- utils/loss.py | 4 ++-- utils/torch_utils.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/utils/loss.py b/utils/loss.py index 194c8e503e0e..5aa9f017d2af 100644 --- a/utils/loss.py +++ b/utils/loss.py @@ -7,7 +7,7 @@ import torch.nn as nn from utils.metrics import bbox_iou -from utils.torch_utils import is_parallel +from utils.torch_utils import de_parallel def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441 @@ -107,7 +107,7 @@ def __init__(self, model, autobalance=False): if g > 0: BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) - det = model.module.model[-1] if is_parallel(model) else model.model[-1] # Detect() module + det = de_parallel(model).model[-1] # Detect() module self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, 0.02]) # P3-P7 self.ssi = list(det.stride).index(16) if autobalance else 0 # stride 16 index self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 7e464190f9ba..2a45f434c6a5 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -295,7 +295,7 @@ class ModelEMA: def __init__(self, model, decay=0.9999, updates=0): # Create EMA - self.ema = deepcopy(model.module if is_parallel(model) else model).eval() # FP32 EMA + self.ema = deepcopy(de_parallel(model)).eval() # FP32 EMA # if next(model.parameters()).device.type != 'cpu': # self.ema.half() # FP16 EMA self.updates = updates # number of EMA updates @@ -309,7 +309,7 @@ def update(self, model): self.updates += 1 d = self.decay(self.updates) - msd = model.module.state_dict() if is_parallel(model) else model.state_dict() # model state_dict + msd = de_parallel(model).state_dict() # model state_dict for k, v in self.ema.state_dict().items(): if v.dtype.is_floating_point: v *= d From e1893c894a6c2a1af25038b87b6146eee6a6ee9c Mon Sep 17 00:00:00 2001 From: sitecao 
<95668894+sitecao@users.noreply.github.com> Date: Thu, 20 Jan 2022 18:06:26 -0500 Subject: [PATCH 0832/1976] `DEVICE_COUNT` instead of `WORLD_SIZE` to calculate `nw` (#6324) --- utils/datasets.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index 8159c3dcf264..96f05afe508e 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -35,7 +35,7 @@ HELP_URL = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' IMG_FORMATS = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo'] # acceptable image suffixes VID_FORMATS = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes -WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) # DPP +DEVICE_COUNT = max(torch.cuda.device_count(), 1) # Get orientation exif tag for orientation in ExifTags.TAGS.keys(): @@ -110,7 +110,7 @@ def create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=Non prefix=prefix) batch_size = min(batch_size, len(dataset)) - nw = min([os.cpu_count() // WORLD_SIZE, batch_size if batch_size > 1 else 0, workers]) # number of workers + nw = min([os.cpu_count() // DEVICE_COUNT, batch_size if batch_size > 1 else 0, workers]) # number of workers sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates return loader(dataset, From 9bcc32a5bf5a823707e47a1167fc87d6050e60f4 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Fri, 21 Jan 2022 04:52:47 +0530 Subject: [PATCH 0833/1976] Flush callbacks when on `--evolve` (#6374) * log best.pt metrics at train end * update * Update __init__.py * flush callbacks when using evolve Co-authored-by: Glenn Jocher --- train.py | 2 +- utils/loggers/__init__.py | 5 +---- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/train.py b/train.py index ebe6c2e8f5f9..b20b7dbb2dda 100644 --- a/train.py +++ b/train.py @@ -612,7 +612,7 @@ def main(opt, callbacks=Callbacks()): # Train mutation results = train(hyp.copy(), opt, device, callbacks) - + callbacks = Callbacks() # Write mutation results print_mutation(results, hyp.copy(), save_dir, opt.bucket) diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 7679ee70f176..86ccf38443a9 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -159,10 +159,7 @@ def on_train_end(self, last, best, plots, epoch, results): wandb.log_artifact(str(best if best.exists() else last), type='model', name='run_' + self.wandb.wandb_run.id + '_model', aliases=['latest', 'best', 'stripped']) - self.wandb.finish_run() - else: - self.wandb.finish_run() - self.wandb = WandbLogger(self.opt) + self.wandb.finish_run() def on_params_update(self, params): # Update hyperparams or configs of the experiment From 1b41a1d0599337b760810ba9690b6e633e129e65 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 20 Jan 2022 17:59:41 -1000 Subject: [PATCH 0834/1976] FROM nvcr.io/nvidia/pytorch:21.12-py3 (#6377) --- Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 9a55005a95c5..35e346bf6850 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,7 +1,7 @@ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license # Start FROM Nvidia PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch -FROM nvcr.io/nvidia/pytorch:21.10-py3 +FROM nvcr.io/nvidia/pytorch:21.12-py3 # Install linux packages RUN apt update && apt install -y zip htop screen 
libgl1-mesa-glx @@ -12,7 +12,7 @@ RUN python -m pip install --upgrade pip RUN pip uninstall -y nvidia-tensorboard nvidia-tensorboard-plugin-dlprof RUN pip install --no-cache -r requirements.txt coremltools onnx gsutil notebook wandb>=0.12.2 RUN pip install --no-cache -U torch torchvision numpy Pillow -# RUN pip install --no-cache torch==1.10.0+cu113 torchvision==0.11.1+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html +# RUN pip install --no-cache torch==1.10.1+cu113 torchvision==0.11.2+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html # Create working directory RUN mkdir -p /usr/src/app From bd815d48df18a23e2bb08d88e430183bfb48eb78 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 20 Jan 2022 19:15:04 -1000 Subject: [PATCH 0835/1976] FROM nvcr.io/nvidia/pytorch:21.10-py3 (#6379) 21.12 generates dockerhub errors so rolling back to 21.10 with latest pytorch install. Not sure if this torch install will work on non-GPU dockerhub autobuild so this is an experiment. --- Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 35e346bf6850..d631a057c359 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,7 +1,7 @@ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license # Start FROM Nvidia PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch -FROM nvcr.io/nvidia/pytorch:21.12-py3 +FROM nvcr.io/nvidia/pytorch:21.10-py3 # Install linux packages RUN apt update && apt install -y zip htop screen libgl1-mesa-glx @@ -12,7 +12,7 @@ RUN python -m pip install --upgrade pip RUN pip uninstall -y nvidia-tensorboard nvidia-tensorboard-plugin-dlprof RUN pip install --no-cache -r requirements.txt coremltools onnx gsutil notebook wandb>=0.12.2 RUN pip install --no-cache -U torch torchvision numpy Pillow -# RUN pip install --no-cache torch==1.10.1+cu113 torchvision==0.11.2+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html +RUN pip install --no-cache torch==1.10.1+cu113 torchvision==0.11.2+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html # Create working directory RUN mkdir -p /usr/src/app From c43439aa31afdca9d1adbd1cc35b57bfb95b442d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 21 Jan 2022 21:06:02 -1000 Subject: [PATCH 0836/1976] Add `albumentations` to Dockerfile (#6392) --- Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index d631a057c359..95e2cd4af66d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -10,9 +10,9 @@ RUN apt update && apt install -y zip htop screen libgl1-mesa-glx COPY requirements.txt . 
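# Annotation (assumption based on standard Docker layer caching, not part of the
# original diff): copying requirements.txt on its own before the pip RUN steps
# lets source-only rebuilds reuse the cached dependency layers instead of
# reinstalling every package.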
RUN python -m pip install --upgrade pip RUN pip uninstall -y nvidia-tensorboard nvidia-tensorboard-plugin-dlprof -RUN pip install --no-cache -r requirements.txt coremltools onnx gsutil notebook wandb>=0.12.2 -RUN pip install --no-cache -U torch torchvision numpy Pillow +RUN pip install --no-cache -r requirements.txt albumentations coremltools onnx gsutil notebook numpy Pillow wandb>=0.12.2 RUN pip install --no-cache torch==1.10.1+cu113 torchvision==0.11.2+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html +# RUN pip install --no-cache -U torch torchvision # Create working directory RUN mkdir -p /usr/src/app From 8efe97719c9b3b77c9db9b5c8592e051b7f0c9a7 Mon Sep 17 00:00:00 2001 From: Matthias Date: Sun, 23 Jan 2022 03:37:21 +0100 Subject: [PATCH 0837/1976] Add `stop_training=False` flag to callbacks (#6365) * New flag 'stop_training' in utils.callbacks.Callbacks class to prematurely stop training from a callback handler * Removed most of the new checks, leaving only the one after calling 'on_train_batch_end' * Cleanup Co-authored-by: Glenn Jocher --- train.py | 2 ++ utils/callbacks.py | 1 + 2 files changed, 3 insertions(+) diff --git a/train.py b/train.py index b20b7dbb2dda..510377e1178e 100644 --- a/train.py +++ b/train.py @@ -352,6 +352,8 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary pbar.set_description(('%10s' * 2 + '%10.4g' * 5) % ( f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1])) callbacks.run('on_train_batch_end', ni, model, imgs, targets, paths, plots, opt.sync_bn) + if callbacks.stop_training: + return # end batch ------------------------------------------------------------------------------------------------ # Scheduler diff --git a/utils/callbacks.py b/utils/callbacks.py index 13d82ebc2e41..c51c268f20d6 100644 --- a/utils/callbacks.py +++ b/utils/callbacks.py @@ -35,6 +35,7 @@ def __init__(self): 'on_params_update': [], 'teardown': [], } + self.stop_training = False # set True to interrupt training def register_action(self, hook, name='', callback=None): """ From 482af479c07cd465890f63a08483f1ae6540987c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 24 Jan 2022 13:11:11 -0800 Subject: [PATCH 0838/1976] Add `detect.py` GIF video inference (#6410) * Add detect.py GIF video inference * Cleanup --- detect.py | 2 +- utils/datasets.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/detect.py b/detect.py index 41c364c05d00..9b553faa34e4 100644 --- a/detect.py +++ b/detect.py @@ -199,7 +199,7 @@ def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s) h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) else: # stream fps, w, h = 30, im0.shape[1], im0.shape[0] - save_path += '.mp4' + save_path = str(Path(save_path).with_suffix('.mp4')) # force *.mp4 suffix on results videos vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) vid_writer[i].write(im0) diff --git a/utils/datasets.py b/utils/datasets.py index 96f05afe508e..fa73cba64d40 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -33,8 +33,8 @@ # Parameters HELP_URL = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' -IMG_FORMATS = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo'] # acceptable image suffixes -VID_FORMATS = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes +IMG_FORMATS = ['bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp'] # include image suffixes +VID_FORMATS = ['avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'wmv'] #
include video suffixes DEVICE_COUNT = max(torch.cuda.device_count(), 1) # Get orientation exif tag From cfecd903a3399ef4529a244303d8807edd6abae4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 24 Jan 2022 15:28:52 -0800 Subject: [PATCH 0839/1976] Update `greetings.yaml` email address (#6412) * Update `greetings.yaml` email address * Update greetings.yml --- .github/workflows/greetings.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index 6ced1132264a..db2aaf8d9a39 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -31,7 +31,7 @@ jobs: If this is a custom training ❓ Question, please provide as much information as possible, including dataset images, training logs, screenshots, and a public link to online [W&B logging](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data#visualize) if available. - For business inquiries or professional support requests please visit https://ultralytics.com or email Glenn Jocher at glenn.jocher@ultralytics.com. + For business inquiries or professional support requests please visit https://ultralytics.com or email support@ultralytics.com. ## Requirements From ed9bac83922def8c7355c557df4d78208d490799 Mon Sep 17 00:00:00 2001 From: Jonathan Samelson Date: Tue, 25 Jan 2022 16:21:06 +0100 Subject: [PATCH 0840/1976] Rename logger from 'utils.logger' to 'yolov5' (#6421) * Gave a more explicit name to the logger * Cleanup Co-authored-by: Glenn Jocher --- utils/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index 41f47785bd16..3d6da2fdb173 100755 --- a/utils/general.py +++ b/utils/general.py @@ -65,7 +65,7 @@ def set_logging(name=None, verbose=VERBOSE): return logging.getLogger(name) -LOGGER = set_logging(__name__) # define globally (used in train.py, val.py, detect.py, etc.) +LOGGER = set_logging('yolov5') # define globally (used in train.py, val.py, detect.py, etc.) 
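A practical consequence of this rename: Python's logging module keys loggers by name, so embedding code can now fetch YOLOv5's logger as `logging.getLogger('yolov5')` instead of guessing a module path. A minimal sketch (assumes a yolov5 module has already been imported, so `set_logging('yolov5')` has run):

    import logging

    logger = logging.getLogger('yolov5')  # same object as the repo's global LOGGER
    logger.setLevel(logging.WARNING)      # e.g. mute INFO output when embedding YOLOv5
    logger.warning('warnings still come through')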
class Profile(contextlib.ContextDecorator): From 16563ac5b54da3925cd71c83d58f53c102ec61ff Mon Sep 17 00:00:00 2001 From: Motoki Kimura Date: Wed, 26 Jan 2022 00:24:24 +0900 Subject: [PATCH 0841/1976] Prefer `tflite_runtime` for TFLite inference if installed (#6406) * import tflite_runtime if tensorflow not installed * rename tflite to tfli * Attempt tflite_runtime for all TFLite workflows Also rename tfli to tfl Co-authored-by: Glenn Jocher --- models/common.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/models/common.py b/models/common.py index e375507a5a7e..346fa37ae2d0 100644 --- a/models/common.py +++ b/models/common.py @@ -374,17 +374,19 @@ def wrap_frozen_graph(gd, inputs, outputs): graph_def.ParseFromString(open(w, 'rb').read()) frozen_func = wrap_frozen_graph(gd=graph_def, inputs="x:0", outputs="Identity:0") elif tflite: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python - if 'edgetpu' in w.lower(): # Edge TPU + try: + import tflite_runtime.interpreter as tfl # prefer tflite_runtime if installed + except ImportError: + import tensorflow.lite as tfl + if 'edgetpu' in w.lower(): # Edge TPU https://coral.ai/software/#edgetpu-runtime LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...') - import tflite_runtime.interpreter as tfli # install https://coral.ai/software/#edgetpu-runtime delegate = {'Linux': 'libedgetpu.so.1', 'Darwin': 'libedgetpu.1.dylib', 'Windows': 'edgetpu.dll'}[platform.system()] - interpreter = tfli.Interpreter(model_path=w, experimental_delegates=[tfli.load_delegate(delegate)]) + interpreter = tfl.Interpreter(model_path=w, experimental_delegates=[tfl.load_delegate(delegate)]) else: # Lite LOGGER.info(f'Loading {w} for TensorFlow Lite inference...') - import tensorflow as tf - interpreter = tf.lite.Interpreter(model_path=w) # load TFLite model + interpreter = tfl.Interpreter(model_path=w) # load TFLite model interpreter.allocate_tensors() # allocate input_details = interpreter.get_input_details() # inputs output_details = interpreter.get_output_details() # outputs From ff8646cdea57a1e81a381de37b881e55ab273777 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 25 Jan 2022 14:33:22 -0800 Subject: [PATCH 0842/1976] Update workflows (#6427) * Workflow updates * quotes fix * best to weights fix --- .github/workflows/ci-testing.yml | 29 ++++++++++++++--------------- .github/workflows/greetings.yml | 8 ++++---- .github/workflows/stale.yml | 2 +- README.md | 2 +- 4 files changed, 20 insertions(+), 21 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 9085b2b7e6dd..5cf1613ab0cd 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -23,7 +23,7 @@ jobs: model: [ 'yolov5n' ] # models to test # Timeout: https://stackoverflow.com/a/59076067/4521646 - timeout-minutes: 50 + timeout-minutes: 60 steps: - uses: actions/checkout@v2 - name: Set up Python ${{ matrix.python-version }} @@ -60,35 +60,34 @@ jobs: # - name: W&B login # run: wandb login 345011b3fb26dc8337fd9b20e53857c1d403f2aa - - name: Download data - run: | - # curl -L -o tmp.zip https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip - # unzip -q tmp.zip -d ../ - # rm tmp.zip + # - name: Download data + # run: | + # curl -L -o tmp.zip https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip + # unzip -q tmp.zip -d ../datasets - name: Tests workflow run: | # export PYTHONPATH="$PWD" # to run '$ python *.py' files in 
subdirectories - di=cpu # device + d=cpu # device + weights=runs/train/exp/weights/best.pt # Train - python train.py --img 64 --batch 32 --weights ${{ matrix.model }}.pt --cfg ${{ matrix.model }}.yaml --epochs 1 --device $di + python train.py --img 64 --batch 32 --weights ${{ matrix.model }}.pt --cfg ${{ matrix.model }}.yaml --epochs 1 --device $d # Val - python val.py --img 64 --batch 32 --weights ${{ matrix.model }}.pt --device $di - python val.py --img 64 --batch 32 --weights runs/train/exp/weights/last.pt --device $di + python val.py --img 64 --batch 32 --weights ${{ matrix.model }}.pt --device $d + python val.py --img 64 --batch 32 --weights $weights --device $d # Detect - python detect.py --weights ${{ matrix.model }}.pt --device $di - python detect.py --weights runs/train/exp/weights/last.pt --device $di + python detect.py --weights ${{ matrix.model }}.pt --device $d + python detect.py --weights $weights --device $d python hubconf.py # hub # Export python models/yolo.py --cfg ${{ matrix.model }}.yaml # build PyTorch model python models/tf.py --weights ${{ matrix.model }}.pt # build TensorFlow model - python export.py --img 64 --batch 1 --weights ${{ matrix.model }}.pt --include torchscript onnx # export + python export.py --weights ${{ matrix.model }}.pt --img 64 --include torchscript onnx # export # Python python - <=3.6.0**](https://www.python.org/) with all [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) installed including [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/). To get started: + [**Python>=3.7.0**](https://www.python.org/) with all [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) installed including [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/). To get started: ```bash git clone https://github.com/ultralytics/yolov5 # clone cd yolov5 diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index b046dc949d1c..be2b0d97d5e7 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -3,7 +3,7 @@ name: Close stale issues on: schedule: - - cron: "0 0 * * *" + - cron: '0 0 * * *' # Runs at 00:00 UTC every day jobs: stale: diff --git a/README.md b/README.md index 5f45eb407fc5..a73ba2797b1b 100644 --- a/README.md +++ b/README.md @@ -63,7 +63,7 @@ See the [YOLOv5 Docs](https://docs.ultralytics.com) for full documentation on tr Install Clone repo and install [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) in a -[**Python>=3.6.0**](https://www.python.org/) environment, including +[**Python>=3.7.0**](https://www.python.org/) environment, including [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/). ```bash From d5966c93f1855baec531c3585da247cded72247f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 25 Jan 2022 14:57:27 -0800 Subject: [PATCH 0843/1976] Namespace `VERBOSE` env variable to `YOLOv5_VERBOSE` (#6428) * Verbose updates * Verbose updates --- hubconf.py | 12 +++++------ utils/general.py | 54 ++++++++++++++++++++++++------------------------ utils/plots.py | 6 +++--- 3 files changed, 36 insertions(+), 36 deletions(-) diff --git a/hubconf.py b/hubconf.py index 55d15abe2ac5..39fa614b2e34 100644 --- a/hubconf.py +++ b/hubconf.py @@ -12,10 +12,10 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None): - """Creates a specified YOLOv5 model + """Creates or loads a YOLOv5 model Arguments: - name (str): name of model, i.e. 
'yolov5s' + name (str): model name 'yolov5s' or path 'path/to/best.pt' pretrained (bool): load pretrained weights into the model channels (int): number of input channels classes (int): number of model classes @@ -24,19 +24,19 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo device (str, torch.device, None): device to use for model parameters Returns: - YOLOv5 pytorch model + YOLOv5 model """ from pathlib import Path from models.common import AutoShape, DetectMultiBackend from models.yolo import Model from utils.downloads import attempt_download - from utils.general import check_requirements, intersect_dicts, set_logging + from utils.general import LOGGER, check_requirements, intersect_dicts, logging from utils.torch_utils import select_device + if not verbose: + LOGGER.setLevel(logging.WARNING) check_requirements(exclude=('tensorboard', 'thop', 'opencv-python')) - set_logging(verbose=verbose) - name = Path(name) path = name.with_suffix('.pt') if name.suffix == '' else name # checkpoint path try: diff --git a/utils/general.py b/utils/general.py index 3d6da2fdb173..e9f5ec2ac128 100755 --- a/utils/general.py +++ b/utils/general.py @@ -36,7 +36,7 @@ FILE = Path(__file__).resolve() ROOT = FILE.parents[1] # YOLOv5 root directory NUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads -VERBOSE = str(os.getenv('VERBOSE', True)).lower() == 'true' # global verbose mode +VERBOSE = str(os.getenv('YOLOv5_VERBOSE', True)).lower() == 'true' # global verbose mode torch.set_printoptions(linewidth=320, precision=5, profile='long') np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5 @@ -241,20 +241,20 @@ def check_online(): def check_git_status(): # Recommend 'git pull' if code is out of date msg = ', for updates see https://github.com/ultralytics/yolov5' - print(colorstr('github: '), end='') - assert Path('.git').exists(), 'skipping check (not a git repository)' + msg - assert not is_docker(), 'skipping check (Docker image)' + msg - assert check_online(), 'skipping check (offline)' + msg + s = colorstr('github: ') # string + assert Path('.git').exists(), s + 'skipping check (not a git repository)' + msg + assert not is_docker(), s + 'skipping check (Docker image)' + msg + assert check_online(), s + 'skipping check (offline)' + msg cmd = 'git fetch && git config --get remote.origin.url' url = check_output(cmd, shell=True, timeout=5).decode().strip().rstrip('.git') # git fetch branch = check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out n = int(check_output(f'git rev-list {branch}..origin/master --count', shell=True)) # commits behind if n > 0: - s = f"⚠️ YOLOv5 is out of date by {n} commit{'s' * (n > 1)}. Use `git pull` or `git clone {url}` to update." + s += f"⚠️ YOLOv5 is out of date by {n} commit{'s' * (n > 1)}. Use `git pull` or `git clone {url}` to update." 
else: - s = f'up to date with {url} ✅' - print(emojis(s)) # emoji-safe + s += f'up to date with {url} ✅' + LOGGER.info(emojis(s)) # emoji-safe def check_python(minimum='3.6.2'): @@ -294,21 +294,21 @@ def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), insta except Exception as e: # DistributionNotFound or VersionConflict if requirements not met s = f"{prefix} {r} not found and is required by YOLOv5" if install: - print(f"{s}, attempting auto-update...") + LOGGER.info(f"{s}, attempting auto-update...") try: assert check_online(), f"'pip install {r}' skipped (offline)" - print(check_output(f"pip install '{r}'", shell=True).decode()) + LOGGER.info(check_output(f"pip install '{r}'", shell=True).decode()) n += 1 except Exception as e: - print(f'{prefix} {e}') + LOGGER.warning(f'{prefix} {e}') else: - print(f'{s}. Please install and rerun your command.') + LOGGER.info(f'{s}. Please install and rerun your command.') if n: # if packages updated source = file.resolve() if 'file' in locals() else requirements s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \ f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n" - print(emojis(s)) + LOGGER.info(emojis(s)) def check_img_size(imgsz, s=32, floor=0): @@ -318,7 +318,7 @@ def check_img_size(imgsz, s=32, floor=0): else: # list i.e. img_size=[640, 480] new_size = [max(make_divisible(x, int(s)), floor) for x in imgsz] if new_size != imgsz: - print(f'WARNING: --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}') + LOGGER.warning(f'WARNING: --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}') return new_size @@ -333,7 +333,7 @@ def check_imshow(): cv2.waitKey(1) return True except Exception as e: - print(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}') + LOGGER.warning(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}') return False @@ -363,9 +363,9 @@ def check_file(file, suffix=''): url = str(Path(file)).replace(':/', '://') # Pathlib turns :// -> :/ file = Path(urllib.parse.unquote(file).split('?')[0]).name # '%2F' to '/', split https://url.com/file.txt?auth if Path(file).is_file(): - print(f'Found {url} locally at {file}') # file already exists + LOGGER.info(f'Found {url} locally at {file}') # file already exists else: - print(f'Downloading {url} to {file}...') + LOGGER.info(f'Downloading {url} to {file}...') torch.hub.download_url_to_file(url, file) assert Path(file).exists() and Path(file).stat().st_size > 0, f'File download failed: {url}' # check return file @@ -407,23 +407,23 @@ def check_dataset(data, autodownload=True): if val: val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path if not all(x.exists() for x in val): - print('\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()]) + LOGGER.info('\nDataset not found, missing paths: %s' % [str(x) for x in val if not x.exists()]) if s and autodownload: # download script root = path.parent if 'path' in data else '..' # unzip directory i.e. 
'../' if s.startswith('http') and s.endswith('.zip'): # URL f = Path(s).name # filename - print(f'Downloading {s} to {f}...') + LOGGER.info(f'Downloading {s} to {f}...') torch.hub.download_url_to_file(s, f) Path(root).mkdir(parents=True, exist_ok=True) # create root ZipFile(f).extractall(path=root) # unzip Path(f).unlink() # remove zip r = None # success elif s.startswith('bash '): # bash script - print(f'Running {s} ...') + LOGGER.info(f'Running {s} ...') r = os.system(s) else: # python script r = exec(s, {'yaml': data}) # return None - print(f"Dataset autodownload {f'success, saved to {root}' if r in (0, None) else 'failure'}\n") + LOGGER.info(f"Dataset autodownload {f'success, saved to {root}' if r in (0, None) else 'failure'}\n") else: raise Exception('Dataset not found.') @@ -445,13 +445,13 @@ def download_one(url, dir): if Path(url).is_file(): # exists in current path Path(url).rename(f) # move to dir elif not f.exists(): - print(f'Downloading {url} to {f}...') + LOGGER.info(f'Downloading {url} to {f}...') if curl: os.system(f"curl -L '{url}' -o '{f}' --retry 9 -C -") # curl download, retry and resume on fail else: torch.hub.download_url_to_file(url, f, progress=True) # torch download if unzip and f.suffix in ('.zip', '.gz'): - print(f'Unzipping {f}...') + LOGGER.info(f'Unzipping {f}...') if f.suffix == '.zip': ZipFile(f).extractall(path=dir) # unzip elif f.suffix == '.gz': @@ -744,7 +744,7 @@ def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=Non output[xi] = x[i] if (time.time() - t) > time_limit: - print(f'WARNING: NMS time limit {time_limit}s exceeded') + LOGGER.warning(f'WARNING: NMS time limit {time_limit}s exceeded') break # time limit exceeded return output @@ -763,7 +763,7 @@ def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_op p.requires_grad = False torch.save(x, s or f) mb = os.path.getsize(s or f) / 1E6 # filesize - print(f"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB") + LOGGER.info(f"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB") def print_mutation(results, hyp, save_dir, bucket): @@ -786,8 +786,8 @@ def print_mutation(results, hyp, save_dir, bucket): f.write(s + ('%20.5g,' * n % vals).rstrip(',') + '\n') # Print to screen - print(colorstr('evolve: ') + ', '.join(f'{x.strip():>20s}' for x in keys)) - print(colorstr('evolve: ') + ', '.join(f'{x:20.5g}' for x in vals), end='\n\n\n') + LOGGER.info(colorstr('evolve: ') + ', '.join(f'{x.strip():>20s}' for x in keys)) + LOGGER.info(colorstr('evolve: ') + ', '.join(f'{x:20.5g}' for x in vals) + '\n\n') # Save yaml with open(evolve_yaml, 'w') as f: diff --git a/utils/plots.py b/utils/plots.py index 69037ee9af70..74868403edc0 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -57,7 +57,7 @@ def check_font(font='Arial.ttf', size=10): return ImageFont.truetype(str(font) if font.exists() else font.name, size) except Exception as e: # download if missing url = "https://ultralytics.com/assets/" + font.name - print(f'Downloading {url} to {font}...') + LOGGER.info(f'Downloading {url} to {font}...') torch.hub.download_url_to_file(url, str(font), progress=False) try: return ImageFont.truetype(str(font), size) @@ -143,7 +143,7 @@ def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detec ax[i].imshow(blocks[i].squeeze()) # cmap='gray' ax[i].axis('off') - print(f'Saving {f}... ({n}/{channels})') + LOGGER.info(f'Saving {f}... 
({n}/{channels})') plt.savefig(f, dpi=300, bbox_inches='tight') plt.close() np.save(str(f.with_suffix('.npy')), x[0].cpu().numpy()) # npy save @@ -417,7 +417,7 @@ def plot_results(file='path/to/results.csv', dir=''): # if j in [8, 9, 10]: # share train and val loss y axes # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5]) except Exception as e: - print(f'Warning: Plotting error for {f}: {e}') + LOGGER.info(f'Warning: Plotting error for {f}: {e}') ax[1].legend() fig.savefig(save_dir / 'results.png', dpi=200) plt.close() From 3b7ac28ed1c760ee3ed6a9780027a4a3e775f937 Mon Sep 17 00:00:00 2001 From: toschi23 Date: Wed, 26 Jan 2022 14:26:02 +0100 Subject: [PATCH 0844/1976] Add `*.asf` video support (#6436) --- utils/datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/datasets.py b/utils/datasets.py index fa73cba64d40..4e0c38e76370 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -34,7 +34,7 @@ # Parameters HELP_URL = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' IMG_FORMATS = ['bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp'] # include image suffixes -VID_FORMATS = ['avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'wmv'] # include video suffixes +VID_FORMATS = ['asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'wmv'] # include video suffixes DEVICE_COUNT = max(torch.cuda.device_count(), 1) # Get orientation exif tag From fe7de6a82da3444d755453f86f40b508f3b99419 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 26 Jan 2022 19:10:59 +0100 Subject: [PATCH 0845/1976] Revert "Remove `dataset_stats()` autodownload capability (#6303)" (#6442) This reverts commit 3119b2f27c198c6b9c02fb57d3b00b61a7bd2356. --- utils/datasets.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index 4e0c38e76370..85923e918aa5 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -936,10 +936,11 @@ def verify_image_label(args): return [None, None, None, None, nm, nf, ne, nc, msg] -def dataset_stats(path='coco128.yaml', verbose=False, profile=False, hub=False): +def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False, profile=False, hub=False): """ Return dataset statistics dictionary with images and instances counts per split per class To run in parent directory: export PYTHONPATH="$PWD/yolov5" - Usage: from utils.datasets import *; dataset_stats('../datasets/coco128_with_yaml.zip') + Usage1: from utils.datasets import *; dataset_stats('coco128.yaml', autodownload=True) + Usage2: from utils.datasets import *; dataset_stats('../datasets/coco128_with_yaml.zip') Arguments path: Path to data.yaml or data.zip (with data.yaml inside data.zip) autodownload: Attempt to download dataset if not found locally @@ -983,7 +984,7 @@ def hub_ops(f, max_dim=1920): data = yaml.safe_load(f) # data dict if zipped: data['path'] = data_dir # TODO: should this be dir.resolve()? 
- check_dataset(data, autodownload=False) + check_dataset(data, autodownload) # download dataset if missing hub_dir = Path(data['path'] + ('-hub' if hub else '')) stats = {'nc': data['nc'], 'names': data['names']} # statistics dictionary for split in 'train', 'val', 'test': From 856d4e5733451c7fe9b12f183b384e986699b1f8 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 26 Jan 2022 23:47:00 +0100 Subject: [PATCH 0846/1976] Fix `select_device()` for Multi-GPU (#6434) * Fix `select_device()` for Multi-GPU Possible fix for https://github.com/ultralytics/yolov5/issues/6431 * Update torch_utils.py * Update torch_utils.py * Update torch_utils.py * Update torch_utils.py * Update * Update * Update * Update * Update * Update * Update * Update * Update --- utils/datasets.py | 4 ++-- utils/torch_utils.py | 15 ++++++++++++--- 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index 85923e918aa5..4eb444087860 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -29,13 +29,13 @@ from utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective from utils.general import (LOGGER, NUM_THREADS, check_dataset, check_requirements, check_yaml, clean_str, segments2boxes, xyn2xy, xywh2xyxy, xywhn2xyxy, xyxy2xywhn) -from utils.torch_utils import torch_distributed_zero_first +from utils.torch_utils import device_count, torch_distributed_zero_first # Parameters HELP_URL = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' IMG_FORMATS = ['bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp'] # include image suffixes VID_FORMATS = ['asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'wmv'] # include video suffixes -DEVICE_COUNT = max(torch.cuda.device_count(), 1) +DEVICE_COUNT = max(device_count(), 1) # number of CUDA devices # Get orientation exif tag for orientation in ExifTags.TAGS.keys(): diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 2a45f434c6a5..d958a8951074 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -53,6 +53,15 @@ def git_describe(path=Path(__file__).parent): # path must be a directory return '' # not a git repository +def device_count(): + # Returns number of CUDA devices available. Safe version of torch.cuda.device_count(). 
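+    #   ('nvidia-smi -L' prints one line per visible GPU, so counting those lines
+    #   yields the device count without initializing CUDA in this process)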
+ try: + cmd = 'nvidia-smi -L | wc -l' + return int(subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1]) + except Exception as e: + return 0 + + def select_device(device='', batch_size=0, newline=True): # device = 'cpu' or '0' or '0,1,2,3' s = f'YOLOv5 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string @@ -61,10 +70,10 @@ def select_device(device='', batch_size=0, newline=True): if cpu: os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False elif device: # non-cpu device requested - nd = torch.cuda.device_count() # number of CUDA devices - assert torch.cuda.is_available(), 'CUDA is not available, use `--device cpu` or do not pass a --device' + nd = device_count() # number of CUDA devices assert nd > int(max(device.split(','))), f'Invalid `--device {device}` request, valid devices are 0 - {nd - 1}' - os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable (must be after asserts) + os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable - must be before assert is_available() + assert torch.cuda.is_available(), 'CUDA is not available, use `--device cpu` or do not pass a --device' cuda = not cpu and torch.cuda.is_available() if cuda: From d8b5beb0b0a5cb3ec3ea20e9fff415057dcf25f6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 28 Jan 2022 08:18:01 +0100 Subject: [PATCH 0847/1976] Fix2 `select_device()` for Multi-GPU (#6461) * Fix2 select_device() for Multi-GPU * Cleanup * Cleanup * Simplify error message * Improve assert * Update torch_utils.py --- utils/datasets.py | 6 +++--- utils/torch_utils.py | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index 4eb444087860..07f6321e0285 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -29,13 +29,12 @@ from utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective from utils.general import (LOGGER, NUM_THREADS, check_dataset, check_requirements, check_yaml, clean_str, segments2boxes, xyn2xy, xywh2xyxy, xywhn2xyxy, xyxy2xywhn) -from utils.torch_utils import device_count, torch_distributed_zero_first +from utils.torch_utils import torch_distributed_zero_first # Parameters HELP_URL = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' IMG_FORMATS = ['bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp'] # include image suffixes VID_FORMATS = ['asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'wmv'] # include video suffixes -DEVICE_COUNT = max(device_count(), 1) # number of CUDA devices # Get orientation exif tag for orientation in ExifTags.TAGS.keys(): @@ -110,7 +109,8 @@ def create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=Non prefix=prefix) batch_size = min(batch_size, len(dataset)) - nw = min([os.cpu_count() // DEVICE_COUNT, batch_size if batch_size > 1 else 0, workers]) # number of workers + nd = torch.cuda.device_count() # number of CUDA devices + nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) # number of workers sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates return loader(dataset, diff --git a/utils/torch_utils.py b/utils/torch_utils.py index d958a8951074..2b51821a3b62 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -54,7 +54,8 @@ def 
git_describe(path=Path(__file__).parent): # path must be a directory def device_count(): - # Returns number of CUDA devices available. Safe version of torch.cuda.device_count(). + # Returns number of CUDA devices available. Safe version of torch.cuda.device_count(). Only works on Linux. + assert platform.system() == 'Linux', 'device_count() function only works on Linux' try: cmd = 'nvidia-smi -L | wc -l' return int(subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1]) @@ -70,10 +71,9 @@ def select_device(device='', batch_size=0, newline=True): if cpu: os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False elif device: # non-cpu device requested - nd = device_count() # number of CUDA devices - assert nd > int(max(device.split(','))), f'Invalid `--device {device}` request, valid devices are 0 - {nd - 1}' os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable - must be before assert is_available() - assert torch.cuda.is_available(), 'CUDA is not available, use `--device cpu` or do not pass a --device' + assert torch.cuda.is_available() and torch.cuda.device_count() >= len(device.replace(',', '')), \ + f"Invalid CUDA '--device {device}' requested, use '--device cpu' or pass valid CUDA device(s)" cuda = not cpu and torch.cuda.is_available() if cuda: From 7539cd75c3a6c06d00848617f6265f39a765ccea Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 28 Jan 2022 20:23:17 +0100 Subject: [PATCH 0848/1976] Add Product Hunt social media icon (#6464) * Social media icons update * fix URL * Update README.md --- README.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/README.md b/README.md index a73ba2797b1b..f9947b98557d 100644 --- a/README.md +++ b/README.md @@ -27,6 +27,10 @@ + + + + @@ -282,6 +286,10 @@ professional support requests please visit [https://ultralytics.com/contact](htt + + + + From 6445a8137e87f67cf3275c70e3585f634260417b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 1 Feb 2022 15:54:51 +0100 Subject: [PATCH 0849/1976] Resolve dataset paths (#6489) --- utils/general.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/utils/general.py b/utils/general.py index e9f5ec2ac128..86e3b3c1c54b 100755 --- a/utils/general.py +++ b/utils/general.py @@ -394,12 +394,15 @@ def check_dataset(data, autodownload=True): with open(data, errors='ignore') as f: data = yaml.safe_load(f) # dictionary - # Parse yaml - path = extract_dir or Path(data.get('path') or '') # optional 'path' default to '.' + # Resolve paths + path = Path(extract_dir or data.get('path') or '') # optional 'path' default to '.' + if not path.is_absolute(): + path = (ROOT / path).resolve() for k in 'train', 'val', 'test': if data.get(k): # prepend path data[k] = str(path / data[k]) if isinstance(data[k], str) else [str(path / x) for x in data[k]] + # Parse yaml assert 'nc' in data, "Dataset 'nc' key missing." 
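The practical effect of the path-resolution change above: a relative `path:` value in a dataset YAML is now anchored to the YOLOv5 repository root rather than to whatever directory the command was launched from. A minimal sketch of that logic, using a hypothetical root purely for illustration:

    from pathlib import Path

    ROOT = Path('/home/user/yolov5')           # hypothetical repo root
    data = {'path': '../datasets/coco128', 'train': 'images/train2017'}

    path = Path(data.get('path') or '')        # optional 'path' key, default '.'
    if not path.is_absolute():
        path = (ROOT / path).resolve()         # -> /home/user/datasets/coco128
    print(path / data['train'])                # .../coco128/images/train2017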
if 'names' not in data: data['names'] = [f'class{i}' for i in range(data['nc'])] # assign class names if missing From b884ea36c469d8501aa4016bf76cccfc3168ccd9 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 1 Feb 2022 21:17:56 +0100 Subject: [PATCH 0850/1976] Simplify TF normalized to pixels (#6494) --- models/common.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/models/common.py b/models/common.py index 346fa37ae2d0..557163310e74 100644 --- a/models/common.py +++ b/models/common.py @@ -446,10 +446,7 @@ def forward(self, im, augment=False, visualize=False, val=False): if int8: scale, zero_point = output['quantization'] y = (y.astype(np.float32) - zero_point) * scale # re-scale - y[..., 0] *= w # x - y[..., 1] *= h # y - y[..., 2] *= w # w - y[..., 3] *= h # h + y[..., :4] *= [w, h, w, h] # xywh normalized to pixels y = torch.tensor(y) if isinstance(y, np.ndarray) else y return (y, []) if val else y From 5e4ff195b21816d96b1fe0a94a9670a7e2ad34e2 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 1 Feb 2022 22:06:29 +0100 Subject: [PATCH 0851/1976] Improved `export.py` usage examples (#6495) * Improved `export.py` usage examples * Cleanup --- export.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/export.py b/export.py index 589b381e035a..bb17703821e8 100644 --- a/export.py +++ b/export.py @@ -469,10 +469,10 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' f = [str(x) for x in f if x] # filter out '' and None LOGGER.info(f'\nExport complete ({time.time() - t:.2f}s)' f"\nResults saved to {colorstr('bold', file.parent.resolve())}" - f"\nVisualize with https://netron.app" - f"\nDetect with `python detect.py --weights {f[-1]}`" - f" or `model = torch.hub.load('ultralytics/yolov5', 'custom', '{f[-1]}')" - f"\nValidate with `python val.py --weights {f[-1]}`") + f"\nDetect: python detect.py --weights {f[-1]}" + f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{f[-1]}')" + f"\nValidate: python val.py --weights {f[-1]}" + f"\nVisualize: https://netron.app") return f # return list of exported files/dirs From 77977e07912768738ef7ca46f44f19b6959206d9 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 1 Feb 2022 22:34:15 +0100 Subject: [PATCH 0852/1976] CoreML inference fix `list()` -> `sorted()` (#6496) --- models/common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/common.py b/models/common.py index 557163310e74..29d02e741e17 100644 --- a/models/common.py +++ b/models/common.py @@ -427,7 +427,7 @@ def forward(self, im, augment=False, visualize=False, val=False): conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float) y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1) else: - y = y[list(y)[-1]] # last output + y = y[sorted(y)[-1]] # last output else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU) im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3) if self.saved_model: # SavedModel From 842d049e1bbe5db87ad36f4ba86e1a9c2b6e413a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 1 Feb 2022 22:59:26 +0100 Subject: [PATCH 0853/1976] Suppress `torch.jit.TracerWarning` on export (#6498) * Suppress torch.jit.TracerWarning TracerWarnings can be safely ignored. 
* Cleanup --- export.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/export.py b/export.py index bb17703821e8..8666f3de63e0 100644 --- a/export.py +++ b/export.py @@ -45,6 +45,7 @@ import subprocess import sys import time +import warnings from pathlib import Path import torch @@ -508,8 +509,10 @@ def parse_opt(): def main(opt): - for opt.weights in (opt.weights if isinstance(opt.weights, list) else [opt.weights]): - run(**vars(opt)) + with warnings.catch_warnings(): + warnings.filterwarnings(action='ignore', category=torch.jit.TracerWarning) # suppress TracerWarning + for opt.weights in (opt.weights if isinstance(opt.weights, list) else [opt.weights]): + run(**vars(opt)) if __name__ == "__main__": From 4c409332667477560200958b513b958bb8fdef71 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 1 Feb 2022 23:52:50 +0100 Subject: [PATCH 0854/1976] Suppress export.run() TracerWarnings (#6499) Suppresses warnings when calling export.run() directly, not just CLI python export.py. Also adds Requirements examples for CPU and GPU backends --- export.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/export.py b/export.py index 8666f3de63e0..09c50baa415a 100644 --- a/export.py +++ b/export.py @@ -16,6 +16,10 @@ TensorFlow Edge TPU | `edgetpu` | yolov5s_edgetpu.tflite TensorFlow.js | `tfjs` | yolov5s_web_model/ +Requirements: + $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu # CPU + $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow # GPU + Usage: $ python path/to/export.py --weights yolov5s.pt --include torchscript onnx openvino engine coreml tflite ... @@ -437,6 +441,7 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' # Exports f = [''] * 10 # exported filenames + warnings.filterwarnings(action='ignore', category=torch.jit.TracerWarning) # suppress TracerWarning if 'torchscript' in include: f[0] = export_torchscript(model, im, file, optimize) if 'engine' in include: # TensorRT required before ONNX @@ -509,10 +514,8 @@ def parse_opt(): def main(opt): - with warnings.catch_warnings(): - warnings.filterwarnings(action='ignore', category=torch.jit.TracerWarning) # suppress TracerWarning - for opt.weights in (opt.weights if isinstance(opt.weights, list) else [opt.weights]): - run(**vars(opt)) + for opt.weights in (opt.weights if isinstance(opt.weights, list) else [opt.weights]): + run(**vars(opt)) if __name__ == "__main__": From b73c62ebc5180d1fa3b412e55ab831d8285e1673 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Thu, 3 Feb 2022 15:59:52 +0530 Subject: [PATCH 0855/1976] W&B: Remember batchsize on resuming (#6512) * log best.pt metrics at train end * update * Update __init__.py * flush callbacks when using evolve * remember batch size on resuming * Update train.py Co-authored-by: Glenn Jocher --- train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/train.py b/train.py index 510377e1178e..2a973fb7164b 100644 --- a/train.py +++ b/train.py @@ -96,7 +96,7 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary if loggers.wandb: data_dict = loggers.wandb.data_dict if resume: - weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp + weights, epochs, hyp, batch_size = opt.weights, opt.epochs, opt.hyp, opt.batch_size # Register actions for k in methods(loggers): From 19e0208fc9e33010717e066f9bd65c27db7c2b5c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 3 Feb 2022 12:15:13 +0100 
Subject: [PATCH 0856/1976] Update hyp.scratch-high.yaml (#6525) Update `lrf: 0.1`, tested on YOLOv5x6 to 55.0 mAP@0.5:0.95, slightly higher than current. --- data/hyps/hyp.scratch-high.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/hyps/hyp.scratch-high.yaml b/data/hyps/hyp.scratch-high.yaml index 5a586cc63fae..123cc8407413 100644 --- a/data/hyps/hyp.scratch-high.yaml +++ b/data/hyps/hyp.scratch-high.yaml @@ -4,7 +4,7 @@ # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3) -lrf: 0.2 # final OneCycleLR learning rate (lr0 * lrf) +lrf: 0.1 # final OneCycleLR learning rate (lr0 * lrf) momentum: 0.937 # SGD momentum/Adam beta1 weight_decay: 0.0005 # optimizer weight decay 5e-4 warmup_epochs: 3.0 # warmup epochs (fractions ok) From cb40c9afda52a149b49d5e8d06100c60f6cd1614 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 3 Feb 2022 18:11:28 +0100 Subject: [PATCH 0857/1976] TODO issues exempt from stale action (#6530) --- .github/workflows/stale.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index be2b0d97d5e7..7a83950c17b7 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -34,5 +34,5 @@ jobs: stale-pr-message: 'This pull request has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions YOLOv5 🚀 and Vision AI ⭐.' days-before-stale: 30 days-before-close: 5 - exempt-issue-labels: 'documentation,tutorial' + exempt-issue-labels: 'documentation,tutorial,TODO' operations-per-run: 100 # The maximum number of operations per run, used to control rate limiting. 
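A note on the `lrf` change in patch 0856 above: `lrf` multiplies `lr0` to give the final OneCycleLR learning rate, so with `lr0: 0.01` the end-of-schedule rate drops from 0.002 (lrf=0.2) to 0.001 (lrf=0.1). A sketch of a cosine one-cycle ramp consistent with these hyperparameters (an illustration of the schedule shape, not necessarily the repo's exact implementation):

    import math

    def one_cycle(y1=1.0, y2=0.01, steps=100):
        # cosine interpolation from y1 down to y2 over `steps` epochs
        return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1

    lr0, lrf, epochs = 0.01, 0.1, 300
    lf = one_cycle(1, lrf, epochs)             # per-epoch multiplier applied to lr0
    print(lr0 * lf(0), lr0 * lf(epochs))       # 0.01 at epoch 0 -> ~0.001 at the end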
From c3e599cfda112455d69da0fea64faadfaeaedcf2 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 3 Feb 2022 19:09:24 +0100 Subject: [PATCH 0858/1976] Update val_batch*.jpg for Chinese fonts (#6526) * Update plots for Chinese fonts * make is_chinese() non-str safe * Add global FONT * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update general.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/general.py | 71 ++++++++++++++++++++++++++++-------------------- utils/plots.py | 23 +++++++--------- 2 files changed, 52 insertions(+), 42 deletions(-) diff --git a/utils/general.py b/utils/general.py index 86e3b3c1c54b..fce5e38c6c9e 100755 --- a/utils/general.py +++ b/utils/general.py @@ -37,6 +37,7 @@ ROOT = FILE.parents[1] # YOLOv5 root directory NUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads VERBOSE = str(os.getenv('YOLOv5_VERBOSE', True)).lower() == 'true' # global verbose mode +FONT = 'Arial.ttf' # https://ultralytics.com/assets/Arial.ttf torch.set_printoptions(linewidth=320, precision=5, profile='long') np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5 @@ -55,6 +56,21 @@ def is_kaggle(): return False +def is_writeable(dir, test=False): + # Return True if directory has write permissions, test opening a file with write permissions if test=True + if test: # method 1 + file = Path(dir) / 'tmp.txt' + try: + with open(file, 'w'): # open file with write permissions + pass + file.unlink() # remove file + return True + except OSError: + return False + else: # method 2 + return os.access(dir, os.R_OK) # possible issues on Windows + + def set_logging(name=None, verbose=VERBOSE): # Sets level and returns logger if is_kaggle(): @@ -68,6 +84,22 @@ def set_logging(name=None, verbose=VERBOSE): LOGGER = set_logging('yolov5') # define globally (used in train.py, val.py, detect.py, etc.) +def user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'): + # Return path of user configuration directory. Prefer environment variable if exists. Make dir if required. + env = os.getenv(env_var) + if env: + path = Path(env) # use environment variable + else: + cfg = {'Windows': 'AppData/Roaming', 'Linux': '.config', 'Darwin': 'Library/Application Support'} # 3 OS dirs + path = Path.home() / cfg.get(platform.system(), '') # OS-specific config dir + path = (path if is_writeable(path) else Path('/tmp')) / dir # GCP and AWS lambda fix, only /tmp is writeable + path.mkdir(exist_ok=True) # make if required + return path + + +CONFIG_DIR = user_config_dir() # Ultralytics settings dir + + class Profile(contextlib.ContextDecorator): # Usage: @Profile() decorator or 'with Profile():' context manager def __enter__(self): @@ -152,34 +184,6 @@ def get_latest_run(search_dir='.'): return max(last_list, key=os.path.getctime) if last_list else '' -def user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'): - # Return path of user configuration directory. Prefer environment variable if exists. Make dir if required. 
- env = os.getenv(env_var) - if env: - path = Path(env) # use environment variable - else: - cfg = {'Windows': 'AppData/Roaming', 'Linux': '.config', 'Darwin': 'Library/Application Support'} # 3 OS dirs - path = Path.home() / cfg.get(platform.system(), '') # OS-specific config dir - path = (path if is_writeable(path) else Path('/tmp')) / dir # GCP and AWS lambda fix, only /tmp is writeable - path.mkdir(exist_ok=True) # make if required - return path - - -def is_writeable(dir, test=False): - # Return True if directory has write permissions, test opening a file with write permissions if test=True - if test: # method 1 - file = Path(dir) / 'tmp.txt' - try: - with open(file, 'w'): # open file with write permissions - pass - file.unlink() # remove file - return True - except OSError: - return False - else: # method 2 - return os.access(dir, os.R_OK) # possible issues on Windows - - def is_docker(): # Is environment a Docker container? return Path('/workspace').exists() # or Path('/.dockerenv').exists() @@ -207,7 +211,7 @@ def is_ascii(s=''): def is_chinese(s='人工智能'): # Is string composed of any Chinese characters? - return re.search('[\u4e00-\u9fff]', s) + return True if re.search('[\u4e00-\u9fff]', str(s)) else False def emojis(str=''): @@ -378,6 +382,15 @@ def check_file(file, suffix=''): return files[0] # return file +def check_font(font=FONT): + # Download font to CONFIG_DIR if necessary + font = Path(font) + if not font.exists() and not (CONFIG_DIR / font.name).exists(): + url = "https://ultralytics.com/assets/" + font.name + LOGGER.info(f'Downloading {url} to {CONFIG_DIR / font.name}...') + torch.hub.download_url_to_file(url, str(font), progress=False) + + def check_dataset(data, autodownload=True): # Download and/or unzip dataset if not found locally # Usage: https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128_with_yaml.zip diff --git a/utils/plots.py b/utils/plots.py index 74868403edc0..be70ac8a030f 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -17,12 +17,11 @@ import torch from PIL import Image, ImageDraw, ImageFont -from utils.general import (LOGGER, Timeout, check_requirements, clip_coords, increment_path, is_ascii, is_chinese, - try_except, user_config_dir, xywh2xyxy, xyxy2xywh) +from utils.general import (CONFIG_DIR, FONT, LOGGER, Timeout, check_font, check_requirements, clip_coords, + increment_path, is_ascii, is_chinese, try_except, xywh2xyxy, xyxy2xywh) from utils.metrics import fitness # Settings -CONFIG_DIR = user_config_dir() # Ultralytics settings dir RANK = int(os.getenv('RANK', -1)) matplotlib.rc('font', **{'size': 11}) matplotlib.use('Agg') # for writing to files only @@ -49,16 +48,14 @@ def hex2rgb(h): # rgb order (PIL) colors = Colors() # create instance for 'from utils.plots import colors' -def check_font(font='Arial.ttf', size=10): +def check_pil_font(font=FONT, size=10): # Return a PIL TrueType Font, downloading to CONFIG_DIR if necessary font = Path(font) font = font if font.exists() else (CONFIG_DIR / font.name) try: return ImageFont.truetype(str(font) if font.exists() else font.name, size) except Exception as e: # download if missing - url = "https://ultralytics.com/assets/" + font.name - LOGGER.info(f'Downloading {url} to {font}...') - torch.hub.download_url_to_file(url, str(font), progress=False) + check_font(font) try: return ImageFont.truetype(str(font), size) except TypeError: @@ -67,7 +64,7 @@ def check_font(font='Arial.ttf', size=10): class Annotator: if RANK in (-1, 0): - check_font() # download TTF if necessary + check_pil_font() # 
download TTF if necessary # YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'): @@ -76,8 +73,8 @@ def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=Fa if self.pil: # use PIL self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) self.draw = ImageDraw.Draw(self.im) - self.font = check_font(font='Arial.Unicode.ttf' if is_chinese(example) else font, - size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12)) + self.font = check_pil_font(font='Arial.Unicode.ttf' if is_chinese(example) else font, + size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12)) else: # use cv2 self.im = im self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2) # line width @@ -89,10 +86,10 @@ def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 2 if label: w, h = self.font.getsize(label) # text width, height outside = box[1] - h >= 0 # label fits outside box - self.draw.rectangle([box[0], + self.draw.rectangle((box[0], box[1] - h if outside else box[1], box[0] + w + 1, - box[1] + 1 if outside else box[1] + h + 1], fill=color) + box[1] + 1 if outside else box[1] + h + 1), fill=color) # self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls') # for PIL>8.0 self.draw.text((box[0], box[1] - h if outside else box[1]), label, fill=txt_color, font=self.font) else: # cv2 @@ -210,7 +207,7 @@ def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max # Annotate fs = int((h + w) * ns * 0.01) # font size - annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True) + annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True, example=names) for i in range(i + 1): x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders From a82292ec5376cd7ff07fc6e85b731c09cdaeff4f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 3 Feb 2022 19:55:19 +0100 Subject: [PATCH 0859/1976] Social icons after text (#6473) * Social icons after text * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update README.md Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- README.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index f9947b98557d..7bfea7c24e8f 100644 --- a/README.md +++ b/README.md @@ -13,7 +13,13 @@ Open In Kaggle Join Forum +
+   [social-media icon links — HTML markup stripped during extraction]
+YOLOv5 🚀 is a family of object detection architectures and models pretrained on the COCO dataset, and represents Ultralytics
+open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development.
-YOLOv5 🚀 is a family of object detection architectures and models pretrained on the COCO dataset, and represents Ultralytics
-open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development.
- From 63ddb6f0d06f6309aa42bababd08c859197a27af Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 26 Feb 2022 19:15:12 +0100 Subject: [PATCH 0919/1976] Update autoanchor.py (#6794) * Update autoanchor.py * Update autoanchor.py --- utils/autoanchor.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/utils/autoanchor.py b/utils/autoanchor.py index 27d6fb68bb38..51d4de306efd 100644 --- a/utils/autoanchor.py +++ b/utils/autoanchor.py @@ -57,9 +57,10 @@ def metric(k): # compute metric anchors = torch.tensor(anchors, device=m.anchors.device).type_as(m.anchors) m.anchors[:] = anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1) # loss check_anchor_order(m) - LOGGER.info(f'{PREFIX}New anchors saved to model. Update model *.yaml to use these anchors in the future.') + s = f'{PREFIX}Done ✅ (optional: update model *.yaml to use these anchors in the future)' else: - LOGGER.info(f'{PREFIX}Original anchors better than new anchors. Proceeding with original anchors.') + s = f'{PREFIX}Done ⚠️ (original anchors better than new anchors, proceeding with original anchors)' + LOGGER.info(emojis(s)) def kmean_anchors(dataset='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True): @@ -120,7 +121,7 @@ def print_results(k, verbose=True): # Filter i = (wh0 < 3.0).any(1).sum() if i: - LOGGER.info(f'{PREFIX}WARNING: Extremely small objects found. {i} of {len(wh0)} labels are < 3 pixels in size.') + LOGGER.info(f'{PREFIX}WARNING: Extremely small objects found: {i} of {len(wh0)} labels are < 3 pixels in size') wh = wh0[(wh0 >= 2.0).any(1)] # filter > 2 pixels # wh = wh * (npr.rand(wh.shape[0], 1) * 0.9 + 0.1) # multiply by random scale 0-1 From bcc92e2169a233c3e974db40ddc9b496d9c29ec8 Mon Sep 17 00:00:00 2001 From: Louis Combaldieu Date: Fri, 4 Mar 2022 09:39:23 +0100 Subject: [PATCH 0920/1976] Update sweep.yaml (#6825) * Update sweep.yaml Changed focal loss gamma search range between 1 and 4 * Update sweep.yaml lowered the min value to match default --- utils/loggers/wandb/sweep.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/loggers/wandb/sweep.yaml b/utils/loggers/wandb/sweep.yaml index c7790d75f6b2..688b1ea0285f 100644 --- a/utils/loggers/wandb/sweep.yaml +++ b/utils/loggers/wandb/sweep.yaml @@ -88,7 +88,7 @@ parameters: fl_gamma: distribution: uniform min: 0.0 - max: 0.1 + max: 4.0 hsv_h: distribution: uniform min: 0.0 From 601dbb83f01b58355211f2565cfa4eecb48b1220 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 4 Mar 2022 10:32:18 +0100 Subject: [PATCH 0921/1976] AutoAnchor improved initialization robustness (#6854) * Update AutoAnchor * Update AutoAnchor * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/autoanchor.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/utils/autoanchor.py b/utils/autoanchor.py index 51d4de306efd..a631c21a3b26 100644 --- a/utils/autoanchor.py +++ b/utils/autoanchor.py @@ -125,15 +125,17 @@ def print_results(k, verbose=True): wh = wh0[(wh0 >= 2.0).any(1)] # filter > 2 pixels # wh = wh * (npr.rand(wh.shape[0], 1) * 0.9 + 0.1) # multiply by random scale 0-1 - # Kmeans calculation - LOGGER.info(f'{PREFIX}Running kmeans for {n} anchors on {len(wh)} points...') - s = wh.std(0) # sigmas for whitening - k = kmeans(wh / s, n, iter=30)[0] * s # points - if len(k) != n: # kmeans may return fewer 
points than requested if wh is insufficient or too similar - LOGGER.warning(f'{PREFIX}WARNING: scipy.cluster.vq.kmeans returned only {len(k)} of {n} requested points') + # Kmeans init + try: + LOGGER.info(f'{PREFIX}Running kmeans for {n} anchors on {len(wh)} points...') + assert n <= len(wh) # apply overdetermined constraint + s = wh.std(0) # sigmas for whitening + k = kmeans(wh / s, n, iter=30)[0] * s # points + assert n == len(k) # kmeans may return fewer points than requested if wh is insufficient or too similar + except Exception: + LOGGER.warning(f'{PREFIX}WARNING: switching strategies from kmeans to random init') k = np.sort(npr.rand(n * 2)).reshape(n, 2) * img_size # random init - wh = torch.tensor(wh, dtype=torch.float32) # filtered - wh0 = torch.tensor(wh0, dtype=torch.float32) # unfiltered + wh, wh0 = (torch.tensor(x, dtype=torch.float32) for x in (wh, wh0)) k = print_results(k, verbose=False) # Plot From 8a66ebad44e8ecf90c7d27757c832579398d4baf Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 4 Mar 2022 14:10:13 +0100 Subject: [PATCH 0922/1976] Add `*.ts` to `VID_FORMATS` (#6859) --- utils/datasets.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index e132e04f6d9d..c325b9910ed3 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -33,8 +33,8 @@ # Parameters HELP_URL = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' -IMG_FORMATS = ['bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp'] # include image suffixes -VID_FORMATS = ['asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'wmv'] # include video suffixes +IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp' # include image suffixes +VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes # Get orientation exif tag for orientation in ExifTags.TAGS.keys(): From 47288407450f83ccbdbd2e950bf339e30e67a181 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 6 Mar 2022 16:16:17 +0100 Subject: [PATCH 0923/1976] Update `--cache disk` deprecate `*_npy/` dirs (#6876) * Updates * Updates * Updates * Updates * Updates * Updates * Updates * Updates * Updates * Updates * Cleanup * Cleanup --- utils/datasets.py | 76 +++++++++++++++--------------- utils/loggers/wandb/wandb_utils.py | 2 +- val.py | 2 +- 3 files changed, 40 insertions(+), 40 deletions(-) diff --git a/utils/datasets.py b/utils/datasets.py index c325b9910ed3..6a2dc58dd6cd 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -407,19 +407,19 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r # f += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib) else: raise Exception(f'{prefix}{p} does not exist') - self.img_files = sorted(x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS) + self.im_files = sorted(x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS) # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS]) # pathlib - assert self.img_files, f'{prefix}No images found' + assert self.im_files, f'{prefix}No images found' except Exception as e: raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {HELP_URL}') # Check cache - self.label_files = img2label_paths(self.img_files) # labels + self.label_files = img2label_paths(self.im_files) # labels cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') try: 
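            # the *.cache file holds a pickled dict (np.save / np.load(...).item()); if the
            # version tag or file hash below disagrees, the except branch re-runs cache_labels()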
cache, exists = np.load(cache_path, allow_pickle=True).item(), True # load dict assert cache['version'] == self.cache_version # same version - assert cache['hash'] == get_hash(self.label_files + self.img_files) # same hash + assert cache['hash'] == get_hash(self.label_files + self.im_files) # same hash except Exception: cache, exists = self.cache_labels(cache_path, prefix), False # cache @@ -437,7 +437,7 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r labels, shapes, self.segments = zip(*cache.values()) self.labels = list(labels) self.shapes = np.array(shapes, dtype=np.float64) - self.img_files = list(cache.keys()) # update + self.im_files = list(cache.keys()) # update self.label_files = img2label_paths(cache.keys()) # update n = len(shapes) # number of images bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index @@ -466,7 +466,7 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r s = self.shapes # wh ar = s[:, 1] / s[:, 0] # aspect ratio irect = ar.argsort() - self.img_files = [self.img_files[i] for i in irect] + self.im_files = [self.im_files[i] for i in irect] self.label_files = [self.label_files[i] for i in irect] self.labels = [self.labels[i] for i in irect] self.shapes = s[irect] # wh @@ -485,24 +485,20 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride # Cache images into RAM/disk for faster training (WARNING: large datasets may exceed system resources) - self.imgs, self.img_npy = [None] * n, [None] * n + self.ims = [None] * n + self.npy_files = [Path(f).with_suffix('.npy') for f in self.im_files] if cache_images: - if cache_images == 'disk': - self.im_cache_dir = Path(Path(self.img_files[0]).parent.as_posix() + '_npy') - self.img_npy = [self.im_cache_dir / Path(f).with_suffix('.npy').name for f in self.img_files] - self.im_cache_dir.mkdir(parents=True, exist_ok=True) gb = 0 # Gigabytes of cached images - self.img_hw0, self.img_hw = [None] * n, [None] * n - results = ThreadPool(NUM_THREADS).imap(self.load_image, range(n)) + self.im_hw0, self.im_hw = [None] * n, [None] * n + fcn = self.cache_images_to_disk if cache_images == 'disk' else self.load_image + results = ThreadPool(NUM_THREADS).imap(fcn, range(n)) pbar = tqdm(enumerate(results), total=n) for i, x in pbar: if cache_images == 'disk': - if not self.img_npy[i].exists(): - np.save(self.img_npy[i].as_posix(), x[0]) - gb += self.img_npy[i].stat().st_size + gb += self.npy_files[i].stat().st_size else: # 'ram' - self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # im, hw_orig, hw_resized = load_image(self, i) - gb += self.imgs[i].nbytes + self.ims[i], self.im_hw0[i], self.im_hw[i] = x # im, hw_orig, hw_resized = load_image(self, i) + gb += self.ims[i].nbytes pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB {cache_images})' pbar.close() @@ -512,8 +508,8 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels..." 
with Pool(NUM_THREADS) as pool: - pbar = tqdm(pool.imap(verify_image_label, zip(self.img_files, self.label_files, repeat(prefix))), - desc=desc, total=len(self.img_files)) + pbar = tqdm(pool.imap(verify_image_label, zip(self.im_files, self.label_files, repeat(prefix))), + desc=desc, total=len(self.im_files)) for im_file, lb, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar: nm += nm_f nf += nf_f @@ -530,8 +526,8 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): LOGGER.info('\n'.join(msgs)) if nf == 0: LOGGER.warning(f'{prefix}WARNING: No labels found in {path}. See {HELP_URL}') - x['hash'] = get_hash(self.label_files + self.img_files) - x['results'] = nf, nm, ne, nc, len(self.img_files) + x['hash'] = get_hash(self.label_files + self.im_files) + x['results'] = nf, nm, ne, nc, len(self.im_files) x['msgs'] = msgs # warnings x['version'] = self.cache_version # cache version try: @@ -543,7 +539,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): return x def __len__(self): - return len(self.img_files) + return len(self.im_files) # def __iter__(self): # self.count = -1 @@ -622,17 +618,15 @@ def __getitem__(self, index): img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB img = np.ascontiguousarray(img) - return torch.from_numpy(img), labels_out, self.img_files[index], shapes + return torch.from_numpy(img), labels_out, self.im_files[index], shapes def load_image(self, i): - # loads 1 image from dataset index 'i', returns (im, original hw, resized hw) - im = self.imgs[i] + # Loads 1 image from dataset index 'i', returns (im, original hw, resized hw) + im, f, fn = self.ims[i], self.im_files[i], self.npy_files[i], if im is None: # not cached in RAM - npy = self.img_npy[i] - if npy and npy.exists(): # load npy - im = np.load(npy) + if fn.exists(): # load npy + im = np.load(fn) else: # read image - f = self.img_files[i] im = cv2.imread(f) # BGR assert im is not None, f'Image Not Found {f}' h0, w0 = im.shape[:2] # orig hw @@ -643,7 +637,13 @@ def load_image(self, i): interpolation=cv2.INTER_LINEAR if (self.augment or r > 1) else cv2.INTER_AREA) return im, (h0, w0), im.shape[:2] # im, hw_original, hw_resized else: - return self.imgs[i], self.img_hw0[i], self.img_hw[i] # im, hw_original, hw_resized + return self.ims[i], self.im_hw0[i], self.im_hw[i] # im, hw_original, hw_resized + + def cache_images_to_disk(self, i): + # Saves an image as an *.npy file for faster loading + f = self.npy_files[i] + if not f.exists(): + np.save(f.as_posix(), cv2.imread(self.im_files[i])) def load_mosaic(self, index): # YOLOv5 4-mosaic loader. 
Loads 1 image + 3 random images into a 4-image mosaic @@ -777,16 +777,16 @@ def load_mosaic9(self, index): @staticmethod def collate_fn(batch): - img, label, path, shapes = zip(*batch) # transposed + im, label, path, shapes = zip(*batch) # transposed for i, lb in enumerate(label): lb[:, 0] = i # add target image index for build_targets() - return torch.stack(img, 0), torch.cat(label, 0), path, shapes + return torch.stack(im, 0), torch.cat(label, 0), path, shapes @staticmethod def collate_fn4(batch): img, label, path, shapes = zip(*batch) # transposed n = len(shapes) // 4 - img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n] + im4, label4, path4, shapes4 = [], [], path[:n], shapes[:n] ho = torch.tensor([[0.0, 0, 0, 1, 0, 0]]) wo = torch.tensor([[0.0, 0, 1, 0, 0, 0]]) @@ -800,13 +800,13 @@ def collate_fn4(batch): else: im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2) lb = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s - img4.append(im) + im4.append(im) label4.append(lb) for i, lb in enumerate(label4): lb[:, 0] = i # add target image index for build_targets() - return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4 + return torch.stack(im4, 0), torch.cat(label4, 0), path4, shapes4 # Ancillary functions -------------------------------------------------------------------------------------------------- @@ -999,12 +999,12 @@ def hub_ops(f, max_dim=1920): 'image_stats': {'total': dataset.n, 'unlabelled': int(np.all(x == 0, 1).sum()), 'per_class': (x > 0).sum(0).tolist()}, 'labels': [{str(Path(k).name): round_labels(v.tolist())} for k, v in - zip(dataset.img_files, dataset.labels)]} + zip(dataset.im_files, dataset.labels)]} if hub: im_dir = hub_dir / 'images' im_dir.mkdir(parents=True, exist_ok=True) - for _ in tqdm(ThreadPool(NUM_THREADS).imap(hub_ops, dataset.img_files), total=dataset.n, desc='HUB Ops'): + for _ in tqdm(ThreadPool(NUM_THREADS).imap(hub_ops, dataset.im_files), total=dataset.n, desc='HUB Ops'): pass # Profile diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index 3835436543d2..786e58a19972 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -403,7 +403,7 @@ def create_dataset_table(self, dataset: LoadImagesAndLabels, class_to_id: Dict[i # TODO: Explore multiprocessing to slpit this loop parallely| This is essential for speeding up the the logging artifact = wandb.Artifact(name=name, type="dataset") img_files = tqdm([dataset.path]) if isinstance(dataset.path, str) and Path(dataset.path).is_dir() else None - img_files = tqdm(dataset.img_files) if not img_files else img_files + img_files = tqdm(dataset.im_files) if not img_files else img_files for img_file in img_files: if Path(img_file).is_dir(): artifact.add_dir(img_file, name='data/images') diff --git a/val.py b/val.py index 78abbda8231a..8bde37bd5dc7 100644 --- a/val.py +++ b/val.py @@ -297,7 +297,7 @@ def run(data, pred = anno.loadRes(pred_json) # init predictions api eval = COCOeval(anno, pred, 'bbox') if is_coco: - eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files] # image IDs to evaluate + eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.im_files] # image IDs to evaluate eval.evaluate() eval.accumulate() eval.summarize() From 7e98b4801a2f3e607aa2636a4346e2482f961596 Mon Sep 17 00:00:00 2001 From: vnekat <92971065+vnekat@users.noreply.github.com> Date: Mon, 7 Mar 2022 00:50:01 +0530 Subject: [PATCH 
0924/1976] Update yolov5s.yaml (#6865) * Update yolov5s.yaml * Update yolov5s.yaml Co-authored-by: Glenn Jocher From 596de6d5a00223dc4be86377dfba6df4341b76b1 Mon Sep 17 00:00:00 2001 From: DavidB Date: Mon, 7 Mar 2022 03:21:16 +0700 Subject: [PATCH 0925/1976] Default FP16 TensorRT export (#6798) * Assert engine precision #6777 * Default to FP32 inputs for TensorRT engines * Default to FP16 TensorRT exports #6777 * Remove wrong line #6777 * Automatically adjust detect.py input precision #6777 * Automatically adjust val.py input precision #6777 * Add missing colon * Cleanup * Cleanup * Remove default trt_fp16_input definition * Experiment * Reorder detect.py if statement to after half checks * Update common.py * Update export.py * Cleanup Co-authored-by: Glenn Jocher --- detect.py | 4 ++++ export.py | 5 ++--- models/common.py | 3 +++ val.py | 4 ++++ 4 files changed, 13 insertions(+), 3 deletions(-) diff --git a/detect.py b/detect.py index 76f67bea1b90..ba43ed9e1eed 100644 --- a/detect.py +++ b/detect.py @@ -97,6 +97,10 @@ def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s) half &= (pt or jit or onnx or engine) and device.type != 'cpu' # FP16 supported on limited backends with CUDA if pt or jit: model.model.half() if half else model.model.float() + elif engine and model.trt_fp16_input != half: + LOGGER.info('model ' + ( + 'requires' if model.trt_fp16_input else 'incompatible with') + ' --half. Adjusting automatically.') + half = model.trt_fp16_input # Dataloader if webcam: diff --git a/export.py b/export.py index 286df623d252..7a5205d55ee6 100644 --- a/export.py +++ b/export.py @@ -233,9 +233,8 @@ def export_engine(model, im, file, train, half, simplify, workspace=4, verbose=F for out in outputs: LOGGER.info(f'{prefix}\toutput "{out.name}" with shape {out.shape} and dtype {out.dtype}') - half &= builder.platform_has_fast_fp16 - LOGGER.info(f'{prefix} building FP{16 if half else 32} engine in {f}') - if half: + LOGGER.info(f'{prefix} building FP{16 if builder.platform_has_fast_fp16 else 32} engine in {f}') + if builder.platform_has_fast_fp16: config.set_flag(trt.BuilderFlag.FP16) with builder.build_engine(network, config) as engine, open(f, 'wb') as t: t.write(engine.serialize()) diff --git a/models/common.py b/models/common.py index 0dae0244e932..70ee7105abfc 100644 --- a/models/common.py +++ b/models/common.py @@ -338,6 +338,7 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=False, data=None): import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=7.0.0 Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr')) + trt_fp16_input = False logger = trt.Logger(trt.Logger.INFO) with open(w, 'rb') as f, trt.Runtime(logger) as runtime: model = runtime.deserialize_cuda_engine(f.read()) @@ -348,6 +349,8 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=False, data=None): shape = tuple(model.get_binding_shape(index)) data = torch.from_numpy(np.empty(shape, dtype=np.dtype(dtype))).to(device) bindings[name] = Binding(name, dtype, shape, data, int(data.data_ptr())) + if model.binding_is_input(index) and dtype == np.float16: + trt_fp16_input = True binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items()) context = model.create_execution_context() batch_size = bindings['images'].shape[0] diff --git a/val.py b/val.py index 8bde37bd5dc7..dfbfa3935210 100644 --- a/val.py +++ b/val.py @@ -144,6 +144,10 @@ def run(data, model.model.half() if half else 
model.model.float() elif engine: batch_size = model.batch_size + if model.trt_fp16_input != half: + LOGGER.info('model ' + ( + 'requires' if model.trt_fp16_input else 'incompatible with') + ' --half. Adjusting automatically.') + half = model.trt_fp16_input else: half = False batch_size = 1 # export.py models default to batch-size 1 From c8a589920e877016c8a9be00fd0077005dc68f51 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 7 Mar 2022 13:48:59 +0100 Subject: [PATCH 0926/1976] Bump actions/setup-python from 2 to 3 (#6880) Bumps [actions/setup-python](https://github.com/actions/setup-python) from 2 to 3. - [Release notes](https://github.com/actions/setup-python/releases) - [Commits](https://github.com/actions/setup-python/compare/v2...v3) --- updated-dependencies: - dependency-name: actions/setup-python dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci-testing.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 5cf1613ab0cd..10fab276f8f2 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -27,7 +27,7 @@ jobs: steps: - uses: actions/checkout@v2 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2 + uses: actions/setup-python@v3 with: python-version: ${{ matrix.python-version }} From a5a1760ea6d1c172b91fa5b0606434c8379b45f0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 7 Mar 2022 13:49:27 +0100 Subject: [PATCH 0927/1976] Bump actions/checkout from 2 to 3 (#6881) Bumps [actions/checkout](https://github.com/actions/checkout) from 2 to 3. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/v2...v3) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci-testing.yml | 2 +- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/rebase.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 10fab276f8f2..f2096ce17a17 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -25,7 +25,7 @@ jobs: # Timeout: https://stackoverflow.com/a/59076067/4521646 timeout-minutes: 60 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v3 with: diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 67f51f0e8bce..8bc88e957a36 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -22,7 +22,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v2 + uses: actions/checkout@v3 # Initializes the CodeQL tools for scanning. 
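# The two Dependabot patches here are plain major-version pins of GitHub Actions;
# the equivalent manual edit in any workflow step is a one-line tag bump, e.g.:
#
#   - uses: actions/checkout@v3        # was actions/checkout@v2
#   - uses: actions/setup-python@v3    # was actions/setup-python@v2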
- name: Initialize CodeQL diff --git a/.github/workflows/rebase.yml b/.github/workflows/rebase.yml index a4db1efb2971..75c57546166b 100644 --- a/.github/workflows/rebase.yml +++ b/.github/workflows/rebase.yml @@ -11,7 +11,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout the latest code - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: token: ${{ secrets.ACTIONS_TOKEN }} fetch-depth: 0 # otherwise, you will fail to push refs to dest repo From acc58c1dcfba054ef936ee1458a8ff74a088ee74 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 7 Mar 2022 13:52:53 +0100 Subject: [PATCH 0928/1976] Fix TRT `max_workspace_size` deprecation notice (#6856) * Fix TRT `max_workspace_size` deprecation notice * Update export.py * Update export.py --- export.py | 1 + 1 file changed, 1 insertion(+) diff --git a/export.py b/export.py index 7a5205d55ee6..1e3d3e2f2e71 100644 --- a/export.py +++ b/export.py @@ -218,6 +218,7 @@ def export_engine(model, im, file, train, half, simplify, workspace=4, verbose=F builder = trt.Builder(logger) config = builder.create_builder_config() config.max_workspace_size = workspace * 1 << 30 + # config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace << 30) # fix TRT 8.4 deprecation notice flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)) network = builder.create_network(flag) From e6e36aac109794999f1dafab244b9ec4887a33d1 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 7 Mar 2022 19:26:37 +0100 Subject: [PATCH 0929/1976] Update bytes to GB with bitshift (#6886) --- utils/__init__.py | 7 +++---- utils/autobatch.py | 7 ++++--- utils/general.py | 5 +++-- utils/torch_utils.py | 2 +- 4 files changed, 11 insertions(+), 10 deletions(-) diff --git a/utils/__init__.py b/utils/__init__.py index 4658ed6473cd..a63c473a4340 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -21,14 +21,13 @@ def notebook_init(verbose=True): if is_colab(): shutil.rmtree('/content/sample_data', ignore_errors=True) # remove colab /sample_data directory + # System info if verbose: - # System info - # gb = 1 / 1000 ** 3 # bytes to GB - gib = 1 / 1024 ** 3 # bytes to GiB + gb = 1 << 30 # bytes to GiB (1024 ** 3) ram = psutil.virtual_memory().total total, used, free = shutil.disk_usage("/") display.clear_output() - s = f'({os.cpu_count()} CPUs, {ram * gib:.1f} GB RAM, {(total - free) * gib:.1f}/{total * gib:.1f} GB disk)' + s = f'({os.cpu_count()} CPUs, {ram / gb:.1f} GB RAM, {(total - free) / gb:.1f}/{total / gb:.1f} GB disk)' else: s = '' diff --git a/utils/autobatch.py b/utils/autobatch.py index cb94f041e95d..e53b4787b87d 100644 --- a/utils/autobatch.py +++ b/utils/autobatch.py @@ -34,11 +34,12 @@ def autobatch(model, imgsz=640, fraction=0.9, batch_size=16): LOGGER.info(f'{prefix}CUDA not detected, using default CPU batch-size {batch_size}') return batch_size + gb = 1 << 30 # bytes to GiB (1024 ** 3) d = str(device).upper() # 'CUDA:0' properties = torch.cuda.get_device_properties(device) # device properties - t = properties.total_memory / 1024 ** 3 # (GiB) - r = torch.cuda.memory_reserved(device) / 1024 ** 3 # (GiB) - a = torch.cuda.memory_allocated(device) / 1024 ** 3 # (GiB) + t = properties.total_memory / gb # (GiB) + r = torch.cuda.memory_reserved(device) / gb # (GiB) + a = torch.cuda.memory_allocated(device) / gb # (GiB) f = t - (r + a) # free inside reserved LOGGER.info(f'{prefix}{d} ({properties.name}) {t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free') diff --git a/utils/general.py b/utils/general.py index 
d1594a8b5cea..36c180fe4cf2 100755 --- a/utils/general.py +++ b/utils/general.py @@ -223,11 +223,12 @@ def emojis(str=''): def file_size(path): # Return file/dir size (MB) + mb = 1 << 20 # bytes to MiB (1024 ** 2) path = Path(path) if path.is_file(): - return path.stat().st_size / 1E6 + return path.stat().st_size / mb elif path.is_dir(): - return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / 1E6 + return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / mb else: return 0.0 diff --git a/utils/torch_utils.py b/utils/torch_utils.py index c11d2a4269ef..2e6fba06626a 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -86,7 +86,7 @@ def select_device(device='', batch_size=0, newline=True): space = ' ' * (len(s) + 1) for i, d in enumerate(devices): p = torch.cuda.get_device_properties(i) - s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2:.0f}MiB)\n" # bytes to MB + s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / (1 << 20):.0f}MiB)\n" # bytes to MB else: s += 'CPU\n' From 6dd82c025298d219a1eb1fe8e486fb99d5324d34 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 9 Mar 2022 18:22:53 +0100 Subject: [PATCH 0930/1976] Move `git_describe()` to general.py (#6918) * Move `git_describe()` to general.py * Move `git_describe()` to general.py --- utils/general.py | 21 +++++++++++++++++++++ utils/torch_utils.py | 21 ++------------------- 2 files changed, 23 insertions(+), 19 deletions(-) diff --git a/utils/general.py b/utils/general.py index 36c180fe4cf2..a7891cbccbab 100755 --- a/utils/general.py +++ b/utils/general.py @@ -15,6 +15,7 @@ import signal import time import urllib +from datetime import datetime from itertools import repeat from multiprocessing.pool import ThreadPool from pathlib import Path @@ -221,6 +222,18 @@ def emojis(str=''): return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str +def file_age(path=__file__): + # Return days since last file update + dt = (datetime.now() - datetime.fromtimestamp(Path(path).stat().st_mtime)) # delta + return dt.days # + dt.seconds / 86400 # fractional days + + +def file_update_date(path=__file__): + # Return human-readable file modification date, i.e. '2021-3-26' + t = datetime.fromtimestamp(Path(path).stat().st_mtime) + return f'{t.year}-{t.month}-{t.day}' + + def file_size(path): # Return file/dir size (MB) mb = 1 << 20 # bytes to MiB (1024 ** 2) @@ -243,6 +256,14 @@ def check_online(): return False +def git_describe(path=ROOT): # path must be a directory + # Return human-readable git description, i.e. 
v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe + try: + return check_output(f'git -C {path} describe --tags --long --always', shell=True).decode()[:-1] + except Exception: + return '' + + @try_except @WorkingDirectory(ROOT) def check_git_status(): diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 2e6fba06626a..efcacc9ca735 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -3,7 +3,6 @@ PyTorch utils """ -import datetime import math import os import platform @@ -12,14 +11,13 @@ import warnings from contextlib import contextmanager from copy import deepcopy -from pathlib import Path import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F -from utils.general import LOGGER +from utils.general import LOGGER, file_update_date, git_describe try: import thop # for FLOPs computation @@ -40,21 +38,6 @@ def torch_distributed_zero_first(local_rank: int): dist.barrier(device_ids=[0]) -def date_modified(path=__file__): - # Return human-readable file modification date, i.e. '2021-3-26' - t = datetime.datetime.fromtimestamp(Path(path).stat().st_mtime) - return f'{t.year}-{t.month}-{t.day}' - - -def git_describe(path=Path(__file__).parent): # path must be a directory - # Return human-readable git description, i.e. v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe - s = f'git -C {path} describe --tags --long --always' - try: - return subprocess.check_output(s, shell=True, stderr=subprocess.STDOUT).decode()[:-1] - except subprocess.CalledProcessError: - return '' # not a git repository - - def device_count(): # Returns number of CUDA devices available. Safe version of torch.cuda.device_count(). Only works on Linux. assert platform.system() == 'Linux', 'device_count() function only works on Linux' @@ -67,7 +50,7 @@ def device_count(): def select_device(device='', batch_size=0, newline=True): # device = 'cpu' or '0' or '0,1,2,3' - s = f'YOLOv5 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string + s = f'YOLOv5 🚀 {git_describe() or file_update_date()} torch {torch.__version__} ' # string device = str(device).strip().lower().replace('cuda:', '') # to string, 'cuda:0' to '0' cpu = device == 'cpu' if cpu: From d3d9cbce221b2ced46dde374f24fde72c8e71c37 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 10 Mar 2022 12:41:06 +0100 Subject: [PATCH 0931/1976] PyTorch 1.11.0 compatibility updates (#6932) Resolves `AttributeError: 'Upsample' object has no attribute 'recompute_scale_factor'` first raised in https://github.com/ultralytics/yolov5/issues/5499 --- models/experimental.py | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/models/experimental.py b/models/experimental.py index 463e5514a06e..01bdfe72db4f 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -94,21 +94,22 @@ def attempt_load(weights, map_location=None, inplace=True, fuse=True): model = Ensemble() for w in weights if isinstance(weights, list) else [weights]: ckpt = torch.load(attempt_download(w), map_location=map_location) # load - if fuse: - model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval()) # FP32 model - else: - model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().eval()) # without layer fuse + ckpt = (ckpt['ema'] or ckpt['model']).float() # FP32 model + model.append(ckpt.fuse().eval() if fuse else ckpt.eval()) # fused or un-fused model in eval mode # Compatibility updates for m in model.modules(): - if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, 
nn.ReLU6, nn.SiLU, Detect, Model]: - m.inplace = inplace # pytorch 1.7.0 compatibility - if type(m) is Detect: + t = type(m) + if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model): + m.inplace = inplace # torch 1.7.0 compatibility + if t is Detect: if not isinstance(m.anchor_grid, list): # new Detect Layer compatibility delattr(m, 'anchor_grid') setattr(m, 'anchor_grid', [torch.zeros(1)] * m.nl) - elif type(m) is Conv: - m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility + elif t is nn.Upsample: + m.recompute_scale_factor = None # torch 1.11.0 compatibility + elif t is Conv: + m._non_persistent_buffers_set = set() # torch 1.6.0 compatibility if len(model) == 1: return model[-1] # return model From 055e72af5b887832d5e7267ac9226c825d498cd2 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 10 Mar 2022 12:58:41 +0100 Subject: [PATCH 0932/1976] Optimize PyTorch 1.11.0 compatibility update (#6933) --- models/experimental.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/models/experimental.py b/models/experimental.py index 01bdfe72db4f..782ecbeface9 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -106,10 +106,10 @@ def attempt_load(weights, map_location=None, inplace=True, fuse=True): if not isinstance(m.anchor_grid, list): # new Detect Layer compatibility delattr(m, 'anchor_grid') setattr(m, 'anchor_grid', [torch.zeros(1)] * m.nl) - elif t is nn.Upsample: - m.recompute_scale_factor = None # torch 1.11.0 compatibility elif t is Conv: m._non_persistent_buffers_set = set() # torch 1.6.0 compatibility + elif t is nn.Upsample and not hasattr(m, 'recompute_scale_factor'): + m.recompute_scale_factor = None # torch 1.11.0 compatibility if len(model) == 1: return model[-1] # return model From caf7ad0500f8fc58567a7aa01ca91d5ee77691d6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 10 Mar 2022 18:41:47 +0100 Subject: [PATCH 0933/1976] Allow 3-point segments (#6938) May resolve https://github.com/ultralytics/yolov5/issues/6931 --- utils/datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/datasets.py b/utils/datasets.py index 6a2dc58dd6cd..00d0d94e0847 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -906,7 +906,7 @@ def verify_image_label(args): nf = 1 # label found with open(lb_file) as f: lb = [x.split() for x in f.read().strip().splitlines() if len(x)] - if any([len(x) > 8 for x in lb]): # is segment + if any(len(x) > 6 for x in lb): # is segment classes = np.array([x[0] for x in lb], dtype=np.float32) segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in lb] # (cls, xy1...) 
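# A row is now treated as a segment label when it has more than 6 fields: a class
# id plus at least three xy pairs, the smallest valid polygon (previously > 8,
# i.e. four points minimum). A sketch of the polygon-to-box step this feeds,
# simplified from the repo's segments2boxes()/segment2box() helpers:
#
#   import numpy as np
#
#   def segment2xywh(seg):  # seg: (n, 2) array of polygon xy points
#       x, y = seg[:, 0], seg[:, 1]
#       xc, yc = (x.min() + x.max()) / 2, (y.min() + y.max()) / 2  # box center
#       w, h = x.max() - x.min(), y.max() - y.min()  # box size
#       return np.array([xc, yc, w, h])  # tight xywh box around the polygon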
lb = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh) From 84efa62b2d0a619309a7437aa82cebdfc4de1bed Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 11 Mar 2022 16:18:40 +0100 Subject: [PATCH 0934/1976] Fix PyTorch Hub export inference shapes (#6949) May resolve https://github.com/ultralytics/yolov5/issues/6947 --- models/common.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/models/common.py b/models/common.py index 70ee7105abfc..ac3af20d533e 100644 --- a/models/common.py +++ b/models/common.py @@ -544,10 +544,9 @@ def forward(self, imgs, size=640, augment=False, profile=False): g = (size / max(s)) # gain shape1.append([y * g for y in s]) imgs[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update - shape1 = [make_divisible(x, self.stride) for x in np.stack(shape1, 0).max(0)] # inference shape - x = [letterbox(im, new_shape=shape1 if self.pt else size, auto=False)[0] for im in imgs] # pad - x = np.stack(x, 0) if n > 1 else x[0][None] # stack - x = np.ascontiguousarray(x.transpose((0, 3, 1, 2))) # BHWC to BCHW + shape1 = [make_divisible(x, self.stride) if self.pt else size for x in np.array(shape1).max(0)] # inf shape + x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs] # pad + x = np.ascontiguousarray(np.array(x).transpose((0, 3, 1, 2))) # stack and BHWC to BCHW x = torch.from_numpy(x).to(p.device).type_as(p) / 255 # uint8 to fp16/32 t.append(time_sync()) From b94b59e199047aa8bf2cdd4401ae9f5f42b929e6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 11 Mar 2022 16:31:52 +0100 Subject: [PATCH 0935/1976] DetectMultiBackend() `--half` handling (#6945) * DetectMultiBackend() `--half` handling * CI fixes * rename .half to .fp16 to avoid conflict * warmup fix * val update * engine update * engine update --- detect.py | 17 ++++------------- models/common.py | 13 ++++++++----- val.py | 25 +++++++++---------------- 3 files changed, 21 insertions(+), 34 deletions(-) diff --git a/detect.py b/detect.py index ba43ed9e1eed..ccb9fbf5103f 100644 --- a/detect.py +++ b/detect.py @@ -89,19 +89,10 @@ def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s) # Load model device = select_device(device) - model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data) - stride, names, pt, jit, onnx, engine = model.stride, model.names, model.pt, model.jit, model.onnx, model.engine + model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) + stride, names, pt = model.stride, model.names, model.pt imgsz = check_img_size(imgsz, s=stride) # check image size - # Half - half &= (pt or jit or onnx or engine) and device.type != 'cpu' # FP16 supported on limited backends with CUDA - if pt or jit: - model.model.half() if half else model.model.float() - elif engine and model.trt_fp16_input != half: - LOGGER.info('model ' + ( - 'requires' if model.trt_fp16_input else 'incompatible with') + ' --half. 
Adjusting automatically.') - half = model.trt_fp16_input - # Dataloader if webcam: view_img = check_imshow() @@ -114,12 +105,12 @@ def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s) vid_path, vid_writer = [None] * bs, [None] * bs # Run inference - model.warmup(imgsz=(1 if pt else bs, 3, *imgsz), half=half) # warmup + model.warmup(imgsz=(1 if pt else bs, 3, *imgsz)) # warmup dt, seen = [0.0, 0.0, 0.0], 0 for path, im, im0s, vid_cap, s in dataset: t1 = time_sync() im = torch.from_numpy(im).to(device) - im = im.half() if half else im.float() # uint8 to fp16/32 + im = im.half() if model.fp16 else im.float() # uint8 to fp16/32 im /= 255 # 0 - 255 to 0.0 - 1.0 if len(im.shape) == 3: im = im[None] # expand for batch dim diff --git a/models/common.py b/models/common.py index ac3af20d533e..251463525392 100644 --- a/models/common.py +++ b/models/common.py @@ -277,7 +277,7 @@ def forward(self, x): class DetectMultiBackend(nn.Module): # YOLOv5 MultiBackend class for python inference on various backends - def __init__(self, weights='yolov5s.pt', device=None, dnn=False, data=None): + def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, data=None, fp16=False): # Usage: # PyTorch: weights = *.pt # TorchScript: *.torchscript @@ -297,6 +297,7 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=False, data=None): pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs = self.model_type(w) # get backend stride, names = 64, [f'class{i}' for i in range(1000)] # assign defaults w = attempt_download(w) # download if not local + fp16 &= (pt or jit or onnx or engine) and device.type != 'cpu' # FP16 if data: # data.yaml path (optional) with open(data, errors='ignore') as f: names = yaml.safe_load(f)['names'] # class names @@ -305,11 +306,13 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=False, data=None): model = attempt_load(weights if isinstance(weights, list) else w, map_location=device) stride = max(int(model.stride.max()), 32) # model stride names = model.module.names if hasattr(model, 'module') else model.names # get class names + model.half() if fp16 else model.float() self.model = model # explicitly assign for to(), cpu(), cuda(), half() elif jit: # TorchScript LOGGER.info(f'Loading {w} for TorchScript inference...') extra_files = {'config.txt': ''} # model metadata model = torch.jit.load(w, _extra_files=extra_files) + model.half() if fp16 else model.float() if extra_files['config.txt']: d = json.loads(extra_files['config.txt']) # extra_files dict stride, names = int(d['stride']), d['names'] @@ -338,11 +341,11 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=False, data=None): import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=7.0.0 Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr')) - trt_fp16_input = False logger = trt.Logger(trt.Logger.INFO) with open(w, 'rb') as f, trt.Runtime(logger) as runtime: model = runtime.deserialize_cuda_engine(f.read()) bindings = OrderedDict() + fp16 = False # default updated below for index in range(model.num_bindings): name = model.get_binding_name(index) dtype = trt.nptype(model.get_binding_dtype(index)) @@ -350,7 +353,7 @@ def __init__(self, weights='yolov5s.pt', device=None, dnn=False, data=None): data = torch.from_numpy(np.empty(shape, dtype=np.dtype(dtype))).to(device) bindings[name] = Binding(name, dtype, shape, data, int(data.data_ptr())) if 
model.binding_is_input(index) and dtype == np.float16: - trt_fp16_input = True + fp16 = True binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items()) context = model.create_execution_context() batch_size = bindings['images'].shape[0] @@ -458,11 +461,11 @@ def forward(self, im, augment=False, visualize=False, val=False): y = torch.tensor(y) if isinstance(y, np.ndarray) else y return (y, []) if val else y - def warmup(self, imgsz=(1, 3, 640, 640), half=False): + def warmup(self, imgsz=(1, 3, 640, 640)): # Warmup model by running inference once if self.pt or self.jit or self.onnx or self.engine: # warmup types if isinstance(self.device, torch.device) and self.device.type != 'cpu': # only warmup GPU models - im = torch.zeros(*imgsz).to(self.device).type(torch.half if half else torch.float) # input image + im = torch.zeros(*imgsz).to(self.device).type(torch.half if self.fp16 else torch.float) # input image self.forward(im) # warmup @staticmethod diff --git a/val.py b/val.py index dfbfa3935210..64c4d4ff9dae 100644 --- a/val.py +++ b/val.py @@ -125,7 +125,6 @@ def run(data, training = model is not None if training: # called by train.py device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model - half &= device.type != 'cpu' # half precision only supported on CUDA model.half() if half else model.float() else: # called directly @@ -136,23 +135,17 @@ def run(data, (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model - model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data) - stride, pt, jit, onnx, engine = model.stride, model.pt, model.jit, model.onnx, model.engine + model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) + stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine imgsz = check_img_size(imgsz, s=stride) # check image size - half &= (pt or jit or onnx or engine) and device.type != 'cpu' # FP16 supported on limited backends with CUDA - if pt or jit: - model.model.half() if half else model.model.float() - elif engine: + half = model.fp16 # FP16 supported on limited backends with CUDA + if engine: batch_size = model.batch_size - if model.trt_fp16_input != half: - LOGGER.info('model ' + ( - 'requires' if model.trt_fp16_input else 'incompatible with') + ' --half. 
Adjusting automatically.') - half = model.trt_fp16_input else: - half = False - batch_size = 1 # export.py models default to batch-size 1 - device = torch.device('cpu') - LOGGER.info(f'Forcing --batch-size 1 square inference shape(1,3,{imgsz},{imgsz}) for non-PyTorch backends') + device = model.device + if not pt or jit: + batch_size = 1 # export.py models default to batch-size 1 + LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models') # Data data = check_dataset(data) # check @@ -166,7 +159,7 @@ def run(data, # Dataloader if not training: - model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz), half=half) # warmup + model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz)) # warmup pad = 0.0 if task in ('speed', 'benchmark') else 0.5 rect = False if task == 'benchmark' else pt # square inference for benchmarks task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images From c6b4f84fd1ce03496d64db4d4b1e5895ca5c879b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 12 Mar 2022 00:45:07 +0100 Subject: [PATCH 0936/1976] Update Dockerfile `torch==1.11.0+cu113` (#6954) --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 489dd04ce5c9..896751d50d2d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -11,7 +11,7 @@ COPY requirements.txt . RUN python -m pip install --upgrade pip RUN pip uninstall -y torch torchvision torchtext RUN pip install --no-cache -r requirements.txt albumentations wandb gsutil notebook \ - torch==1.10.2+cu113 torchvision==0.11.3+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html + torch==1.11.0+cu113 torchvision==0.12.0+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html # RUN pip install --no-cache -U torch torchvision # Create working directory From c84dd27d62d979bf4a97472808a7ef8747d64491 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 12 Mar 2022 12:57:08 +0100 Subject: [PATCH 0937/1976] New val.py `cuda` variable (#6957) * New val.py `cuda` variable Fix for ONNX GPU val. * Update val.py --- val.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/val.py b/val.py index 64c4d4ff9dae..8f2119531949 100644 --- a/val.py +++ b/val.py @@ -143,7 +143,7 @@ def run(data, batch_size = model.batch_size else: device = model.device - if not pt or jit: + if not (pt or jit): batch_size = 1 # export.py models default to batch-size 1 LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models') @@ -152,6 +152,7 @@ def run(data, # Configure model.eval() + cuda = device.type != 'cpu' is_coco = isinstance(data.get('val'), str) and data['val'].endswith('coco/val2017.txt') # COCO dataset nc = 1 if single_cls else int(data['nc']) # number of classes iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95 @@ -177,7 +178,7 @@ def run(data, pbar = tqdm(dataloader, desc=s, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar for batch_i, (im, targets, paths, shapes) in enumerate(pbar): t1 = time_sync() - if pt or jit or engine: + if cuda: im = im.to(device, non_blocking=True) targets = targets.to(device) im = im.half() if half else im.float() # uint8 to fp16/32 From 52c1399fdc6c3db550123e47a2cdcb6dc951e211 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 12 Mar 2022 13:16:29 +0100 Subject: [PATCH 0938/1976] DetectMultiBackend() return `device` update (#6958) Fixes ONNX validation that returns outputs on CPU. 
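For reference, a minimal sketch of the behaviour this fixes, assuming a NumPy
array standing in for an ONNX Runtime output (ORT returns host-side arrays, so
results landed on CPU regardless of the inference device until re-wrapped):

    import numpy as np
    import torch

    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    y = np.zeros((1, 25200, 85), dtype=np.float32)  # stand-in for an ONNX Runtime output
    if isinstance(y, np.ndarray):
        y = torch.tensor(y, device=device)  # wrap once, on the model's device
    # downstream steps (NMS, box scaling) now stay on the same device as the model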
--- models/common.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/models/common.py b/models/common.py index 251463525392..48cf55795dd4 100644 --- a/models/common.py +++ b/models/common.py @@ -458,7 +458,8 @@ def forward(self, im, augment=False, visualize=False, val=False): y = (y.astype(np.float32) - zero_point) * scale # re-scale y[..., :4] *= [w, h, w, h] # xywh normalized to pixels - y = torch.tensor(y) if isinstance(y, np.ndarray) else y + if isinstance(y, np.ndarray): + y = torch.tensor(y, device=self.device) return (y, []) if val else y def warmup(self, imgsz=(1, 3, 640, 640)): From 701e1177ac5cfec2f10552e55766d184ca760e12 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 12 Mar 2022 14:00:48 +0100 Subject: [PATCH 0939/1976] Tensor initialization on device improvements (#6959) * Update common.py speed improvements Eliminate .to() ops where possible for reduced data transfer overhead. Primarily affects warmup and PyTorch Hub inference. * Updates * Updates * Update detect.py * Update val.py --- models/common.py | 2 +- val.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/models/common.py b/models/common.py index 48cf55795dd4..83aecb7569d6 100644 --- a/models/common.py +++ b/models/common.py @@ -466,7 +466,7 @@ def warmup(self, imgsz=(1, 3, 640, 640)): # Warmup model by running inference once if self.pt or self.jit or self.onnx or self.engine: # warmup types if isinstance(self.device, torch.device) and self.device.type != 'cpu': # only warmup GPU models - im = torch.zeros(*imgsz).to(self.device).type(torch.half if self.fp16 else torch.float) # input image + im = torch.zeros(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device) # input self.forward(im) # warmup @staticmethod diff --git a/val.py b/val.py index 8f2119531949..2dd2aec679f9 100644 --- a/val.py +++ b/val.py @@ -87,7 +87,7 @@ def process_batch(detections, labels, iouv): matches = matches[np.unique(matches[:, 1], return_index=True)[1]] # matches = matches[matches[:, 2].argsort()[::-1]] matches = matches[np.unique(matches[:, 0], return_index=True)[1]] - matches = torch.Tensor(matches).to(iouv.device) + matches = torch.from_numpy(matches).to(iouv.device) correct[matches[:, 1].long()] = matches[:, 2:3] >= iouv return correct @@ -155,7 +155,7 @@ def run(data, cuda = device.type != 'cpu' is_coco = isinstance(data.get('val'), str) and data['val'].endswith('coco/val2017.txt') # COCO dataset nc = 1 if single_cls else int(data['nc']) # number of classes - iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95 + iouv = torch.linspace(0.5, 0.95, 10, device=device) # iou vector for mAP@0.5:0.95 niou = iouv.numel() # Dataloader @@ -196,7 +196,7 @@ def run(data, loss += compute_loss([x.float() for x in train_out], targets)[1] # box, obj, cls # NMS - targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels + targets[:, 2:] *= torch.tensor((width, height, width, height), device=device) # to pixels lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling t3 = time_sync() out = non_max_suppression(out, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls) From c13d4ce7ef30acc78e3dbdd9aa4f17e01ed34521 Mon Sep 17 00:00:00 2001 From: paradigm Date: Sat, 12 Mar 2022 16:15:09 +0100 Subject: [PATCH 0940/1976] EdgeTPU optimizations (#6808) * removed transpose op for better edgetpu support * fix for training case * enabled experimental new quantizer flag * 
precalculate add and mul ops at compile time Co-authored-by: Glenn Jocher --- export.py | 2 +- models/tf.py | 10 ++++++---- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/export.py b/export.py index 1e3d3e2f2e71..7dd06433fe36 100644 --- a/export.py +++ b/export.py @@ -331,7 +331,7 @@ def export_tflite(keras_model, im, file, int8, data, ncalib, prefix=colorstr('Te converter.target_spec.supported_types = [] converter.inference_input_type = tf.uint8 # or tf.int8 converter.inference_output_type = tf.uint8 # or tf.int8 - converter.experimental_new_quantizer = False + converter.experimental_new_quantizer = True f = str(file).replace('.pt', '-int8.tflite') tflite_model = converter.convert() diff --git a/models/tf.py b/models/tf.py index 74681e403afd..728907f8fb47 100644 --- a/models/tf.py +++ b/models/tf.py @@ -222,19 +222,21 @@ def call(self, inputs): x.append(self.m[i](inputs[i])) # x(bs,20,20,255) to x(bs,3,20,20,85) ny, nx = self.imgsz[0] // self.stride[i], self.imgsz[1] // self.stride[i] - x[i] = tf.transpose(tf.reshape(x[i], [-1, ny * nx, self.na, self.no]), [0, 2, 1, 3]) + x[i] = tf.reshape(x[i], [-1, ny * nx, self.na, self.no]) if not self.training: # inference y = tf.sigmoid(x[i]) - xy = (y[..., 0:2] * 2 - 0.5 + self.grid[i]) * self.stride[i] # xy - wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] + grid = tf.transpose(self.grid[i], [0, 2, 1, 3]) - 0.5 + anchor_grid = tf.transpose(self.anchor_grid[i], [0, 2, 1, 3]) * 4 + xy = (y[..., 0:2] * 2 + grid) * self.stride[i] # xy + wh = y[..., 2:4] ** 2 * anchor_grid # Normalize xywh to 0-1 to reduce calibration error xy /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32) wh /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32) y = tf.concat([xy, wh, y[..., 4:]], -1) z.append(tf.reshape(y, [-1, self.na * ny * nx, self.no])) - return x if self.training else (tf.concat(z, 1), x) + return tf.transpose(x, [0, 2, 1, 3]) if self.training else (tf.concat(z, 1), x) @staticmethod def _make_grid(nx=20, ny=20): From 2d45de617e0e80fb96424425587b6ce123aa0012 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 14 Mar 2022 10:54:51 +0100 Subject: [PATCH 0941/1976] Model `ema` key backward compatibility fix (#6972) Fix for older model loading issue in https://github.com/ultralytics/yolov5/commit/d3d9cbce221b2ced46dde374f24fde72c8e71c37#commitcomment-68622388 --- models/experimental.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/experimental.py b/models/experimental.py index 782ecbeface9..1230f4656c8f 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -94,7 +94,7 @@ def attempt_load(weights, map_location=None, inplace=True, fuse=True): model = Ensemble() for w in weights if isinstance(weights, list) else [weights]: ckpt = torch.load(attempt_download(w), map_location=map_location) # load - ckpt = (ckpt['ema'] or ckpt['model']).float() # FP32 model + ckpt = (ckpt.get('ema') or ckpt['model']).float() # FP32 model model.append(ckpt.fuse().eval() if fuse else ckpt.eval()) # fused or un-fused model in eval mode # Compatibility updates From 99de551f979f6aca1f817504831c821cff64b5fd Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 14 Mar 2022 12:41:06 +0100 Subject: [PATCH 0942/1976] pt model to cpu on TF export --- export.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/export.py b/export.py index 7dd06433fe36..c50de15cf0b8 100644 --- a/export.py +++ b/export.py @@ -494,7 +494,7 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' if int8 
or edgetpu: # TFLite --int8 bug https://github.com/ultralytics/yolov5/issues/5707 check_requirements(('flatbuffers==1.12',)) # required before `import tensorflow` assert not (tflite and tfjs), 'TFLite and TF.js models must be exported separately, please pass only one type.' - model, f[5] = export_saved_model(model, im, file, dynamic, tf_nms=nms or agnostic_nms or tfjs, + model, f[5] = export_saved_model(model.cpu(), im, file, dynamic, tf_nms=nms or agnostic_nms or tfjs, agnostic_nms=agnostic_nms or tfjs, topk_per_class=topk_per_class, topk_all=topk_all, conf_thres=conf_thres, iou_thres=iou_thres) # keras model if pb or tfjs: # pb prerequisite to tfjs From 932dc78496ca532a41780335468589ad7f0147f7 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 14 Mar 2022 15:07:13 +0100 Subject: [PATCH 0943/1976] YOLOv5 Export Benchmarks for GPU (#6963) * Add benchmarks.py GPU support * Updates * Updates * Updates * Updates * Add --half * Add TRT requirements * Cleanup * Add TF to warmup types * Update export.py * Update export.py * Update benchmarks.py --- export.py | 24 ++++++++++++------------ models/common.py | 7 ++++--- utils/benchmarks.py | 18 +++++++++++++++--- 3 files changed, 31 insertions(+), 18 deletions(-) diff --git a/export.py b/export.py index c50de15cf0b8..d4f980fdb993 100644 --- a/export.py +++ b/export.py @@ -75,18 +75,18 @@ def export_formats(): # YOLOv5 export formats - x = [['PyTorch', '-', '.pt'], - ['TorchScript', 'torchscript', '.torchscript'], - ['ONNX', 'onnx', '.onnx'], - ['OpenVINO', 'openvino', '_openvino_model'], - ['TensorRT', 'engine', '.engine'], - ['CoreML', 'coreml', '.mlmodel'], - ['TensorFlow SavedModel', 'saved_model', '_saved_model'], - ['TensorFlow GraphDef', 'pb', '.pb'], - ['TensorFlow Lite', 'tflite', '.tflite'], - ['TensorFlow Edge TPU', 'edgetpu', '_edgetpu.tflite'], - ['TensorFlow.js', 'tfjs', '_web_model']] - return pd.DataFrame(x, columns=['Format', 'Argument', 'Suffix']) + x = [['PyTorch', '-', '.pt', True], + ['TorchScript', 'torchscript', '.torchscript', True], + ['ONNX', 'onnx', '.onnx', True], + ['OpenVINO', 'openvino', '_openvino_model', False], + ['TensorRT', 'engine', '.engine', True], + ['CoreML', 'coreml', '.mlmodel', False], + ['TensorFlow SavedModel', 'saved_model', '_saved_model', True], + ['TensorFlow GraphDef', 'pb', '.pb', True], + ['TensorFlow Lite', 'tflite', '.tflite', False], + ['TensorFlow Edge TPU', 'edgetpu', '_edgetpu.tflite', False], + ['TensorFlow.js', 'tfjs', '_web_model', False]] + return pd.DataFrame(x, columns=['Format', 'Argument', 'Suffix', 'GPU']) def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:')): diff --git a/models/common.py b/models/common.py index 83aecb7569d6..4ad040fcd7f1 100644 --- a/models/common.py +++ b/models/common.py @@ -464,10 +464,11 @@ def forward(self, im, augment=False, visualize=False, val=False): def warmup(self, imgsz=(1, 3, 640, 640)): # Warmup model by running inference once - if self.pt or self.jit or self.onnx or self.engine: # warmup types - if isinstance(self.device, torch.device) and self.device.type != 'cpu': # only warmup GPU models + if any((self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb)): # warmup types + if self.device.type != 'cpu': # only warmup GPU models im = torch.zeros(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device) # input - self.forward(im) # warmup + for _ in range(2 if self.jit else 1): # + self.forward(im) # warmup @staticmethod def model_type(p='path/to/model.pt'): diff --git 
a/utils/benchmarks.py b/utils/benchmarks.py index 962df812a9d3..bdbbdc43b639 100644 --- a/utils/benchmarks.py +++ b/utils/benchmarks.py @@ -19,6 +19,7 @@ Requirements: $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu # CPU $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow # GPU + $ pip install -U nvidia-tensorrt --index-url https://pypi.ngc.nvidia.com # TensorRT Usage: $ python utils/benchmarks.py --weights yolov5s.pt --img 640 @@ -41,20 +42,29 @@ import val from utils import notebook_init from utils.general import LOGGER, print_args +from utils.torch_utils import select_device def run(weights=ROOT / 'yolov5s.pt', # weights path imgsz=640, # inference size (pixels) batch_size=1, # batch size data=ROOT / 'data/coco128.yaml', # dataset.yaml path + device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu + half=False, # use FP16 half-precision inference ): y, t = [], time.time() formats = export.export_formats() - for i, (name, f, suffix) in formats.iterrows(): # index, (name, file, suffix) + device = select_device(device) + for i, (name, f, suffix, gpu) in formats.iterrows(): # index, (name, file, suffix, gpu-capable) try: - w = weights if f == '-' else export.run(weights=weights, imgsz=[imgsz], include=[f], device='cpu')[-1] + if device.type != 'cpu': + assert gpu, f'{name} inference not supported on GPU' + if f == '-': + w = weights # PyTorch format + else: + w = export.run(weights=weights, imgsz=[imgsz], include=[f], device=device, half=half)[-1] # all others assert suffix in str(w), 'export failed' - result = val.run(data, w, batch_size, imgsz=imgsz, plots=False, device='cpu', task='benchmark') + result = val.run(data, w, batch_size, imgsz, plots=False, device=device, task='benchmark', half=half) metrics = result[0] # metrics (mp, mr, map50, map, *losses(box, obj, cls)) speeds = result[2] # times (preprocess, inference, postprocess) y.append([name, metrics[3], speeds[1]]) # mAP, t_inference @@ -78,6 +88,8 @@ def parse_opt(): parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)') parser.add_argument('--batch-size', type=int, default=1, help='batch size') parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') + parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') + parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') opt = parser.parse_args() print_args(FILE.stem, opt) return opt From c09fb2aa95b6ca86c460aa106e2308805649feb9 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 15 Mar 2022 16:32:56 +0100 Subject: [PATCH 0944/1976] Update TQDM bar format (#6988) --- utils/autoanchor.py | 2 +- utils/datasets.py | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/utils/autoanchor.py b/utils/autoanchor.py index a631c21a3b26..6cd2267a375a 100644 --- a/utils/autoanchor.py +++ b/utils/autoanchor.py @@ -152,7 +152,7 @@ def print_results(k, verbose=True): # Evolve f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1 # fitness, generations, mutation prob, sigma - pbar = tqdm(range(gen), desc=f'{PREFIX}Evolving anchors with Genetic Algorithm:') # progress bar + pbar = tqdm(range(gen), bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar for _ in pbar: v = np.ones(sh) while (v == 1).all(): # mutate until a change occurs (prevent duplicates) diff --git a/utils/datasets.py b/utils/datasets.py index 00d0d94e0847..5ce6d607fb7a 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -35,6 +35,7 @@ HELP_URL = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp' # include image suffixes VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes +BAR_FORMAT = '{l_bar}{bar:10}{r_bar}{bar:-10b}' # tqdm bar format # Get orientation exif tag for orientation in ExifTags.TAGS.keys(): @@ -427,7 +428,7 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupt, total if exists: d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupt" - tqdm(None, desc=prefix + d, total=n, initial=n) # display cache results + tqdm(None, desc=prefix + d, total=n, initial=n, bar_format=BAR_FORMAT) # display cache results if cache['msgs']: LOGGER.info('\n'.join(cache['msgs'])) # display warnings assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {HELP_URL}' @@ -492,7 +493,7 @@ def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, r self.im_hw0, self.im_hw = [None] * n, [None] * n fcn = self.cache_images_to_disk if cache_images == 'disk' else self.load_image results = ThreadPool(NUM_THREADS).imap(fcn, range(n)) - pbar = tqdm(enumerate(results), total=n) + pbar = tqdm(enumerate(results), total=n, bar_format=BAR_FORMAT) for i, x in pbar: if cache_images == 'disk': gb += self.npy_files[i].stat().st_size @@ -509,7 +510,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels..." 
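# BAR_FORMAT above pins a single tqdm layout so every progress bar in the repo
# renders identically; a minimal, runnable demonstration of the format string:
#
#   from tqdm import tqdm
#
#   BAR_FORMAT = '{l_bar}{bar:10}{r_bar}{bar:-10b}'  # 10-char bar, stats to the right
#   for _ in tqdm(range(100), desc='Scanning', bar_format=BAR_FORMAT):
#       pass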
with Pool(NUM_THREADS) as pool: pbar = tqdm(pool.imap(verify_image_label, zip(self.im_files, self.label_files, repeat(prefix))), - desc=desc, total=len(self.im_files)) + desc=desc, total=len(self.im_files), bar_format=BAR_FORMAT) for im_file, lb, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar: nm += nm_f nf += nf_f From 3f634d43c8ecea14aa9037e2fd28ded0433d491d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 16 Mar 2022 15:33:54 +0100 Subject: [PATCH 0945/1976] Conditional `Timeout()` by OS (disable on Windows) (#7013) * Conditional `Timeout()` by OS (disable on Windows) * Update general.py --- utils/general.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/utils/general.py b/utils/general.py index a7891cbccbab..e8b3b05c5fe1 100755 --- a/utils/general.py +++ b/utils/general.py @@ -123,13 +123,15 @@ def _timeout_handler(self, signum, frame): raise TimeoutError(self.timeout_message) def __enter__(self): - signal.signal(signal.SIGALRM, self._timeout_handler) # Set handler for SIGALRM - signal.alarm(self.seconds) # start countdown for SIGALRM to be raised + if platform.system() != 'Windows': # not supported on Windows + signal.signal(signal.SIGALRM, self._timeout_handler) # Set handler for SIGALRM + signal.alarm(self.seconds) # start countdown for SIGALRM to be raised def __exit__(self, exc_type, exc_val, exc_tb): - signal.alarm(0) # Cancel SIGALRM if it's scheduled - if self.suppress and exc_type is TimeoutError: # Suppress TimeoutError - return True + if platform.system() != 'Windows': + signal.alarm(0) # Cancel SIGALRM if it's scheduled + if self.suppress and exc_type is TimeoutError: # Suppress TimeoutError + return True class WorkingDirectory(contextlib.ContextDecorator): From 7c6a33564a84a0e78ec19da66ea6016d51c32e0a Mon Sep 17 00:00:00 2001 From: Max Strobel Date: Thu, 17 Mar 2022 16:37:09 +0100 Subject: [PATCH 0946/1976] fix: add default PIL font as fallback (#7010) * fix: add default font as fallback Add default font as fallback if the downloading of the Arial.ttf font fails for some reason, e.g. no access to public internet. 
* Update plots.py Co-authored-by: Maximilian Strobel Co-authored-by: Glenn Jocher --- utils/plots.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/utils/plots.py b/utils/plots.py index 6c3f5bcaef37..90f3f241cc5a 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -7,6 +7,7 @@ import os from copy import copy from pathlib import Path +from urllib.error import URLError import cv2 import matplotlib @@ -55,11 +56,13 @@ def check_pil_font(font=FONT, size=10): try: return ImageFont.truetype(str(font) if font.exists() else font.name, size) except Exception: # download if missing - check_font(font) try: + check_font(font) return ImageFont.truetype(str(font), size) except TypeError: check_requirements('Pillow>=8.4.0') # known issue https://github.com/ultralytics/yolov5/issues/5374 + except URLError: # not online + return ImageFont.load_default() class Annotator: From 4effd064b169fc049b4a4bca401b120bf2e93c14 Mon Sep 17 00:00:00 2001 From: Mrinal Jain Date: Fri, 18 Mar 2022 07:29:24 -0400 Subject: [PATCH 0947/1976] Consistent saved_model output format (#7032) --- export.py | 2 +- models/common.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/export.py b/export.py index d4f980fdb993..2d4a68e62f89 100644 --- a/export.py +++ b/export.py @@ -275,7 +275,7 @@ def export_saved_model(model, im, file, dynamic, m = m.get_concrete_function(spec) frozen_func = convert_variables_to_constants_v2(m) tfm = tf.Module() - tfm.__call__ = tf.function(lambda x: frozen_func(x), [spec]) + tfm.__call__ = tf.function(lambda x: frozen_func(x)[0], [spec]) tfm.__call__(im) tf.saved_model.save( tfm, diff --git a/models/common.py b/models/common.py index 4ad040fcd7f1..5561d92ecb73 100644 --- a/models/common.py +++ b/models/common.py @@ -441,7 +441,7 @@ def forward(self, im, augment=False, visualize=False, val=False): else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU) im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3) if self.saved_model: # SavedModel - y = (self.model(im, training=False) if self.keras else self.model(im)[0]).numpy() + y = (self.model(im, training=False) if self.keras else self.model(im)).numpy() elif self.pb: # GraphDef y = self.frozen_func(x=self.tf.constant(im)).numpy() else: # Lite or Edge TPU From b0ba101ac0aa898a4e4b867d377e140af8d4258a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 20 Mar 2022 01:04:48 +0100 Subject: [PATCH 0948/1976] `ComputeLoss()` indexing/speed improvements (#7048) * device as class attribute * Update loss.py * Update loss.py * improve zeros * tensor split --- utils/loss.py | 37 +++++++++++++++++++------------------ 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/utils/loss.py b/utils/loss.py index 5aa9f017d2af..0f0137817955 100644 --- a/utils/loss.py +++ b/utils/loss.py @@ -89,9 +89,10 @@ def forward(self, pred, true): class ComputeLoss: + sort_obj_iou = False + # Compute losses def __init__(self, model, autobalance=False): - self.sort_obj_iou = False device = next(model.parameters()).device # get model device h = model.hyp # hyperparameters @@ -111,26 +112,28 @@ def __init__(self, model, autobalance=False): self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, 0.02]) # P3-P7 self.ssi = list(det.stride).index(16) if autobalance else 0 # stride 16 index self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance + self.device = device for k in 'na', 'nc', 'nl', 'anchors': setattr(self, k, getattr(det, k)) - def 
__call__(self, p, targets): # predictions, targets, model - device = targets.device - lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device) + def __call__(self, p, targets): # predictions, targets + lcls = torch.zeros(1, device=self.device) # class loss + lbox = torch.zeros(1, device=self.device) # box loss + lobj = torch.zeros(1, device=self.device) # object loss tcls, tbox, indices, anchors = self.build_targets(p, targets) # targets # Losses for i, pi in enumerate(p): # layer index, layer predictions b, a, gj, gi = indices[i] # image, anchor, gridy, gridx - tobj = torch.zeros_like(pi[..., 0], device=device) # target obj + tobj = torch.zeros(pi.shape[:4], device=self.device) # target obj n = b.shape[0] # number of targets if n: - ps = pi[b, a, gj, gi] # prediction subset corresponding to targets + pxy, pwh, _, pcls = pi[b, a, gj, gi].tensor_split((2, 4, 5), dim=1) # target-subset of predictions # Regression - pxy = ps[:, :2].sigmoid() * 2 - 0.5 - pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i] + pxy = pxy.sigmoid() * 2 - 0.5 + pwh = (pwh.sigmoid() * 2) ** 2 * anchors[i] pbox = torch.cat((pxy, pwh), 1) # predicted box iou = bbox_iou(pbox.T, tbox[i], x1y1x2y2=False, CIoU=True) # iou(prediction, target) lbox += (1.0 - iou).mean() # iou loss @@ -144,9 +147,9 @@ def __call__(self, p, targets): # predictions, targets, model # Classification if self.nc > 1: # cls loss (only if multiple classes) - t = torch.full_like(ps[:, 5:], self.cn, device=device) # targets + t = torch.full_like(pcls, self.cn, device=self.device) # targets t[range(n), tcls[i]] = self.cp - lcls += self.BCEcls(ps[:, 5:], t) # BCE + lcls += self.BCEcls(pcls, t) # BCE # Append targets to text file # with open('targets.txt', 'a') as file: @@ -170,15 +173,15 @@ def build_targets(self, p, targets): # Build targets for compute_loss(), input targets(image,class,x,y,w,h) na, nt = self.na, targets.shape[0] # number of anchors, targets tcls, tbox, indices, anch = [], [], [], [] - gain = torch.ones(7, device=targets.device) # normalized to gridspace gain - ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt) + gain = torch.ones(7, device=self.device) # normalized to gridspace gain + ai = torch.arange(na, device=self.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt) targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2) # append anchor indices g = 0.5 # bias off = torch.tensor([[0, 0], [1, 0], [0, 1], [-1, 0], [0, -1], # j,k,l,m # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm - ], device=targets.device).float() * g # offsets + ], device=self.device).float() * g # offsets for i in range(self.nl): anchors = self.anchors[i] @@ -206,14 +209,12 @@ def build_targets(self, p, targets): offsets = 0 # Define - b, c = t[:, :2].long().T # image, class - gxy = t[:, 2:4] # grid xy - gwh = t[:, 4:6] # grid wh + bc, gxy, gwh, a = t.unsafe_chunk(4, dim=1) # (image, class), grid xy, grid wh, anchors + a, (b, c) = a.long().view(-1), bc.long().T # anchors, image, class gij = (gxy - offsets).long() - gi, gj = gij.T # grid xy indices + gi, gj = gij.T # grid indices # Append - a = t[:, 6].long() # anchor indices indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1))) # image, anchor, grid indices tbox.append(torch.cat((gxy - gij, gwh), 1)) # box anch.append(anchors[a]) # anchors From 9ebec7885fb461993cf7123b36abf61ffd5dfd95 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 20 
Mar 2022 13:51:22 +0100 Subject: [PATCH 0949/1976] Update Dockerfile to `git clone` instead of `COPY` (#7053) Resolves git command errors that currently happen in the image, i.e.: ```bash root@382ae64aeca2:/usr/src/app# git pull Warning: Permanently added the ECDSA host key for IP address '140.82.113.3' to the list of known hosts. git@github.com: Permission denied (publickey). fatal: Could not read from remote repository. Please make sure you have the correct access rights and the repository exists. ``` --- Dockerfile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 896751d50d2d..304e8b2801a9 100644 --- a/Dockerfile +++ b/Dockerfile @@ -19,7 +19,8 @@ RUN mkdir -p /usr/src/app WORKDIR /usr/src/app # Copy contents -COPY . /usr/src/app +RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app +# COPY . /usr/src/app # Downloads to user config dir ADD https://ultralytics.com/assets/Arial.ttf /root/.config/Ultralytics/ From 6843ea5d7f9c5d4b8132d00ba17fb296dc81d867 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 20 Mar 2022 13:55:32 +0100 Subject: [PATCH 0950/1976] Create SECURITY.md (#7054) * Create SECURITY.md Resolves https://github.com/ultralytics/yolov5/issues/7052 * Move into ./github * Update SECURITY.md --- .github/SECURITY.md | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 .github/SECURITY.md diff --git a/.github/SECURITY.md b/.github/SECURITY.md new file mode 100644 index 000000000000..aa3e8409da6b --- /dev/null +++ b/.github/SECURITY.md @@ -0,0 +1,7 @@ +# Security Policy + +We aim to make YOLOv5 🚀 as secure as possible! If you find potential vulnerabilities or have any concerns please let us know so we can investigate and take corrective action if needed. + +### Reporting a Vulnerability + +To report vulnerabilities please email us at hello@ultralytics.com or visit https://ultralytics.com/contact. Thank you! From 9f4d11379bb931586c1f51c1d85c6fac9fc37eda Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 20 Mar 2022 14:18:05 +0100 Subject: [PATCH 0951/1976] Fix incomplete URL substring sanitization (#7056) Resolves code scanning alert in https://github.com/ultralytics/yolov5/issues/7055 --- utils/datasets.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/datasets.py b/utils/datasets.py index 5ce6d607fb7a..8627344af7b4 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -15,6 +15,7 @@ from multiprocessing.pool import Pool, ThreadPool from pathlib import Path from threading import Thread +from urllib.parse import urlparse from zipfile import ZipFile import cv2 @@ -301,7 +302,7 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True): for i, s in enumerate(sources): # index, source # Start thread to read frames from video stream st = f'{i + 1}/{n}: {s}... 
' - if 'youtube.com/' in s or 'youtu.be/' in s: # if source is YouTube video + if urlparse(s).hostname in ('youtube.com', 'youtu.be'): # if source is YouTube video check_requirements(('pafy', 'youtube_dl==2020.12.2')) import pafy s = pafy.new(s).getbest(preftype="mp4").url # YouTube URL From 529fbc1814f899eab2df8146944c23d0e168610e Mon Sep 17 00:00:00 2001 From: Philip Gutjahr Date: Sun, 20 Mar 2022 15:46:29 +0100 Subject: [PATCH 0952/1976] Use PIL to eliminate chroma subsampling in crops (#7008) * use pillow to save higher-quality jpg (w/o color subsampling) * Cleanup and doc issue Co-authored-by: Glenn Jocher --- utils/plots.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/utils/plots.py b/utils/plots.py index 90f3f241cc5a..a30c0faf962a 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -458,7 +458,7 @@ def profile_idetection(start=0, stop=0, labels=(), save_dir=''): plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200) -def save_one_box(xyxy, im, file='image.jpg', gain=1.02, pad=10, square=False, BGR=False, save=True): +def save_one_box(xyxy, im, file=Path('im.jpg'), gain=1.02, pad=10, square=False, BGR=False, save=True): # Save image crop as {file} with crop size multiple {gain} and {pad} pixels. Save and/or return crop xyxy = torch.tensor(xyxy).view(-1, 4) b = xyxy2xywh(xyxy) # boxes @@ -470,5 +470,7 @@ def save_one_box(xyxy, im, file='image.jpg', gain=1.02, pad=10, square=False, BG crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2]), ::(1 if BGR else -1)] if save: file.parent.mkdir(parents=True, exist_ok=True) # make directory - cv2.imwrite(str(increment_path(file).with_suffix('.jpg')), crop) + f = str(increment_path(file).with_suffix('.jpg')) + # cv2.imwrite(f, crop) # https://github.com/ultralytics/yolov5/issues/7007 chroma subsampling issue + Image.fromarray(cv2.cvtColor(crop, cv2.COLOR_BGR2RGB)).save(f, quality=95, subsampling=0) return crop From f327eee614384583a93e6f5374280e78b80a250d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 20 Mar 2022 16:27:51 +0100 Subject: [PATCH 0953/1976] Fix `check_anchor_order()` in pixel-space not grid-space (#7060) * Update `check_anchor_order()` Use mean area per output layer for added stability. 
* Check in pixel-space not grid-space fix --- models/yolo.py | 2 +- utils/autoanchor.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/models/yolo.py b/models/yolo.py index f659a04545b9..2f4bbe0f71d1 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -110,8 +110,8 @@ def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, i s = 256 # 2x min stride m.inplace = self.inplace m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward + check_anchor_order(m) # must be in pixel-space (not grid-space) m.anchors /= m.stride.view(-1, 1, 1) - check_anchor_order(m) self.stride = m.stride self._initialize_biases() # only run once diff --git a/utils/autoanchor.py b/utils/autoanchor.py index 6cd2267a375a..7eb46af91195 100644 --- a/utils/autoanchor.py +++ b/utils/autoanchor.py @@ -17,7 +17,7 @@ def check_anchor_order(m): # Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary - a = m.anchors.prod(-1).view(-1) # anchor area + a = m.anchors.prod(-1).mean(-1).view(-1) # mean anchor area per output layer da = a[-1] - a[0] # delta a ds = m.stride[-1] - m.stride[0] # delta s if da.sign() != ds.sign(): # same order From d5e363f29d7619f2a186678eb6d61672f49b11f1 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 20 Mar 2022 18:02:05 +0100 Subject: [PATCH 0954/1976] Update detect.py non-inplace with `y.tensor_split()` (#7062) --- models/yolo.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/models/yolo.py b/models/yolo.py index 2f4bbe0f71d1..09215101e8a0 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -62,9 +62,10 @@ def forward(self, x): y[..., 0:2] = (y[..., 0:2] * 2 - 0.5 + self.grid[i]) * self.stride[i] # xy y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh else: # for YOLOv5 on AWS Inferentia https://github.com/ultralytics/yolov5/pull/2953 - xy = (y[..., 0:2] * 2 - 0.5 + self.grid[i]) * self.stride[i] # xy - wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh - y = torch.cat((xy, wh, y[..., 4:]), -1) + xy, wh, conf = y.tensor_split((2, 4), 4) + xy = (xy * 2 - 0.5 + self.grid[i]) * self.stride[i] # xy + wh = (wh * 2) ** 2 * self.anchor_grid[i] # wh + y = torch.cat((xy, wh, conf), 4) z.append(y.view(bs, -1, self.no)) return x if self.training else (torch.cat(z, 1), x) From 0529b77232d72c41557fb03753caa356f583e5fc Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 20 Mar 2022 18:03:37 +0100 Subject: [PATCH 0955/1976] Update common.py lists for tuples (#7063) Improved profiling. 
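For context, a rough standalone timing sketch of this list → tuple micro-optimization (illustrative only; both forms are valid `torch.cat()` arguments, and the measured gap is small and machine-dependent):

```python
# Sketch only: time torch.cat() with a list vs. a tuple argument
import timeit

import torch

a = b = torch.zeros(1, 64, 80, 80)
t_list = timeit.timeit(lambda: torch.cat([a, b], 1), number=10000)  # list literal
t_tuple = timeit.timeit(lambda: torch.cat((a, b), 1), number=10000)  # tuple literal
print(f'list {t_list:.3f}s, tuple {t_tuple:.3f}s')  # tuple skips one list allocation per call
```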
--- models/common.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/models/common.py b/models/common.py index 5561d92ecb73..066f8774d3c3 100644 --- a/models/common.py +++ b/models/common.py @@ -31,7 +31,7 @@ def autopad(k, p=None): # kernel, padding # Pad to 'same' if p is None: - p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad + p = k // 2 if isinstance(k, int) else (x // 2 for x in k) # auto-pad return p @@ -133,7 +133,7 @@ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, nu self.cv2 = Conv(c1, c_, 1, 1) self.cv3 = Conv(2 * c_, c2, 1) # act=FReLU(c2) self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n))) - # self.m = nn.Sequential(*[CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)]) + # self.m = nn.Sequential(*(CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n))) def forward(self, x): return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), dim=1)) @@ -194,7 +194,7 @@ def forward(self, x): warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning y1 = self.m(x) y2 = self.m(y1) - return self.cv2(torch.cat([x, y1, y2, self.m(y2)], 1)) + return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1)) class Focus(nn.Module): @@ -205,7 +205,7 @@ def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, k # self.contract = Contract(gain=2) def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2) - return self.conv(torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1)) + return self.conv(torch.cat((x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]), 1)) # return self.conv(self.contract(x)) @@ -219,7 +219,7 @@ def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, s def forward(self, x): y = self.cv1(x) - return torch.cat([y, self.cv2(y)], 1) + return torch.cat((y, self.cv2(y)), 1) class GhostBottleneck(nn.Module): From e278fd63ec6c09d264c2bc983ad91717c577e97c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 20 Mar 2022 18:15:22 +0100 Subject: [PATCH 0956/1976] Update W&B message to `LOGGER.info()` (#7064) --- utils/loggers/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 86ccf38443a9..ce0bea00e1af 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -56,7 +56,7 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, if not wandb: prefix = colorstr('Weights & Biases: ') s = f"{prefix}run 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs (RECOMMENDED)" - print(emojis(s)) + self.logger.info(emojis(s)) # TensorBoard s = self.save_dir From 9e75cbf4c18457297cd7b28653ebeb5b1262e8c9 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 20 Mar 2022 18:17:04 +0100 Subject: [PATCH 0957/1976] Update __init__.py (#7065) --- utils/loggers/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index ce0bea00e1af..866bdc4be2f5 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -47,7 +47,7 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, 'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', # metrics 'val/box_loss', 'val/obj_loss', 'val/cls_loss', # val loss 'x/lr0', 'x/lr1', 'x/lr2'] # params - self.best_keys = ['best/epoch', 'best/precision', 
'best/recall', 'best/mAP_0.5', 'best/mAP_0.5:0.95',] + self.best_keys = ['best/epoch', 'best/precision', 'best/recall', 'best/mAP_0.5', 'best/mAP_0.5:0.95'] for k in LOGGERS: setattr(self, k, None) # init empty logger dictionary self.csv = True # always log to csv From 178c1095768a81edefc4c4ae87984ab1962e0906 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 20 Mar 2022 18:37:27 +0100 Subject: [PATCH 0958/1976] Add non-zero `da` `check_anchor_order()` condition (#7066) --- utils/autoanchor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/autoanchor.py b/utils/autoanchor.py index 7eb46af91195..882712d45a38 100644 --- a/utils/autoanchor.py +++ b/utils/autoanchor.py @@ -20,7 +20,7 @@ def check_anchor_order(m): a = m.anchors.prod(-1).mean(-1).view(-1) # mean anchor area per output layer da = a[-1] - a[0] # delta a ds = m.stride[-1] - m.stride[0] # delta s - if da.sign() != ds.sign(): # same order + if da and (da.sign() != ds.sign()): # same order LOGGER.info(f'{PREFIX}Reversing anchor order') m.anchors[:] = m.anchors.flip(0) From 9cd89b75cca8bb165a3b19c9b8356f7b3bb22b31 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 20 Mar 2022 18:55:13 +0100 Subject: [PATCH 0959/1976] Fix2 `check_anchor_order()` in pixel-space not grid-space (#7067) Follows https://github.com/ultralytics/yolov5/pull/7060 which provided only a partial solution to this issue. #7060 resolved occurrences in yolo.py; this applies the same fix in autoanchor.py. --- utils/autoanchor.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/utils/autoanchor.py b/utils/autoanchor.py index 882712d45a38..77518abe9889 100644 --- a/utils/autoanchor.py +++ b/utils/autoanchor.py @@ -40,7 +40,8 @@ def metric(k): # compute metric bpr = (best > 1 / thr).float().mean() # best possible recall return bpr, aat - anchors = m.anchors.clone() * m.stride.to(m.anchors.device).view(-1, 1, 1) # current anchors + stride = m.stride.to(m.anchors.device).view(-1, 1, 1) # model strides + anchors = m.anchors.clone() * stride # current anchors bpr, aat = metric(anchors.cpu().view(-1, 2)) s = f'\n{PREFIX}{aat:.2f} anchors/target, {bpr:.3f} Best Possible Recall (BPR). ' if bpr > 0.98: # threshold to recompute @@ -55,8 +56,9 @@ def metric(k): # compute metric new_bpr = metric(anchors)[0] if new_bpr > bpr: # replace anchors anchors = torch.tensor(anchors, device=m.anchors.device).type_as(m.anchors) - m.anchors[:] = anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1) # loss + m.anchors[:] = anchors.clone().view_as(m.anchors) + check_anchor_order(m) # must be in pixel-space (not grid-space) + m.anchors /= stride s = f'{PREFIX}Done ✅ (optional: update model *.yaml to use these anchors in the future)' else: s = f'{PREFIX}Done ⚠️ (original anchors better than new anchors, proceeding with original anchors)' From 9b771a3e7112f864cb9c877733eca9240e8fb4a5 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 21 Mar 2022 09:33:39 +0100 Subject: [PATCH 0960/1976] Revert "Update detect.py non-inplace with `y.tensor_split()` (#7062)" (#7074) This reverts commit d5e363f29d7619f2a186678eb6d61672f49b11f1. 
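For context, a minimal sketch of the `tensor_split()` indexing the reverted commit relied on (requires torch>=1.8; the 85-channel layout below assumes COCO's 4 box + 1 objectness + 80 class outputs):

```python
# Sketch only: tensor_split() at indices (2, 4) along the last dimension (torch>=1.8)
import torch

y = torch.rand(1, 3, 20, 20, 85)  # one Detect() output layer: xy(2) + wh(2) + conf/cls(81)
xy, wh, conf = y.tensor_split((2, 4), 4)  # -> [..., :2], [..., 2:4], [..., 4:]
print(xy.shape, wh.shape, conf.shape)  # (1,3,20,20,2) (1,3,20,20,2) (1,3,20,20,81)
```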
--- models/yolo.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/models/yolo.py b/models/yolo.py index 09215101e8a0..2f4bbe0f71d1 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -62,10 +62,9 @@ def forward(self, x): y[..., 0:2] = (y[..., 0:2] * 2 - 0.5 + self.grid[i]) * self.stride[i] # xy y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh else: # for YOLOv5 on AWS Inferentia https://github.com/ultralytics/yolov5/pull/2953 - xy, wh, conf = y.tensor_split((2, 4), 4) - xy = (xy * 2 - 0.5 + self.grid[i]) * self.stride[i] # xy - wh = (wh * 2) ** 2 * self.anchor_grid[i] # wh - y = torch.cat((xy, wh, conf), 4) + xy = (y[..., 0:2] * 2 - 0.5 + self.grid[i]) * self.stride[i] # xy + wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh + y = torch.cat((xy, wh, y[..., 4:]), -1) z.append(y.view(bs, -1, self.no)) return x if self.training else (torch.cat(z, 1), x) From 6f128031d073754ee8ed6b6a85ecb6c0619cd0a7 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 21 Mar 2022 18:35:36 +0100 Subject: [PATCH 0961/1976] Update loss.py with `if self.gr < 1:` (#7087) * Update loss.py with `if self.gr < 1:` * Update loss.py --- utils/loss.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/utils/loss.py b/utils/loss.py index 0f0137817955..b49cc7f66e66 100644 --- a/utils/loss.py +++ b/utils/loss.py @@ -139,11 +139,13 @@ def __call__(self, p, targets): # predictions, targets lbox += (1.0 - iou).mean() # iou loss # Objectness - score_iou = iou.detach().clamp(0).type(tobj.dtype) + iou = iou.detach().clamp(0).type(tobj.dtype) if self.sort_obj_iou: - sort_id = torch.argsort(score_iou) - b, a, gj, gi, score_iou = b[sort_id], a[sort_id], gj[sort_id], gi[sort_id], score_iou[sort_id] - tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * score_iou # iou ratio + j = iou.argsort() + b, a, gj, gi, iou = b[j], a[j], gj[j], gi[j], iou[j] + if self.gr < 1: + iou = (1.0 - self.gr) + self.gr * iou + tobj[b, a, gj, gi] = iou # iou ratio # Classification if self.nc > 1: # cls loss (only if multiple classes) From a2d617ece94dcd8c9bc205ea70f1223c84fdbe3a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 21 Mar 2022 19:18:34 +0100 Subject: [PATCH 0962/1976] Update loss for FP16 `tobj` (#7088) --- utils/loss.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/loss.py b/utils/loss.py index b49cc7f66e66..a06330e034bc 100644 --- a/utils/loss.py +++ b/utils/loss.py @@ -125,7 +125,7 @@ def __call__(self, p, targets): # predictions, targets # Losses for i, pi in enumerate(p): # layer index, layer predictions b, a, gj, gi = indices[i] # image, anchor, gridy, gridx - tobj = torch.zeros(pi.shape[:4], device=self.device) # target obj + tobj = torch.zeros(pi.shape[:4], dtype=pi.dtype, device=self.device) # target obj n = b.shape[0] # number of targets if n: From a600baed8efc6407ec4fb7a71fa1dbe3be23d441 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 22 Mar 2022 15:41:19 +0100 Subject: [PATCH 0963/1976] Update model summary to display model name (#7101) --- utils/torch_utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index efcacc9ca735..793c9c184a44 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -229,7 +229,8 @@ def model_info(model, verbose=False, img_size=640): except (ImportError, Exception): fs = '' - LOGGER.info(f"Model Summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}") + name = 
model.yaml_file.rstrip('.yaml').replace('yolov5', 'YOLOv5') if hasattr(model, 'yaml_file') else 'Model' + LOGGER.info(f"{name} summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}") def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416) From 05aae1733352289e4c4dca031159df7f0354d049 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 22 Mar 2022 17:36:05 +0100 Subject: [PATCH 0964/1976] `torch.split()` 1.7.0 compatibility fix (#7102) * Update loss.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update loss.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/loss.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/utils/loss.py b/utils/loss.py index a06330e034bc..bf9b592d4ad2 100644 --- a/utils/loss.py +++ b/utils/loss.py @@ -108,13 +108,15 @@ def __init__(self, model, autobalance=False): if g > 0: BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) - det = de_parallel(model).model[-1] # Detect() module - self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, 0.02]) # P3-P7 - self.ssi = list(det.stride).index(16) if autobalance else 0 # stride 16 index + m = de_parallel(model).model[-1] # Detect() module + self.balance = {3: [4.0, 1.0, 0.4]}.get(m.nl, [4.0, 1.0, 0.25, 0.06, 0.02]) # P3-P7 + self.ssi = list(m.stride).index(16) if autobalance else 0 # stride 16 index self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance + self.na = m.na # number of anchors + self.nc = m.nc # number of classes + self.nl = m.nl # number of layers + self.anchors = m.anchors self.device = device - for k in 'na', 'nc', 'nl', 'anchors': - setattr(self, k, getattr(det, k)) def __call__(self, p, targets): # predictions, targets lcls = torch.zeros(1, device=self.device) # class loss @@ -129,7 +131,8 @@ def __call__(self, p, targets): # predictions, targets n = b.shape[0] # number of targets if n: - pxy, pwh, _, pcls = pi[b, a, gj, gi].tensor_split((2, 4, 5), dim=1) # target-subset of predictions + # pxy, pwh, _, pcls = pi[b, a, gj, gi].tensor_split((2, 4, 5), dim=1) # faster, requires torch 1.8.0 + pxy, pwh, _, pcls = pi[b, a, gj, gi].split((2, 2, 1, self.nc), 1) # target-subset of predictions # Regression pxy = pxy.sigmoid() * 2 - 0.5 From ee0b3b2a953bd50ba29b39119a09ef9521596416 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 22 Mar 2022 18:02:35 +0100 Subject: [PATCH 0965/1976] Update benchmarks significant digits (#7103) --- utils/benchmarks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/benchmarks.py b/utils/benchmarks.py index bdbbdc43b639..446248c03f68 100644 --- a/utils/benchmarks.py +++ b/utils/benchmarks.py @@ -67,7 +67,7 @@ def run(weights=ROOT / 'yolov5s.pt', # weights path result = val.run(data, w, batch_size, imgsz, plots=False, device=device, task='benchmark', half=half) metrics = result[0] # metrics (mp, mr, map50, map, *losses(box, obj, cls)) speeds = result[2] # times (preprocess, inference, postprocess) - y.append([name, metrics[3], speeds[1]]) # mAP, t_inference + y.append([name, round(metrics[3], 4), round(speeds[1], 2)]) # mAP, t_inference except Exception as e: LOGGER.warning(f'WARNING: Benchmark failure for {name}: {e}') y.append([name, None, None]) # mAP, t_inference From 6134ec5d9484ac9ac743121b1c74709e93c68a88 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 22 Mar 2022 20:05:07 +0100 
Subject: [PATCH 0966/1976] Model summary `pathlib` fix (#7104) Stems not working correctly for YOLOv5l with current .rstrip() implementation. After fix: ``` YOLOv5l summary: 468 layers, 46563709 parameters, 46563709 gradients, 109.3 GFLOPs ``` --- utils/torch_utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 793c9c184a44..72f8a0fd1659 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -11,6 +11,7 @@ import warnings from contextlib import contextmanager from copy import deepcopy +from pathlib import Path import torch import torch.distributed as dist @@ -229,7 +230,7 @@ def model_info(model, verbose=False, img_size=640): except (ImportError, Exception): fs = '' - name = model.yaml_file.rstrip('.yaml').replace('yolov5', 'YOLOv5') if hasattr(model, 'yaml_file') else 'Model' + name = Path(model.yaml_file).stem.replace('yolov5', 'YOLOv5') if hasattr(model, 'yaml_file') else 'Model' LOGGER.info(f"{name} summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}") From ecc2c7ba73e71211b192cba69e255afad92de67a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 22 Mar 2022 20:44:07 +0100 Subject: [PATCH 0967/1976] Remove named arguments where possible (#7105) * Remove named arguments where possible Speed improvements. * Update yolo.py * Update yolo.py * Update yolo.py --- models/common.py | 14 +++++++------- models/yolo.py | 10 +++++----- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/models/common.py b/models/common.py index 066f8774d3c3..0286c74fe8cd 100644 --- a/models/common.py +++ b/models/common.py @@ -121,7 +121,7 @@ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, nu def forward(self, x): y1 = self.cv3(self.m(self.cv1(x))) y2 = self.cv2(x) - return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1)))) + return self.cv4(self.act(self.bn(torch.cat((y1, y2), 1)))) class C3(nn.Module): @@ -136,7 +136,7 @@ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, nu # self.m = nn.Sequential(*(CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n))) def forward(self, x): - return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), dim=1)) + return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), 1)) class C3TR(C3): @@ -527,7 +527,7 @@ def forward(self, imgs, size=640, augment=False, profile=False): p = next(self.model.parameters()) if self.pt else torch.zeros(1) # for device and type autocast = self.amp and (p.device.type != 'cpu') # Automatic Mixed Precision (AMP) inference if isinstance(imgs, torch.Tensor): # torch - with amp.autocast(enabled=autocast): + with amp.autocast(autocast): return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference # Pre-process @@ -550,19 +550,19 @@ def forward(self, imgs, size=640, augment=False, profile=False): shape1.append([y * g for y in s]) imgs[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update shape1 = [make_divisible(x, self.stride) if self.pt else size for x in np.array(shape1).max(0)] # inf shape - x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs] # pad + x = [letterbox(im, shape1, auto=False)[0] for im in imgs] # pad x = np.ascontiguousarray(np.array(x).transpose((0, 3, 1, 2))) # stack and BHWC to BCHW x = torch.from_numpy(x).to(p.device).type_as(p) / 255 # uint8 to fp16/32 t.append(time_sync()) - with amp.autocast(enabled=autocast): + with amp.autocast(autocast): # Inference y = self.model(x, augment, 
profile) # forward t.append(time_sync()) # Post-process - y = non_max_suppression(y if self.dmb else y[0], self.conf, iou_thres=self.iou, classes=self.classes, - agnostic=self.agnostic, multi_label=self.multi_label, max_det=self.max_det) # NMS + y = non_max_suppression(y if self.dmb else y[0], self.conf, self.iou, self.classes, self.agnostic, + self.multi_label, max_det=self.max_det) # NMS for i in range(n): scale_coords(shape1, y[i][:, :4], shape0[i]) diff --git a/models/yolo.py b/models/yolo.py index 2f4bbe0f71d1..9f4701c49f9d 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -71,13 +71,13 @@ def forward(self, x): def _make_grid(self, nx=20, ny=20, i=0): d = self.anchors[i].device + shape = 1, self.na, ny, nx, 2 # grid shape if check_version(torch.__version__, '1.10.0'): # torch>=1.10.0 meshgrid workaround for torch>=0.7 compatibility - yv, xv = torch.meshgrid([torch.arange(ny, device=d), torch.arange(nx, device=d)], indexing='ij') + yv, xv = torch.meshgrid(torch.arange(ny, device=d), torch.arange(nx, device=d), indexing='ij') else: - yv, xv = torch.meshgrid([torch.arange(ny, device=d), torch.arange(nx, device=d)]) - grid = torch.stack((xv, yv), 2).expand((1, self.na, ny, nx, 2)).float() - anchor_grid = (self.anchors[i].clone() * self.stride[i]) \ - .view((1, self.na, 1, 1, 2)).expand((1, self.na, ny, nx, 2)).float() + yv, xv = torch.meshgrid(torch.arange(ny, device=d), torch.arange(nx, device=d)) + grid = torch.stack((xv, yv), 2).expand(shape).float() + anchor_grid = (self.anchors[i] * self.stride[i]).view((1, self.na, 1, 1, 2)).expand(shape).float() return grid, anchor_grid From c3ae4e4af6f75aff537b876adc11da3de441dd60 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 23 Mar 2022 01:19:37 +0100 Subject: [PATCH 0968/1976] Multi-threaded VisDrone and VOC downloads (#7108) * Multi-threaded VOC download * Update VOC.yaml * Update * Update general.py * Update general.py --- data/GlobalWheat2020.yaml | 1 + data/Objects365.yaml | 1 + data/SKU-110K.yaml | 1 + data/VOC.yaml | 2 +- data/VisDrone.yaml | 2 +- data/coco.yaml | 1 + utils/general.py | 11 +++++++---- 7 files changed, 13 insertions(+), 6 deletions(-) diff --git a/data/GlobalWheat2020.yaml b/data/GlobalWheat2020.yaml index 869dace0be2b..c1ba289f2833 100644 --- a/data/GlobalWheat2020.yaml +++ b/data/GlobalWheat2020.yaml @@ -34,6 +34,7 @@ names: ['wheat_head'] # class names download: | from utils.general import download, Path + # Download dir = Path(yaml['path']) # dataset root dir urls = ['https://zenodo.org/record/4298502/files/global-wheat-codalab-official.zip', diff --git a/data/Objects365.yaml b/data/Objects365.yaml index 4c7cf3fdb2c8..bd6e5d6e1144 100644 --- a/data/Objects365.yaml +++ b/data/Objects365.yaml @@ -65,6 +65,7 @@ download: | from utils.general import Path, download, np, xyxy2xywhn + # Make Directories dir = Path(yaml['path']) # dataset root dir for p in 'images', 'labels': diff --git a/data/SKU-110K.yaml b/data/SKU-110K.yaml index 9481b7a04aee..46459eab6bb7 100644 --- a/data/SKU-110K.yaml +++ b/data/SKU-110K.yaml @@ -24,6 +24,7 @@ download: | from tqdm import tqdm from utils.general import np, pd, Path, download, xyxy2xywh + # Download dir = Path(yaml['path']) # dataset root dir parent = Path(dir.parent) # download dir diff --git a/data/VOC.yaml b/data/VOC.yaml index 975d56466de1..be04fb1e2ecb 100644 --- a/data/VOC.yaml +++ b/data/VOC.yaml @@ -62,7 +62,7 @@ download: | urls = [url + 'VOCtrainval_06-Nov-2007.zip', # 446MB, 5012 images url + 'VOCtest_06-Nov-2007.zip', # 438MB, 4953 images url + 
'VOCtrainval_11-May-2012.zip'] # 1.95GB, 17126 images - download(urls, dir=dir / 'images', delete=False) + download(urls, dir=dir / 'images', delete=False, threads=3) # Convert path = dir / f'images/VOCdevkit' diff --git a/data/VisDrone.yaml b/data/VisDrone.yaml index 83a5c7d55e06..2a3b2f03e674 100644 --- a/data/VisDrone.yaml +++ b/data/VisDrone.yaml @@ -54,7 +54,7 @@ download: | 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-val.zip', 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-dev.zip', 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-challenge.zip'] - download(urls, dir=dir) + download(urls, dir=dir, threads=4) # Convert for d in 'VisDrone2019-DET-train', 'VisDrone2019-DET-val', 'VisDrone2019-DET-test-dev': diff --git a/data/coco.yaml b/data/coco.yaml index 3ed7e48a2185..7494fc2f9cd1 100644 --- a/data/coco.yaml +++ b/data/coco.yaml @@ -30,6 +30,7 @@ names: ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 't download: | from utils.general import download, Path + # Download labels segments = False # segment or box labels dir = Path(yaml['path']) # dataset root dir diff --git a/utils/general.py b/utils/general.py index e8b3b05c5fe1..b0c5e9d69ab7 100755 --- a/utils/general.py +++ b/utils/general.py @@ -449,8 +449,9 @@ def check_dataset(data, autodownload=True): if val: val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path if not all(x.exists() for x in val): - LOGGER.info('\nDataset not found, missing paths: %s' % [str(x) for x in val if not x.exists()]) + LOGGER.info(emojis('\nDataset not found ⚠️, missing paths %s' % [str(x) for x in val if not x.exists()])) if s and autodownload: # download script + t = time.time() root = path.parent if 'path' in data else '..' # unzip directory i.e. '../' if s.startswith('http') and s.endswith('.zip'): # URL f = Path(s).name # filename @@ -465,9 +466,11 @@ def check_dataset(data, autodownload=True): r = os.system(s) else: # python script r = exec(s, {'yaml': data}) # return None - LOGGER.info(f"Dataset autodownload {f'success, saved to {root}' if r in (0, None) else 'failure'}\n") + dt = f'({round(time.time() - t, 1)}s)' + s = f"success ✅ {dt}, saved to {colorstr('bold', root)}" if r in (0, None) else f"failure {dt} ❌" + LOGGER.info(emojis(f"Dataset download {s}")) else: - raise Exception('Dataset not found.') + raise Exception(emojis('Dataset not found ❌')) return data # dictionary @@ -491,7 +494,7 @@ def download_one(url, dir): if curl: os.system(f"curl -L '{url}' -o '{f}' --retry 9 -C -") # curl download, retry and resume on fail else: - torch.hub.download_url_to_file(url, f, progress=True) # torch download + torch.hub.download_url_to_file(url, f, progress=threads == 1) # torch download if unzip and f.suffix in ('.zip', '.gz'): LOGGER.info(f'Unzipping {f}...') if f.suffix == '.zip': From bc3ed957ce0f0990a3cb408e462197b83b0d075f Mon Sep 17 00:00:00 2001 From: yeshanliu <41566254+yeshanliu@users.noreply.github.com> Date: Wed, 23 Mar 2022 22:35:15 +0800 Subject: [PATCH 0969/1976] `np.fromfile()` Chinese image paths fix (#6979) * :tada: :new: YOLOv5 can now read Chinese (non-ASCII) image paths: use "cv2.imdecode(np.fromfile(f, np.uint8), cv2.IMREAD_COLOR)" instead of "cv2.imread(f)". 
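For illustration, a minimal sketch of why the remap works (the filename below is hypothetical): `np.fromfile()` reads the raw bytes through Python's Unicode-aware file I/O, and `cv2.imdecode()` then decodes them in memory, sidestepping OpenCV's path handling entirely:

```python
# Sketch only: read an image from a non-ASCII path
import cv2
import numpy as np

f = '图像/测试.jpg'  # hypothetical Chinese path where plain cv2.imread(f) may return None
buf = np.fromfile(f, np.uint8)  # raw bytes via Python's Unicode-aware file I/O
im = cv2.imdecode(buf, cv2.IMREAD_COLOR)  # decode to a BGR ndarray
```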
* Update datasets.py * Update __init__.py Co-authored-by: Glenn Jocher --- utils/datasets.py | 3 +++ utils/loggers/__init__.py | 3 +++ 2 files changed, 6 insertions(+) diff --git a/utils/datasets.py b/utils/datasets.py index 8627344af7b4..f212e54633be 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -32,6 +32,9 @@ segments2boxes, xyn2xy, xywh2xyxy, xywhn2xyxy, xyxy2xywhn) from utils.torch_utils import torch_distributed_zero_first +# Remap +cv2.imread = lambda x: cv2.imdecode(np.fromfile(x, np.uint8), cv2.IMREAD_COLOR) # for Chinese filenames + # Parameters HELP_URL = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp' # include image suffixes diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 866bdc4be2f5..ff6722ecd48a 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -148,6 +148,9 @@ def on_train_end(self, last, best, plots, epoch, results): if self.tb: import cv2 + import numpy as np + + cv2.imread = lambda x: cv2.imdecode(np.fromfile(x, np.uint8), cv2.IMREAD_COLOR) # remap for Chinese files for f in files: self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats='HWC') From a0a4adf6de4de3d9d5ac00c23796c844a8e57200 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 24 Mar 2022 11:31:22 +0100 Subject: [PATCH 0970/1976] Add PyTorch Hub `results.save(labels=False)` option (#7129) Resolves https://github.com/ultralytics/yolov5/issues/388#issuecomment-1077121821 --- models/common.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/models/common.py b/models/common.py index 0286c74fe8cd..115e3c3145ff 100644 --- a/models/common.py +++ b/models/common.py @@ -131,7 +131,7 @@ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, nu c_ = int(c2 * e) # hidden channels self.cv1 = Conv(c1, c_, 1, 1) self.cv2 = Conv(c1, c_, 1, 1) - self.cv3 = Conv(2 * c_, c2, 1) # act=FReLU(c2) + self.cv3 = Conv(2 * c_, c2, 1) # optional act=FReLU(c2) self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n))) # self.m = nn.Sequential(*(CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n))) @@ -589,7 +589,7 @@ def __init__(self, imgs, pred, files, times=(0, 0, 0, 0), names=None, shape=None self.t = tuple((times[i + 1] - times[i]) * 1000 / self.n for i in range(3)) # timestamps (ms) self.s = shape # inference BCHW shape - def display(self, pprint=False, show=False, save=False, crop=False, render=False, save_dir=Path('')): + def display(self, pprint=False, show=False, save=False, crop=False, render=False, labels=True, save_dir=Path('')): crops = [] for i, (im, pred) in enumerate(zip(self.imgs, self.pred)): s = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} ' # string @@ -606,7 +606,7 @@ def display(self, pprint=False, show=False, save=False, crop=False, render=False crops.append({'box': box, 'conf': conf, 'cls': cls, 'label': label, 'im': save_one_box(box, im, file=file, save=save)}) else: # all others - annotator.box_label(box, label, color=colors(cls)) + annotator.box_label(box, label if labels else '', color=colors(cls)) im = annotator.im else: s += '(no detections)' @@ -633,19 +633,19 @@ def print(self): LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' % self.t) - def show(self): - self.display(show=True) # show results + def show(self, labels=True): + self.display(show=True, labels=labels) # 
show results - def save(self, save_dir='runs/detect/exp'): + def save(self, labels=True, save_dir='runs/detect/exp'): save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) # increment save_dir - self.display(save=True, save_dir=save_dir) # save results + self.display(save=True, labels=labels, save_dir=save_dir) # save results def crop(self, save=True, save_dir='runs/detect/exp'): save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) if save else None return self.display(crop=True, save=save, save_dir=save_dir) # crop results - def render(self): - self.display(render=True) # render results + def render(self, labels=True): + self.display(render=True, labels=labels) # render results return self.imgs def pandas(self): From d115bbf509aa86ed553d1dc57c078c842393cca8 Mon Sep 17 00:00:00 2001 From: RcINS Date: Fri, 25 Mar 2022 20:25:30 +0800 Subject: [PATCH 0971/1976] Fix `cv2.imwrite` on non-ASCII paths (#7139) * Fix imwrite on non-ASCII paths * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update general.py * Update __init__.py * Update __init__.py * Update datasets.py * Update hubconf.py * Update detect.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update general.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- detect.py | 3 +-- hubconf.py | 3 ++- utils/datasets.py | 6 +----- utils/general.py | 17 ++++++++++++++++- utils/loggers/__init__.py | 6 +----- 5 files changed, 21 insertions(+), 14 deletions(-) diff --git a/detect.py b/detect.py index ccb9fbf5103f..046f7ae57b5c 100644 --- a/detect.py +++ b/detect.py @@ -29,7 +29,6 @@ import sys from pathlib import Path -import cv2 import torch import torch.backends.cudnn as cudnn @@ -41,7 +40,7 @@ from models.common import DetectMultiBackend from utils.datasets import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams -from utils.general import (LOGGER, check_file, check_img_size, check_imshow, check_requirements, colorstr, +from utils.general import (LOGGER, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, increment_path, non_max_suppression, print_args, scale_coords, strip_optimizer, xyxy2xywh) from utils.plots import Annotator, colors, save_one_box from utils.torch_utils import select_device, time_sync diff --git a/hubconf.py b/hubconf.py index 39fa614b2e34..d719b80034af 100644 --- a/hubconf.py +++ b/hubconf.py @@ -127,10 +127,11 @@ def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=Tr # Verify inference from pathlib import Path - import cv2 import numpy as np from PIL import Image + from utils.general import cv2 + imgs = ['data/images/zidane.jpg', # filename Path('data/images/zidane.jpg'), # Path 'https://ultralytics.com/images/zidane.jpg', # URI diff --git a/utils/datasets.py b/utils/datasets.py index f212e54633be..d0b35e808000 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -18,7 +18,6 @@ from urllib.parse import urlparse from zipfile import ZipFile -import cv2 import numpy as np import torch import torch.nn.functional as F @@ -29,12 +28,9 @@ from utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective from utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, check_dataset, check_requirements, check_yaml, clean_str, - segments2boxes, xyn2xy, xywh2xyxy, xywhn2xyxy, xyxy2xywhn) 
+ cv2, segments2boxes, xyn2xy, xywh2xyxy, xywhn2xyxy, xyxy2xywhn) from utils.torch_utils import torch_distributed_zero_first -# Remap -cv2.imread = lambda x: cv2.imdecode(np.fromfile(x, np.uint8), cv2.IMREAD_COLOR) # for Chinese filenames - # Parameters HELP_URL = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp' # include image suffixes diff --git a/utils/general.py b/utils/general.py index b0c5e9d69ab7..dcdb58cb0f51 100755 --- a/utils/general.py +++ b/utils/general.py @@ -904,5 +904,20 @@ def increment_path(path, exist_ok=False, sep='', mkdir=False): return path -# Variables +# OpenCV Chinese-friendly functions ------------------------------------------------------------------------------------ +def imread(path): + return cv2.imdecode(np.fromfile(path, np.uint8), cv2.IMREAD_COLOR) + + +def imwrite(path, im): + try: + cv2.imencode(Path(path).suffix, im)[1].tofile(path) + return True + except Exception: + return False + + +cv2.imread, cv2.imwrite = imread, imwrite # redefine + +# Variables ------------------------------------------------------------------------------------------------------------ NCOLS = 0 if is_docker() else shutil.get_terminal_size().columns # terminal window size for tqdm diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index ff6722ecd48a..bb8523c0219e 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -11,7 +11,7 @@ import torch from torch.utils.tensorboard import SummaryWriter -from utils.general import colorstr, emojis +from utils.general import colorstr, cv2, emojis from utils.loggers.wandb.wandb_utils import WandbLogger from utils.plots import plot_images, plot_results from utils.torch_utils import de_parallel @@ -147,10 +147,6 @@ def on_train_end(self, last, best, plots, epoch, results): files = [(self.save_dir / f) for f in files if (self.save_dir / f).exists()] # filter if self.tb: - import cv2 - import numpy as np - - cv2.imread = lambda x: cv2.imdecode(np.fromfile(x, np.uint8), cv2.IMREAD_COLOR) # remap for Chinese files for f in files: self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats='HWC') From a4c661873f1edfe3d687bd01c4477e56739c7db3 Mon Sep 17 00:00:00 2001 From: Zengyf-CVer <41098760+Zengyf-CVer@users.noreply.github.com> Date: Fri, 25 Mar 2022 20:40:55 +0800 Subject: [PATCH 0972/1976] Fix `detect.py --view-img` for non-ASCII paths (#7093) * Update detect.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update general.py * Update detect.py * Update general.py * Update general.py * Update general.py * Update general.py * Update general.py * Update general.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update general.py * Update general.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- utils/general.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index dcdb58cb0f51..45e23edff062 100755 --- a/utils/general.py +++ b/utils/general.py @@ -905,6 +905,9 @@ def increment_path(path, exist_ok=False, sep='', mkdir=False): # OpenCV Chinese-friendly functions ------------------------------------------------------------------------------------ +imshow_ = cv2.imshow # copy to avoid recursion errors + + def imread(path): return cv2.imdecode(np.fromfile(path, 
np.uint8), cv2.IMREAD_COLOR) @@ -917,7 +920,11 @@ def imwrite(path, im): return False -cv2.imread, cv2.imwrite = imread, imwrite # redefine +def imshow(path, im): + imshow_(path.encode('unicode_escape').decode(), im) + + +cv2.imread, cv2.imwrite, cv2.imshow = imread, imwrite, imshow # redefine # Variables ------------------------------------------------------------------------------------------------------------ NCOLS = 0 if is_docker() else shutil.get_terminal_size().columns # terminal window size for tqdm From 7a2a11893b56c67903f0dc4e313235e544189601 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 25 Mar 2022 14:45:23 +0100 Subject: [PATCH 0973/1976] Add Architecture Summary to README Tutorials (#7146) * Add Architecture Summary to README Tutorials Per https://github.com/ultralytics/yolov5/issues/6998#issuecomment-1073517405 * Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 3ebc085b6c33..54c5cbd83f5b 100644 --- a/README.md +++ b/README.md @@ -162,7 +162,7 @@ python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 12 * [Model Pruning/Sparsity](https://github.com/ultralytics/yolov5/issues/304) * [Hyperparameter Evolution](https://github.com/ultralytics/yolov5/issues/607) * [Transfer Learning with Frozen Layers](https://github.com/ultralytics/yolov5/issues/1314)  ⭐ NEW -* [TensorRT Deployment](https://github.com/wang-xinyu/tensorrtx) +* [Architecture Summary](https://github.com/ultralytics/yolov5/issues/6998)  ⭐ NEW
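Rounding out the non-ASCII path fixes above, a minimal sketch of the `imwrite` remap (again with a hypothetical filename): the image is encoded in memory from the file suffix alone, then written with `ndarray.tofile()`, which accepts Unicode paths:

```python
# Sketch only: write an image to a non-ASCII path
from pathlib import Path

import cv2
import numpy as np

im = np.zeros((64, 64, 3), np.uint8)  # dummy BGR image
f = '结果.jpg'  # hypothetical Chinese filename where plain cv2.imwrite() can fail
ok, buf = cv2.imencode(Path(f).suffix, im)  # encode in memory from the suffix alone
if ok:
    buf.tofile(f)  # ndarray.tofile() handles Unicode paths
```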
From 26bfd4446559814ab5b1a2fa34584dcb3a49da6c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 26 Mar 2022 11:45:28 +0100 Subject: [PATCH 0974/1976] Adjust NMS time limit warning to batch size (#7156) --- utils/general.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/utils/general.py b/utils/general.py index 45e23edff062..e1751c4bb62d 100755 --- a/utils/general.py +++ b/utils/general.py @@ -709,6 +709,7 @@ def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=Non list of detections, on (n,6) tensor per image [xyxy, conf, cls] """ + bs = prediction.shape[0] # batch size nc = prediction.shape[2] - 5 # number of classes xc = prediction[..., 4] > conf_thres # candidates @@ -719,13 +720,13 @@ def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=Non # Settings min_wh, max_wh = 2, 7680 # (pixels) minimum and maximum box width and height max_nms = 30000 # maximum number of boxes into torchvision.ops.nms() - time_limit = 10.0 # seconds to quit after + time_limit = 0.030 * bs # seconds to quit after redundant = True # require redundant detections multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img) merge = False # use merge-NMS - t = time.time() - output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0] + t, warn_time = time.time(), True + output = [torch.zeros((0, 6), device=prediction.device)] * bs for xi, x in enumerate(prediction): # image index, image inference # Apply constraints x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height x = x[xc[xi]] # confidence @@ -789,7 +790,9 @@ def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=Non output[xi] = x[i] if (time.time() - t) > time_limit: - LOGGER.warning(f'WARNING: NMS time limit {time_limit}s exceeded') + if warn_time: + LOGGER.warning(f'WARNING: NMS time limit {time_limit:3f}s exceeded') + warn_time = False break # time limit exceeded return output From e19f87eb4bcdc01ee0570cf283fb3d031dbe5451 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 26 Mar 2022 14:18:53 +0100 Subject: [PATCH 0975/1976] Sidestep `os.path.relpath()` Windows bug (#7158) * Sidestep os.path.relpath() Windows bug os.path.relpath() seems to have a major bug on Windows due to Windows' horrible path handling. This fix attempts to sidestep the issue. 
``` File "C:\Users\mkokg/.cache\torch\hub\ultralytics_yolov5_master\export.py", line 64, in ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative File "C:\Users\mkokg\AppData\Local\Programs\Python\Python310\lib\ntpath.py", line 718, in relpath raise ValueError("path is on mount %r, start on mount %r" % ( ValueError: path is on mount 'C:', start on mount 'D:' ``` * Update yolo.py * Update yolo.py * Update yolo.py * Update export.py --- export.py | 3 ++- models/yolo.py | 5 ++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/export.py b/export.py index 2d4a68e62f89..7517dc4678da 100644 --- a/export.py +++ b/export.py @@ -61,7 +61,8 @@ ROOT = FILE.parents[0] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH -ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative +if platform.system() != 'Windows': + ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative from models.common import Conv from models.experimental import attempt_load diff --git a/models/yolo.py b/models/yolo.py index 9f4701c49f9d..11e17d28fa47 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -7,6 +7,8 @@ """ import argparse +import os +import platform import sys from copy import deepcopy from pathlib import Path @@ -15,7 +17,8 @@ ROOT = FILE.parents[1] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH -# ROOT = ROOT.relative_to(Path.cwd()) # relative +if platform.system() != 'Windows': + ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative from models.common import * from models.experimental import * From 3373aab56c28ce2160d6e8f09035db49061a2619 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 26 Mar 2022 16:52:58 +0100 Subject: [PATCH 0976/1976] NMS unused variable fix (#7161) * NMS unused variable fix * Update general.py --- utils/general.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/utils/general.py b/utils/general.py index e1751c4bb62d..5905211cfa59 100755 --- a/utils/general.py +++ b/utils/general.py @@ -703,7 +703,7 @@ def clip_coords(boxes, shape): def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False, labels=(), max_det=300): - """Runs Non-Maximum Suppression (NMS) on inference results + """Non-Maximum Suppression (NMS) on inference results to reject overlapping bounding boxes Returns: list of detections, on (n,6) tensor per image [xyxy, conf, cls] @@ -718,18 +718,19 @@ def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=Non assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0' # Settings - min_wh, max_wh = 2, 7680 # (pixels) minimum and maximum box width and height + # min_wh = 2 # (pixels) minimum box width and height + max_wh = 7680 # (pixels) maximum box width and height max_nms = 30000 # maximum number of boxes into torchvision.ops.nms() time_limit = 0.030 * bs # seconds to quit after redundant = True # require redundant detections multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img) merge = False # use merge-NMS - t, warn_time = time.time(), True + t = time.time() output = [torch.zeros((0, 6), device=prediction.device)] * bs for xi, x in enumerate(prediction): # image index, image inference # Apply constraints - x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height + # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height x = x[xc[xi]] # confidence # Cat 
apriori labels if autolabelling @@ -790,9 +791,7 @@ def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=Non output[xi] = x[i] if (time.time() - t) > time_limit: - if warn_time: - LOGGER.warning(f'WARNING: NMS time limit {time_limit:3f}s exceeded') - warn_time = False + LOGGER.warning(f'WARNING: NMS time limit {time_limit:.3f}s exceeded') break # time limit exceeded return output From 7830e91b9aec29180de9505316f8c8de607a6014 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 26 Mar 2022 16:53:42 +0100 Subject: [PATCH 0977/1976] `yolo.py --profile` default GPU batch size 16 --- models/yolo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/yolo.py b/models/yolo.py index 11e17d28fa47..fb01aaafedcf 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -314,7 +314,7 @@ def parse_model(d, ch): # model_dict, input_channels(3) # Profile if opt.profile: - img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 640, 640).to(device) + img = torch.rand(16 if torch.cuda.is_available() else 1, 3, 640, 640).to(device) y = model(img, profile=True) # Test all models From b2194b90156e74e5a1480cd2457d1b41ec2dc181 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 27 Mar 2022 20:24:42 +0200 Subject: [PATCH 0978/1976] `yolo.py --profile` updates (#7170) --- models/yolo.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/models/yolo.py b/models/yolo.py index fb01aaafedcf..e88db79ca8c7 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -310,11 +310,11 @@ def parse_model(d, ch): # model_dict, input_channels(3) # Create model model = Model(opt.cfg).to(device) - model.train() # Profile if opt.profile: - img = torch.rand(16 if torch.cuda.is_available() else 1, 3, 640, 640).to(device) + model.eval().fuse() + img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 640, 640).to(device) y = model(img, profile=True) # Test all models From 1832264dd684256715384dd12e6c40696c89d903 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 28 Mar 2022 02:26:44 +0200 Subject: [PATCH 0979/1976] Update --- models/common.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/models/common.py b/models/common.py index 115e3c3145ff..5dd4843ed66d 100644 --- a/models/common.py +++ b/models/common.py @@ -124,6 +124,9 @@ def forward(self, x): return self.cv4(self.act(self.bn(torch.cat((y1, y2), 1)))) +from models.experimental import CrossConv + + class C3(nn.Module): # CSP Bottleneck with 3 convolutions def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion @@ -132,8 +135,8 @@ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, nu self.cv1 = Conv(c1, c_, 1, 1) self.cv2 = Conv(c1, c_, 1, 1) self.cv3 = Conv(2 * c_, c2, 1) # optional act=FReLU(c2) - self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n))) - # self.m = nn.Sequential(*(CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n))) + # self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n))) + self.m = nn.Sequential(*(CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n))) def forward(self, x): return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), 1)) From ee77632393b5f0989e92f39d2c3aeef9d4ebf0e6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 28 Mar 2022 02:31:00 +0200 Subject: [PATCH 0980/1976] Revert `C3()` change (#7172) --- models/common.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git 
a/models/common.py b/models/common.py index 5dd4843ed66d..115e3c3145ff 100644 --- a/models/common.py +++ b/models/common.py @@ -124,9 +124,6 @@ def forward(self, x): return self.cv4(self.act(self.bn(torch.cat((y1, y2), 1)))) -from models.experimental import CrossConv - - class C3(nn.Module): # CSP Bottleneck with 3 convolutions def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion @@ -135,8 +132,8 @@ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, nu self.cv1 = Conv(c1, c_, 1, 1) self.cv2 = Conv(c1, c_, 1, 1) self.cv3 = Conv(2 * c_, c2, 1) # optional act=FReLU(c2) - # self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n))) - self.m = nn.Sequential(*(CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n))) + self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n))) + # self.m = nn.Sequential(*(CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n))) def forward(self, x): return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), 1)) From d51f9b2ff6e60b7eaaafc7e8d991f0d6dbb786cd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Mar 2022 10:42:19 +0200 Subject: [PATCH 0981/1976] Bump actions/cache from 2.1.7 to 3 (#7175) Bumps [actions/cache](https://github.com/actions/cache) from 2.1.7 to 3. - [Release notes](https://github.com/actions/cache/releases) - [Commits](https://github.com/actions/cache/compare/v2.1.7...v3) --- updated-dependencies: - dependency-name: actions/cache dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci-testing.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index f2096ce17a17..59193e05e08c 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -39,7 +39,7 @@ jobs: python -c "from pip._internal.locations import USER_CACHE_DIR; print('::set-output name=dir::' + USER_CACHE_DIR)" - name: Cache pip - uses: actions/cache@v2.1.7 + uses: actions/cache@v3 with: path: ${{ steps.pip-cache.outputs.dir }} key: ${{ runner.os }}-${{ matrix.python-version }}-pip-${{ hashFiles('requirements.txt') }} From cf4f3c3455d14c62e11dcce9f1d30211509da72f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 29 Mar 2022 10:15:53 +0200 Subject: [PATCH 0982/1976] yolo.py profiling updates (#7178) * yolo.py profiling updates * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- models/yolo.py | 26 ++++++++++++-------------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/models/yolo.py b/models/yolo.py index e88db79ca8c7..81ab539deffa 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -25,7 +25,8 @@ from utils.autoanchor import check_anchor_order from utils.general import LOGGER, check_version, check_yaml, make_divisible, print_args from utils.plots import feature_visualization -from utils.torch_utils import fuse_conv_and_bn, initialize_weights, model_info, scale_img, select_device, time_sync +from utils.torch_utils import (fuse_conv_and_bn, initialize_weights, model_info, profile, scale_img, select_device, + time_sync) try: import thop # 
for FLOPs computation @@ -300,8 +301,10 @@ def parse_model(d, ch): # model_dict, input_channels(3) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml') + parser.add_argument('--batch-size', type=int, default=1, help='total batch size for all GPUs') parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') parser.add_argument('--profile', action='store_true', help='profile model speed') + parser.add_argument('--line-profile', action='store_true', help='profile model speed layer by layer') parser.add_argument('--test', action='store_true', help='test all yolo*.yaml') opt = parser.parse_args() opt.cfg = check_yaml(opt.cfg) # check YAML @@ -309,24 +312,19 @@ def parse_model(d, ch): # model_dict, input_channels(3) device = select_device(opt.device) # Create model + im = torch.rand(opt.batch_size, 3, 640, 640).to(device) model = Model(opt.cfg).to(device) - # Profile - if opt.profile: - model.eval().fuse() - img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 640, 640).to(device) - y = model(img, profile=True) + # Options + if opt.line_profile: # profile layer by layer + _ = model(im, profile=True) - # Test all models - if opt.test: + elif opt.profile: # profile forward-backward + results = profile(input=im, ops=[model], n=3) + + elif opt.test: # test all models for cfg in Path(ROOT / 'models').rglob('yolo*.yaml'): try: _ = Model(cfg) except Exception as e: print(f'Error in {cfg}: {e}') - - # Tensorboard (not working https://github.com/ultralytics/yolov5/issues/2898) - # from torch.utils.tensorboard import SummaryWriter - # tb_writer = SummaryWriter('.') - # LOGGER.info("Run 'tensorboard --logdir=models' to view tensorboard at http://localhost:6006/") - # tb_writer.add_graph(torch.jit.trace(model, img, strict=False), []) # add model graph From 9c1e91aea2437f56f1729ad8f92ce7a7d54f1268 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 30 Mar 2022 12:53:49 +0200 Subject: [PATCH 0983/1976] Update tutorial.ipynb (#7212) --- tutorial.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 1479a164cd8e..0379fb1a3c57 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -1077,7 +1077,7 @@ }, "source": [ "# VOC\n", - "for b, m in zip([64, 64, 32, 16], ['yolov5s', 'yolov5m', 'yolov5l', 'yolov5x']): # zip(batch_size, model)\n", + "for b, m in zip([64, 64, 64, 32, 16], ['yolov5n', 'yolov5s', 'yolov5m', 'yolov5l', 'yolov5x']): # zip(batch_size, model)\n", " !python train.py --batch {b} --weights {m}.pt --data VOC.yaml --epochs 50 --cache --img 512 --nosave --hyp hyp.VOC.yaml --project VOC --name {m}" ], "execution_count": null, From c94736acece384ed2d5a7299ee82af2969abb48b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 30 Mar 2022 16:01:55 +0200 Subject: [PATCH 0984/1976] `ENV OMP_NUM_THREADS=8` (#7215) --- Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/Dockerfile b/Dockerfile index 304e8b2801a9..59aa99faa1d6 100644 --- a/Dockerfile +++ b/Dockerfile @@ -26,6 +26,7 @@ RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app ADD https://ultralytics.com/assets/Arial.ttf /root/.config/Ultralytics/ # Set environment variables +ENV OMP_NUM_THREADS=8 # ENV HOME=/usr/src/app From df9008ee69cac78524cc84500c7fc282a1a1d4bd Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 31 Mar 2022 13:17:22 +0200 Subject: [PATCH 0985/1976] Add train.py `--name cfg` option (#7202) Automatically names run as --cfg 
argument --- train.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/train.py b/train.py index 60be962d447f..36a0e7a7ba66 100644 --- a/train.py +++ b/train.py @@ -519,6 +519,8 @@ def main(opt, callbacks=Callbacks()): if opt.project == str(ROOT / 'runs/train'): # if default project name, rename to runs/evolve opt.project = str(ROOT / 'runs/evolve') opt.exist_ok, opt.resume = opt.resume, False # pass resume to exist_ok and disable resume + if opt.name == 'cfg': + opt.name = Path(opt.cfg).stem # use model.yaml as name opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # DDP mode From c3d5ac151eaedb61495e5866f13a9746d3706abc Mon Sep 17 00:00:00 2001 From: Jirka Borovec Date: Thu, 31 Mar 2022 23:52:34 +0900 Subject: [PATCH 0986/1976] precommit: yapf (#5494) * precommit: yapf * align isort * fix # Conflicts: # utils/plots.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update setup.cfg * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update setup.cfg * Update setup.cfg * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update wandb_utils.py * Update augmentations.py * Update setup.cfg * Update yolo.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update val.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * simplify colorstr * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * val run fix * export.py last comma * Update export.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update hubconf.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * PyTorch Hub tuple fix * PyTorch Hub tuple fix2 * PyTorch Hub tuple fix3 * Update setup Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- .pre-commit-config.yaml | 11 +-- detect.py | 5 +- export.py | 110 ++++++++++++--------- hubconf.py | 13 +-- models/common.py | 37 +++++--- models/experimental.py | 4 +- models/tf.py | 67 +++++++++---- models/yolo.py | 4 +- setup.cfg | 14 +++ train.py | 147 ++++++++++++++++------------- utils/activations.py | 2 - utils/augmentations.py | 15 ++- utils/benchmarks.py | 5 +- utils/callbacks.py | 7 +- utils/datasets.py | 112 ++++++++++++++-------- utils/downloads.py | 17 ++-- utils/general.py | 74 ++++++++------- utils/loggers/__init__.py | 21 ++++- utils/loggers/wandb/wandb_utils.py | 112 ++++++++++++---------- utils/loss.py | 14 ++- utils/metrics.py | 11 ++- utils/plots.py | 30 ++++-- utils/torch_utils.py | 1 - val.py | 25 +++-- 24 files changed, 527 insertions(+), 331 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 526a5609fdd7..0b4fedcd2d43 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -36,12 +36,11 @@ repos: - id: isort name: Sort imports - # TODO - #- repo: https://github.com/pre-commit/mirrors-yapf - # rev: v0.31.0 - # hooks: - # - id: yapf - # name: formatting + - repo: https://github.com/pre-commit/mirrors-yapf + rev: v0.31.0 + hooks: + - id: yapf + name: formatting # TODO #- repo: 
https://github.com/executablebooks/mdformat diff --git a/detect.py b/detect.py index 046f7ae57b5c..2875285ee314 100644 --- a/detect.py +++ b/detect.py @@ -47,7 +47,8 @@ @torch.no_grad() -def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s) +def run( + weights=ROOT / 'yolov5s.pt', # model.pt path(s) source=ROOT / 'data/images', # file/dir/URL/glob, 0 for webcam data=ROOT / 'data/coco128.yaml', # dataset.yaml path imgsz=(640, 640), # inference size (height, width) @@ -73,7 +74,7 @@ def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s) hide_conf=False, # hide confidences half=False, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference - ): +): source = str(source) save_img = not nosave and not source.endswith('.txt') # save inference images is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) diff --git a/export.py b/export.py index 7517dc4678da..78b886fa3a6b 100644 --- a/export.py +++ b/export.py @@ -76,16 +76,11 @@ def export_formats(): # YOLOv5 export formats - x = [['PyTorch', '-', '.pt', True], - ['TorchScript', 'torchscript', '.torchscript', True], - ['ONNX', 'onnx', '.onnx', True], - ['OpenVINO', 'openvino', '_openvino_model', False], - ['TensorRT', 'engine', '.engine', True], - ['CoreML', 'coreml', '.mlmodel', False], - ['TensorFlow SavedModel', 'saved_model', '_saved_model', True], - ['TensorFlow GraphDef', 'pb', '.pb', True], - ['TensorFlow Lite', 'tflite', '.tflite', False], - ['TensorFlow Edge TPU', 'edgetpu', '_edgetpu.tflite', False], + x = [['PyTorch', '-', '.pt', True], ['TorchScript', 'torchscript', '.torchscript', True], + ['ONNX', 'onnx', '.onnx', True], ['OpenVINO', 'openvino', '_openvino_model', False], + ['TensorRT', 'engine', '.engine', True], ['CoreML', 'coreml', '.mlmodel', False], + ['TensorFlow SavedModel', 'saved_model', '_saved_model', True], ['TensorFlow GraphDef', 'pb', '.pb', True], + ['TensorFlow Lite', 'tflite', '.tflite', False], ['TensorFlow Edge TPU', 'edgetpu', '_edgetpu.tflite', False], ['TensorFlow.js', 'tfjs', '_web_model', False]] return pd.DataFrame(x, columns=['Format', 'Argument', 'Suffix', 'GPU']) @@ -119,14 +114,25 @@ def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorst LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...') f = file.with_suffix('.onnx') - torch.onnx.export(model, im, f, verbose=False, opset_version=opset, - training=torch.onnx.TrainingMode.TRAINING if train else torch.onnx.TrainingMode.EVAL, - do_constant_folding=not train, - input_names=['images'], - output_names=['output'], - dynamic_axes={'images': {0: 'batch', 2: 'height', 3: 'width'}, # shape(1,3,640,640) - 'output': {0: 'batch', 1: 'anchors'} # shape(1,25200,85) - } if dynamic else None) + torch.onnx.export( + model, + im, + f, + verbose=False, + opset_version=opset, + training=torch.onnx.TrainingMode.TRAINING if train else torch.onnx.TrainingMode.EVAL, + do_constant_folding=not train, + input_names=['images'], + output_names=['output'], + dynamic_axes={ + 'images': { + 0: 'batch', + 2: 'height', + 3: 'width'}, # shape(1,3,640,640) + 'output': { + 0: 'batch', + 1: 'anchors'} # shape(1,25200,85) + } if dynamic else None) # Checks model_onnx = onnx.load(f) # load onnx model @@ -140,10 +146,9 @@ def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorst import onnxsim LOGGER.info(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...') - model_onnx, check = onnxsim.simplify( - model_onnx, - dynamic_input_shape=dynamic, - input_shapes={'images': 
list(im.shape)} if dynamic else None) + model_onnx, check = onnxsim.simplify(model_onnx, + dynamic_input_shape=dynamic, + input_shapes={'images': list(im.shape)} if dynamic else None) assert check, 'assert check failed' onnx.save(model_onnx, f) except Exception as e: @@ -246,9 +251,18 @@ def export_engine(model, im, file, train, half, simplify, workspace=4, verbose=F LOGGER.info(f'\n{prefix} export failure: {e}') -def export_saved_model(model, im, file, dynamic, - tf_nms=False, agnostic_nms=False, topk_per_class=100, topk_all=100, iou_thres=0.45, - conf_thres=0.25, keras=False, prefix=colorstr('TensorFlow SavedModel:')): +def export_saved_model(model, + im, + file, + dynamic, + tf_nms=False, + agnostic_nms=False, + topk_per_class=100, + topk_all=100, + iou_thres=0.45, + conf_thres=0.25, + keras=False, + prefix=colorstr('TensorFlow SavedModel:')): # YOLOv5 TensorFlow SavedModel export try: import tensorflow as tf @@ -278,11 +292,10 @@ def export_saved_model(model, im, file, dynamic, tfm = tf.Module() tfm.__call__ = tf.function(lambda x: frozen_func(x)[0], [spec]) tfm.__call__(im) - tf.saved_model.save( - tfm, - f, - options=tf.saved_model.SaveOptions(experimental_custom_gradients=False) if - check_version(tf.__version__, '2.6') else tf.saved_model.SaveOptions()) + tf.saved_model.save(tfm, + f, + options=tf.saved_model.SaveOptions(experimental_custom_gradients=False) + if check_version(tf.__version__, '2.6') else tf.saved_model.SaveOptions()) LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') return keras_model, f except Exception as e: @@ -352,10 +365,10 @@ def export_edgetpu(keras_model, im, file, prefix=colorstr('Edge TPU:')): if subprocess.run(cmd + ' >/dev/null', shell=True).returncode != 0: LOGGER.info(f'\n{prefix} export requires Edge TPU compiler. 
Attempting install from {help_url}') sudo = subprocess.run('sudo --version >/dev/null', shell=True).returncode == 0 # sudo installed on system - for c in ['curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -', - 'echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list', - 'sudo apt-get update', - 'sudo apt-get install edgetpu-compiler']: + for c in ( + 'curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -', + 'echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list', + 'sudo apt-get update', 'sudo apt-get install edgetpu-compiler'): subprocess.run(c if sudo else c.replace('sudo ', ''), shell=True, check=True) ver = subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1] @@ -395,12 +408,10 @@ def export_tfjs(keras_model, im, file, prefix=colorstr('TensorFlow.js:')): r'{"outputs": {"Identity.?.?": {"name": "Identity.?.?"}, ' r'"Identity.?.?": {"name": "Identity.?.?"}, ' r'"Identity.?.?": {"name": "Identity.?.?"}, ' - r'"Identity.?.?": {"name": "Identity.?.?"}}}', - r'{"outputs": {"Identity": {"name": "Identity"}, ' + r'"Identity.?.?": {"name": "Identity.?.?"}}}', r'{"outputs": {"Identity": {"name": "Identity"}, ' r'"Identity_1": {"name": "Identity_1"}, ' r'"Identity_2": {"name": "Identity_2"}, ' - r'"Identity_3": {"name": "Identity_3"}}}', - json) + r'"Identity_3": {"name": "Identity_3"}}}', json) j.write(subst) LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') @@ -410,7 +421,8 @@ def export_tfjs(keras_model, im, file, prefix=colorstr('TensorFlow.js:')): @torch.no_grad() -def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' +def run( + data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' weights=ROOT / 'yolov5s.pt', # weights path imgsz=(640, 640), # image (height, width) batch_size=1, # batch size @@ -431,8 +443,8 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' topk_per_class=100, # TF.js NMS: topk per class to keep topk_all=100, # TF.js NMS: topk for all classes to keep iou_thres=0.45, # TF.js NMS: IoU threshold - conf_thres=0.25 # TF.js NMS: confidence threshold - ): + conf_thres=0.25, # TF.js NMS: confidence threshold +): t = time.time() include = [x.lower() for x in include] # to lowercase formats = tuple(export_formats()['Argument'][1:]) # --include arguments @@ -495,9 +507,16 @@ def run(data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' if int8 or edgetpu: # TFLite --int8 bug https://github.com/ultralytics/yolov5/issues/5707 check_requirements(('flatbuffers==1.12',)) # required before `import tensorflow` assert not (tflite and tfjs), 'TFLite and TF.js models must be exported separately, please pass only one type.' 
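The hunk that follows reflows the `export_saved_model(...)` call into the one-keyword-argument-per-line style this commit applies repo-wide. A minimal sketch (not part of the patch, assuming `pip install yapf`) of how yapf produces that style programmatically; the style string only approximates the `[yapf]` section added to setup.cfg later in this commit:

```python
# Hedged sketch: re-wrap a long call the way this commit's yapf pass does.
# The style string approximates the [yapf] settings added to setup.cfg below.
from yapf.yapflib.yapf_api import FormatCode

SRC = ("model, f[5] = export_saved_model(model.cpu(), im, file, dynamic, tf_nms=nms or agnostic_nms or tfjs, "
       "agnostic_nms=agnostic_nms or tfjs, topk_per_class=topk_per_class, topk_all=topk_all, "
       "conf_thres=conf_thres, iou_thres=iou_thres)\n")

STYLE = '{based_on_style: pep8, column_limit: 120, split_before_first_argument: true}'
formatted, changed = FormatCode(SRC, style_config=STYLE)
print(formatted)  # the call is re-wrapped under the 120-character limit
```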
- model, f[5] = export_saved_model(model.cpu(), im, file, dynamic, tf_nms=nms or agnostic_nms or tfjs, - agnostic_nms=agnostic_nms or tfjs, topk_per_class=topk_per_class, - topk_all=topk_all, conf_thres=conf_thres, iou_thres=iou_thres) # keras model + model, f[5] = export_saved_model(model.cpu(), + im, + file, + dynamic, + tf_nms=nms or agnostic_nms or tfjs, + agnostic_nms=agnostic_nms or tfjs, + topk_per_class=topk_per_class, + topk_all=topk_all, + conf_thres=conf_thres, + iou_thres=iou_thres) # keras model if pb or tfjs: # pb prerequisite to tfjs f[6] = export_pb(model, im, file) if tflite or edgetpu: @@ -542,7 +561,8 @@ def parse_opt(): parser.add_argument('--topk-all', type=int, default=100, help='TF.js NMS: topk for all classes to keep') parser.add_argument('--iou-thres', type=float, default=0.45, help='TF.js NMS: IoU threshold') parser.add_argument('--conf-thres', type=float, default=0.25, help='TF.js NMS: confidence threshold') - parser.add_argument('--include', nargs='+', + parser.add_argument('--include', + nargs='+', default=['torchscript', 'onnx'], help='torchscript, onnx, openvino, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs') opt = parser.parse_args() diff --git a/hubconf.py b/hubconf.py index d719b80034af..86aa07b9466f 100644 --- a/hubconf.py +++ b/hubconf.py @@ -132,12 +132,13 @@ def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=Tr from utils.general import cv2 - imgs = ['data/images/zidane.jpg', # filename - Path('data/images/zidane.jpg'), # Path - 'https://ultralytics.com/images/zidane.jpg', # URI - cv2.imread('data/images/bus.jpg')[:, :, ::-1], # OpenCV - Image.open('data/images/bus.jpg'), # PIL - np.zeros((320, 640, 3))] # numpy + imgs = [ + 'data/images/zidane.jpg', # filename + Path('data/images/zidane.jpg'), # Path + 'https://ultralytics.com/images/zidane.jpg', # URI + cv2.imread('data/images/bus.jpg')[:, :, ::-1], # OpenCV + Image.open('data/images/bus.jpg'), # PIL + np.zeros((320, 640, 3))] # numpy results = model(imgs, size=320) # batched inference results.print() diff --git a/models/common.py b/models/common.py index 115e3c3145ff..8396caa1af5c 100644 --- a/models/common.py +++ b/models/common.py @@ -227,11 +227,12 @@ class GhostBottleneck(nn.Module): def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride super().__init__() c_ = c2 // 2 - self.conv = nn.Sequential(GhostConv(c1, c_, 1, 1), # pw - DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw - GhostConv(c_, c2, 1, 1, act=False)) # pw-linear - self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False), - Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity() + self.conv = nn.Sequential( + GhostConv(c1, c_, 1, 1), # pw + DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw + GhostConv(c_, c2, 1, 1, act=False)) # pw-linear + self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False), Conv(c1, c2, 1, 1, + act=False)) if s == 2 else nn.Identity() def forward(self, x): return self.conv(x) + self.shortcut(x) @@ -387,9 +388,10 @@ def wrap_frozen_graph(gd, inputs, outputs): Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate, if edgetpu: # Edge TPU https://coral.ai/software/#edgetpu-runtime LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...') - delegate = {'Linux': 'libedgetpu.so.1', - 'Darwin': 'libedgetpu.1.dylib', - 'Windows': 'edgetpu.dll'}[platform.system()] + delegate = { + 'Linux': 'libedgetpu.so.1', + 'Darwin': 'libedgetpu.1.dylib', + 'Windows': 
'edgetpu.dll'}[platform.system()] interpreter = Interpreter(model_path=w, experimental_delegates=[load_delegate(delegate)]) else: # Lite LOGGER.info(f'Loading {w} for TensorFlow Lite inference...') @@ -531,7 +533,7 @@ def forward(self, imgs, size=640, augment=False, profile=False): return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference # Pre-process - n, imgs = (len(imgs), imgs) if isinstance(imgs, list) else (1, [imgs]) # number of images, list of images + n, imgs = (len(imgs), list(imgs)) if isinstance(imgs, (list, tuple)) else (1, [imgs]) # number, list of images shape0, shape1, files = [], [], [] # image and inference shapes, filenames for i, im in enumerate(imgs): f = f'image{i}' # filename @@ -561,8 +563,13 @@ def forward(self, imgs, size=640, augment=False, profile=False): t.append(time_sync()) # Post-process - y = non_max_suppression(y if self.dmb else y[0], self.conf, self.iou, self.classes, self.agnostic, - self.multi_label, max_det=self.max_det) # NMS + y = non_max_suppression(y if self.dmb else y[0], + self.conf, + self.iou, + self.classes, + self.agnostic, + self.multi_label, + max_det=self.max_det) # NMS for i in range(n): scale_coords(shape1, y[i][:, :4], shape0[i]) @@ -603,8 +610,12 @@ def display(self, pprint=False, show=False, save=False, crop=False, render=False label = f'{self.names[int(cls)]} {conf:.2f}' if crop: file = save_dir / 'crops' / self.names[int(cls)] / self.files[i] if save else None - crops.append({'box': box, 'conf': conf, 'cls': cls, 'label': label, - 'im': save_one_box(box, im, file=file, save=save)}) + crops.append({ + 'box': box, + 'conf': conf, + 'cls': cls, + 'label': label, + 'im': save_one_box(box, im, file=file, save=save)}) else: # all others annotator.box_label(box, label if labels else '', color=colors(cls)) im = annotator.im diff --git a/models/experimental.py b/models/experimental.py index 1230f4656c8f..e166722cbfca 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -63,8 +63,8 @@ def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True): # ch_in, ch_out, kern a[0] = 1 c_ = np.linalg.lstsq(a, b, rcond=None)[0].round() # solve for equal weight indices, ax = b - self.m = nn.ModuleList( - [nn.Conv2d(c1, int(c_), k, s, k // 2, groups=math.gcd(c1, int(c_)), bias=False) for k, c_ in zip(k, c_)]) + self.m = nn.ModuleList([ + nn.Conv2d(c1, int(c_), k, s, k // 2, groups=math.gcd(c1, int(c_)), bias=False) for k, c_ in zip(k, c_)]) self.bn = nn.BatchNorm2d(c2) self.act = nn.SiLU() diff --git a/models/tf.py b/models/tf.py index 728907f8fb47..c6fb6b82a72e 100644 --- a/models/tf.py +++ b/models/tf.py @@ -69,7 +69,11 @@ def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None): # see https://stackoverflow.com/questions/52975843/comparing-conv2d-with-padding-between-tensorflow-and-pytorch conv = keras.layers.Conv2D( - c2, k, s, 'SAME' if s == 1 else 'VALID', use_bias=False if hasattr(w, 'bn') else True, + c2, + k, + s, + 'SAME' if s == 1 else 'VALID', + use_bias=False if hasattr(w, 'bn') else True, kernel_initializer=keras.initializers.Constant(w.conv.weight.permute(2, 3, 1, 0).numpy()), bias_initializer='zeros' if hasattr(w, 'bn') else keras.initializers.Constant(w.conv.bias.numpy())) self.conv = conv if s == 1 else keras.Sequential([TFPad(autopad(k, p)), conv]) @@ -98,10 +102,10 @@ def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None): def call(self, inputs): # x(b,w,h,c) -> y(b,w/2,h/2,4c) # inputs = inputs / 255 # normalize 0-255 to 0-1 - return self.conv(tf.concat([inputs[:, ::2, ::2, :], - 
inputs[:, 1::2, ::2, :], - inputs[:, ::2, 1::2, :], - inputs[:, 1::2, 1::2, :]], 3)) + return self.conv( + tf.concat( + [inputs[:, ::2, ::2, :], inputs[:, 1::2, ::2, :], inputs[:, ::2, 1::2, :], inputs[:, 1::2, 1::2, :]], + 3)) class TFBottleneck(keras.layers.Layer): @@ -123,9 +127,14 @@ def __init__(self, c1, c2, k, s=1, g=1, bias=True, w=None): super().__init__() assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument" self.conv = keras.layers.Conv2D( - c2, k, s, 'VALID', use_bias=bias, + c2, + k, + s, + 'VALID', + use_bias=bias, kernel_initializer=keras.initializers.Constant(w.weight.permute(2, 3, 1, 0).numpy()), - bias_initializer=keras.initializers.Constant(w.bias.numpy()) if bias else None, ) + bias_initializer=keras.initializers.Constant(w.bias.numpy()) if bias else None, + ) def call(self, inputs): return self.conv(inputs) @@ -206,8 +215,7 @@ def __init__(self, nc=80, anchors=(), ch=(), imgsz=(640, 640), w=None): # detec self.na = len(anchors[0]) // 2 # number of anchors self.grid = [tf.zeros(1)] * self.nl # init grid self.anchors = tf.convert_to_tensor(w.anchors.numpy(), dtype=tf.float32) - self.anchor_grid = tf.reshape(self.anchors * tf.reshape(self.stride, [self.nl, 1, 1]), - [self.nl, 1, -1, 1, 2]) + self.anchor_grid = tf.reshape(self.anchors * tf.reshape(self.stride, [self.nl, 1, 1]), [self.nl, 1, -1, 1, 2]) self.m = [TFConv2d(x, self.no * self.na, 1, w=w.m[i]) for i, x in enumerate(ch)] self.training = False # set to False after building model self.imgsz = imgsz @@ -339,7 +347,13 @@ def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, model=None, imgsz=(640, 64 self.yaml['nc'] = nc # override yaml value self.model, self.savelist = parse_model(deepcopy(self.yaml), ch=[ch], model=model, imgsz=imgsz) - def predict(self, inputs, tf_nms=False, agnostic_nms=False, topk_per_class=100, topk_all=100, iou_thres=0.45, + def predict(self, + inputs, + tf_nms=False, + agnostic_nms=False, + topk_per_class=100, + topk_all=100, + iou_thres=0.45, conf_thres=0.25): y = [] # outputs x = inputs @@ -361,8 +375,13 @@ def predict(self, inputs, tf_nms=False, agnostic_nms=False, topk_per_class=100, return nms, x[1] else: boxes = tf.expand_dims(boxes, 2) - nms = tf.image.combined_non_max_suppression( - boxes, scores, topk_per_class, topk_all, iou_thres, conf_thres, clip_boxes=False) + nms = tf.image.combined_non_max_suppression(boxes, + scores, + topk_per_class, + topk_all, + iou_thres, + conf_thres, + clip_boxes=False) return nms, x[1] return x[0] # output only first tensor [1,6300,85] = [xywh, conf, class0, class1, ...] 
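An aside on the shape comment above: the `[1, 6300, 85]` tensor packs, per candidate box, the xywh coordinates, an objectness confidence, and 80 class scores. A hedged sketch (illustrative only, using random values as a stand-in for real predictions) of how such a tensor is decoded:

```python
# Hedged sketch of decoding the [1, 6300, 85] layout noted above:
# columns are [x, y, w, h, objectness, 80 class scores] per candidate box.
import torch

pred = torch.rand(1, 6300, 85)           # stand-in for real model output
xywh = pred[0, :, :4]                    # box centers and sizes
conf = pred[0, :, 4:5] * pred[0, :, 5:]  # objectness * class scores
scores, classes = conf.max(1)            # best class and its score per box
keep = scores > 0.25                     # confidence threshold, as in NMS
print(xywh[keep].shape, classes[keep].shape)
```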
@@ -383,7 +402,8 @@ class AgnosticNMS(keras.layers.Layer): # TF Agnostic NMS def call(self, input, topk_all, iou_thres, conf_thres): # wrap map_fn to avoid TypeSpec related error https://stackoverflow.com/a/65809989/3036450 - return tf.map_fn(lambda x: self._nms(x, topk_all, iou_thres, conf_thres), input, + return tf.map_fn(lambda x: self._nms(x, topk_all, iou_thres, conf_thres), + input, fn_output_signature=(tf.float32, tf.float32, tf.float32, tf.int32), name='agnostic_nms') @@ -392,20 +412,26 @@ def _nms(x, topk_all=100, iou_thres=0.45, conf_thres=0.25): # agnostic NMS boxes, classes, scores = x class_inds = tf.cast(tf.argmax(classes, axis=-1), tf.float32) scores_inp = tf.reduce_max(scores, -1) - selected_inds = tf.image.non_max_suppression( - boxes, scores_inp, max_output_size=topk_all, iou_threshold=iou_thres, score_threshold=conf_thres) + selected_inds = tf.image.non_max_suppression(boxes, + scores_inp, + max_output_size=topk_all, + iou_threshold=iou_thres, + score_threshold=conf_thres) selected_boxes = tf.gather(boxes, selected_inds) padded_boxes = tf.pad(selected_boxes, paddings=[[0, topk_all - tf.shape(selected_boxes)[0]], [0, 0]], - mode="CONSTANT", constant_values=0.0) + mode="CONSTANT", + constant_values=0.0) selected_scores = tf.gather(scores_inp, selected_inds) padded_scores = tf.pad(selected_scores, paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]], - mode="CONSTANT", constant_values=-1.0) + mode="CONSTANT", + constant_values=-1.0) selected_classes = tf.gather(class_inds, selected_inds) padded_classes = tf.pad(selected_classes, paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]], - mode="CONSTANT", constant_values=-1.0) + mode="CONSTANT", + constant_values=-1.0) valid_detections = tf.shape(selected_inds)[0] return padded_boxes, padded_scores, padded_classes, valid_detections @@ -421,11 +447,12 @@ def representative_dataset_gen(dataset, ncalib=100): break -def run(weights=ROOT / 'yolov5s.pt', # weights path +def run( + weights=ROOT / 'yolov5s.pt', # weights path imgsz=(640, 640), # inference size h,w batch_size=1, # batch size dynamic=False, # dynamic batch size - ): +): # PyTorch model im = torch.zeros((batch_size, 3, *imgsz)) # BCHW image model = attempt_load(weights, map_location=torch.device('cpu'), inplace=True, fuse=False) diff --git a/models/yolo.py b/models/yolo.py index 81ab539deffa..4cdfea34d63e 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -260,8 +260,8 @@ def parse_model(d, ch): # model_dict, input_channels(3) pass n = n_ = max(round(n * gd), 1) if n > 1 else n # depth gain - if m in [Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv, MixConv2d, Focus, CrossConv, - BottleneckCSP, C3, C3TR, C3SPP, C3Ghost]: + if m in (Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv, MixConv2d, Focus, CrossConv, + BottleneckCSP, C3, C3TR, C3SPP, C3Ghost): c1, c2 = ch[f], args[0] if c2 != no: # if not output c2 = make_divisible(c2 * gw, 8) diff --git a/setup.cfg b/setup.cfg index 20ea49a8b4d6..c387d84a33e2 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,5 +1,6 @@ # Project-wide configuration file, can be used for package metadata and other toll configurations # Example usage: global configuration for PEP8 (via flake8) setting or default pytest arguments +# Local usage: pip install pre-commit, pre-commit run --all-files [metadata] license_file = LICENSE @@ -42,4 +43,17 @@ ignore = [isort] # https://pycqa.github.io/isort/docs/configuration/options.html line_length = 120 +# see: 
https://pycqa.github.io/isort/docs/configuration/multi_line_output_modes.html multi_line_output = 0 + + +[yapf] +based_on_style = pep8 +spaces_before_comment = 2 +COLUMN_LIMIT = 120 +COALESCE_BRACKETS = True +SPACES_AROUND_POWER_OPERATOR = True +SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET = False +SPLIT_BEFORE_CLOSING_BRACKET = False +SPLIT_BEFORE_FIRST_ARGUMENT = False +# EACH_DICT_ENTRY_ON_SEPARATE_LINE = False diff --git a/train.py b/train.py index 36a0e7a7ba66..fbaaeb8ef930 100644 --- a/train.py +++ b/train.py @@ -62,11 +62,7 @@ WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) -def train(hyp, # path/to/hyp.yaml or hyp dictionary - opt, - device, - callbacks - ): +def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictionary save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze = \ Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \ opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze @@ -220,20 +216,38 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary LOGGER.info('Using SyncBatchNorm()') # Trainloader - train_loader, dataset = create_dataloader(train_path, imgsz, batch_size // WORLD_SIZE, gs, single_cls, - hyp=hyp, augment=True, cache=None if opt.cache == 'val' else opt.cache, - rect=opt.rect, rank=LOCAL_RANK, workers=workers, - image_weights=opt.image_weights, quad=opt.quad, - prefix=colorstr('train: '), shuffle=True) + train_loader, dataset = create_dataloader(train_path, + imgsz, + batch_size // WORLD_SIZE, + gs, + single_cls, + hyp=hyp, + augment=True, + cache=None if opt.cache == 'val' else opt.cache, + rect=opt.rect, + rank=LOCAL_RANK, + workers=workers, + image_weights=opt.image_weights, + quad=opt.quad, + prefix=colorstr('train: '), + shuffle=True) mlc = int(np.concatenate(dataset.labels, 0)[:, 0].max()) # max label class nb = len(train_loader) # number of batches assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. 
Possible class labels are 0-{nc - 1}' # Process 0 if RANK in [-1, 0]: - val_loader = create_dataloader(val_path, imgsz, batch_size // WORLD_SIZE * 2, gs, single_cls, - hyp=hyp, cache=None if noval else opt.cache, - rect=True, rank=-1, workers=workers * 2, pad=0.5, + val_loader = create_dataloader(val_path, + imgsz, + batch_size // WORLD_SIZE * 2, + gs, + single_cls, + hyp=hyp, + cache=None if noval else opt.cache, + rect=True, + rank=-1, + workers=workers * 2, + pad=0.5, prefix=colorstr('val: '))[0] if not resume: @@ -350,8 +364,8 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary if RANK in [-1, 0]: mloss = (mloss * i + loss_items) / (i + 1) # update mean losses mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G' # (GB) - pbar.set_description(('%10s' * 2 + '%10.4g' * 5) % ( - f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1])) + pbar.set_description(('%10s' * 2 + '%10.4g' * 5) % + (f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1])) callbacks.run('on_train_batch_end', ni, model, imgs, targets, paths, plots, opt.sync_bn) if callbacks.stop_training: return @@ -387,14 +401,15 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary # Save model if (not nosave) or (final_epoch and not evolve): # if save - ckpt = {'epoch': epoch, - 'best_fitness': best_fitness, - 'model': deepcopy(de_parallel(model)).half(), - 'ema': deepcopy(ema.ema).half(), - 'updates': ema.updates, - 'optimizer': optimizer.state_dict(), - 'wandb_id': loggers.wandb.wandb_run.id if loggers.wandb else None, - 'date': datetime.now().isoformat()} + ckpt = { + 'epoch': epoch, + 'best_fitness': best_fitness, + 'model': deepcopy(de_parallel(model)).half(), + 'ema': deepcopy(ema.ema).half(), + 'updates': ema.updates, + 'optimizer': optimizer.state_dict(), + 'wandb_id': loggers.wandb.wandb_run.id if loggers.wandb else None, + 'date': datetime.now().isoformat()} # Save last, best and delete torch.save(ckpt, last) @@ -428,19 +443,20 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary strip_optimizer(f) # strip optimizers if f is best: LOGGER.info(f'\nValidating {f}...') - results, _, _ = val.run(data_dict, - batch_size=batch_size // WORLD_SIZE * 2, - imgsz=imgsz, - model=attempt_load(f, device).half(), - iou_thres=0.65 if is_coco else 0.60, # best pycocotools results at 0.65 - single_cls=single_cls, - dataloader=val_loader, - save_dir=save_dir, - save_json=is_coco, - verbose=True, - plots=True, - callbacks=callbacks, - compute_loss=compute_loss) # val best model with plots + results, _, _ = val.run( + data_dict, + batch_size=batch_size // WORLD_SIZE * 2, + imgsz=imgsz, + model=attempt_load(f, device).half(), + iou_thres=0.65 if is_coco else 0.60, # best pycocotools results at 0.65 + single_cls=single_cls, + dataloader=val_loader, + save_dir=save_dir, + save_json=is_coco, + verbose=True, + plots=True, + callbacks=callbacks, + compute_loss=compute_loss) # val best model with plots if is_coco: callbacks.run('on_fit_epoch_end', list(mloss) + list(results) + lr, epoch, best_fitness, fi) @@ -546,35 +562,36 @@ def main(opt, callbacks=Callbacks()): # Evolve hyperparameters (optional) else: # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit) - meta = {'lr0': (1, 1e-5, 1e-1), # initial learning rate (SGD=1E-2, Adam=1E-3) - 'lrf': (1, 0.01, 1.0), # final OneCycleLR learning rate (lr0 * lrf) - 'momentum': (0.3, 0.6, 0.98), # SGD momentum/Adam beta1 - 'weight_decay': (1, 0.0, 0.001), # optimizer weight decay - 'warmup_epochs': (1, 0.0, 
5.0), # warmup epochs (fractions ok) - 'warmup_momentum': (1, 0.0, 0.95), # warmup initial momentum - 'warmup_bias_lr': (1, 0.0, 0.2), # warmup initial bias lr - 'box': (1, 0.02, 0.2), # box loss gain - 'cls': (1, 0.2, 4.0), # cls loss gain - 'cls_pw': (1, 0.5, 2.0), # cls BCELoss positive_weight - 'obj': (1, 0.2, 4.0), # obj loss gain (scale with pixels) - 'obj_pw': (1, 0.5, 2.0), # obj BCELoss positive_weight - 'iou_t': (0, 0.1, 0.7), # IoU training threshold - 'anchor_t': (1, 2.0, 8.0), # anchor-multiple threshold - 'anchors': (2, 2.0, 10.0), # anchors per output grid (0 to ignore) - 'fl_gamma': (0, 0.0, 2.0), # focal loss gamma (efficientDet default gamma=1.5) - 'hsv_h': (1, 0.0, 0.1), # image HSV-Hue augmentation (fraction) - 'hsv_s': (1, 0.0, 0.9), # image HSV-Saturation augmentation (fraction) - 'hsv_v': (1, 0.0, 0.9), # image HSV-Value augmentation (fraction) - 'degrees': (1, 0.0, 45.0), # image rotation (+/- deg) - 'translate': (1, 0.0, 0.9), # image translation (+/- fraction) - 'scale': (1, 0.0, 0.9), # image scale (+/- gain) - 'shear': (1, 0.0, 10.0), # image shear (+/- deg) - 'perspective': (0, 0.0, 0.001), # image perspective (+/- fraction), range 0-0.001 - 'flipud': (1, 0.0, 1.0), # image flip up-down (probability) - 'fliplr': (0, 0.0, 1.0), # image flip left-right (probability) - 'mosaic': (1, 0.0, 1.0), # image mixup (probability) - 'mixup': (1, 0.0, 1.0), # image mixup (probability) - 'copy_paste': (1, 0.0, 1.0)} # segment copy-paste (probability) + meta = { + 'lr0': (1, 1e-5, 1e-1), # initial learning rate (SGD=1E-2, Adam=1E-3) + 'lrf': (1, 0.01, 1.0), # final OneCycleLR learning rate (lr0 * lrf) + 'momentum': (0.3, 0.6, 0.98), # SGD momentum/Adam beta1 + 'weight_decay': (1, 0.0, 0.001), # optimizer weight decay + 'warmup_epochs': (1, 0.0, 5.0), # warmup epochs (fractions ok) + 'warmup_momentum': (1, 0.0, 0.95), # warmup initial momentum + 'warmup_bias_lr': (1, 0.0, 0.2), # warmup initial bias lr + 'box': (1, 0.02, 0.2), # box loss gain + 'cls': (1, 0.2, 4.0), # cls loss gain + 'cls_pw': (1, 0.5, 2.0), # cls BCELoss positive_weight + 'obj': (1, 0.2, 4.0), # obj loss gain (scale with pixels) + 'obj_pw': (1, 0.5, 2.0), # obj BCELoss positive_weight + 'iou_t': (0, 0.1, 0.7), # IoU training threshold + 'anchor_t': (1, 2.0, 8.0), # anchor-multiple threshold + 'anchors': (2, 2.0, 10.0), # anchors per output grid (0 to ignore) + 'fl_gamma': (0, 0.0, 2.0), # focal loss gamma (efficientDet default gamma=1.5) + 'hsv_h': (1, 0.0, 0.1), # image HSV-Hue augmentation (fraction) + 'hsv_s': (1, 0.0, 0.9), # image HSV-Saturation augmentation (fraction) + 'hsv_v': (1, 0.0, 0.9), # image HSV-Value augmentation (fraction) + 'degrees': (1, 0.0, 45.0), # image rotation (+/- deg) + 'translate': (1, 0.0, 0.9), # image translation (+/- fraction) + 'scale': (1, 0.0, 0.9), # image scale (+/- gain) + 'shear': (1, 0.0, 10.0), # image shear (+/- deg) + 'perspective': (0, 0.0, 0.001), # image perspective (+/- fraction), range 0-0.001 + 'flipud': (1, 0.0, 1.0), # image flip up-down (probability) + 'fliplr': (0, 0.0, 1.0), # image flip left-right (probability) + 'mosaic': (1, 0.0, 1.0), # image mixup (probability) + 'mixup': (1, 0.0, 1.0), # image mixup (probability) + 'copy_paste': (1, 0.0, 1.0)} # segment copy-paste (probability) with open(opt.hyp, errors='ignore') as f: hyp = yaml.safe_load(f) # load hyps dict diff --git a/utils/activations.py b/utils/activations.py index a4ff789cf336..b104ac18b03b 100644 --- a/utils/activations.py +++ b/utils/activations.py @@ -64,7 +64,6 @@ class AconC(nn.Module): 
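The hunk below touches AconC, whose docstring gives the formula f(x) = (p1*x - p2*x) * sigmoid(beta*(p1*x - p2*x)) + p2*x. In that formula, beta acts as a learnable switch: large beta approaches max(p1*x, p2*x), while beta near 0 yields the mean of the two branches. A hedged numerical check of this, not part of the patch:

```python
# Hedged numerical check of the AconC formula quoted in the docstring below:
# f(x) = (p1*x - p2*x) * sigmoid(beta*(p1*x - p2*x)) + p2*x.
# Large beta -> max(p1*x, p2*x); beta -> 0 gives the mean of the branches.
import torch

def acon_c(x, p1, p2, beta):
    dpx = (p1 - p2) * x
    return dpx * torch.sigmoid(beta * dpx) + p2 * x

x = torch.linspace(-3, 3, 7)
print(acon_c(x, 1.0, 0.1, 1000.0))  # ~ torch.maximum(x, 0.1 * x), leaky-ReLU-like
print(acon_c(x, 1.0, 0.1, 1e-6))    # ~ 0.55 * x, the mean of the two branches
```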
AconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is a learnable parameter according to "Activate or Not: Learning Customized Activation" . """ - def __init__(self, c1): super().__init__() self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1)) @@ -81,7 +80,6 @@ class MetaAconC(nn.Module): MetaAconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is generated by a small network according to "Activate or Not: Learning Customized Activation" . """ - def __init__(self, c1, k=1, s=1, r=16): # ch_in, kernel, stride, r super().__init__() c2 = max(r, c1 // r) diff --git a/utils/augmentations.py b/utils/augmentations.py index 0311b97b63db..3f764c06ae3b 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -21,15 +21,15 @@ def __init__(self): import albumentations as A check_version(A.__version__, '1.0.3', hard=True) # version requirement - self.transform = A.Compose([ + T = [ A.Blur(p=0.01), A.MedianBlur(p=0.01), A.ToGray(p=0.01), A.CLAHE(p=0.01), A.RandomBrightnessContrast(p=0.0), A.RandomGamma(p=0.0), - A.ImageCompression(quality_lower=75, p=0.0)], - bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels'])) + A.ImageCompression(quality_lower=75, p=0.0)] # transforms + self.transform = A.Compose(T, bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels'])) LOGGER.info(colorstr('albumentations: ') + ', '.join(f'{x}' for x in self.transform.transforms if x.p)) except ImportError: # package not installed, skip @@ -121,7 +121,14 @@ def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleF return im, ratio, (dw, dh) -def random_perspective(im, targets=(), segments=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, +def random_perspective(im, + targets=(), + segments=(), + degrees=10, + translate=.1, + scale=.1, + shear=10, + perspective=0.0, border=(0, 0)): # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(0.1, 0.1), scale=(0.9, 1.1), shear=(-10, 10)) # targets = [cls, xyxy] diff --git a/utils/benchmarks.py b/utils/benchmarks.py index 446248c03f68..5bfa872cc3fb 100644 --- a/utils/benchmarks.py +++ b/utils/benchmarks.py @@ -45,13 +45,14 @@ from utils.torch_utils import select_device -def run(weights=ROOT / 'yolov5s.pt', # weights path +def run( + weights=ROOT / 'yolov5s.pt', # weights path imgsz=640, # inference size (pixels) batch_size=1, # batch size data=ROOT / 'data/coco128.yaml', # dataset.yaml path device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu half=False, # use FP16 half-precision inference - ): +): y, t = [], time.time() formats = export.export_formats() device = select_device(device) diff --git a/utils/callbacks.py b/utils/callbacks.py index c51c268f20d6..6323985b8098 100644 --- a/utils/callbacks.py +++ b/utils/callbacks.py @@ -8,13 +8,11 @@ class Callbacks: """" Handles all registered callbacks for YOLOv5 Hooks """ - def __init__(self): # Define the available callbacks self._callbacks = { 'on_pretrain_routine_start': [], 'on_pretrain_routine_end': [], - 'on_train_start': [], 'on_train_epoch_start': [], 'on_train_batch_start': [], @@ -22,19 +20,16 @@ def __init__(self): 'on_before_zero_grad': [], 'on_train_batch_end': [], 'on_train_epoch_end': [], - 'on_val_start': [], 'on_val_batch_start': [], 'on_val_image_end': [], 'on_val_batch_end': [], 'on_val_end': [], - 'on_fit_epoch_end': [], # fit = train + val 'on_model_save': [], 'on_train_end': [], 'on_params_update': [], - 'teardown': [], - } + 'teardown': [],} self.stop_training = False # set True to interrupt training def register_action(self, hook, name='', callback=None): diff --git a/utils/datasets.py b/utils/datasets.py index d0b35e808000..7e8b423c3174 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -77,14 +77,14 @@ def exif_transpose(image): exif = image.getexif() orientation = exif.get(0x0112, 1) # default 1 if orientation > 1: - method = {2: Image.FLIP_LEFT_RIGHT, - 3: Image.ROTATE_180, - 4: Image.FLIP_TOP_BOTTOM, - 5: Image.TRANSPOSE, - 6: Image.ROTATE_270, - 7: Image.TRANSVERSE, - 8: Image.ROTATE_90, - }.get(orientation) + method = { + 2: Image.FLIP_LEFT_RIGHT, + 3: Image.ROTATE_180, + 4: Image.FLIP_TOP_BOTTOM, + 5: Image.TRANSPOSE, + 6: Image.ROTATE_270, + 7: Image.TRANSVERSE, + 8: Image.ROTATE_90,}.get(orientation) if method is not None: image = image.transpose(method) del exif[0x0112] @@ -92,22 +92,39 @@ def exif_transpose(image): return image -def create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=None, augment=False, cache=False, pad=0.0, - rect=False, rank=-1, workers=8, image_weights=False, quad=False, prefix='', shuffle=False): +def create_dataloader(path, + imgsz, + batch_size, + stride, + single_cls=False, + hyp=None, + augment=False, + cache=False, + pad=0.0, + rect=False, + rank=-1, + workers=8, + image_weights=False, + quad=False, + prefix='', + shuffle=False): if rect and shuffle: LOGGER.warning('WARNING: --rect is incompatible with DataLoader shuffle, setting shuffle=False') shuffle = False with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP - dataset = LoadImagesAndLabels(path, imgsz, batch_size, - augment=augment, # augmentation - hyp=hyp, # hyperparameters - rect=rect, # rectangular batches - cache_images=cache, - single_cls=single_cls, - stride=int(stride), - pad=pad, - image_weights=image_weights, - prefix=prefix) + dataset = LoadImagesAndLabels( + path, + imgsz, + batch_size, + augment=augment, # augmentation + hyp=hyp, # hyperparameters + rect=rect, # rectangular batches + cache_images=cache, + single_cls=single_cls, + stride=int(stride), + pad=pad, + image_weights=image_weights, + prefix=prefix) batch_size = min(batch_size, len(dataset)) nd = torch.cuda.device_count() # number of CUDA devices @@ -128,7 +145,6 @@ class InfiniteDataLoader(dataloader.DataLoader): Uses same syntax as vanilla DataLoader """ - def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler)) @@ -148,7 +164,6 
@@ class _RepeatSampler: Args: sampler (Sampler) """ - def __init__(self, sampler): self.sampler = sampler @@ -380,8 +395,19 @@ class LoadImagesAndLabels(Dataset): # YOLOv5 train_loader/val_loader, loads images and labels for training and validation cache_version = 0.6 # dataset labels *.cache version - def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False, - cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''): + def __init__(self, + path, + img_size=640, + batch_size=16, + augment=False, + hyp=None, + rect=False, + image_weights=False, + cache_images=False, + single_cls=False, + stride=32, + pad=0.0, + prefix=''): self.img_size = img_size self.augment = augment self.hyp = hyp @@ -510,7 +536,9 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels..." with Pool(NUM_THREADS) as pool: pbar = tqdm(pool.imap(verify_image_label, zip(self.im_files, self.label_files, repeat(prefix))), - desc=desc, total=len(self.im_files), bar_format=BAR_FORMAT) + desc=desc, + total=len(self.im_files), + bar_format=BAR_FORMAT) for im_file, lb, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar: nm += nm_f nf += nf_f @@ -576,7 +604,8 @@ def __getitem__(self, index): labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1]) if self.augment: - img, labels = random_perspective(img, labels, + img, labels = random_perspective(img, + labels, degrees=hyp['degrees'], translate=hyp['translate'], scale=hyp['scale'], @@ -633,8 +662,7 @@ def load_image(self, i): h0, w0 = im.shape[:2] # orig hw r = self.img_size / max(h0, w0) # ratio if r != 1: # if sizes are not equal - im = cv2.resize(im, - (int(w0 * r), int(h0 * r)), + im = cv2.resize(im, (int(w0 * r), int(h0 * r)), interpolation=cv2.INTER_LINEAR if (self.augment or r > 1) else cv2.INTER_AREA) return im, (h0, w0), im.shape[:2] # im, hw_original, hw_resized else: @@ -692,7 +720,9 @@ def load_mosaic(self, index): # Augment img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp['copy_paste']) - img4, labels4 = random_perspective(img4, labels4, segments4, + img4, labels4 = random_perspective(img4, + labels4, + segments4, degrees=self.hyp['degrees'], translate=self.hyp['translate'], scale=self.hyp['scale'], @@ -766,7 +796,9 @@ def load_mosaic9(self, index): # img9, labels9 = replicate(img9, labels9) # replicate # Augment - img9, labels9 = random_perspective(img9, labels9, segments9, + img9, labels9 = random_perspective(img9, + labels9, + segments9, degrees=self.hyp['degrees'], translate=self.hyp['translate'], scale=self.hyp['scale'], @@ -795,8 +827,8 @@ def collate_fn4(batch): for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW i *= 4 if random.random() < 0.5: - im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2.0, mode='bilinear', align_corners=False)[ - 0].type(img[i].type()) + im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2.0, mode='bilinear', + align_corners=False)[0].type(img[i].type()) lb = label[i] else: im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2) @@ -946,7 +978,6 @@ def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False, profil autodownload: Attempt to download dataset if not found locally verbose: Print stats dictionary """ - def round_labels(labels): # Update labels to integer class and 6 decimal place floats return [[int(c), *(round(x, 4) for x in 
points)] for c, *points in labels] @@ -996,11 +1027,16 @@ def hub_ops(f, max_dim=1920): for label in tqdm(dataset.labels, total=dataset.n, desc='Statistics'): x.append(np.bincount(label[:, 0].astype(int), minlength=data['nc'])) x = np.array(x) # shape(128x80) - stats[split] = {'instance_stats': {'total': int(x.sum()), 'per_class': x.sum(0).tolist()}, - 'image_stats': {'total': dataset.n, 'unlabelled': int(np.all(x == 0, 1).sum()), - 'per_class': (x > 0).sum(0).tolist()}, - 'labels': [{str(Path(k).name): round_labels(v.tolist())} for k, v in - zip(dataset.im_files, dataset.labels)]} + stats[split] = { + 'instance_stats': { + 'total': int(x.sum()), + 'per_class': x.sum(0).tolist()}, + 'image_stats': { + 'total': dataset.n, + 'unlabelled': int(np.all(x == 0, 1).sum()), + 'per_class': (x > 0).sum(0).tolist()}, + 'labels': [{ + str(Path(k).name): round_labels(v.tolist())} for k, v in zip(dataset.im_files, dataset.labels)]} if hub: im_dir = hub_dir / 'images' diff --git a/utils/downloads.py b/utils/downloads.py index d7b87cb2cadd..4a012cc05849 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -63,19 +63,21 @@ def attempt_download(file, repo='ultralytics/yolov5'): # from utils.downloads i assets = [x['name'] for x in response['assets']] # release assets, i.e. ['yolov5s.pt', 'yolov5m.pt', ...] tag = response['tag_name'] # i.e. 'v1.0' except Exception: # fallback plan - assets = ['yolov5n.pt', 'yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt', - 'yolov5n6.pt', 'yolov5s6.pt', 'yolov5m6.pt', 'yolov5l6.pt', 'yolov5x6.pt'] + assets = [ + 'yolov5n.pt', 'yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt', 'yolov5n6.pt', 'yolov5s6.pt', + 'yolov5m6.pt', 'yolov5l6.pt', 'yolov5x6.pt'] try: tag = subprocess.check_output('git tag', shell=True, stderr=subprocess.STDOUT).decode().split()[-1] except Exception: tag = 'v6.0' # current release if name in assets: - safe_download(file, - url=f'https://github.com/{repo}/releases/download/{tag}/{name}', - # url2=f'https://storage.googleapis.com/{repo}/ckpt/{name}', # backup url (optional) - min_bytes=1E5, - error_msg=f'{file} missing, try downloading from https://github.com/{repo}/releases/') + safe_download( + file, + url=f'https://github.com/{repo}/releases/download/{tag}/{name}', + # url2=f'https://storage.googleapis.com/{repo}/ckpt/{name}', # backup url (optional) + min_bytes=1E5, + error_msg=f'{file} missing, try downloading from https://github.com/{repo}/releases/') return str(file) @@ -122,6 +124,7 @@ def get_token(cookie="./cookie"): return line.split()[-1] return "" + # Google utils: https://cloud.google.com/storage/docs/reference/libraries ---------------------------------------------- # # diff --git a/utils/general.py b/utils/general.py index 5905211cfa59..a64680bc06e5 100755 --- a/utils/general.py +++ b/utils/general.py @@ -536,25 +536,26 @@ def one_cycle(y1=0.0, y2=1.0, steps=100): def colorstr(*input): # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. 
colorstr('blue', 'hello world') *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string - colors = {'black': '\033[30m', # basic colors - 'red': '\033[31m', - 'green': '\033[32m', - 'yellow': '\033[33m', - 'blue': '\033[34m', - 'magenta': '\033[35m', - 'cyan': '\033[36m', - 'white': '\033[37m', - 'bright_black': '\033[90m', # bright colors - 'bright_red': '\033[91m', - 'bright_green': '\033[92m', - 'bright_yellow': '\033[93m', - 'bright_blue': '\033[94m', - 'bright_magenta': '\033[95m', - 'bright_cyan': '\033[96m', - 'bright_white': '\033[97m', - 'end': '\033[0m', # misc - 'bold': '\033[1m', - 'underline': '\033[4m'} + colors = { + 'black': '\033[30m', # basic colors + 'red': '\033[31m', + 'green': '\033[32m', + 'yellow': '\033[33m', + 'blue': '\033[34m', + 'magenta': '\033[35m', + 'cyan': '\033[36m', + 'white': '\033[37m', + 'bright_black': '\033[90m', # bright colors + 'bright_red': '\033[91m', + 'bright_green': '\033[92m', + 'bright_yellow': '\033[93m', + 'bright_blue': '\033[94m', + 'bright_magenta': '\033[95m', + 'bright_cyan': '\033[96m', + 'bright_white': '\033[97m', + 'end': '\033[0m', # misc + 'bold': '\033[1m', + 'underline': '\033[4m'} return ''.join(colors[x] for x in args) + f'{string}' + colors['end'] @@ -591,9 +592,10 @@ def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper) # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n') # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet - x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, - 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, - 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90] + x = [ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, + 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90] return x @@ -701,8 +703,14 @@ def clip_coords(boxes, shape): boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, shape[0]) # y1, y2 -def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False, - labels=(), max_det=300): +def non_max_suppression(prediction, + conf_thres=0.25, + iou_thres=0.45, + classes=None, + agnostic=False, + multi_label=False, + labels=(), + max_det=300): """Non-Maximum Suppression (NMS) on inference results to reject overlapping bounding boxes Returns: @@ -816,8 +824,8 @@ def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_op def print_mutation(results, hyp, save_dir, bucket, prefix=colorstr('evolve: ')): evolve_csv = save_dir / 'evolve.csv' evolve_yaml = save_dir / 'hyp_evolve.yaml' - keys = ('metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', - 'val/box_loss', 'val/obj_loss', 'val/cls_loss') + tuple(hyp.keys()) # [results + hyps] + keys = ('metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', 'val/box_loss', + 'val/obj_loss', 'val/cls_loss') + tuple(hyp.keys()) # [results + hyps] keys = tuple(x.strip() for x in keys) vals = results + tuple(hyp.values()) n = len(keys) @@ -839,17 +847,15 @@ def 
print_mutation(results, hyp, save_dir, bucket, prefix=colorstr('evolve: ')): data = data.rename(columns=lambda x: x.strip()) # strip keys i = np.argmax(fitness(data.values[:, :4])) # generations = len(data) - f.write('# YOLOv5 Hyperparameter Evolution Results\n' + - f'# Best generation: {i}\n' + - f'# Last generation: {generations - 1}\n' + - '# ' + ', '.join(f'{x.strip():>20s}' for x in keys[:7]) + '\n' + - '# ' + ', '.join(f'{x:>20.5g}' for x in data.values[i, :7]) + '\n\n') + f.write('# YOLOv5 Hyperparameter Evolution Results\n' + f'# Best generation: {i}\n' + + f'# Last generation: {generations - 1}\n' + '# ' + ', '.join(f'{x.strip():>20s}' for x in keys[:7]) + + '\n' + '# ' + ', '.join(f'{x:>20.5g}' for x in data.values[i, :7]) + '\n\n') yaml.safe_dump(data.loc[i][7:].to_dict(), f, sort_keys=False) # Print to screen - LOGGER.info(prefix + f'{generations} generations finished, current result:\n' + - prefix + ', '.join(f'{x.strip():>20s}' for x in keys) + '\n' + - prefix + ', '.join(f'{x:20.5g}' for x in vals) + '\n\n') + LOGGER.info(prefix + f'{generations} generations finished, current result:\n' + prefix + + ', '.join(f'{x.strip():>20s}' for x in keys) + '\n' + prefix + ', '.join(f'{x:20.5g}' + for x in vals) + '\n\n') if bucket: os.system(f'gsutil cp {evolve_csv} {evolve_yaml} gs://{bucket}') # upload diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index bb8523c0219e..2e639dfb9b53 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -43,10 +43,20 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, self.hyp = hyp self.logger = logger # for printing results to console self.include = include - self.keys = ['train/box_loss', 'train/obj_loss', 'train/cls_loss', # train loss - 'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', # metrics - 'val/box_loss', 'val/obj_loss', 'val/cls_loss', # val loss - 'x/lr0', 'x/lr1', 'x/lr2'] # params + self.keys = [ + 'train/box_loss', + 'train/obj_loss', + 'train/cls_loss', # train loss + 'metrics/precision', + 'metrics/recall', + 'metrics/mAP_0.5', + 'metrics/mAP_0.5:0.95', # metrics + 'val/box_loss', + 'val/obj_loss', + 'val/cls_loss', # val loss + 'x/lr0', + 'x/lr1', + 'x/lr2'] # params self.best_keys = ['best/epoch', 'best/precision', 'best/recall', 'best/mAP_0.5', 'best/mAP_0.5:0.95'] for k in LOGGERS: setattr(self, k, None) # init empty logger dictionary @@ -155,7 +165,8 @@ def on_train_end(self, last, best, plots, epoch, results): self.wandb.log({"Results": [wandb.Image(str(f), caption=f.name) for f in files]}) # Calling wandb.log. 
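On the evolve bookkeeping above: print_mutation() reads evolve.csv back with pandas, strips the space-padded column names, and picks the best generation by fitness. A compact sketch of that selection, assuming fitness() is importable from utils/metrics.py (it scores the first four metric columns):

import numpy as np
import pandas as pd
from utils.metrics import fitness  # assumed import; weights P, R, mAP@0.5, mAP@0.5:0.95

data = pd.read_csv('evolve.csv').rename(columns=lambda x: x.strip())  # CSV keys are space-padded
best = np.argmax(fitness(data.values[:, :4]))  # index of the best generation
print(data.loc[best])
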
TODO: Refactor this into WandbLogger.log_model if not self.opt.evolve: - wandb.log_artifact(str(best if best.exists() else last), type='model', + wandb.log_artifact(str(best if best.exists() else last), + type='model', name='run_' + self.wandb.wandb_run.id + '_model', aliases=['latest', 'best', 'stripped']) self.wandb.finish_run() diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index 786e58a19972..6ec2559e29ac 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -46,10 +46,10 @@ def check_wandb_dataset(data_file): if check_file(data_file) and data_file.endswith('.yaml'): with open(data_file, errors='ignore') as f: data_dict = yaml.safe_load(f) - is_trainset_wandb_artifact = (isinstance(data_dict['train'], str) and - data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX)) - is_valset_wandb_artifact = (isinstance(data_dict['val'], str) and - data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX)) + is_trainset_wandb_artifact = isinstance(data_dict['train'], + str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX) + is_valset_wandb_artifact = isinstance(data_dict['val'], + str) and data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX) if is_trainset_wandb_artifact or is_valset_wandb_artifact: return data_dict else: @@ -116,7 +116,6 @@ class WandbLogger(): For more on how this logger is used, see the Weights & Biases documentation: https://docs.wandb.com/guides/integrations/yolov5 """ - def __init__(self, opt, run_id=None, job_type='Training'): """ - Initialize WandbLogger instance @@ -181,8 +180,7 @@ def __init__(self, opt, run_id=None, job_type='Training'): self.wandb_artifact_data_dict = self.wandb_artifact_data_dict or self.data_dict # write data_dict to config. useful for resuming from artifacts. Do this only when not resuming. - self.wandb_run.config.update({'data_dict': self.wandb_artifact_data_dict}, - allow_val_change=True) + self.wandb_run.config.update({'data_dict': self.wandb_artifact_data_dict}, allow_val_change=True) self.setup_training(opt) if self.job_type == 'Dataset Creation': @@ -200,8 +198,7 @@ def check_and_upload_dataset(self, opt): Updated dataset info dictionary where local dataset paths are replaced by WAND_ARFACT_PREFIX links. 
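A note on the artifact checks above (the docstring's 'WAND_ARFACT_PREFIX' means the WANDB_ARTIFACT_PREFIX constant): a dataset path is treated as a W&B artifact only when it is a string beginning with that prefix, hence the isinstance() guard. In sketch form:

WANDB_ARTIFACT_PREFIX = 'wandb-artifact://'  # value assumed from wandb_utils.py

def is_wandb_artifact(path):
    # isinstance guard: data_dict entries may be lists of paths rather than strings
    return isinstance(path, str) and path.startswith(WANDB_ARTIFACT_PREFIX)

print(is_wandb_artifact('wandb-artifact://YOLOv5/train'))  # True
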
""" assert wandb, 'Install wandb to upload dataset' - config_path = self.log_dataset_artifact(opt.data, - opt.single_cls, + config_path = self.log_dataset_artifact(opt.data, opt.single_cls, 'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem) with open(config_path, errors='ignore') as f: wandb_data_dict = yaml.safe_load(f) @@ -230,10 +227,10 @@ def setup_training(self, opt): config.hyp, config.imgsz data_dict = self.data_dict if self.val_artifact is None: # If --upload_dataset is set, use the existing artifact, don't download - self.train_artifact_path, self.train_artifact = self.download_dataset_artifact(data_dict.get('train'), - opt.artifact_alias) - self.val_artifact_path, self.val_artifact = self.download_dataset_artifact(data_dict.get('val'), - opt.artifact_alias) + self.train_artifact_path, self.train_artifact = self.download_dataset_artifact( + data_dict.get('train'), opt.artifact_alias) + self.val_artifact_path, self.val_artifact = self.download_dataset_artifact( + data_dict.get('val'), opt.artifact_alias) if self.train_artifact_path is not None: train_path = Path(self.train_artifact_path) / 'data/images/' @@ -308,14 +305,15 @@ def log_model(self, path, opt, epoch, fitness_score, best_model=False): fitness_score (float) -- fitness score for current epoch best_model (boolean) -- Boolean representing if the current checkpoint is the best yet. """ - model_artifact = wandb.Artifact('run_' + wandb.run.id + '_model', type='model', metadata={ - 'original_url': str(path), - 'epochs_trained': epoch + 1, - 'save period': opt.save_period, - 'project': opt.project, - 'total_epochs': opt.epochs, - 'fitness_score': fitness_score - }) + model_artifact = wandb.Artifact('run_' + wandb.run.id + '_model', + type='model', + metadata={ + 'original_url': str(path), + 'epochs_trained': epoch + 1, + 'save period': opt.save_period, + 'project': opt.project, + 'total_epochs': opt.epochs, + 'fitness_score': fitness_score}) model_artifact.add_file(str(path / 'last.pt'), name='last.pt') wandb.log_artifact(model_artifact, aliases=['latest', 'last', 'epoch ' + str(self.current_epoch), 'best' if best_model else '']) @@ -344,13 +342,14 @@ def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config= # log train set if not log_val_only: - self.train_artifact = self.create_dataset_table(LoadImagesAndLabels( - data['train'], rect=True, batch_size=1), names, name='train') if data.get('train') else None + self.train_artifact = self.create_dataset_table(LoadImagesAndLabels(data['train'], rect=True, batch_size=1), + names, + name='train') if data.get('train') else None if data.get('train'): data['train'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'train') - self.val_artifact = self.create_dataset_table(LoadImagesAndLabels( - data['val'], rect=True, batch_size=1), names, name='val') if data.get('val') else None + self.val_artifact = self.create_dataset_table( + LoadImagesAndLabels(data['val'], rect=True, batch_size=1), names, name='val') if data.get('val') else None if data.get('val'): data['val'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'val') @@ -412,17 +411,21 @@ def create_dataset_table(self, dataset: LoadImagesAndLabels, class_to_id: Dict[i else: artifact.add_file(img_file, name='data/images/' + Path(img_file).name) label_file = Path(img2label_paths([img_file])[0]) - artifact.add_file(str(label_file), - name='data/labels/' + label_file.name) if label_file.exists() else None + artifact.add_file(str(label_file), name='data/labels/' + + label_file.name) if label_file.exists() 
else None table = wandb.Table(columns=["id", "train_image", "Classes", "name"]) class_set = wandb.Classes([{'id': id, 'name': name} for id, name in class_to_id.items()]) for si, (img, labels, paths, shapes) in enumerate(tqdm(dataset)): box_data, img_classes = [], {} for cls, *xywh in labels[:, 1:].tolist(): cls = int(cls) - box_data.append({"position": {"middle": [xywh[0], xywh[1]], "width": xywh[2], "height": xywh[3]}, - "class_id": cls, - "box_caption": "%s" % (class_to_id[cls])}) + box_data.append({ + "position": { + "middle": [xywh[0], xywh[1]], + "width": xywh[2], + "height": xywh[3]}, + "class_id": cls, + "box_caption": "%s" % (class_to_id[cls])}) img_classes[cls] = class_to_id[cls] boxes = {"ground_truth": {"box_data": box_data, "class_labels": class_to_id}} # inference-space table.add_data(si, wandb.Image(paths, classes=class_set, boxes=boxes), list(img_classes.values()), @@ -446,12 +449,17 @@ def log_training_progress(self, predn, path, names): for *xyxy, conf, cls in predn.tolist(): if conf >= 0.25: cls = int(cls) - box_data.append( - {"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, - "class_id": cls, - "box_caption": f"{names[cls]} {conf:.3f}", - "scores": {"class_score": conf}, - "domain": "pixel"}) + box_data.append({ + "position": { + "minX": xyxy[0], + "minY": xyxy[1], + "maxX": xyxy[2], + "maxY": xyxy[3]}, + "class_id": cls, + "box_caption": f"{names[cls]} {conf:.3f}", + "scores": { + "class_score": conf}, + "domain": "pixel"}) avg_conf_per_class[cls] += conf if cls in pred_class_count: @@ -464,12 +472,9 @@ def log_training_progress(self, predn, path, names): boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space id = self.val_table_path_map[Path(path).name] - self.result_table.add_data(self.current_epoch, - id, - self.val_table.data[id][1], + self.result_table.add_data(self.current_epoch, id, self.val_table.data[id][1], wandb.Image(self.val_table.data[id][1], boxes=boxes, classes=class_set), - *avg_conf_per_class - ) + *avg_conf_per_class) def val_one_image(self, pred, predn, path, names, im): """ @@ -485,11 +490,17 @@ def val_one_image(self, pred, predn, path, names, im): if len(self.bbox_media_panel_images) < self.max_imgs_to_log and self.current_epoch > 0: if self.current_epoch % self.bbox_interval == 0: - box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, - "class_id": int(cls), - "box_caption": f"{names[int(cls)]} {conf:.3f}", - "scores": {"class_score": conf}, - "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()] + box_data = [{ + "position": { + "minX": xyxy[0], + "minY": xyxy[1], + "maxX": xyxy[2], + "maxY": xyxy[3]}, + "class_id": int(cls), + "box_caption": f"{names[int(cls)]} {conf:.3f}", + "scores": { + "class_score": conf}, + "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()] boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space self.bbox_media_panel_images.append(wandb.Image(im, boxes=boxes, caption=path.name)) @@ -519,7 +530,8 @@ def end_epoch(self, best_result=False): wandb.log(self.log_dict) except BaseException as e: LOGGER.info( - f"An error occurred in wandb logger. The training will proceed without interruption. More info\n{e}") + f"An error occurred in wandb logger. The training will proceed without interruption. 
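For reference, the box_data entries assembled above follow the W&B bounding-box schema: a position dict (min/max corners here, or middle/width/height for ground truth), a class id, a caption, optional per-box scores, and a 'pixel' domain flag when coordinates are absolute. One prediction entry written out, with illustrative values:

box = {
    "position": {"minX": 10, "minY": 20, "maxX": 110, "maxY": 220},
    "class_id": 0,
    "box_caption": "person 0.912",
    "scores": {"class_score": 0.912},
    "domain": "pixel",  # absolute pixel coordinates, not 0-1 normalised
}
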
More info\n{e}" + ) self.wandb_run.finish() self.wandb_run = None @@ -527,8 +539,10 @@ def end_epoch(self, best_result=False): self.bbox_media_panel_images = [] if self.result_artifact: self.result_artifact.add(self.result_table, 'result') - wandb.log_artifact(self.result_artifact, aliases=['latest', 'last', 'epoch ' + str(self.current_epoch), - ('best' if best_result else '')]) + wandb.log_artifact(self.result_artifact, + aliases=[ + 'latest', 'last', 'epoch ' + str(self.current_epoch), + ('best' if best_result else '')]) wandb.log({"evaluation": self.result_table}) columns = ["epoch", "id", "ground truth", "prediction"] diff --git a/utils/loss.py b/utils/loss.py index bf9b592d4ad2..fa8095515477 100644 --- a/utils/loss.py +++ b/utils/loss.py @@ -183,10 +183,16 @@ def build_targets(self, p, targets): targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2) # append anchor indices g = 0.5 # bias - off = torch.tensor([[0, 0], - [1, 0], [0, 1], [-1, 0], [0, -1], # j,k,l,m - # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm - ], device=self.device).float() * g # offsets + off = torch.tensor( + [ + [0, 0], + [1, 0], + [0, 1], + [-1, 0], + [0, -1], # j,k,l,m + # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm + ], + device=self.device).float() * g # offsets for i in range(self.nl): anchors = self.anchors[i] diff --git a/utils/metrics.py b/utils/metrics.py index 857fa5d81f91..216956e90ecc 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -184,7 +184,14 @@ def plot(self, normalize=True, save_dir='', names=()): labels = (0 < nn < 99) and (nn == nc) # apply names to ticklabels with warnings.catch_warnings(): warnings.simplefilter('ignore') # suppress empty matrix RuntimeWarning: All-NaN slice encountered - sn.heatmap(array, annot=nc < 30, annot_kws={"size": 8}, cmap='Blues', fmt='.2f', square=True, vmin=0.0, + sn.heatmap(array, + annot=nc < 30, + annot_kws={ + "size": 8}, + cmap='Blues', + fmt='.2f', + square=True, + vmin=0.0, xticklabels=names + ['background FP'] if labels else "auto", yticklabels=names + ['background FN'] if labels else "auto").set_facecolor((1, 1, 1)) fig.axes[0].set_xlabel('True') @@ -253,7 +260,6 @@ def box_iou(box1, box2): iou (Tensor[N, M]): the NxM matrix containing the pairwise IoU values for every element in boxes1 and boxes2 """ - def box_area(box): # box = 4xn return (box[2] - box[0]) * (box[3] - box[1]) @@ -300,6 +306,7 @@ def wh_iou(wh1, wh2): # Plots ---------------------------------------------------------------------------------------------------------------- + def plot_pr_curve(px, py, ap, save_dir='pr_curve.png', names=()): # Precision-recall curve fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) diff --git a/utils/plots.py b/utils/plots.py index a30c0faf962a..51e9cfdf6e04 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -89,10 +89,11 @@ def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 2 if label: w, h = self.font.getsize(label) # text width, height outside = box[1] - h >= 0 # label fits outside box - self.draw.rectangle((box[0], - box[1] - h if outside else box[1], - box[0] + w + 1, - box[1] + 1 if outside else box[1] + h + 1), fill=color) + self.draw.rectangle( + (box[0], box[1] - h if outside else box[1], box[0] + w + 1, + box[1] + 1 if outside else box[1] + h + 1), + fill=color, + ) # self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls') # for PIL>8.0 self.draw.text((box[0], box[1] - h if outside else box[1]), label, fill=txt_color, font=self.font) else: # cv2 @@ 
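The offset table in build_targets() above drives YOLOv5's neighbouring-cell target assignment: besides the grid cell containing a target's centre, the closest horizontal and vertical neighbours (j,k,l,m, gated by the g = 0.5 bias) can also receive the target, roughly tripling the positive samples per object. The table standalone:

import torch

g = 0.5  # bias
off = torch.tensor([[0, 0],                            # the cell holding the centre
                    [1, 0], [0, 1], [-1, 0], [0, -1]]  # j,k,l,m neighbours
                   ).float() * g
print(off)  # five (x, y) cell offsets, each scaled by the 0.5 bias
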
-104,8 +105,13 @@ def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 2 outside = p1[1] - h - 3 >= 0 # label fits outside box p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3 cv2.rectangle(self.im, p1, p2, color, -1, cv2.LINE_AA) # filled - cv2.putText(self.im, label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2), 0, self.lw / 3, txt_color, - thickness=tf, lineType=cv2.LINE_AA) + cv2.putText(self.im, + label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2), + 0, + self.lw / 3, + txt_color, + thickness=tf, + lineType=cv2.LINE_AA) def rectangle(self, xy, fill=None, outline=None, width=1): # Add rectangle to image (PIL-only) @@ -307,11 +313,19 @@ def plot_val_study(file='', dir='', x=None): # from utils.plots import *; plot_ ax[i].set_title(s[i]) j = y[3].argmax() + 1 - ax2.plot(y[5, 1:j], y[3, 1:j] * 1E2, '.-', linewidth=2, markersize=8, + ax2.plot(y[5, 1:j], + y[3, 1:j] * 1E2, + '.-', + linewidth=2, + markersize=8, label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO')) ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5], - 'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet') + 'k.-', + linewidth=2, + markersize=8, + alpha=.25, + label='EfficientDet') ax2.grid(alpha=0.2) ax2.set_yticks(np.arange(20, 60, 5)) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 72f8a0fd1659..bc96ec75be7c 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -284,7 +284,6 @@ class ModelEMA: Keeps a moving average of everything in the model state_dict (parameters and buffers) For EMA details see https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage """ - def __init__(self, model, decay=0.9999, tau=2000, updates=0): # Create EMA self.ema = deepcopy(de_parallel(model)).eval() # FP32 EMA diff --git a/val.py b/val.py index 2dd2aec679f9..bc4abc248dc8 100644 --- a/val.py +++ b/val.py @@ -62,10 +62,11 @@ def save_one_json(predn, jdict, path, class_map): box = xyxy2xywh(predn[:, :4]) # xywh box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner for p, b in zip(predn.tolist(), box.tolist()): - jdict.append({'image_id': image_id, - 'category_id': class_map[int(p[5])], - 'bbox': [round(x, 3) for x in b], - 'score': round(p[4], 5)}) + jdict.append({ + 'image_id': image_id, + 'category_id': class_map[int(p[5])], + 'bbox': [round(x, 3) for x in b], + 'score': round(p[4], 5)}) def process_batch(detections, labels, iouv): @@ -93,7 +94,8 @@ def process_batch(detections, labels, iouv): @torch.no_grad() -def run(data, +def run( + data, weights=None, # model.pt path(s) batch_size=32, # batch size imgsz=640, # inference size (pixels) @@ -120,7 +122,7 @@ def run(data, plots=True, callbacks=Callbacks(), compute_loss=None, - ): +): # Initialize/load model and set device training = model is not None if training: # called by train.py @@ -164,8 +166,15 @@ def run(data, pad = 0.0 if task in ('speed', 'benchmark') else 0.5 rect = False if task == 'benchmark' else pt # square inference for benchmarks task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images - dataloader = create_dataloader(data[task], imgsz, batch_size, stride, single_cls, pad=pad, rect=rect, - workers=workers, prefix=colorstr(f'{task}: '))[0] + dataloader = create_dataloader(data[task], + imgsz, + batch_size, + stride, + single_cls, + pad=pad, + rect=rect, + workers=workers, + prefix=colorstr(f'{task}: '))[0] seen = 0 confusion_matrix = ConfusionMatrix(nc=nc) From 
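On save_one_json() above: COCO JSON expects [x_min, y_min, width, height], while xyxy2xywh() produces centre-based boxes, so the xy columns are shifted by half the box size before rounding. A worked check:

import torch

box = torch.tensor([[50., 60., 20., 10.]])  # (x_centre, y_centre, w, h)
box[:, :2] -= box[:, 2:] / 2                # shift centre to the top-left corner
print(box)  # tensor([[40., 55., 20., 10.]]), i.e. COCO-style [x_min, y_min, w, h]
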
2c3221844b604c7e3f26c1f26d0c5ed78f700fd5 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 31 Mar 2022 17:11:43 +0200 Subject: [PATCH 0987/1976] CLI `fire` prep updates (#7229) * CLI fire prep updates * revert unintentional TF export change --- detect.py | 2 +- export.py | 2 +- models/tf.py | 2 +- models/yolo.py | 2 +- train.py | 2 +- utils/benchmarks.py | 2 +- utils/general.py | 15 ++++++++++++--- val.py | 2 +- 8 files changed, 19 insertions(+), 10 deletions(-) diff --git a/detect.py b/detect.py index 2875285ee314..14ff9a6ab421 100644 --- a/detect.py +++ b/detect.py @@ -238,7 +238,7 @@ def parse_opt(): parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') opt = parser.parse_args() opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand - print_args(FILE.stem, opt) + print_args(vars(opt)) return opt diff --git a/export.py b/export.py index 78b886fa3a6b..e146dad42980 100644 --- a/export.py +++ b/export.py @@ -566,7 +566,7 @@ def parse_opt(): default=['torchscript', 'onnx'], help='torchscript, onnx, openvino, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs') opt = parser.parse_args() - print_args(FILE.stem, opt) + print_args(vars(opt)) return opt diff --git a/models/tf.py b/models/tf.py index c6fb6b82a72e..1b7653bce8f6 100644 --- a/models/tf.py +++ b/models/tf.py @@ -480,7 +480,7 @@ def parse_opt(): parser.add_argument('--dynamic', action='store_true', help='dynamic batch size') opt = parser.parse_args() opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand - print_args(FILE.stem, opt) + print_args(vars(opt)) return opt diff --git a/models/yolo.py b/models/yolo.py index 4cdfea34d63e..e18614cb37bd 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -308,7 +308,7 @@ def parse_model(d, ch): # model_dict, input_channels(3) parser.add_argument('--test', action='store_true', help='test all yolo*.yaml') opt = parser.parse_args() opt.cfg = check_yaml(opt.cfg) # check YAML - print_args(FILE.stem, opt) + print_args(vars(opt)) device = select_device(opt.device) # Create model diff --git a/train.py b/train.py index fbaaeb8ef930..38c25c053e26 100644 --- a/train.py +++ b/train.py @@ -515,7 +515,7 @@ def parse_opt(known=False): def main(opt, callbacks=Callbacks()): # Checks if RANK in [-1, 0]: - print_args(FILE.stem, opt) + print_args(vars(opt)) check_git_status() check_requirements(exclude=['thop']) diff --git a/utils/benchmarks.py b/utils/benchmarks.py index 5bfa872cc3fb..69243725b48a 100644 --- a/utils/benchmarks.py +++ b/utils/benchmarks.py @@ -92,7 +92,7 @@ def parse_opt(): parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') opt = parser.parse_args() - print_args(FILE.stem, opt) + print_args(vars(opt)) return opt diff --git a/utils/general.py b/utils/general.py index a64680bc06e5..9622a32c5c70 100755 --- a/utils/general.py +++ b/utils/general.py @@ -5,6 +5,7 @@ import contextlib import glob +import inspect import logging import math import os @@ -20,6 +21,7 @@ from multiprocessing.pool import ThreadPool from pathlib import Path from subprocess import check_output +from typing import Optional from zipfile import ZipFile import cv2 @@ -163,9 +165,15 @@ def methods(instance): return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith("__")] -def print_args(name, opt): - # Print argparser arguments - LOGGER.info(colorstr(f'{name}: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items())) +def print_args(args: Optional[dict] = None, show_file=True, show_fcn=False): + # Print function arguments (optional args dict) + x = inspect.currentframe().f_back # previous frame + file, _, fcn, _, _ = inspect.getframeinfo(x) + if args is None: # get args automatically + args, _, _, frm = inspect.getargvalues(x) + args = {k: v for k, v in frm.items() if k in args} + s = (f'{Path(file).stem}: ' if show_file else '') + (f'{fcn}: ' if show_fcn else '') + LOGGER.info(colorstr(s) + ', '.join(f'{k}={v}' for k, v in args.items())) def init_seeds(seed=0): @@ -346,6 +354,7 @@ def check_img_size(imgsz, s=32, floor=0): if isinstance(imgsz, int): # integer i.e. img_size=640 new_size = max(make_divisible(imgsz, int(s)), floor) else: # list i.e. img_size=[640, 480] + imgsz = list(imgsz) # convert to list if tuple new_size = [max(make_divisible(x, int(s)), floor) for x in imgsz] if new_size != imgsz: LOGGER.warning(f'WARNING: --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}') diff --git a/val.py b/val.py index bc4abc248dc8..019beedea61a 100644 --- a/val.py +++ b/val.py @@ -350,7 +350,7 @@ def parse_opt(): opt.data = check_yaml(opt.data) # check YAML opt.save_json |= opt.data.endswith('coco.yaml') opt.save_txt |= opt.save_hybrid - print_args(FILE.stem, opt) + print_args(vars(opt)) return opt From 4d157f578a7bbff08d1e17a4e6e47aece4d91207 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 31 Mar 2022 17:26:34 +0200 Subject: [PATCH 0988/1976] Update .pre-commit-config.yaml (#7230) --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 0b4fedcd2d43..208cb072c8aa 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -40,7 +40,7 @@ repos: rev: v0.31.0 hooks: - id: yapf - name: formatting + name: YAPF formatting # TODO #- repo: https://github.com/executablebooks/mdformat From 734ab033fdd7542bde14cab6c040415eb51dc9ac Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 1 Apr 2022 00:07:23 +0200 Subject: [PATCH 0989/1976] SavedModel TF Serve Fix (#7228) * SavedModel TF Serve Fix Fix for https://github.com/ultralytics/yolov5/issues/7205 proposed by @tylertroy * Update export.py --- export.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/export.py b/export.py index e146dad42980..cc7a74db9af2 100644 --- a/export.py +++ b/export.py @@ -285,12 +285,12 @@ def export_saved_model(model, if keras: keras_model.save(f, save_format='tf') else: - m = tf.function(lambda x: keras_model(x)) # full model spec = tf.TensorSpec(keras_model.inputs[0].shape, 
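The rewritten print_args() above uses frame introspection: called with no arguments, it climbs to the caller's frame and harvests that function's own arguments, which is why every parse_opt() in this patch now passes vars(opt) (a plain dict) instead of FILE.stem plus a Namespace. A self-contained sketch of the mechanism:

import inspect

def print_args(args=None):
    x = inspect.currentframe().f_back               # the caller's frame
    if args is None:                                # harvest the caller's arguments
        names, _, _, frm = inspect.getargvalues(x)
        args = {k: v for k, v in frm.items() if k in names}
    print(', '.join(f'{k}={v}' for k, v in args.items()))

def train(epochs=3, imgsz=640):
    print_args()  # prints: epochs=3, imgsz=640

train()
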
keras_model.inputs[0].dtype) + m = tf.function(lambda x: keras_model(x)) # full model m = m.get_concrete_function(spec) frozen_func = convert_variables_to_constants_v2(m) tfm = tf.Module() - tfm.__call__ = tf.function(lambda x: frozen_func(x)[0], [spec]) + tfm.__call__ = tf.function(lambda x: frozen_func(x)[:4] if tf_nms else frozen_func(x)[0], [spec]) tfm.__call__(im) tf.saved_model.save(tfm, f, From 71621df87589faea19ba4c4098bb68e73201f30c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 1 Apr 2022 00:24:37 +0200 Subject: [PATCH 0990/1976] Create CODE_OF_CONDUCT.md (#7233) --- .github/CODE_OF_CONDUCT.md | 128 +++++++++++++++++++++++++++++++++++++ 1 file changed, 128 insertions(+) create mode 100644 .github/CODE_OF_CONDUCT.md diff --git a/.github/CODE_OF_CONDUCT.md b/.github/CODE_OF_CONDUCT.md new file mode 100644 index 000000000000..ef10b05fc88e --- /dev/null +++ b/.github/CODE_OF_CONDUCT.md @@ -0,0 +1,128 @@ +# YOLOv5 🚀 Contributor Covenant Code of Conduct + +## Our Pledge + +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, religion, or sexual identity +and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. + +## Our Standards + +Examples of behavior that contributes to a positive environment for our +community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the + overall community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or + advances of any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email + address, without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. + +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official e-mail address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. 
+ +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at +hello@ultralytics.com. +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series +of actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or +permanent ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. + +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. Permanent Ban + +**Community Impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within +the community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.0, available at +https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. + +Community Impact Guidelines were inspired by [Mozilla's code of conduct +enforcement ladder](https://github.com/mozilla/diversity). + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see the FAQ at +https://www.contributor-covenant.org/faq. Translations are available at +https://www.contributor-covenant.org/translations. From 37675e110f3d2635dbc3acc8794e782c452e4ad5 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 1 Apr 2022 21:38:49 +0200 Subject: [PATCH 0991/1976] Fix `www.youtube.com` hostname (#7242) * Fix `www.youtube.com` hostname * Update datasets.py --- utils/datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/datasets.py b/utils/datasets.py index 7e8b423c3174..b2d4fa54ae0d 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -316,7 +316,7 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True): for i, s in enumerate(sources): # index, source # Start thread to read frames from video stream st = f'{i + 1}/{n}: {s}... 
' - if urlparse(s).hostname in ('youtube.com', 'youtu.be'): # if source is YouTube video + if urlparse(s).hostname in ('www.youtube.com', 'youtube.com', 'youtu.be'): # if source is YouTube video check_requirements(('pafy', 'youtube_dl==2020.12.2')) import pafy s = pafy.new(s).getbest(preftype="mp4").url # YouTube URL From a19406b39dbc45db0bbae8d0b7da9d6281f9af1e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 2 Apr 2022 15:05:00 +0200 Subject: [PATCH 0992/1976] Update minimum Python>=3.7.0 (#7247) --- utils/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index 9622a32c5c70..379e9e0f47a4 100755 --- a/utils/general.py +++ b/utils/general.py @@ -295,7 +295,7 @@ def check_git_status(): LOGGER.info(emojis(s)) # emoji-safe -def check_python(minimum='3.6.2'): +def check_python(minimum='3.7.0'): # Check current python version vs. required python version check_version(platform.python_version(), minimum, name='Python ', hard=True) From 6f4eb95af72589c0f751111978631db8d38da7f0 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 3 Apr 2022 00:18:18 +0200 Subject: [PATCH 0993/1976] Update setup.cfg to `description_file` field (#7248) Resolve `UserWarning: Usage of dash-separated 'description-file' will not be supported in future versions. Please use the underscore name 'description_file' instead` --- setup.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index c387d84a33e2..020a75740e97 100644 --- a/setup.cfg +++ b/setup.cfg @@ -4,7 +4,7 @@ [metadata] license_file = LICENSE -description-file = README.md +description_file = README.md [tool:pytest] From 3d3483cf0c085977d66684c0e2439ea31f38ab60 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 3 Apr 2022 12:14:12 +0200 Subject: [PATCH 0994/1976] Update tutorial.ipynb (#7254) --- tutorial.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 0379fb1a3c57..1a6d41526140 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -1078,7 +1078,7 @@ "source": [ "# VOC\n", "for b, m in zip([64, 64, 64, 32, 16], ['yolov5n', 'yolov5s', 'yolov5m', 'yolov5l', 'yolov5x']): # zip(batch_size, model)\n", - " !python train.py --batch {b} --weights {m}.pt --data VOC.yaml --epochs 50 --cache --img 512 --nosave --hyp hyp.VOC.yaml --project VOC --name {m}" + " !python train.py --batch {b} --weights {m}.pt --data VOC.yaml --epochs 50 --img 512 --hyp hyp.VOC.yaml --project VOC --name {m} --cache" ], "execution_count": null, "outputs": [] From 035b5548e47541767565a1934054bf47404757df Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 3 Apr 2022 12:18:24 +0200 Subject: [PATCH 0995/1976] Update tutorial.ipynb (#7255) --- tutorial.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 1a6d41526140..d5a10dfd5952 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -1077,7 +1077,7 @@ }, "source": [ "# VOC\n", - "for b, m in zip([64, 64, 64, 32, 16], ['yolov5n', 'yolov5s', 'yolov5m', 'yolov5l', 'yolov5x']): # zip(batch_size, model)\n", + "for b, m in zip([64, 64, 64, 32, 16], ['yolov5n', 'yolov5s', 'yolov5m', 'yolov5l', 'yolov5x']): # batch, model\n", " !python train.py --batch {b} --weights {m}.pt --data VOC.yaml --epochs 50 --img 512 --hyp hyp.VOC.yaml --project VOC --name {m} --cache" ], "execution_count": null, From dda669a12c4df7b282a1378e251f8314e6179bcb Mon Sep 17 00:00:00 2001 From: Zengyf-CVer <41098760+Zengyf-CVer@users.noreply.github.com> Date: 
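Why the hostname fix above was needed: urlparse().hostname returns the host exactly as written in the URL, so 'www.youtube.com' never matched the old ('youtube.com', 'youtu.be') tuple. A quick check:

from urllib.parse import urlparse

for s in ('https://youtube.com/watch?v=x',
          'https://www.youtube.com/watch?v=x',
          'https://youtu.be/x'):
    print(urlparse(s).hostname)
# youtube.com
# www.youtube.com   <- missed before this patch
# youtu.be
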
Sun, 3 Apr 2022 19:19:26 +0800 Subject: [PATCH 0996/1976] Fix Flask REST API (#7210) * Update restapi.py * Update restapi.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Cleanup * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/flask_rest_api/restapi.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/utils/flask_rest_api/restapi.py b/utils/flask_rest_api/restapi.py index b93ad16a0f58..38868cc98d84 100644 --- a/utils/flask_rest_api/restapi.py +++ b/utils/flask_rest_api/restapi.py @@ -1,5 +1,5 @@ """ -Run a rest API exposing the yolov5s object detection model +Run a Flask REST API exposing a YOLOv5s model """ import argparse import io @@ -31,7 +31,10 @@ def predict(): if __name__ == "__main__": parser = argparse.ArgumentParser(description="Flask API exposing YOLOv5 model") parser.add_argument("--port", default=5000, type=int, help="port number") - args = parser.parse_args() + opt = parser.parse_args() + + # Fix known issue urllib.error.HTTPError 403: rate limit exceeded https://github.com/ultralytics/yolov5/pull/7210 + torch.hub._validate_not_a_forked_repo = lambda a, b, c: True model = torch.hub.load("ultralytics/yolov5", "yolov5s", force_reload=True) # force_reload to recache - app.run(host="0.0.0.0", port=args.port) # debug=True causes Restarting with stat + app.run(host="0.0.0.0", port=opt.port) # debug=True causes Restarting with stat From ffcbd8ca97f037a83c5e0bc30a691e745b1c3cc9 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 3 Apr 2022 18:45:05 +0200 Subject: [PATCH 0997/1976] Export with official `nn.SiLU()` (#7256) * Update * Update time_limit --- export.py | 11 ++++------- utils/general.py | 2 +- 2 files changed, 5 insertions(+), 8 deletions(-) diff --git a/export.py b/export.py index cc7a74db9af2..e73715ea13e9 100644 --- a/export.py +++ b/export.py @@ -54,7 +54,6 @@ import pandas as pd import torch -import torch.nn as nn from torch.utils.mobile_optimizer import optimize_for_mobile FILE = Path(__file__).resolve() @@ -64,10 +63,8 @@ if platform.system() != 'Windows': ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative -from models.common import Conv from models.experimental import attempt_load from models.yolo import Detect -from utils.activations import SiLU from utils.datasets import LoadImages from utils.general import (LOGGER, check_dataset, check_img_size, check_requirements, check_version, colorstr, file_size, print_args, url2file) @@ -474,10 +471,10 @@ def run( im, model = im.half(), model.half() # to FP16 model.train() if train else model.eval() # training mode = no Detect() layer grid construction for k, m in model.named_modules(): - if isinstance(m, Conv): # assign export-friendly activations - if isinstance(m.act, nn.SiLU): - m.act = SiLU() - elif isinstance(m, Detect): + # if isinstance(m, Conv): # assign export-friendly activations + # if isinstance(m.act, nn.SiLU): + # m.act = SiLU() + if isinstance(m, Detect): m.inplace = inplace m.onnx_dynamic = dynamic if hasattr(m, 'forward_export'): diff --git a/utils/general.py b/utils/general.py index 379e9e0f47a4..da7dbb6d3e55 100755 --- a/utils/general.py +++ b/utils/general.py @@ -738,7 +738,7 @@ def non_max_suppression(prediction, # min_wh = 2 # (pixels) minimum box width and height max_wh = 7680 # (pixels) maximum box width and height max_nms = 
30000 # maximum number of boxes into torchvision.ops.nms() - time_limit = 0.030 * bs # seconds to quit after + time_limit = 0.1 + 0.03 * bs # seconds to quit after redundant = True # require redundant detections multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img) merge = False # use merge-NMS From 4f839b7970555f100c4380fa7a6e0e089a93ac2a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 3 Apr 2022 19:26:23 +0200 Subject: [PATCH 0998/1976] Refactor out-of-place `Detect()` for reduced ops (#7257) --- models/yolo.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/models/yolo.py b/models/yolo.py index e18614cb37bd..f255a812b11a 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -66,9 +66,9 @@ def forward(self, x): y[..., 0:2] = (y[..., 0:2] * 2 - 0.5 + self.grid[i]) * self.stride[i] # xy y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh else: # for YOLOv5 on AWS Inferentia https://github.com/ultralytics/yolov5/pull/2953 - xy = (y[..., 0:2] * 2 - 0.5 + self.grid[i]) * self.stride[i] # xy + xy = (y[..., 0:2] * 2 + (self.grid[i] - 0.5)) * self.stride[i] # xy wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh - y = torch.cat((xy, wh, y[..., 4:]), -1) + y = torch.cat((xy, wh, y[..., 4:]), 4) z.append(y.view(bs, -1, self.no)) return x if self.training else (torch.cat(z, 1), x) From ad0e4d5d199dc2da92d2058b57b0970fe2924bca Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 3 Apr 2022 20:05:50 +0200 Subject: [PATCH 0999/1976] `torch.split()` replace slicing on out-of-place inference (#7258) --- models/yolo.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/models/yolo.py b/models/yolo.py index f255a812b11a..3dd5fe9dcd25 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -66,9 +66,10 @@ def forward(self, x): y[..., 0:2] = (y[..., 0:2] * 2 - 0.5 + self.grid[i]) * self.stride[i] # xy y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh else: # for YOLOv5 on AWS Inferentia https://github.com/ultralytics/yolov5/pull/2953 - xy = (y[..., 0:2] * 2 + (self.grid[i] - 0.5)) * self.stride[i] # xy - wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh - y = torch.cat((xy, wh, y[..., 4:]), 4) + xy, wh, conf = y.split((2, 2, self.nc + 1), 4) # y.tensor_split((2, 4, 5), 4) # torch 1.8.0 + xy = (xy * 2 + (self.grid[i] - 0.5)) * self.stride[i] # xy + wh = (wh * 2) ** 2 * self.anchor_grid[i] # wh + y = torch.cat((xy, wh, conf), 4) z.append(y.view(bs, -1, self.no)) return x if self.training else (torch.cat(z, 1), x) From 779efbb9ca26b9ed4177a59936ec1d0dfdc9365e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 3 Apr 2022 21:21:55 +0200 Subject: [PATCH 1000/1976] Update --- utils/benchmarks.py | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/benchmarks.py b/utils/benchmarks.py index 69243725b48a..36e827848584 100644 --- a/utils/benchmarks.py +++ b/utils/benchmarks.py @@ -58,6 +58,7 @@ def run( device = select_device(device) for i, (name, f, suffix, gpu) in formats.iterrows(): # index, (name, file, suffix, gpu-capable) try: + assert i < 9, 'Edge TPU and TF.js not supported' if device.type != 'cpu': assert gpu, f'{name} inference not supported on GPU' if f == '-': From 05cf0d1a44430230e75339ff7cfdd26bdf554502 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 3 Apr 2022 21:29:20 +0200 Subject: [PATCH 1001/1976] Export single output only (#7259) * Update * Update * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] 
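On the torch.split() decode above: splitting the prediction tensor along its last dimension yields the xy, wh and confidence blocks in a single out-of-place op, which keeps exported graphs (the AWS Inferentia path referenced in the comment) free of in-place slice assignment. A shape check:

import torch

nc = 80                                    # COCO class count
y = torch.rand(1, 3, 20, 20, nc + 5)       # one Detect layer output after sigmoid
xy, wh, conf = y.split((2, 2, nc + 1), 4)  # dim 4: 2 + 2 + 81 = 85 channels
print(xy.shape, wh.shape, conf.shape)
# torch.Size([1, 3, 20, 20, 2]) torch.Size([1, 3, 20, 20, 2]) torch.Size([1, 3, 20, 20, 81])
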
<66853113+pre-commit-ci[bot]@users.noreply.github.com> --- export.py | 1 + models/yolo.py | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/export.py b/export.py index e73715ea13e9..574bf8d9ed61 100644 --- a/export.py +++ b/export.py @@ -477,6 +477,7 @@ def run( if isinstance(m, Detect): m.inplace = inplace m.onnx_dynamic = dynamic + m.export = True if hasattr(m, 'forward_export'): m.forward = m.forward_export # assign custom forward (optional) diff --git a/models/yolo.py b/models/yolo.py index 3dd5fe9dcd25..fee5e932fd4d 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -37,6 +37,7 @@ class Detect(nn.Module): stride = None # strides computed during build onnx_dynamic = False # ONNX export parameter + export = False # export mode def __init__(self, nc=80, anchors=(), ch=(), inplace=True): # detection layer super().__init__() @@ -72,7 +73,7 @@ def forward(self, x): y = torch.cat((xy, wh, conf), 4) z.append(y.view(bs, -1, self.no)) - return x if self.training else (torch.cat(z, 1), x) + return x if self.training else (torch.cat(z, 1),) if self.export else (torch.cat(z, 1), x) def _make_grid(self, nx=20, ny=20, i=0): d = self.anchors[i].device From 8bc839ed8e423c7baeb778e60e4d6f67eb0d5f3d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 3 Apr 2022 22:51:11 +0200 Subject: [PATCH 1002/1976] TorchScript single-output fix (#7261) --- export.py | 18 ++++++++++++------ models/common.py | 7 ++++--- 2 files changed, 16 insertions(+), 9 deletions(-) diff --git a/export.py b/export.py index 574bf8d9ed61..87be00376778 100644 --- a/export.py +++ b/export.py @@ -73,12 +73,18 @@ def export_formats(): # YOLOv5 export formats - x = [['PyTorch', '-', '.pt', True], ['TorchScript', 'torchscript', '.torchscript', True], - ['ONNX', 'onnx', '.onnx', True], ['OpenVINO', 'openvino', '_openvino_model', False], - ['TensorRT', 'engine', '.engine', True], ['CoreML', 'coreml', '.mlmodel', False], - ['TensorFlow SavedModel', 'saved_model', '_saved_model', True], ['TensorFlow GraphDef', 'pb', '.pb', True], - ['TensorFlow Lite', 'tflite', '.tflite', False], ['TensorFlow Edge TPU', 'edgetpu', '_edgetpu.tflite', False], - ['TensorFlow.js', 'tfjs', '_web_model', False]] + x = [ + ['PyTorch', '-', '.pt', True], + ['TorchScript', 'torchscript', '.torchscript', True], + ['ONNX', 'onnx', '.onnx', True], + ['OpenVINO', 'openvino', '_openvino_model', False], + ['TensorRT', 'engine', '.engine', True], + ['CoreML', 'coreml', '.mlmodel', False], + ['TensorFlow SavedModel', 'saved_model', '_saved_model', True], + ['TensorFlow GraphDef', 'pb', '.pb', True], + ['TensorFlow Lite', 'tflite', '.tflite', False], + ['TensorFlow Edge TPU', 'edgetpu', '_edgetpu.tflite', False], + ['TensorFlow.js', 'tfjs', '_web_model', False],] return pd.DataFrame(x, columns=['Format', 'Argument', 'Suffix', 'GPU']) diff --git a/models/common.py b/models/common.py index 8396caa1af5c..dcd3e5f408dd 100644 --- a/models/common.py +++ b/models/common.py @@ -406,9 +406,10 @@ def wrap_frozen_graph(gd, inputs, outputs): def forward(self, im, augment=False, visualize=False, val=False): # YOLOv5 MultiBackend inference b, ch, h, w = im.shape # batch, channel, height, width - if self.pt or self.jit: # PyTorch - y = self.model(im) if self.jit else self.model(im, augment=augment, visualize=visualize) - return y if val else y[0] + if self.pt: # PyTorch + y = self.model(im, augment=augment, visualize=visualize)[0] + elif self.jit: # TorchScript + y = self.model(im)[0] elif self.dnn: # ONNX OpenCV DNN im = im.cpu().numpy() # torch to numpy 
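# Note on the backend split above: with Detect.export = True the model now
# returns a 1-tuple, so the PyTorch and TorchScript branches can both index
# [0] uniformly,
#   y = self.model(im, augment=augment, visualize=visualize)[0]  # PyTorch
#   y = self.model(im)[0]                                        # TorchScript
# replacing the old 'return y if val else y[0]' special case.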
self.net.setInput(im) From ea72b84f5e690cb516642ce2d9ae200145b0af34 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 3 Apr 2022 23:40:23 +0200 Subject: [PATCH 1003/1976] Integrate offset into grid (#7262) Eliminate 1 op during training and inference. --- models/yolo.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/models/yolo.py b/models/yolo.py index fee5e932fd4d..d6f5c0961e0d 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -64,11 +64,11 @@ def forward(self, x): y = x[i].sigmoid() if self.inplace: - y[..., 0:2] = (y[..., 0:2] * 2 - 0.5 + self.grid[i]) * self.stride[i] # xy + y[..., 0:2] = (y[..., 0:2] * 2 + self.grid[i]) * self.stride[i] # xy y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh else: # for YOLOv5 on AWS Inferentia https://github.com/ultralytics/yolov5/pull/2953 xy, wh, conf = y.split((2, 2, self.nc + 1), 4) # y.tensor_split((2, 4, 5), 4) # torch 1.8.0 - xy = (xy * 2 + (self.grid[i] - 0.5)) * self.stride[i] # xy + xy = (xy * 2 + self.grid[i]) * self.stride[i] # xy wh = (wh * 2) ** 2 * self.anchor_grid[i] # wh y = torch.cat((xy, wh, conf), 4) z.append(y.view(bs, -1, self.no)) @@ -82,7 +82,7 @@ def _make_grid(self, nx=20, ny=20, i=0): yv, xv = torch.meshgrid(torch.arange(ny, device=d), torch.arange(nx, device=d), indexing='ij') else: yv, xv = torch.meshgrid(torch.arange(ny, device=d), torch.arange(nx, device=d)) - grid = torch.stack((xv, yv), 2).expand(shape).float() + grid = torch.stack((xv, yv), 2).expand(shape).float() - 0.5 # add grid offset, i.e. y = 2.0 * x - 0.5 anchor_grid = (self.anchors[i] * self.stride[i]).view((1, self.na, 1, 1, 2)).expand(shape).float() return grid, anchor_grid From 7882950577116eff9085b96abd8036522f2de7ca Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 4 Apr 2022 22:47:00 +0200 Subject: [PATCH 1004/1976] [pre-commit.ci] pre-commit suggestions (#7279) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit suggestions updates: - [github.com/asottile/pyupgrade: v2.31.0 → v2.31.1](https://github.com/asottile/pyupgrade/compare/v2.31.0...v2.31.1) - [github.com/pre-commit/mirrors-yapf: v0.31.0 → v0.32.0](https://github.com/pre-commit/mirrors-yapf/compare/v0.31.0...v0.32.0) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update yolo.py * Update activations.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update activations.py * Update tf.py * Update tf.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- .pre-commit-config.yaml | 4 ++-- models/tf.py | 5 +++++ models/yolo.py | 1 + utils/activations.py | 20 ++++++++++++-------- utils/callbacks.py | 1 + utils/datasets.py | 3 +++ utils/loggers/wandb/wandb_utils.py | 1 + utils/metrics.py | 1 + utils/torch_utils.py | 1 + 9 files changed, 27 insertions(+), 10 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 208cb072c8aa..ae61892b68b2 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -24,7 +24,7 @@ repos: - id: check-docstring-first - repo: https://github.com/asottile/pyupgrade - rev: v2.31.0 + rev: v2.31.1 hooks: - id: pyupgrade args: [--py36-plus] @@ -37,7 +37,7 @@ repos: name: Sort imports - 
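The grid change above folds the constant -0.5 offset into the cached grid built by _make_grid(), so the per-forward decode saves one subtraction while staying numerically identical:

import torch

x = torch.rand(4, 2)              # sigmoid outputs
grid = torch.zeros(4, 2)          # integer cell coordinates
old = x * 2 - 0.5 + grid          # previous: offset applied on every forward
new = x * 2 + (grid - 0.5)        # now: offset baked into the grid once
print(torch.allclose(old, new))   # True
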
repo: https://github.com/pre-commit/mirrors-yapf - rev: v0.31.0 + rev: v0.32.0 hooks: - id: yapf name: YAPF formatting diff --git a/models/tf.py b/models/tf.py index 1b7653bce8f6..a15569e3b465 100644 --- a/models/tf.py +++ b/models/tf.py @@ -50,6 +50,7 @@ def call(self, inputs): class TFPad(keras.layers.Layer): + def __init__(self, pad): super().__init__() self.pad = tf.constant([[0, 0], [pad, pad], [pad, pad], [0, 0]]) @@ -206,6 +207,7 @@ def call(self, inputs): class TFDetect(keras.layers.Layer): + # TF YOLOv5 Detect layer def __init__(self, nc=80, anchors=(), ch=(), imgsz=(640, 640), w=None): # detection layer super().__init__() self.stride = tf.convert_to_tensor(w.stride.numpy(), dtype=tf.float32) @@ -255,6 +257,7 @@ def _make_grid(nx=20, ny=20): class TFUpsample(keras.layers.Layer): + # TF version of torch.nn.Upsample() def __init__(self, size, scale_factor, mode, w=None): # warning: all arguments needed including 'w' super().__init__() assert scale_factor == 2, "scale_factor must be 2" @@ -269,6 +272,7 @@ def call(self, inputs): class TFConcat(keras.layers.Layer): + # TF version of torch.concat() def __init__(self, dimension=1, w=None): super().__init__() assert dimension == 1, "convert only NCHW to NHWC concat" @@ -331,6 +335,7 @@ def parse_model(d, ch, model, imgsz): # model_dict, input_channels(3) class TFModel: + # TF YOLOv5 model def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, model=None, imgsz=(640, 640)): # model, channels, classes super().__init__() if isinstance(cfg, dict): diff --git a/models/yolo.py b/models/yolo.py index d6f5c0961e0d..85c5a96997f2 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -88,6 +88,7 @@ def _make_grid(self, nx=20, ny=20, i=0): class Model(nn.Module): + # YOLOv5 model def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, input channels, number of classes super().__init__() if isinstance(cfg, dict): diff --git a/utils/activations.py b/utils/activations.py index b104ac18b03b..084ce8c41230 100644 --- a/utils/activations.py +++ b/utils/activations.py @@ -8,29 +8,32 @@ import torch.nn.functional as F -# SiLU https://arxiv.org/pdf/1606.08415.pdf ---------------------------------------------------------------------------- -class SiLU(nn.Module): # export-friendly version of nn.SiLU() +class SiLU(nn.Module): + # SiLU activation https://arxiv.org/pdf/1606.08415.pdf @staticmethod def forward(x): return x * torch.sigmoid(x) -class Hardswish(nn.Module): # export-friendly version of nn.Hardswish() +class Hardswish(nn.Module): + # Hard-SiLU activation @staticmethod def forward(x): # return x * F.hardsigmoid(x) # for TorchScript and CoreML return x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0 # for TorchScript, CoreML and ONNX -# Mish https://github.com/digantamisra98/Mish -------------------------------------------------------------------------- class Mish(nn.Module): + # Mish activation https://github.com/digantamisra98/Mish @staticmethod def forward(x): return x * F.softplus(x).tanh() class MemoryEfficientMish(nn.Module): + # Mish activation memory-efficient class F(torch.autograd.Function): + @staticmethod def forward(ctx, x): ctx.save_for_backward(x) @@ -47,8 +50,8 @@ def forward(self, x): return self.F.apply(x) -# FReLU https://arxiv.org/abs/2007.11824 ------------------------------------------------------------------------------- class FReLU(nn.Module): + # FReLU activation https://arxiv.org/abs/2007.11824 def __init__(self, c1, k=3): # ch_in, kernel super().__init__() self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1, bias=False) @@ 
-58,12 +61,12 @@ def forward(self, x): return torch.max(x, self.bn(self.conv(x))) -# ACON https://arxiv.org/pdf/2009.04759.pdf ---------------------------------------------------------------------------- class AconC(nn.Module): - r""" ACON activation (activate or not). + r""" ACON activation (activate or not) AconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is a learnable parameter according to "Activate or Not: Learning Customized Activation" . """ + def __init__(self, c1): super().__init__() self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1)) @@ -76,10 +79,11 @@ def forward(self, x): class MetaAconC(nn.Module): - r""" ACON activation (activate or not). + r""" ACON activation (activate or not) MetaAconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is generated by a small network according to "Activate or Not: Learning Customized Activation" . """ + def __init__(self, c1, k=1, s=1, r=16): # ch_in, kernel, stride, r super().__init__() c2 = max(r, c1 // r) diff --git a/utils/callbacks.py b/utils/callbacks.py index 6323985b8098..c6b3be1cbd69 100644 --- a/utils/callbacks.py +++ b/utils/callbacks.py @@ -8,6 +8,7 @@ class Callbacks: """" Handles all registered callbacks for YOLOv5 Hooks """ + def __init__(self): # Define the available callbacks self._callbacks = { diff --git a/utils/datasets.py b/utils/datasets.py index b2d4fa54ae0d..c12d3d9b9649 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -145,6 +145,7 @@ class InfiniteDataLoader(dataloader.DataLoader): Uses same syntax as vanilla DataLoader """ + def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler)) @@ -164,6 +165,7 @@ class _RepeatSampler: Args: sampler (Sampler) """ + def __init__(self, sampler): self.sampler = sampler @@ -978,6 +980,7 @@ def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False, profil autodownload: Attempt to download dataset if not found locally verbose: Print stats dictionary """ + def round_labels(labels): # Update labels to integer class and 6 decimal place floats return [[int(c), *(round(x, 4) for x in points)] for c, *points in labels] diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index 6ec2559e29ac..08b568d074a2 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -116,6 +116,7 @@ class WandbLogger(): For more on how this logger is used, see the Weights & Biases documentation: https://docs.wandb.com/guides/integrations/yolov5 """ + def __init__(self, opt, run_id=None, job_type='Training'): """ - Initialize WandbLogger instance diff --git a/utils/metrics.py b/utils/metrics.py index 216956e90ecc..0674beddc0fb 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -260,6 +260,7 @@ def box_iou(box1, box2): iou (Tensor[N, M]): the NxM matrix containing the pairwise IoU values for every element in boxes1 and boxes2 """ + def box_area(box): # box = 4xn return (box[2] - box[0]) * (box[3] - box[1]) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index bc96ec75be7c..72f8a0fd1659 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -284,6 +284,7 @@ class ModelEMA: Keeps a moving average of everything in the model state_dict (parameters and buffers) For EMA details see https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage """ + def __init__(self, model, decay=0.9999, tau=2000, updates=0): # Create EMA self.ema = deepcopy(de_parallel(model)).eval() # FP32 EMA From 
2da68664b51b847ff73d007e1eba6364ec452764 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 5 Apr 2022 00:52:37 +0200 Subject: [PATCH 1005/1976] Update Dockerfile (#7282) --- Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 59aa99faa1d6..7df6c1854156 100644 --- a/Dockerfile +++ b/Dockerfile @@ -19,8 +19,8 @@ RUN mkdir -p /usr/src/app WORKDIR /usr/src/app # Copy contents -RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app -# COPY . /usr/src/app +COPY . /usr/src/app +RUN git clone https://github.com/ultralytics/yolov5 /usr/src/yolov5 # Downloads to user config dir ADD https://ultralytics.com/assets/Arial.ttf /root/.config/Ultralytics/ From 8d0291f3af881c315d8a6c1d39d1af2b1ff45359 Mon Sep 17 00:00:00 2001 From: leeflix <41200990+leeflix@users.noreply.github.com> Date: Tue, 5 Apr 2022 11:33:08 +0200 Subject: [PATCH 1006/1976] Enable TensorFlow ops for `--nms` and `--agnostic-nms` (#7281) * enable TensorFlow ops if flag --nms or --agnostic-nms is used * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update export.py * Update export.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- export.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/export.py b/export.py index 87be00376778..c0b98ce40fd5 100644 --- a/export.py +++ b/export.py @@ -327,7 +327,7 @@ def export_pb(keras_model, im, file, prefix=colorstr('TensorFlow GraphDef:')): LOGGER.info(f'\n{prefix} export failure: {e}') -def export_tflite(keras_model, im, file, int8, data, ncalib, prefix=colorstr('TensorFlow Lite:')): +def export_tflite(keras_model, im, file, int8, data, nms, agnostic_nms, prefix=colorstr('TensorFlow Lite:')): # YOLOv5 TensorFlow Lite export try: import tensorflow as tf @@ -343,13 +343,15 @@ def export_tflite(keras_model, im, file, int8, data, ncalib, prefix=colorstr('Te if int8: from models.tf import representative_dataset_gen dataset = LoadImages(check_dataset(data)['train'], img_size=imgsz, auto=False) # representative data - converter.representative_dataset = lambda: representative_dataset_gen(dataset, ncalib) + converter.representative_dataset = lambda: representative_dataset_gen(dataset, ncalib=100) converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] converter.target_spec.supported_types = [] converter.inference_input_type = tf.uint8 # or tf.int8 converter.inference_output_type = tf.uint8 # or tf.int8 converter.experimental_new_quantizer = True f = str(file).replace('.pt', '-int8.tflite') + if nms or agnostic_nms: + converter.target_spec.supported_ops.append(tf.lite.OpsSet.SELECT_TF_OPS) tflite_model = converter.convert() open(f, "wb").write(tflite_model) @@ -524,7 +526,7 @@ def run( if pb or tfjs: # pb prerequisite to tfjs f[6] = export_pb(model, im, file) if tflite or edgetpu: - f[7] = export_tflite(model, im, file, int8=int8 or edgetpu, data=data, ncalib=100) + f[7] = export_tflite(model, im, file, int8=int8 or edgetpu, data=data, nms=nms, agnostic_nms=agnostic_nms) if edgetpu: f[8] = export_edgetpu(model, im, file) if tfjs: From 2181ef371e5493eb3cddcfa50b59804cbabce73d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 5 Apr 2022 11:49:32 +0200 Subject: [PATCH 1007/1976] Update `cv2.imread()` patch with flags argument (#7287) --- utils/general.py | 4 
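On the --nms/--agnostic-nms export change above: TFLite's builtin kernel set does not cover the TensorFlow NMS ops, so the converter must be allowed to fall back to Select TF ops. The relevant converter settings as a sketch, with keras_model standing in for the exported model:

import tensorflow as tf

converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)
converter.target_spec.supported_ops = [
    tf.lite.OpsSet.TFLITE_BUILTINS,  # standard TFLite kernels
    tf.lite.OpsSet.SELECT_TF_OPS,    # TF fallback kernels, required for NMS
]
tflite_model = converter.convert()
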
++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/general.py b/utils/general.py index da7dbb6d3e55..65dd9326797e 100755 --- a/utils/general.py +++ b/utils/general.py @@ -925,8 +925,8 @@ def increment_path(path, exist_ok=False, sep='', mkdir=False): imshow_ = cv2.imshow # copy to avoid recursion errors -def imread(path): - return cv2.imdecode(np.fromfile(path, np.uint8), cv2.IMREAD_COLOR) +def imread(path, flags=cv2.IMREAD_COLOR): + return cv2.imdecode(np.fromfile(path, np.uint8), flags) def imwrite(path, im): From 5f97001ed4e5deb5c92eb200a79b5cb9da861130 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 5 Apr 2022 12:54:25 +0200 Subject: [PATCH 1008/1976] Context manager `open(file) as f` fixes (#7289) * Flask context manager `open()` fix * Additional read context manager fixes --- data/VOC.yaml | 3 ++- export.py | 3 ++- models/common.py | 3 ++- utils/flask_rest_api/example_request.py | 12 +++++++++--- utils/flask_rest_api/restapi.py | 2 ++ 5 files changed, 17 insertions(+), 6 deletions(-) diff --git a/data/VOC.yaml b/data/VOC.yaml index be04fb1e2ecb..9865967dd028 100644 --- a/data/VOC.yaml +++ b/data/VOC.yaml @@ -72,7 +72,8 @@ download: | imgs_path.mkdir(exist_ok=True, parents=True) lbs_path.mkdir(exist_ok=True, parents=True) - image_ids = open(path / f'VOC{year}/ImageSets/Main/{image_set}.txt').read().strip().split() + with open(path / f'VOC{year}/ImageSets/Main/{image_set}.txt') as f: + image_ids = f.read().strip().split() for id in tqdm(image_ids, desc=f'{image_set}{year}'): f = path / f'VOC{year}/JPEGImages/{id}.jpg' # old img path lb_path = (lbs_path / f.name).with_suffix('.txt') # new label path diff --git a/export.py b/export.py index c0b98ce40fd5..df4f3b6e05ef 100644 --- a/export.py +++ b/export.py @@ -407,7 +407,8 @@ def export_tfjs(keras_model, im, file, prefix=colorstr('TensorFlow.js:')): f'--output_node_names="Identity,Identity_1,Identity_2,Identity_3" {f_pb} {f}' subprocess.run(cmd, shell=True) - json = open(f_json).read() + with open(f_json) as j: + json = j.read() with open(f_json, 'w') as j: # sort JSON Identity_* in ascending order subst = re.sub( r'{"outputs": {"Identity.?.?": {"name": "Identity.?.?"}, ' diff --git a/models/common.py b/models/common.py index dcd3e5f408dd..5a83bce33fc8 100644 --- a/models/common.py +++ b/models/common.py @@ -378,7 +378,8 @@ def wrap_frozen_graph(gd, inputs, outputs): return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs)) gd = tf.Graph().as_graph_def() # graph_def - gd.ParseFromString(open(w, 'rb').read()) + with open(w, 'rb') as f: + gd.ParseFromString(f.read()) frozen_func = wrap_frozen_graph(gd, inputs="x:0", outputs="Identity:0") elif tflite or edgetpu: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu diff --git a/utils/flask_rest_api/example_request.py b/utils/flask_rest_api/example_request.py index ff21f30f93ca..773ad8932967 100644 --- a/utils/flask_rest_api/example_request.py +++ b/utils/flask_rest_api/example_request.py @@ -1,12 +1,18 @@ -"""Perform test request""" +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Perform test request +""" + import pprint import requests DETECTION_URL = "http://localhost:5000/v1/object-detection/yolov5s" -TEST_IMAGE = "zidane.jpg" +IMAGE = "zidane.jpg" -image_data = open(TEST_IMAGE, "rb").read() +# Read image +with open(IMAGE, "rb") as f: + image_data = f.read() response = requests.post(DETECTION_URL, 
files={"image": image_data}).json() diff --git a/utils/flask_rest_api/restapi.py b/utils/flask_rest_api/restapi.py index 38868cc98d84..62adb4bbf716 100644 --- a/utils/flask_rest_api/restapi.py +++ b/utils/flask_rest_api/restapi.py @@ -1,6 +1,8 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ Run a Flask REST API exposing a YOLOv5s model """ + import argparse import io From d2e7ba2a3af8f6f17fa5240422b964a1ecf717d5 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 5 Apr 2022 14:23:15 +0200 Subject: [PATCH 1009/1976] val.py `--weights` and `--data` compatibility check (#7292) Improved error messages for understanding of user error with val.py. May help https://github.com/ultralytics/yolov5/issues/7291 --- val.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/val.py b/val.py index 019beedea61a..50a6d91edfff 100644 --- a/val.py +++ b/val.py @@ -162,6 +162,10 @@ def run( # Dataloader if not training: + if pt and not single_cls: # check --weights are trained on --data + ncm = model.model.yaml['nc'] + assert ncm == nc, f'{weights[0]} ({ncm} classes) trained on different --data than what you passed ({nc} ' \ + f'classes). Pass correct combination of --weights and --data that are trained together.' model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz)) # warmup pad = 0.0 if task in ('speed', 'benchmark') else 0.5 rect = False if task == 'benchmark' else pt # square inference for benchmarks From b1300f3e0b7f1f5971b1d3abc6b7a0c0bd92b389 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 5 Apr 2022 15:14:54 +0200 Subject: [PATCH 1010/1976] Add dataset sizes (zipped) (#7293) --- data/Argoverse.yaml | 2 +- data/GlobalWheat2020.yaml | 2 +- data/Objects365.yaml | 2 +- data/SKU-110K.yaml | 2 +- data/VOC.yaml | 2 +- data/VisDrone.yaml | 2 +- data/coco.yaml | 2 +- data/coco128.yaml | 2 +- data/xView.yaml | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/data/Argoverse.yaml b/data/Argoverse.yaml index 312791b33a2d..43426f5ebe15 100644 --- a/data/Argoverse.yaml +++ b/data/Argoverse.yaml @@ -4,7 +4,7 @@ # parent # ├── yolov5 # └── datasets -# └── Argoverse ← downloads here +# └── Argoverse ← downloads here (31.3 GB) # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] diff --git a/data/GlobalWheat2020.yaml b/data/GlobalWheat2020.yaml index c1ba289f2833..4c43693f1d82 100644 --- a/data/GlobalWheat2020.yaml +++ b/data/GlobalWheat2020.yaml @@ -4,7 +4,7 @@ # parent # ├── yolov5 # └── datasets -# └── GlobalWheat2020 ← downloads here +# └── GlobalWheat2020 ← downloads here (7.0 GB) # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] diff --git a/data/Objects365.yaml b/data/Objects365.yaml index bd6e5d6e1144..1e09fd718479 100644 --- a/data/Objects365.yaml +++ b/data/Objects365.yaml @@ -4,7 +4,7 @@ # parent # ├── yolov5 # └── datasets -# └── Objects365 ← downloads here +# └── Objects365 ← downloads here (750 GB) # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] diff --git a/data/SKU-110K.yaml b/data/SKU-110K.yaml index 46459eab6bb7..2acf34d155bd 100644 --- a/data/SKU-110K.yaml +++ b/data/SKU-110K.yaml @@ -4,7 +4,7 @@ # parent # ├── yolov5 # └── datasets -# └── SKU-110K ← downloads here +# └── SKU-110K ← downloads here (13.6 GB) # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
diff --git a/data/VOC.yaml b/data/VOC.yaml index 9865967dd028..4fec304133be 100644 --- a/data/VOC.yaml +++ b/data/VOC.yaml @@ -4,7 +4,7 @@ # parent # ├── yolov5 # └── datasets -# └── VOC ← downloads here +# └── VOC ← downloads here (2.8 GB) # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] diff --git a/data/VisDrone.yaml b/data/VisDrone.yaml index 2a3b2f03e674..fe87588ee870 100644 --- a/data/VisDrone.yaml +++ b/data/VisDrone.yaml @@ -4,7 +4,7 @@ # parent # ├── yolov5 # └── datasets -# └── VisDrone ← downloads here +# └── VisDrone ← downloads here (2.3 GB) # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] diff --git a/data/coco.yaml b/data/coco.yaml index 7494fc2f9cd1..0c0c4adab05d 100644 --- a/data/coco.yaml +++ b/data/coco.yaml @@ -4,7 +4,7 @@ # parent # ├── yolov5 # └── datasets -# └── coco ← downloads here +# └── coco ← downloads here (20.1 GB) # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] diff --git a/data/coco128.yaml b/data/coco128.yaml index d07c704407a1..2517d2079257 100644 --- a/data/coco128.yaml +++ b/data/coco128.yaml @@ -4,7 +4,7 @@ # parent # ├── yolov5 # └── datasets -# └── coco128 ← downloads here +# └── coco128 ← downloads here (7 MB) # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] diff --git a/data/xView.yaml b/data/xView.yaml index fd82828dcb8c..3b38f1ff4439 100644 --- a/data/xView.yaml +++ b/data/xView.yaml @@ -5,7 +5,7 @@ # parent # ├── yolov5 # └── datasets -# └── xView ← downloads here +# └── xView ← downloads here (20.7 GB) # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] From c759bbdf19f3c430e778a84a76849145ebf58d25 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 5 Apr 2022 15:55:16 +0200 Subject: [PATCH 1011/1976] Add `check_requirements(('pycocotools>=2.0',))` (#7295) Add `check_requirements(('pycocotools>=2.0',))` --- data/Objects365.yaml | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/data/Objects365.yaml b/data/Objects365.yaml index 1e09fd718479..82b42a120d40 100644 --- a/data/Objects365.yaml +++ b/data/Objects365.yaml @@ -60,11 +60,12 @@ names: ['Person', 'Sneakers', 'Chair', 'Other Shoes', 'Hat', 'Car', 'Lamp', 'Gla # Download script/URL (optional) --------------------------------------------------------------------------------------- download: | - from pycocotools.coco import COCO from tqdm import tqdm - - from utils.general import Path, download, np, xyxy2xywhn - + + from utils.general import Path, check_requirements, download, np, xyxy2xywhn + + check_requirements(('pycocotools>=2.0',)) + from pycocotools.coco import COCO # Make Directories dir = Path(yaml['path']) # dataset root dir From 741fac815e366d74eed020efb8c68a23828ee3e9 Mon Sep 17 00:00:00 2001 From: Max Strobel Date: Tue, 5 Apr 2022 17:38:13 +0200 Subject: [PATCH 1012/1976] fix: disable usage of root logger (#7296) * fix: disable usage of root logger `logging.basicConfig` configures Python's root logger. This prohibits fine control of logging, overwrites logging configuration done outside the package, and is not best practice. Instead, the used logger is now configured directly, and the root logger is untouched. 
Example: If yolov5 is used as part of another project with some sophisticated logging, the internal `logging.basicConfig` call overwrites all the external configuration. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update general.py * Update general.py * Comment kaggle * Uncomment kaggle Co-authored-by: Maximilian Strobel Co-authored-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- data/Objects365.yaml | 4 ++-- utils/general.py | 12 +++++++++--- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/data/Objects365.yaml b/data/Objects365.yaml index 82b42a120d40..114bee2b159c 100644 --- a/data/Objects365.yaml +++ b/data/Objects365.yaml @@ -61,9 +61,9 @@ names: ['Person', 'Sneakers', 'Chair', 'Other Shoes', 'Hat', 'Car', 'Lamp', 'Gla # Download script/URL (optional) --------------------------------------------------------------------------------------- download: | from tqdm import tqdm - + from utils.general import Path, check_requirements, download, np, xyxy2xywhn - + check_requirements(('pycocotools>=2.0',)) from pycocotools.coco import COCO diff --git a/utils/general.py b/utils/general.py index 65dd9326797e..5316f504871a 100755 --- a/utils/general.py +++ b/utils/general.py @@ -82,11 +82,17 @@ def set_logging(name=None, verbose=VERBOSE): for h in logging.root.handlers: logging.root.removeHandler(h) # remove all handlers associated with the root logger object rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings - logging.basicConfig(format="%(message)s", level=logging.INFO if (verbose and rank in (-1, 0)) else logging.WARNING) - return logging.getLogger(name) + level = logging.INFO if (verbose and rank in (-1, 0)) else logging.WARNING + log = logging.getLogger(name) + log.setLevel(level) + handler = logging.StreamHandler() + handler.setFormatter(logging.Formatter("%(message)s")) + handler.setLevel(level) + log.addHandler(handler) -LOGGER = set_logging('yolov5') # define globally (used in train.py, val.py, detect.py, etc.) +set_logging() # run before defining LOGGER +LOGGER = logging.getLogger("yolov5") # define globally (used in train.py, val.py, detect.py, etc.) def user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'): From d257c75c848ccab4d9195300a61195cf0dfef1bf Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 5 Apr 2022 21:21:57 +0200 Subject: [PATCH 1013/1976] Update export.py (#7301) * Update export.py Simplify code. 
* [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- export.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/export.py b/export.py index df4f3b6e05ef..16ba2ffce3ec 100644 --- a/export.py +++ b/export.py @@ -480,15 +480,10 @@ def run( im, model = im.half(), model.half() # to FP16 model.train() if train else model.eval() # training mode = no Detect() layer grid construction for k, m in model.named_modules(): - # if isinstance(m, Conv): # assign export-friendly activations - # if isinstance(m.act, nn.SiLU): - # m.act = SiLU() if isinstance(m, Detect): m.inplace = inplace m.onnx_dynamic = dynamic m.export = True - if hasattr(m, 'forward_export'): - m.forward = m.forward_export # assign custom forward (optional) for _ in range(2): y = model(im) # dry runs From f735458987f7e80c32739bfe0440cbcad36aeae3 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 6 Apr 2022 12:20:24 +0200 Subject: [PATCH 1014/1976] Use `tqdm.auto` (#7311) --- data/Argoverse.yaml | 2 +- data/Objects365.yaml | 2 +- data/SKU-110K.yaml | 2 +- data/VOC.yaml | 2 +- data/VisDrone.yaml | 2 +- data/xView.yaml | 2 +- train.py | 2 +- utils/autoanchor.py | 2 +- utils/datasets.py | 2 +- utils/loggers/wandb/wandb_utils.py | 2 +- val.py | 2 +- 11 files changed, 11 insertions(+), 11 deletions(-) diff --git a/data/Argoverse.yaml b/data/Argoverse.yaml index 43426f5ebe15..9d114f55dce8 100644 --- a/data/Argoverse.yaml +++ b/data/Argoverse.yaml @@ -22,7 +22,7 @@ names: ['person', 'bicycle', 'car', 'motorcycle', 'bus', 'truck', 'traffic download: | import json - from tqdm import tqdm + from tqdm.auto import tqdm from utils.general import download, Path diff --git a/data/Objects365.yaml b/data/Objects365.yaml index 114bee2b159c..ab8207d200f5 100644 --- a/data/Objects365.yaml +++ b/data/Objects365.yaml @@ -60,7 +60,7 @@ names: ['Person', 'Sneakers', 'Chair', 'Other Shoes', 'Hat', 'Car', 'Lamp', 'Gla # Download script/URL (optional) --------------------------------------------------------------------------------------- download: | - from tqdm import tqdm + from tqdm.auto import tqdm from utils.general import Path, check_requirements, download, np, xyxy2xywhn diff --git a/data/SKU-110K.yaml b/data/SKU-110K.yaml index 2acf34d155bd..2fd689b1bcac 100644 --- a/data/SKU-110K.yaml +++ b/data/SKU-110K.yaml @@ -21,7 +21,7 @@ names: ['object'] # class names # Download script/URL (optional) --------------------------------------------------------------------------------------- download: | import shutil - from tqdm import tqdm + from tqdm.auto import tqdm from utils.general import np, pd, Path, download, xyxy2xywh diff --git a/data/VOC.yaml b/data/VOC.yaml index 4fec304133be..fbe3b193bf2e 100644 --- a/data/VOC.yaml +++ b/data/VOC.yaml @@ -29,7 +29,7 @@ names: ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', ' download: | import xml.etree.ElementTree as ET - from tqdm import tqdm + from tqdm.auto import tqdm from utils.general import download, Path diff --git a/data/VisDrone.yaml b/data/VisDrone.yaml index fe87588ee870..ef7e6c4fed35 100644 --- a/data/VisDrone.yaml +++ b/data/VisDrone.yaml @@ -24,7 +24,7 @@ download: | def visdrone2yolo(dir): from PIL import Image - from tqdm import tqdm + from tqdm.auto import tqdm def convert_box(size, box): # Convert VisDrone box to YOLO xywh box diff --git a/data/xView.yaml b/data/xView.yaml index 3b38f1ff4439..aac2d026e424 100644 --- 
a/data/xView.yaml +++ b/data/xView.yaml @@ -34,7 +34,7 @@ download: | import numpy as np from PIL import Image - from tqdm import tqdm + from tqdm.auto import tqdm from utils.datasets import autosplit from utils.general import download, xyxy2xywhn diff --git a/train.py b/train.py index 38c25c053e26..dfce5a195660 100644 --- a/train.py +++ b/train.py @@ -30,7 +30,7 @@ from torch.cuda import amp from torch.nn.parallel import DistributedDataParallel as DDP from torch.optim import SGD, Adam, AdamW, lr_scheduler -from tqdm import tqdm +from tqdm.auto import tqdm FILE = Path(__file__).resolve() ROOT = FILE.parents[0] # YOLOv5 root directory diff --git a/utils/autoanchor.py b/utils/autoanchor.py index 77518abe9889..cdcecd855a51 100644 --- a/utils/autoanchor.py +++ b/utils/autoanchor.py @@ -8,7 +8,7 @@ import numpy as np import torch import yaml -from tqdm import tqdm +from tqdm.auto import tqdm from utils.general import LOGGER, colorstr, emojis diff --git a/utils/datasets.py b/utils/datasets.py index c12d3d9b9649..578e5b829dc0 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -24,7 +24,7 @@ import yaml from PIL import ExifTags, Image, ImageOps from torch.utils.data import DataLoader, Dataset, dataloader, distributed -from tqdm import tqdm +from tqdm.auto import tqdm from utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective from utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, check_dataset, check_requirements, check_yaml, clean_str, diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index 08b568d074a2..e65d028f28db 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -8,7 +8,7 @@ from typing import Dict import yaml -from tqdm import tqdm +from tqdm.auto import tqdm FILE = Path(__file__).resolve() ROOT = FILE.parents[3] # YOLOv5 root directory diff --git a/val.py b/val.py index 50a6d91edfff..58a12ceae254 100644 --- a/val.py +++ b/val.py @@ -27,7 +27,7 @@ import numpy as np import torch -from tqdm import tqdm +from tqdm.auto import tqdm FILE = Path(__file__).resolve() ROOT = FILE.parents[0] # YOLOv5 root directory From 32661f75ac6eaa8c5dfd0ad36abfaa8d4e4fe700 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 6 Apr 2022 13:12:41 +0200 Subject: [PATCH 1015/1976] Add `retry=3` to `download()` (#7313) * Add `retry=3` to `download()` * Update general.py * Update general.py * Update general.py * Update VOC.yaml * Update VisDrone.yaml --- data/VOC.yaml | 2 +- data/VisDrone.yaml | 2 +- utils/general.py | 24 ++++++++++++++++++------ 3 files changed, 20 insertions(+), 8 deletions(-) diff --git a/data/VOC.yaml b/data/VOC.yaml index fbe3b193bf2e..93a1f181ce8c 100644 --- a/data/VOC.yaml +++ b/data/VOC.yaml @@ -62,7 +62,7 @@ download: | urls = [url + 'VOCtrainval_06-Nov-2007.zip', # 446MB, 5012 images url + 'VOCtest_06-Nov-2007.zip', # 438MB, 4953 images url + 'VOCtrainval_11-May-2012.zip'] # 1.95GB, 17126 images - download(urls, dir=dir / 'images', delete=False, threads=3) + download(urls, dir=dir / 'images', delete=False, curl=True, threads=3) # Convert path = dir / f'images/VOCdevkit' diff --git a/data/VisDrone.yaml b/data/VisDrone.yaml index ef7e6c4fed35..c38fb2ab769e 100644 --- a/data/VisDrone.yaml +++ b/data/VisDrone.yaml @@ -54,7 +54,7 @@ download: | 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-val.zip', 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-dev.zip', 
'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-challenge.zip'] - download(urls, dir=dir, threads=4) + download(urls, dir=dir, curl=True, threads=4) # Convert for d in 'VisDrone2019-DET-train', 'VisDrone2019-DET-val', 'VisDrone2019-DET-test-dev': diff --git a/utils/general.py b/utils/general.py index 5316f504871a..6c2558db74c4 100755 --- a/utils/general.py +++ b/utils/general.py @@ -497,20 +497,32 @@ def url2file(url): return file -def download(url, dir='.', unzip=True, delete=True, curl=False, threads=1): +def download(url, dir='.', unzip=True, delete=True, curl=False, threads=1, retry=3): # Multi-threaded file download and unzip function, used in data.yaml for autodownload def download_one(url, dir): # Download 1 file + success = True f = dir / Path(url).name # filename if Path(url).is_file(): # exists in current path Path(url).rename(f) # move to dir elif not f.exists(): LOGGER.info(f'Downloading {url} to {f}...') - if curl: - os.system(f"curl -L '{url}' -o '{f}' --retry 9 -C -") # curl download, retry and resume on fail - else: - torch.hub.download_url_to_file(url, f, progress=threads == 1) # torch download - if unzip and f.suffix in ('.zip', '.gz'): + for i in range(retry + 1): + if curl: + s = 'sS' if threads > 1 else '' # silent + r = os.system(f"curl -{s}L '{url}' -o '{f}' --retry 9 -C -") # curl download + success = r == 0 + else: + torch.hub.download_url_to_file(url, f, progress=threads == 1) # torch download + success = f.is_file() + if success: + break + elif i < retry: + LOGGER.warning(f'Download failure, retrying {i + 1}/{retry} {url}...') + else: + LOGGER.warning(f'Failed to download {url}...') + + if unzip and success and f.suffix in ('.zip', '.gz'): LOGGER.info(f'Unzipping {f}...') if f.suffix == '.zip': ZipFile(f).extractall(path=dir) # unzip From 245d6459a93bb707d9624027bf9ebf40bd925ca8 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 6 Apr 2022 17:23:34 +0200 Subject: [PATCH 1016/1976] Add callbacks (#7315) * Add `on_train_start()` callback * Update * Update --- train.py | 4 ++++ utils/loggers/__init__.py | 4 ++++ val.py | 4 ++++ 3 files changed, 12 insertions(+) diff --git a/train.py b/train.py index dfce5a195660..b7f70ab5bea4 100644 --- a/train.py +++ b/train.py @@ -66,6 +66,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze = \ Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \ opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze + callbacks.run('on_pretrain_routine_start') # Directories w = save_dir / 'weights' # weights dir @@ -291,11 +292,13 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio scaler = amp.GradScaler(enabled=cuda) stopper = EarlyStopping(patience=opt.patience) compute_loss = ComputeLoss(model) # init loss class + callbacks.run('on_train_start') LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n' f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n' f"Logging results to {colorstr('bold', save_dir)}\n" f'Starting training for {epochs} epochs...') for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------ + callbacks.run('on_train_epoch_start') model.train() # Update image weights (optional, single-GPU only) @@ -317,6 +320,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp 
dictio pbar = tqdm(pbar, total=nb, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar optimizer.zero_grad() for i, (imgs, targets, paths, _) in pbar: # batch ------------------------------------------------------------- + callbacks.run('on_train_batch_start') ni = i + nb * epoch # number integrated batches (since train start) imgs = imgs.to(device, non_blocking=True).float() / 255 # uint8 to float32, 0-255 to 0.0-1.0 diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 2e639dfb9b53..bab133cc35a9 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -84,6 +84,10 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, else: self.wandb = None + def on_train_start(self): + # Callback runs on train start + pass + def on_pretrain_routine_end(self): # Callback runs on pre-train routine end paths = self.save_dir.glob('*labels*.jpg') # training labels diff --git a/val.py b/val.py index 58a12ceae254..48f396626b54 100644 --- a/val.py +++ b/val.py @@ -188,8 +188,10 @@ def run( dt, p, r, f1, mp, mr, map50, map = [0.0, 0.0, 0.0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 loss = torch.zeros(3, device=device) jdict, stats, ap, ap_class = [], [], [], [] + callbacks.run('on_val_start') pbar = tqdm(dataloader, desc=s, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar for batch_i, (im, targets, paths, shapes) in enumerate(pbar): + callbacks.run('on_val_batch_start') t1 = time_sync() if cuda: im = im.to(device, non_blocking=True) @@ -260,6 +262,8 @@ def run( f = save_dir / f'val_batch{batch_i}_pred.jpg' # predictions Thread(target=plot_images, args=(im, output_to_target(out), paths, f, names), daemon=True).start() + callbacks.run('on_val_batch_end') + # Compute metrics stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy if len(stats) and stats[0].any(): From a88a81469a54838abfbba0885e1c47c9e87ce3e2 Mon Sep 17 00:00:00 2001 From: Nick Martin Date: Wed, 6 Apr 2022 09:35:33 -0700 Subject: [PATCH 1017/1976] Copy wandb param dict before training to avoid overwrites (#7317) * Copy wandb param dict before training to avoid overwrites. Copy the hyperparameter dict retrieved from wandb configuration before passing it to `train()`. Training overwrites parameters in the dictionary (eg scaling obj/box/cls gains), which causes the values reported in wandb to not match the input values. This is confusing as it makes it hard to reproduce a run, and also throws off wandb's Bayesian sweep algorithm. * Cleanup Co-authored-by: Glenn Jocher --- utils/loggers/wandb/sweep.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/loggers/wandb/sweep.py b/utils/loggers/wandb/sweep.py index 206059bc30bf..d49ea6f2778b 100644 --- a/utils/loggers/wandb/sweep.py +++ b/utils/loggers/wandb/sweep.py @@ -16,8 +16,8 @@ def sweep(): wandb.init() - # Get hyp dict from sweep agent - hyp_dict = vars(wandb.config).get("_items") + # Get hyp dict from sweep agent. Copy because train() modifies parameters which confused wandb. + hyp_dict = vars(wandb.config).get("_items").copy() # Workaround: get necessary opt args opt = parse_opt(known=True) From 0ca85ed65f124871fa7686dcf0efbd8dc9699856 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 6 Apr 2022 23:52:19 +0200 Subject: [PATCH 1018/1976] Update Objects365.yaml (#7323) Updated dataset size to 712GB (includes undeleted zips). 
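The `.copy()` added in #7317 matters because Python passes dicts by reference: `train()` rescales gains in place, so without a copy the sweep agent's logged hyperparameters silently drift from the values actually used. A standalone sketch of the aliasing, with a hypothetical gain-scaling step standing in for `train()`:

```python
def train(hyp, nl=4):
    hyp['box'] *= 3 / nl  # in-place gain scaling of the kind train() performs

sweep_hyp = {'box': 0.05}
train(sweep_hyp)         # dict passed by reference: the caller's values mutate
print(sweep_hyp['box'])  # 0.0375, no longer the 0.05 that was logged

sweep_hyp = {'box': 0.05}
train(sweep_hyp.copy())  # only the copy is mutated
print(sweep_hyp['box'])  # 0.05 preserved: logged and actual inputs agree
```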
--- data/Objects365.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/Objects365.yaml b/data/Objects365.yaml index ab8207d200f5..8e6326b38595 100644 --- a/data/Objects365.yaml +++ b/data/Objects365.yaml @@ -4,7 +4,7 @@ # parent # ├── yolov5 # └── datasets -# └── Objects365 ← downloads here (750 GB) +# └── Objects365 ← downloads here (712 GB) # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] From b7faeda0f225f909ce87ffe504e829062ac44ca4 Mon Sep 17 00:00:00 2001 From: Nrupatunga Date: Thu, 7 Apr 2022 17:22:44 +0530 Subject: [PATCH 1019/1976] Fix Tf export for BottleneckCSP (#7330) --- models/tf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/tf.py b/models/tf.py index a15569e3b465..04b1cd378f18 100644 --- a/models/tf.py +++ b/models/tf.py @@ -152,7 +152,7 @@ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None): self.cv3 = TFConv2d(c_, c_, 1, 1, bias=False, w=w.cv3) self.cv4 = TFConv(2 * c_, c2, 1, 1, w=w.cv4) self.bn = TFBN(w.bn) - self.act = lambda x: keras.activations.relu(x, alpha=0.1) + self.act = lambda x: keras.activations.swish(x) self.m = keras.Sequential([TFBottleneck(c_, c_, shortcut, g, e=1.0, w=w.m[j]) for j in range(n)]) def call(self, inputs): From 5783de26fe14d8a890090329d6ce17c468f47dfa Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 7 Apr 2022 16:12:44 +0200 Subject: [PATCH 1020/1976] Objects365 dataset breakdown images vs zips (#7335) --- data/Objects365.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/Objects365.yaml b/data/Objects365.yaml index 8e6326b38595..334c23c359cf 100644 --- a/data/Objects365.yaml +++ b/data/Objects365.yaml @@ -4,7 +4,7 @@ # parent # ├── yolov5 # └── datasets -# └── Objects365 ← downloads here (712 GB) +# └── Objects365 ← downloads here (712 GB = 367G data + 345G zips) # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
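The hooks wired into train.py and val.py in #7315, and simplified in the next patch, follow a small registry pattern: each hook name maps to a list of actions, `register_action()` validates and appends, and `run()` fires everything attached to a hook. A condensed, runnable sketch of that pattern (not the full utils/callbacks.py):

```python
class Callbacks:
    # Condensed registry: hook name -> list of registered actions
    def __init__(self):
        self._callbacks = {'on_train_start': [], 'on_train_epoch_start': [], 'on_val_start': []}

    def register_action(self, hook, name='', callback=None):
        assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}"
        assert callable(callback), f"callback '{callback}' is not callable"
        self._callbacks[hook].append({'name': name, 'callback': callback})

    def run(self, hook, *args, **kwargs):
        assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}"
        for logger in self._callbacks[hook]:
            logger['callback'](*args, **kwargs)


callbacks = Callbacks()
callbacks.register_action('on_train_start', name='greet', callback=lambda: print('train started'))
callbacks.run('on_train_start')  # prints: train started
callbacks.run('on_val_start')    # nothing registered: the loop is a no-op
```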
From 676e10cf1abc03360b56d8030adea2cd0d0af353 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 7 Apr 2022 16:15:01 +0200 Subject: [PATCH 1021/1976] Simplify callbacks.py return (#7333) * Simplify callbacks.py return * Indent args (pytorch convention) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/callbacks.py | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/utils/callbacks.py b/utils/callbacks.py index c6b3be1cbd69..2b32df0bf1c1 100644 --- a/utils/callbacks.py +++ b/utils/callbacks.py @@ -38,9 +38,9 @@ def register_action(self, hook, name='', callback=None): Register a new action to a callback hook Args: - hook The callback hook name to register the action to - name The name of the action for later reference - callback The callback to fire + hook: The callback hook name to register the action to + name: The name of the action for later reference + callback: The callback to fire """ assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}" assert callable(callback), f"callback '{callback}' is not callable" @@ -51,21 +51,18 @@ def get_registered_actions(self, hook=None): Returns all the registered actions by callback hook Args: - hook The name of the hook to check, defaults to all + hook: The name of the hook to check, defaults to all """ - if hook: - return self._callbacks[hook] - else: - return self._callbacks + return self._callbacks[hook] if hook else self._callbacks def run(self, hook, *args, **kwargs): """ Loop through the registered actions and fire all callbacks Args: - hook The name of the hook to check, defaults to all - args Arguments to receive from YOLOv5 - kwargs Keyword Arguments to receive from YOLOv5 + hook: The name of the hook to check, defaults to all + args: Arguments to receive from YOLOv5 + kwargs: Keyword Arguments to receive from YOLOv5 """ assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}" From 5f941a84efdd45c986cd1c3764ced99e7c8e8294 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 7 Apr 2022 16:44:08 +0200 Subject: [PATCH 1022/1976] Print dataset scan only `if RANK in (-1, 0)` (#7337) * Print dataset scan only `if RANK in (-1, 0)` * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- train.py | 10 +++++----- utils/datasets.py | 3 ++- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/train.py b/train.py index b7f70ab5bea4..d6764116b27c 100644 --- a/train.py +++ b/train.py @@ -316,7 +316,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio train_loader.sampler.set_epoch(epoch) pbar = enumerate(train_loader) LOGGER.info(('\n' + '%10s' * 7) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'labels', 'img_size')) - if RANK in [-1, 0]: + if RANK in (-1, 0): pbar = tqdm(pbar, total=nb, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar optimizer.zero_grad() for i, (imgs, targets, paths, _) in pbar: # batch ------------------------------------------------------------- @@ -365,7 +365,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio last_opt_step = ni # Log - if RANK in [-1, 0]: + if RANK in (-1, 0): mloss = (mloss * i + loss_items) / (i + 1) # update mean losses mem = 
f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G' # (GB) pbar.set_description(('%10s' * 2 + '%10.4g' * 5) % @@ -379,7 +379,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio lr = [x['lr'] for x in optimizer.param_groups] # for loggers scheduler.step() - if RANK in [-1, 0]: + if RANK in (-1, 0): # mAP callbacks.run('on_train_epoch_end', epoch=epoch) ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride', 'class_weights']) @@ -440,7 +440,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio # end epoch ---------------------------------------------------------------------------------------------------- # end training ----------------------------------------------------------------------------------------------------- - if RANK in [-1, 0]: + if RANK in (-1, 0): LOGGER.info(f'\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.') for f in last, best: if f.exists(): @@ -518,7 +518,7 @@ def parse_opt(known=False): def main(opt, callbacks=Callbacks()): # Checks - if RANK in [-1, 0]: + if RANK in (-1, 0): print_args(vars(opt)) check_git_status() check_requirements(exclude=['thop']) diff --git a/utils/datasets.py b/utils/datasets.py index 578e5b829dc0..3fa9aa4c6ca1 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -36,6 +36,7 @@ IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp' # include image suffixes VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes BAR_FORMAT = '{l_bar}{bar:10}{r_bar}{bar:-10b}' # tqdm bar format +LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html # Get orientation exif tag for orientation in ExifTags.TAGS.keys(): @@ -454,7 +455,7 @@ def __init__(self, # Display cache nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupt, total - if exists: + if exists and LOCAL_RANK in (-1, 0): d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupt" tqdm(None, desc=prefix + d, total=n, initial=n, bar_format=BAR_FORMAT) # display cache results if cache['msgs']: From 302b00b5f4b93bb6cdb3c651dc9f06b66d06016d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 8 Apr 2022 12:55:16 +0200 Subject: [PATCH 1023/1976] Update `_make_grid()` (#7346) --- models/yolo.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/models/yolo.py b/models/yolo.py index 85c5a96997f2..f072aeeb8eac 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -77,13 +77,15 @@ def forward(self, x): def _make_grid(self, nx=20, ny=20, i=0): d = self.anchors[i].device + t = self.anchors[i].dtype shape = 1, self.na, ny, nx, 2 # grid shape + y, x = torch.arange(ny, device=d, dtype=t), torch.arange(nx, device=d, dtype=t) if check_version(torch.__version__, '1.10.0'): # torch>=1.10.0 meshgrid workaround for torch>=0.7 compatibility - yv, xv = torch.meshgrid(torch.arange(ny, device=d), torch.arange(nx, device=d), indexing='ij') + yv, xv = torch.meshgrid(y, x, indexing='ij') else: - yv, xv = torch.meshgrid(torch.arange(ny, device=d), torch.arange(nx, device=d)) - grid = torch.stack((xv, yv), 2).expand(shape).float() - 0.5 # add grid offset, i.e. 
y = 2.0 * x - 0.5 - anchor_grid = (self.anchors[i] * self.stride[i]).view((1, self.na, 1, 1, 2)).expand(shape).float() + yv, xv = torch.meshgrid(y, x) + grid = torch.stack((xv, yv), 2).expand(shape) - 0.5 # add grid offset, i.e. y = 2.0 * x - 0.5 + anchor_grid = (self.anchors[i] * self.stride[i]).view((1, self.na, 1, 1, 2)).expand(shape) return grid, anchor_grid From 446e6f563af1e92358603dda07c7462134c02b14 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 8 Apr 2022 23:05:15 +0200 Subject: [PATCH 1024/1976] Rename 'MacOS' to 'macOS' (#7349) --- .github/workflows/greetings.yml | 2 +- detect.py | 2 +- export.py | 2 +- tutorial.ipynb | 2 +- utils/loggers/wandb/README.md | 2 +- val.py | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index 58fbcbfa90af..0b749f438dd2 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -56,4 +56,4 @@ jobs: CI CPU testing - If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), validation ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on MacOS, Windows, and Ubuntu every 24 hours and on every commit. + If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), validation ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit. diff --git a/detect.py b/detect.py index 14ff9a6ab421..bc93631caa4e 100644 --- a/detect.py +++ b/detect.py @@ -17,7 +17,7 @@ yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn yolov5s.xml # OpenVINO yolov5s.engine # TensorRT - yolov5s.mlmodel # CoreML (MacOS-only) + yolov5s.mlmodel # CoreML (macOS-only) yolov5s_saved_model # TensorFlow SavedModel yolov5s.pb # TensorFlow GraphDef yolov5s.tflite # TensorFlow Lite diff --git a/export.py b/export.py index 16ba2ffce3ec..ceb7862a49be 100644 --- a/export.py +++ b/export.py @@ -29,7 +29,7 @@ yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn yolov5s.xml # OpenVINO yolov5s.engine # TensorRT - yolov5s.mlmodel # CoreML (MacOS-only) + yolov5s.mlmodel # CoreML (macOS-only) yolov5s_saved_model # TensorFlow SavedModel yolov5s.pb # TensorFlow GraphDef yolov5s.tflite # TensorFlow Lite diff --git a/tutorial.ipynb b/tutorial.ipynb index d5a10dfd5952..dd6f520334b0 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -962,7 +962,7 @@ "\n", "![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg)\n", "\n", - "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. 
CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on MacOS, Windows, and Ubuntu every 24 hours and on every commit.\n" + "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.\n" ] }, { diff --git a/utils/loggers/wandb/README.md b/utils/loggers/wandb/README.md index 63d999859e6d..3e9c9fd38433 100644 --- a/utils/loggers/wandb/README.md +++ b/utils/loggers/wandb/README.md @@ -149,4 +149,4 @@ YOLOv5 may be run in any of the following up-to-date verified environments (with ![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg) -If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), validation ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on MacOS, Windows, and Ubuntu every 24 hours and on every commit. +If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), validation ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit. 
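Looking back at the `_make_grid()` change in #7346: torch 1.10 added an `indexing` argument to `torch.meshgrid` and warns when it is omitted, while older releases reject it, hence the `check_version` gate. A short sketch of the same gate, with a naive dotted-version comparison standing in for `utils.general.check_version` (assumes a release-style torch version string):

```python
import torch

def check_version(current, minimum):
    # Naive stand-in for utils.general.check_version: compare dotted versions
    to_tuple = lambda v: tuple(int(x) for x in v.split('+')[0].split('.')[:3])
    return to_tuple(current) >= to_tuple(minimum)

y, x = torch.arange(3), torch.arange(4)
if check_version(torch.__version__, '1.10.0'):
    yv, xv = torch.meshgrid(y, x, indexing='ij')  # explicit 'ij' silences the 1.10+ warning
else:
    yv, xv = torch.meshgrid(y, x)  # pre-1.10 default already behaves as 'ij'
grid = torch.stack((xv, yv), 2) - 0.5  # grid offset, i.e. y = 2.0 * x - 0.5
print(grid.shape)  # torch.Size([3, 4, 2])
```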
diff --git a/val.py b/val.py index 48f396626b54..5841437051c2 100644 --- a/val.py +++ b/val.py @@ -11,7 +11,7 @@ yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn yolov5s.xml # OpenVINO yolov5s.engine # TensorRT - yolov5s.mlmodel # CoreML (MacOS-only) + yolov5s.mlmodel # CoreML (macOS-only) yolov5s_saved_model # TensorFlow SavedModel yolov5s.pb # TensorFlow GraphDef yolov5s.tflite # TensorFlow Lite From 698a5d7f26002e7b0b0d535d981c2b92f25bc76e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 9 Apr 2022 01:32:16 +0200 Subject: [PATCH 1025/1976] Add `python benchmarks.py --test` for export-only (#7350) * Test exports * Fix precommit --- utils/benchmarks.py | 44 +++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 41 insertions(+), 3 deletions(-) diff --git a/utils/benchmarks.py b/utils/benchmarks.py index 36e827848584..1c1bb7a8f2ed 100644 --- a/utils/benchmarks.py +++ b/utils/benchmarks.py @@ -52,20 +52,26 @@ def run( data=ROOT / 'data/coco128.yaml', # dataset.yaml path device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu half=False, # use FP16 half-precision inference + test=False, # test exports only ): y, t = [], time.time() formats = export.export_formats() device = select_device(device) for i, (name, f, suffix, gpu) in formats.iterrows(): # index, (name, file, suffix, gpu-capable) try: - assert i < 9, 'Edge TPU and TF.js not supported' + assert i != 9, 'Edge TPU not supported' + assert i != 10, 'TF.js not supported' if device.type != 'cpu': assert gpu, f'{name} inference not supported on GPU' + + # Export if f == '-': w = weights # PyTorch format else: w = export.run(weights=weights, imgsz=[imgsz], include=[f], device=device, half=half)[-1] # all others assert suffix in str(w), 'export failed' + + # Validate result = val.run(data, w, batch_size, imgsz, plots=False, device=device, task='benchmark', half=half) metrics = result[0] # metrics (mp, mr, map50, map, *losses(box, obj, cls)) speeds = result[2] # times (preprocess, inference, postprocess) @@ -78,8 +84,39 @@ def run( LOGGER.info('\n') parse_opt() notebook_init() # print system info - py = pd.DataFrame(y, columns=['Format', 'mAP@0.5:0.95', 'Inference time (ms)']) + py = pd.DataFrame(y, columns=['Format', 'mAP@0.5:0.95', 'Inference time (ms)'] if map else ['Format', 'Export', '']) LOGGER.info(f'\nBenchmarks complete ({time.time() - t:.2f}s)') + LOGGER.info(str(py if map else py.iloc[:, :2])) + return py + + +def test( + weights=ROOT / 'yolov5s.pt', # weights path + imgsz=640, # inference size (pixels) + batch_size=1, # batch size + data=ROOT / 'data/coco128.yaml', # dataset.yaml path + device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu + half=False, # use FP16 half-precision inference + test=False, # test exports only +): + y, t = [], time.time() + formats = export.export_formats() + device = select_device(device) + for i, (name, f, suffix, gpu) in formats.iterrows(): # index, (name, file, suffix, gpu-capable) + try: + w = weights if f == '-' else \ + export.run(weights=weights, imgsz=[imgsz], include=[f], device=device, half=half)[-1] # weights + assert suffix in str(w), 'export failed' + y.append([name, True]) + except Exception: + y.append([name, False]) # mAP, t_inference + + # Print results + LOGGER.info('\n') + parse_opt() + notebook_init() # print system info + py = pd.DataFrame(y, columns=['Format', 'Export']) + LOGGER.info(f'\nExports complete ({time.time() - t:.2f}s)') LOGGER.info(str(py)) return py @@ -92,13 +129,14 @@ def parse_opt(): parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') + parser.add_argument('--test', action='store_true', help='test exports only') opt = parser.parse_args() print_args(vars(opt)) return opt def main(opt): - run(**vars(opt)) + test(**vars(opt)) if opt.test else run(**vars(opt)) if __name__ == "__main__": From 3bb233a7fb5b23e8128855eba1aaf347b1e86f49 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 9 Apr 2022 13:27:49 +0200 Subject: [PATCH 1026/1976] Add ONNX export metadata (#7353) --- export.py | 8 +++++++- models/common.py | 3 +++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/export.py b/export.py index ceb7862a49be..ecead3ef5a90 100644 --- a/export.py +++ b/export.py @@ -140,7 +140,13 @@ def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorst # Checks model_onnx = onnx.load(f) # load onnx model onnx.checker.check_model(model_onnx) # check onnx model - # LOGGER.info(onnx.helper.printable_graph(model_onnx.graph)) # print + + # Metadata + d = {'stride': int(max(model.stride)), 'names': model.names} + for k, v in d.items(): + meta = model_onnx.metadata_props.add() + meta.key, meta.value = k, str(v) + onnx.save(model_onnx, f) # Simplify if simplify: diff --git a/models/common.py b/models/common.py index 5a83bce33fc8..49175f76a53a 100644 --- a/models/common.py +++ b/models/common.py @@ -328,6 +328,9 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, import onnxruntime providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider'] session = onnxruntime.InferenceSession(w, providers=providers) + meta = session.get_modelmeta().custom_metadata_map # metadata + if 'stride' in meta: + stride, names = int(meta['stride']), eval(meta['names']) elif xml: # OpenVINO LOGGER.info(f'Loading {w} for OpenVINO inference...') check_requirements(('openvino-dev',)) # requires openvino-dev: https://pypi.org/project/openvino-dev/ From aa542ce6a65658ff931fee9bbab77c0145c152f0 Mon Sep 17 00:00:00 2001 From: rglkt <50093021+rglkt@users.noreply.github.com> Date: Sun, 10 Apr 2022 01:11:55 +0800 Subject: [PATCH 1027/1976] DetectMultiBackend() default `stride=32` (#7342) * set common default stride as 32 * restore default stride and make it an optional argument * fix wrong use of opt * fix missing parameter of stride * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix format of parameters * 
Update val.py * Update common.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- models/common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/common.py b/models/common.py index 49175f76a53a..6ab82ab51ff4 100644 --- a/models/common.py +++ b/models/common.py @@ -296,7 +296,7 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, super().__init__() w = str(weights[0] if isinstance(weights, list) else weights) pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs = self.model_type(w) # get backend - stride, names = 64, [f'class{i}' for i in range(1000)] # assign defaults + stride, names = 32, [f'class{i}' for i in range(1000)] # assign defaults w = attempt_download(w) # download if not local fp16 &= (pt or jit or onnx or engine) and device.type != 'cpu' # FP16 if data: # data.yaml path (optional) From 406ee528f0fb78e6f814b9a53765bc54183f0a0b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 10 Apr 2022 13:46:07 +0200 Subject: [PATCH 1028/1976] Loss and IoU speed improvements (#7361) * Loss speed improvements * bbox_iou speed improvements * bbox_ioa speed improvements * box_iou speed improvements * box_iou speed improvements --- utils/loss.py | 8 +++---- utils/metrics.py | 54 +++++++++++++++++++++++------------------------- val.py | 4 ++-- 3 files changed, 32 insertions(+), 34 deletions(-) diff --git a/utils/loss.py b/utils/loss.py index fa8095515477..b5d050e46047 100644 --- a/utils/loss.py +++ b/utils/loss.py @@ -138,7 +138,7 @@ def __call__(self, p, targets): # predictions, targets pxy = pxy.sigmoid() * 2 - 0.5 pwh = (pwh.sigmoid() * 2) ** 2 * anchors[i] pbox = torch.cat((pxy, pwh), 1) # predicted box - iou = bbox_iou(pbox.T, tbox[i], x1y1x2y2=False, CIoU=True) # iou(prediction, target) + iou = bbox_iou(pbox, tbox[i], CIoU=True).squeeze() # iou(prediction, target) lbox += (1.0 - iou).mean() # iou loss # Objectness @@ -180,7 +180,7 @@ def build_targets(self, p, targets): tcls, tbox, indices, anch = [], [], [], [] gain = torch.ones(7, device=self.device) # normalized to gridspace gain ai = torch.arange(na, device=self.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt) - targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2) # append anchor indices + targets = torch.cat((targets.repeat(na, 1, 1), ai[..., None]), 2) # append anchor indices g = 0.5 # bias off = torch.tensor( @@ -199,10 +199,10 @@ def build_targets(self, p, targets): gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]] # xyxy gain # Match targets to anchors - t = targets * gain + t = targets * gain # shape(3,n,7) if nt: # Matches - r = t[:, :, 4:6] / anchors[:, None] # wh ratio + r = t[..., 4:6] / anchors[:, None] # wh ratio j = torch.max(r, 1 / r).max(2)[0] < self.hyp['anchor_t'] # compare # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2)) t = t[j] # filter diff --git a/utils/metrics.py b/utils/metrics.py index 0674beddc0fb..ff43a3073062 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -206,37 +206,36 @@ def print(self): print(' '.join(map(str, self.matrix[i]))) -def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7): - # Returns the IoU of box1 to box2. 
box1 is 4, box2 is nx4 - box2 = box2.T +def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7): + # Returns Intersection over Union (IoU) of box1(1,4) to box2(n,4) # Get the coordinates of bounding boxes - if x1y1x2y2: # x1, y1, x2, y2 = box1 - b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] - b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] - else: # transform from xywh to xyxy - b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2 - b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2 - b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2 - b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2 + if xywh: # transform from xywh to xyxy + (x1, y1, w1, h1), (x2, y2, w2, h2) = box1.chunk(4, 1), box2.chunk(4, 1) + w1_, h1_, w2_, h2_ = w1 / 2, h1 / 2, w2 / 2, h2 / 2 + b1_x1, b1_x2, b1_y1, b1_y2 = x1 - w1_, x1 + w1_, y1 - h1_, y1 + h1_ + b2_x1, b2_x2, b2_y1, b2_y2 = x2 - w2_, x2 + w2_, y2 - h2_, y2 + h2_ + else: # x1, y1, x2, y2 = box1 + b1_x1, b1_y1, b1_x2, b1_y2 = box1.chunk(4, 1) + b2_x1, b2_y1, b2_x2, b2_y2 = box2.chunk(4, 1) + w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps + w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps # Intersection area inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \ (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0) # Union Area - w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps - w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps union = w1 * h1 + w2 * h2 - inter + eps + # IoU iou = inter / union if CIoU or DIoU or GIoU: cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1 c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared - rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + - (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center distance squared + rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center dist ** 2 if CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2) with torch.no_grad(): @@ -248,6 +247,11 @@ def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps= return iou # IoU +def box_area(box): + # box = xyxy(4,n) + return (box[2] - box[0]) * (box[3] - box[1]) + + def box_iou(box1, box2): # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py """ @@ -261,16 +265,12 @@ def box_iou(box1, box2): IoU values for every element in boxes1 and boxes2 """ - def box_area(box): - # box = 4xn - return (box[2] - box[0]) * (box[3] - box[1]) - - area1 = box_area(box1.T) - area2 = box_area(box2.T) - # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2) - inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2) - return inter / (area1[:, None] + area2 - inter) # iou = inter / (area1 + area2 - inter) + (a1, a2), (b1, b2) = box1[:, None].chunk(2, 2), box2.chunk(2, 1) + inter = (torch.min(a2, b2) - torch.max(a1, b1)).clamp(0).prod(2) + + # IoU = inter / (area1 + area2 - inter) + return inter / (box_area(box1.T)[:, None] + box_area(box2.T) - inter) def bbox_ioa(box1, box2, eps=1E-7): @@ -280,11 +280,9 @@ def bbox_ioa(box1, box2, eps=1E-7): returns: np.array of shape(n) """ - box2 = box2.transpose() - # Get the coordinates of bounding boxes 
- b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] - b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] + b1_x1, b1_y1, b1_x2, b1_y2 = box1 + b2_x1, b2_y1, b2_x2, b2_y2 = box2.T # Intersection area inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \ diff --git a/val.py b/val.py index 5841437051c2..36f2a6c0284b 100644 --- a/val.py +++ b/val.py @@ -38,10 +38,10 @@ from models.common import DetectMultiBackend from utils.callbacks import Callbacks from utils.datasets import create_dataloader -from utils.general import (LOGGER, box_iou, check_dataset, check_img_size, check_requirements, check_yaml, +from utils.general import (LOGGER, check_dataset, check_img_size, check_requirements, check_yaml, coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, print_args, scale_coords, xywh2xyxy, xyxy2xywh) -from utils.metrics import ConfusionMatrix, ap_per_class +from utils.metrics import ConfusionMatrix, ap_per_class, box_iou from utils.plots import output_to_target, plot_images, plot_val_study from utils.torch_utils import select_device, time_sync From 1993efd59e54e990add1b562ac147e57722987f9 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 10 Apr 2022 13:53:28 +0200 Subject: [PATCH 1029/1976] Swap `unsafe_chunk()` for `chunk()` (#7362) Eliminates all unsafe function in YOLOv5 out of an abundance of caution. --- utils/loss.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/loss.py b/utils/loss.py index b5d050e46047..a1b0ff6c1244 100644 --- a/utils/loss.py +++ b/utils/loss.py @@ -220,7 +220,7 @@ def build_targets(self, p, targets): offsets = 0 # Define - bc, gxy, gwh, a = t.unsafe_chunk(4, dim=1) # (image, class), grid xy, grid wh, anchors + bc, gxy, gwh, a = t.chunk(4, 1) # (image, class), grid xy, grid wh, anchors a, (b, c) = a.long().view(-1), bc.long().T # anchors, image, class gij = (gxy - offsets).long() gi, gj = gij.T # grid indices From db36f13c7afa1d0b2a77d3437e46f6f5fe58c020 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 10 Apr 2022 14:40:33 +0200 Subject: [PATCH 1030/1976] Delete FUNDING.yml (#7363) Deleting as redundant with FUNDING.yml present in organization repo at https://github.com/ultralytics/.github --- .github/FUNDING.yml | 5 ----- 1 file changed, 5 deletions(-) delete mode 100644 .github/FUNDING.yml diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml deleted file mode 100644 index 3da386f7e724..000000000000 --- a/.github/FUNDING.yml +++ /dev/null @@ -1,5 +0,0 @@ -# These are supported funding model platforms - -github: glenn-jocher -patreon: ultralytics -open_collective: ultralytics From b8d4f2bf74812fc299d6d363b441a99feb14af27 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 10 Apr 2022 14:50:01 +0200 Subject: [PATCH 1031/1976] Replace Slack with Community Forum in issues (#7364) --- .github/ISSUE_TEMPLATE/config.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index f388d7bacf66..4db7cefb2707 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -1,8 +1,8 @@ blank_issues_enabled: true contact_links: - - name: Slack - url: https://join.slack.com/t/ultralytics/shared_invite/zt-w29ei8bp-jczz7QYUmDtgo6r6KcMIAg - about: Ask on Ultralytics Slack Forum + - name: 💬 Forum + url: https://community.ultralytics.com/ + about: Ask on Ultralytics Community Forum - name: Stack Overflow url: https://stackoverflow.com/search?q=YOLOv5 about: Ask on 
Stack Overflow with 'YOLOv5' tag From 8c420c4c1fb3b83ef0e60749d46bcc2ec9967fc5 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 10 Apr 2022 15:17:25 +0200 Subject: [PATCH 1032/1976] Update ci-testing.yml (#7365) Remove keras==2.6.0 patch --- .github/workflows/ci-testing.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 59193e05e08c..e5d5fc434f06 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -50,8 +50,8 @@ jobs: - name: Install dependencies run: | python -m pip install --upgrade pip - pip install -qr requirements.txt -f https://download.pytorch.org/whl/cpu/torch_stable.html - pip install -q onnx tensorflow-cpu keras==2.6.0 # wandb # extras + pip install -qr requirements.txt -f https://download.pytorch.org/whl/cpu/torch_stable.html \ + onnx tensorflow-cpu # wandb python --version pip --version pip list From 71685cbf91a9f60eb2f9c46ced8fa7becf6813d9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Apr 2022 10:26:13 +0200 Subject: [PATCH 1033/1976] Bump actions/stale from 4 to 5 (#7371) Bumps [actions/stale](https://github.com/actions/stale) from 4 to 5. - [Release notes](https://github.com/actions/stale/releases) - [Changelog](https://github.com/actions/stale/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/stale/compare/v4...v5) --- updated-dependencies: - dependency-name: actions/stale dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/stale.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 7a83950c17b7..78b2161f73b0 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -9,7 +9,7 @@ jobs: stale: runs-on: ubuntu-latest steps: - - uses: actions/stale@v4 + - uses: actions/stale@v5 with: repo-token: ${{ secrets.GITHUB_TOKEN }} stale-issue-message: | From bd2dda8e64b384acd34f54a1aacfa7fc8997be13 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 11 Apr 2022 12:34:35 +0200 Subject: [PATCH 1034/1976] Update optimizer param group strategy (#7376) * Update optimizer param group strategy Avoid empty lists on missing BatchNorm2d models as in https://github.com/ultralytics/yolov5/issues/7375 * fix init --- train.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/train.py b/train.py index d6764116b27c..e023a3418454 100644 --- a/train.py +++ b/train.py @@ -150,27 +150,27 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio hyp['weight_decay'] *= batch_size * accumulate / nbs # scale weight_decay LOGGER.info(f"Scaled weight_decay = {hyp['weight_decay']}") - g0, g1, g2 = [], [], [] # optimizer parameter groups + g = [], [], [] # optimizer parameter groups for v in model.modules(): if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter): # bias - g2.append(v.bias) + g[2].append(v.bias) if isinstance(v, nn.BatchNorm2d): # weight (no decay) - g0.append(v.weight) + g[1].append(v.weight) elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter): # weight (with decay) - g1.append(v.weight) + g[0].append(v.weight) if opt.optimizer == 'Adam': - optimizer = Adam(g0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum +
optimizer = Adam(g[2], lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum elif opt.optimizer == 'AdamW': - optimizer = AdamW(g0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum + optimizer = AdamW(g[2], lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum else: - optimizer = SGD(g0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True) + optimizer = SGD(g[2], lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True) - optimizer.add_param_group({'params': g1, 'weight_decay': hyp['weight_decay']}) # add g1 with weight_decay - optimizer.add_param_group({'params': g2}) # add g2 (biases) + optimizer.add_param_group({'params': g[0], 'weight_decay': hyp['weight_decay']}) # add g0 with weight_decay + optimizer.add_param_group({'params': g[1]}) # add g1 (BatchNorm2d weights) LOGGER.info(f"{colorstr('optimizer:')} {type(optimizer).__name__} with parameter groups " - f"{len(g0)} weight (no decay), {len(g1)} weight, {len(g2)} bias") - del g0, g1, g2 + f"{len(g[1])} weight (no decay), {len(g[0])} weight, {len(g[2])} bias") + del g # Scheduler if opt.cos_lr: From fa569cdae52dfd3074561129c3a5185bded60b16 Mon Sep 17 00:00:00 2001 From: Vardan Agarwal <35430842+vardanagarwal@users.noreply.github.com> Date: Mon, 11 Apr 2022 17:34:22 +0530 Subject: [PATCH 1035/1976] Add support for different normalization layers (#7377) * Add support for different normalization layers. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Cleanup Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- train.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/train.py b/train.py index e023a3418454..80bff18fd653 100644 --- a/train.py +++ b/train.py @@ -151,10 +151,11 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio LOGGER.info(f"Scaled weight_decay = {hyp['weight_decay']}") g = [], [], [] # optimizer parameter groups + bn = nn.BatchNorm2d, nn.LazyBatchNorm2d, nn.GroupNorm, nn.InstanceNorm2d, nn.LazyInstanceNorm2d, nn.LayerNorm for v in model.modules(): if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter): # bias g[2].append(v.bias) - if isinstance(v, nn.BatchNorm2d): # weight (no decay) + if isinstance(v, bn): # weight (no decay) g[1].append(v.weight) elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter): # weight (with decay) g[0].append(v.weight) From 4bb7eb8b849fc8a90823a60e2b7a8ec9e38926bf Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 12 Apr 2022 11:02:11 +0200 Subject: [PATCH 1036/1976] Dynamic normalization layer selection (#7392) * Dynamic normalization layer selection Based on actual available layers. Torch 1.7 compatible, resolves https://github.com/ultralytics/yolov5/issues/7381 * Update train.py --- train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/train.py b/train.py index 80bff18fd653..806e2cebe561 100644 --- a/train.py +++ b/train.py @@ -151,7 +151,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio LOGGER.info(f"Scaled weight_decay = {hyp['weight_decay']}") g = [], [], [] # optimizer parameter groups - bn = nn.BatchNorm2d, nn.LazyBatchNorm2d, nn.GroupNorm, nn.InstanceNorm2d, nn.LazyInstanceNorm2d, nn.LayerNorm + bn = tuple(v for k, v in nn.__dict__.items() if 'Norm' in k) # normalization layers, i.e. 
BatchNorm2d() for v in model.modules(): if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter): # bias g[2].append(v.bias) From 74aaab33129724e0f9f663cff268f7bb296c386b Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Tue, 12 Apr 2022 15:16:56 +0530 Subject: [PATCH 1037/1976] Add version warning for wandb (#7385) * add version warning * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Cleanup * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update __init__.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- utils/loggers/__init__.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index bab133cc35a9..3a3ec1ee455b 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -81,6 +81,11 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, run_id = torch.load(self.weights).get('wandb_id') if self.opt.resume and not wandb_artifact_resume else None self.opt.hyp = self.hyp # add hyperparameters self.wandb = WandbLogger(self.opt, run_id) + # temp warn. because nested artifacts not supported after 0.12.10 + if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.11'): + self.logger.warning( + "YOLOv5 temporarily requires wandb version 0.12.10 or below. Some features may not work as expected." + ) else: self.wandb = None From 5333b55e7403f1f2db629eadf63b81200f8f8db2 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 12 Apr 2022 14:57:50 +0200 Subject: [PATCH 1038/1976] Remove OpenVINO ONNX `opset<=12` check (#7398) No longer needed. 
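The optimizer refactor in the patches above reduces to one rule: weight decay is applied only to convolution/linear weights, never to normalization-layer weights or biases, and the set of normalization classes is collected dynamically from `torch.nn` instead of being hard-coded. A minimal standalone sketch of that three-way split, assuming any `torch.nn.Module`; the `param_groups()` helper, the decay value and the demo model are illustrative and not code from `train.py`:

```python
import torch
import torch.nn as nn


def param_groups(model, weight_decay=5e-4):
    # Mirror the g[0]/g[1]/g[2] split: decayed weights, un-decayed norm weights, un-decayed biases
    norm_layers = tuple(v for k, v in nn.__dict__.items() if 'Norm' in k and isinstance(v, type))
    decay, no_decay, biases = [], [], []
    for m in model.modules():
        if isinstance(getattr(m, 'bias', None), nn.Parameter):
            biases.append(m.bias)  # biases are never decayed
        if isinstance(m, norm_layers) and isinstance(getattr(m, 'weight', None), nn.Parameter):
            no_decay.append(m.weight)  # BatchNorm/LayerNorm/GroupNorm/... weights are never decayed
        elif isinstance(getattr(m, 'weight', None), nn.Parameter):
            decay.append(m.weight)  # conv/linear weights carry the full weight_decay
    return [{'params': decay, 'weight_decay': weight_decay},
            {'params': no_decay, 'weight_decay': 0.0},
            {'params': biases, 'weight_decay': 0.0}]


model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU(), nn.Conv2d(8, 4, 3))
optimizer = torch.optim.SGD(param_groups(model), lr=0.01, momentum=0.9, nesterov=True)
```

Passing explicit per-group dicts keeps an empty group harmless on models with no normalization layers, which is the failure mode the "Avoid empty lists" fix above addresses.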
--- export.py | 1 - 1 file changed, 1 deletion(-) diff --git a/export.py b/export.py index ecead3ef5a90..e1e7207058b5 100644 --- a/export.py +++ b/export.py @@ -473,7 +473,6 @@ def run( # Checks imgsz *= 2 if len(imgsz) == 1 else 1 # expand - opset = 12 if ('openvino' in include) else opset # OpenVINO requires opset <= 12 assert nc == len(names), f'Model class count {nc} != len(names) {len(names)}' # Input From 2da2466168116a9fa81f4acab744dc9fe8f90cac Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 12 Apr 2022 15:08:53 +0200 Subject: [PATCH 1039/1976] Fix EdgeTPU output directory (#7399) * Fix EdgeTPU output directory Outputs to same directory as --weights * Update export.py --- export.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/export.py b/export.py index e1e7207058b5..00b98517cdf6 100644 --- a/export.py +++ b/export.py @@ -387,7 +387,7 @@ def export_edgetpu(keras_model, im, file, prefix=colorstr('Edge TPU:')): f = str(file).replace('.pt', '-int8_edgetpu.tflite') # Edge TPU model f_tfl = str(file).replace('.pt', '-int8.tflite') # TFLite model - cmd = f"edgetpu_compiler -s {f_tfl}" + cmd = f"edgetpu_compiler -s -o {file.parent} {f_tfl}" subprocess.run(cmd, shell=True, check=True) LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') From 014acde79daee83e1f1801412cc7a48293e6e1f2 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 12 Apr 2022 17:26:53 +0200 Subject: [PATCH 1040/1976] Update `git_describe()` (#7402) * Update `git_describe()` Add .git path check to avoid `fatal: not a git repository (or any of the parent directories): .git` printout * Update general.py --- utils/general.py | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/general.py b/utils/general.py index 6c2558db74c4..daef2a427111 100755 --- a/utils/general.py +++ b/utils/general.py @@ -275,6 +275,7 @@ def check_online(): def git_describe(path=ROOT): # path must be a directory # Return human-readable git description, i.e. 
v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe try: + assert (Path(path) / '.git').is_dir() return check_output(f'git -C {path} describe --tags --long --always', shell=True).decode()[:-1] except Exception: return '' From 3eefab1bb109214a614485b6c5f80f22c122f2b2 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 15 Apr 2022 21:48:52 +0200 Subject: [PATCH 1041/1976] Remove `tensorrt` pip install check (#7439) --- export.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/export.py b/export.py index 00b98517cdf6..f97df4710b6f 100644 --- a/export.py +++ b/export.py @@ -209,8 +209,7 @@ def export_coreml(model, im, file, prefix=colorstr('CoreML:')): def export_engine(model, im, file, train, half, simplify, workspace=4, verbose=False, prefix=colorstr('TensorRT:')): # YOLOv5 TensorRT export https://developer.nvidia.com/tensorrt try: - check_requirements(('tensorrt',)) - import tensorrt as trt + import tensorrt as trt # pip install -U nvidia-tensorrt --index-url https://pypi.ngc.nvidia.com if trt.__version__[0] == '7': # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012 grid = model.model[-1].anchor_grid From c9a3b14a749edf77e2faf7ad41f5cd779bd106fd Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 16 Apr 2022 15:12:38 +0200 Subject: [PATCH 1042/1976] Disable `pbar` for DDP ranks > 0 (#7440) --- utils/datasets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/datasets.py b/utils/datasets.py index 3fa9aa4c6ca1..ef04f51dffef 100755 --- a/utils/datasets.py +++ b/utils/datasets.py @@ -522,7 +522,7 @@ def __init__(self, self.im_hw0, self.im_hw = [None] * n, [None] * n fcn = self.cache_images_to_disk if cache_images == 'disk' else self.load_image results = ThreadPool(NUM_THREADS).imap(fcn, range(n)) - pbar = tqdm(enumerate(results), total=n, bar_format=BAR_FORMAT) + pbar = tqdm(enumerate(results), total=n, bar_format=BAR_FORMAT, disable=LOCAL_RANK > 0) for i, x in pbar: if cache_images == 'disk': gb += self.npy_files[i].stat().st_size From 7926afccde1a95a4c8dbeb9d2b8a901d9f220ca7 Mon Sep 17 00:00:00 2001 From: Cedric Perauer <40869163+Cedric-Perauer@users.noreply.github.com> Date: Sat, 16 Apr 2022 18:00:50 +0200 Subject: [PATCH 1043/1976] Add `--half` support for FP16 CoreML exports with (#7446) * add fp16 for coreml using --half * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * Cleanup * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update export.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- export.py | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/export.py b/export.py index f97df4710b6f..2a5eff23c1a6 100644 --- a/export.py +++ b/export.py @@ -186,7 +186,7 @@ def export_openvino(model, im, file, prefix=colorstr('OpenVINO:')): LOGGER.info(f'\n{prefix} export failure: {e}') -def export_coreml(model, im, file, prefix=colorstr('CoreML:')): +def export_coreml(model, im, file, int8, half, prefix=colorstr('CoreML:')): # YOLOv5 CoreML export try: check_requirements(('coremltools',)) @@ -197,6 +197,14 @@ def export_coreml(model, im, file, prefix=colorstr('CoreML:')): ts = torch.jit.trace(model, im, strict=False) # TorchScript model ct_model = ct.convert(ts, inputs=[ct.ImageType('image', 
shape=im.shape, scale=1 / 255, bias=[0, 0, 0])]) + bits, mode = (8, 'kmeans_lut') if int8 else (16, 'linear') if half else (32, None) + if bits < 32: + if platform.system() == 'Darwin': # quantization only supported on macOS + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", category=DeprecationWarning) # suppress numpy==1.20 float warning + ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode) + else: + print(f'{prefix} quantization only supported on macOS, skipping...') ct_model.save(f) LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') @@ -466,7 +474,8 @@ def run( # Load PyTorch model device = select_device(device) - assert not (device.type == 'cpu' and half), '--half only compatible with GPU export, i.e. use --device 0' + if half: + assert device.type != 'cpu' or coreml, '--half only compatible with GPU export, i.e. use --device 0' model = attempt_load(weights, map_location=device, inplace=True, fuse=True) # load FP32 model nc, names = model.nc, model.names # number of classes, class names @@ -480,7 +489,7 @@ def run( im = torch.zeros(batch_size, 3, *imgsz).to(device) # image size(1,3,320,192) BCHW iDetection # Update model - if half: + if half and not coreml: im, model = im.half(), model.half() # to FP16 model.train() if train else model.eval() # training mode = no Detect() layer grid construction for k, m in model.named_modules(): @@ -506,7 +515,7 @@ def run( if xml: # OpenVINO f[3] = export_openvino(model, im, file) if coreml: - _, f[4] = export_coreml(model, im, file) + _, f[4] = export_coreml(model, im, file, int8, half) # TensorFlow Exports if any((saved_model, pb, tflite, edgetpu, tfjs)): From 3a25e81b303b0b80b79e1c99f4bc2a602e23ab65 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 19 Apr 2022 15:07:05 -0700 Subject: [PATCH 1044/1976] Bump cirrus-actions/rebase from 1.5 to 1.6 (#7462) Bumps [cirrus-actions/rebase](https://github.com/cirrus-actions/rebase) from 1.5 to 1.6. - [Release notes](https://github.com/cirrus-actions/rebase/releases) - [Commits](https://github.com/cirrus-actions/rebase/compare/1.5...1.6) --- updated-dependencies: - dependency-name: cirrus-actions/rebase dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/rebase.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/rebase.yml b/.github/workflows/rebase.yml index 75c57546166b..d79d5cfb20c4 100644 --- a/.github/workflows/rebase.yml +++ b/.github/workflows/rebase.yml @@ -16,6 +16,6 @@ jobs: token: ${{ secrets.ACTIONS_TOKEN }} fetch-depth: 0 # otherwise, you will fail to push refs to dest repo - name: Automatic Rebase - uses: cirrus-actions/rebase@1.5 + uses: cirrus-actions/rebase@1.6 env: GITHUB_TOKEN: ${{ secrets.ACTIONS_TOKEN }} From d876caab4d8f54d11988c277eb2a237bbe405841 Mon Sep 17 00:00:00 2001 From: HERIUN Date: Wed, 20 Apr 2022 07:40:06 +0900 Subject: [PATCH 1045/1976] Update val.py (#7478) * Update val.py is_coco doesn't work!! '/' -> os.sep!! 
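The `'/' -> os.sep!!` remark above is the whole bug: `data['val']` comes back with OS-native separators, so a hard-coded forward slash inside `endswith()` can never match on Windows. A small self-contained illustration of the failure and two portable alternatives (`val_path` is a made-up value for the demo, not YOLOv5 code):

```python
import os
from pathlib import PurePath

val_path = os.path.join('..', 'datasets', 'coco', 'val2017.txt')  # OS-native separators

# A hard-coded '/' never matches on Windows, where val_path ends with 'coco\\val2017.txt'
broken = val_path.endswith('coco/val2017.txt')

# Portable checks: build the suffix with os.sep, or compare path components
fixed = val_path.endswith(f'coco{os.sep}val2017.txt')
by_parts = PurePath(val_path).parts[-2:] == ('coco', 'val2017.txt')

print(broken, fixed, by_parts)  # Windows: False True True; POSIX: True True True
```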
* [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Cleanup * fix Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- val.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/val.py b/val.py index 36f2a6c0284b..13971612ac78 100644 --- a/val.py +++ b/val.py @@ -155,7 +155,7 @@ def run( # Configure model.eval() cuda = device.type != 'cpu' - is_coco = isinstance(data.get('val'), str) and data['val'].endswith('coco/val2017.txt') # COCO dataset + is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'coco{os.sep}val2017.txt') # COCO dataset nc = 1 if single_cls else int(data['nc']) # number of classes iouv = torch.linspace(0.5, 0.95, 10, device=device) # iou vector for mAP@0.5:0.95 niou = iouv.numel() From c9042dc2adbb635aeca407c10cf492a6eb14d772 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 19 Apr 2022 17:32:15 -0700 Subject: [PATCH 1046/1976] Improved non-latin `Annotator()` plotting (#7488) * Improved non-latin labels Annotator plotting May resolve https://github.com/ultralytics/yolov5/issues/7460 * Update train.py * Update train.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * add progress arg Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- train.py | 8 +++++--- utils/general.py | 4 ++-- utils/plots.py | 7 ++++--- 3 files changed, 11 insertions(+), 8 deletions(-) diff --git a/train.py b/train.py index 806e2cebe561..c774430df293 100644 --- a/train.py +++ b/train.py @@ -48,13 +48,13 @@ from utils.downloads import attempt_download from utils.general import (LOGGER, check_dataset, check_file, check_git_status, check_img_size, check_requirements, check_suffix, check_yaml, colorstr, get_latest_run, increment_path, init_seeds, - intersect_dicts, labels_to_class_weights, labels_to_image_weights, methods, one_cycle, - print_args, print_mutation, strip_optimizer) + intersect_dicts, is_ascii, labels_to_class_weights, labels_to_image_weights, methods, + one_cycle, print_args, print_mutation, strip_optimizer) from utils.loggers import Loggers from utils.loggers.wandb.wandb_utils import check_wandb_resume from utils.loss import ComputeLoss from utils.metrics import fitness -from utils.plots import plot_evolve, plot_labels +from utils.plots import check_font, plot_evolve, plot_labels from utils.torch_utils import EarlyStopping, ModelEMA, de_parallel, select_device, torch_distributed_zero_first LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html @@ -105,6 +105,8 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio init_seeds(1 + RANK) with torch_distributed_zero_first(LOCAL_RANK): data_dict = data_dict or check_dataset(data) # check if None + if not is_ascii(data_dict['names']): # non-latin labels, i.e. 
asian, arabic, cyrillic + check_font('Arial.Unicode.ttf', progress=True) train_path, val_path = data_dict['train'], data_dict['val'] nc = 1 if single_cls else int(data_dict['nc']) # number of classes names = ['item'] if single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names diff --git a/utils/general.py b/utils/general.py index daef2a427111..a4bc3cae9315 100755 --- a/utils/general.py +++ b/utils/general.py @@ -424,13 +424,13 @@ def check_file(file, suffix=''): return files[0] # return file -def check_font(font=FONT): +def check_font(font=FONT, progress=False): # Download font to CONFIG_DIR if necessary font = Path(font) if not font.exists() and not (CONFIG_DIR / font.name).exists(): url = "https://ultralytics.com/assets/" + font.name LOGGER.info(f'Downloading {url} to {CONFIG_DIR / font.name}...') - torch.hub.download_url_to_file(url, str(font), progress=False) + torch.hub.download_url_to_file(url, str(font), progress=progress) def check_dataset(data, autodownload=True): diff --git a/utils/plots.py b/utils/plots.py index 51e9cfdf6e04..842894e745df 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -19,7 +19,7 @@ from PIL import Image, ImageDraw, ImageFont from utils.general import (CONFIG_DIR, FONT, LOGGER, Timeout, check_font, check_requirements, clip_coords, - increment_path, is_ascii, is_chinese, try_except, xywh2xyxy, xyxy2xywh) + increment_path, is_ascii, try_except, xywh2xyxy, xyxy2xywh) from utils.metrics import fitness # Settings @@ -72,11 +72,12 @@ class Annotator: # YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'): assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.' - self.pil = pil or not is_ascii(example) or is_chinese(example) + non_ascii = not is_ascii(example) # non-latin labels, i.e. asian, arabic, cyrillic + self.pil = pil or non_ascii if self.pil: # use PIL self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) self.draw = ImageDraw.Draw(self.im) - self.font = check_pil_font(font='Arial.Unicode.ttf' if is_chinese(example) else font, + self.font = check_pil_font(font='Arial.Unicode.ttf' if non_ascii else font, size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12)) else: # use cv2 self.im = im From ab5b9174940f29a62374bddaf38cd5d2eeb68e25 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 19 Apr 2022 17:50:02 -0700 Subject: [PATCH 1047/1976] `check_fonts()` download to `CONFIG_DIR` fix (#7489) Follows https://github.com/ultralytics/yolov5/pull/7488. 
Correct bug where fonts were downloading to current working directory rather than global CONFIG_DIR --- utils/general.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/utils/general.py b/utils/general.py index a4bc3cae9315..cc37ad5fff62 100755 --- a/utils/general.py +++ b/utils/general.py @@ -427,10 +427,11 @@ def check_file(file, suffix=''): def check_font(font=FONT, progress=False): # Download font to CONFIG_DIR if necessary font = Path(font) - if not font.exists() and not (CONFIG_DIR / font.name).exists(): + file = CONFIG_DIR / font.name + if not font.exists() and not file.exists(): url = "https://ultralytics.com/assets/" + font.name - LOGGER.info(f'Downloading {url} to {CONFIG_DIR / font.name}...') - torch.hub.download_url_to_file(url, str(font), progress=progress) + LOGGER.info(f'Downloading {url} to {file}...') + torch.hub.download_url_to_file(url, str(file), progress=progress) def check_dataset(data, autodownload=True): From 3f3852e2ff755275098c07fe3bf4d2bde103ab30 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 19 Apr 2022 21:15:04 -0700 Subject: [PATCH 1048/1976] Fix val.py Ensemble() (#7490) --- models/experimental.py | 5 +++-- val.py | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/models/experimental.py b/models/experimental.py index e166722cbfca..b8d4d70d26e8 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -115,7 +115,8 @@ def attempt_load(weights, map_location=None, inplace=True, fuse=True): return model[-1] # return model else: print(f'Ensemble created with {weights}\n') - for k in ['names']: - setattr(model, k, getattr(model[-1], k)) + for k in 'names', 'nc', 'yaml': + setattr(model, k, getattr(model[0], k)) model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride # max stride + assert all(model[0].nc == m.nc for m in model), f'Models have different class counts: {[m.nc for m in model]}' return model # return ensemble diff --git a/val.py b/val.py index 13971612ac78..a773ff3e4fa3 100644 --- a/val.py +++ b/val.py @@ -163,7 +163,7 @@ def run( # Dataloader if not training: if pt and not single_cls: # check --weights are trained on --data - ncm = model.model.yaml['nc'] + ncm = model.model.nc assert ncm == nc, f'{weights[0]} ({ncm} classes) trained on different --data than what you passed ({nc} ' \ f'classes). Pass correct combination of --weights and --data that are trained together.' model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz)) # warmup From b77c8d9d72031bbccdd2ed26febd70483b467d2e Mon Sep 17 00:00:00 2001 From: Joseph Kocherhans Date: Wed, 20 Apr 2022 12:08:22 -0700 Subject: [PATCH 1049/1976] Added `YOLOv5_AUTOINSTALL` environment variable (#7505) * Added a way to skip dependency auto-installation. Setting the environment variable `YOLOv5_AUTOINSTALL=False` will skip installing any missing dependencies as if the user had passed `install=False` to `check_requirements`. 
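The gate described above is a one-line idiom: read the variable with a default, stringify, lower-case, and compare against 'true'. A standalone sketch of that idiom using the same `YOLOv5_AUTOINSTALL` name; the `maybe_install()` helper is illustrative and not the real `check_requirements()` implementation:

```python
import os
import subprocess
import sys

# Unset defaults to True; any casing of 'false' (or any non-'true' value) disables auto-install
AUTOINSTALL = str(os.getenv('YOLOv5_AUTOINSTALL', True)).lower() == 'true'


def maybe_install(package):
    # Install a missing package only when the global auto-install gate is open
    if not AUTOINSTALL:
        print(f'{package} is missing and YOLOv5_AUTOINSTALL=False, skipping auto-update')
        return
    subprocess.check_call([sys.executable, '-m', 'pip', 'install', package])


# maybe_install('onnx')  # example call (commented out to keep the sketch side-effect free)
```

Running e.g. `YOLOv5_AUTOINSTALL=False python train.py` then behaves as if every requirements check had been called with `install=False`.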
* Cleanup Co-authored-by: Glenn Jocher --- utils/general.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index cc37ad5fff62..92e3560de8c0 100755 --- a/utils/general.py +++ b/utils/general.py @@ -40,6 +40,7 @@ ROOT = FILE.parents[1] # YOLOv5 root directory DATASETS_DIR = ROOT.parent / 'datasets' # YOLOv5 datasets directory NUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads +AUTOINSTALL = str(os.getenv('YOLOv5_AUTOINSTALL', True)).lower() == 'true' # global auto-install mode VERBOSE = str(os.getenv('YOLOv5_VERBOSE', True)).lower() == 'true' # global verbose mode FONT = 'Arial.ttf' # https://ultralytics.com/assets/Arial.ttf @@ -338,7 +339,7 @@ def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), insta pkg.require(r) except Exception: # DistributionNotFound or VersionConflict if requirements not met s = f"{prefix} {r} not found and is required by YOLOv5" - if install: + if install and AUTOINSTALL: # check environment variable LOGGER.info(f"{s}, attempting auto-update...") try: assert check_online(), f"'pip install {r}' skipped (offline)" From 918d7b2b3f8433b80ff12b4407aa5ad524ddbf9d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 20 Apr 2022 14:23:55 -0700 Subject: [PATCH 1050/1976] Refactor Dockerfiles to `utils/docker` (#7510) * Refactor Docker files * Refactor Docker files * Update Dockerfile --- .dockerignore => utils/docker/.dockerignore | 0 Dockerfile => utils/docker/Dockerfile | 3 +- utils/docker/Dockerfile-cpu | 37 +++++++++++++++++++++ 3 files changed, 38 insertions(+), 2 deletions(-) rename .dockerignore => utils/docker/.dockerignore (100%) rename Dockerfile => utils/docker/Dockerfile (94%) create mode 100644 utils/docker/Dockerfile-cpu diff --git a/.dockerignore b/utils/docker/.dockerignore similarity index 100% rename from .dockerignore rename to utils/docker/.dockerignore diff --git a/Dockerfile b/utils/docker/Dockerfile similarity index 94% rename from Dockerfile rename to utils/docker/Dockerfile index 7df6c1854156..a2a0f0cd9c1a 100644 --- a/Dockerfile +++ b/utils/docker/Dockerfile @@ -23,11 +23,10 @@ COPY . /usr/src/app RUN git clone https://github.com/ultralytics/yolov5 /usr/src/yolov5 # Downloads to user config dir -ADD https://ultralytics.com/assets/Arial.ttf /root/.config/Ultralytics/ +ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ # Set environment variables ENV OMP_NUM_THREADS=8 -# ENV HOME=/usr/src/app # Usage Examples ------------------------------------------------------------------------------------------------------- diff --git a/utils/docker/Dockerfile-cpu b/utils/docker/Dockerfile-cpu new file mode 100644 index 000000000000..6e757baa3ef1 --- /dev/null +++ b/utils/docker/Dockerfile-cpu @@ -0,0 +1,37 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Start FROM Ubuntu image https://hub.docker.com/_/ubuntu +FROM ubuntu:latest + +# Install linux packages +RUN apt update +RUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt install -y tzdata +RUN apt install -y python3-pip git zip curl htop screen libgl1-mesa-glx libglib2.0-0 +RUN alias python=python3 + +# Install python dependencies +COPY requirements.txt . 
+RUN python3 -m pip install --upgrade pip +RUN pip install --no-cache -r requirements.txt albumentations gsutil notebook \ + coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu tensorflowjs \ + torch==1.11.0+cpu torchvision==0.12.0+cpu -f https://download.pytorch.org/whl/cpu/torch_stable.html + +# Create working directory +RUN mkdir -p /usr/src/app +WORKDIR /usr/src/app + +# Copy contents +COPY . /usr/src/app +RUN git clone https://github.com/ultralytics/yolov5 /usr/src/yolov5 + +# Downloads to user config dir +ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ + + +# Usage Examples ------------------------------------------------------------------------------------------------------- + +# Build and Push +# t=ultralytics/yolov5:latest-cpu && sudo docker build -t $t . && sudo docker push $t + +# Pull and Run +# t=ultralytics/yolov5:latest-cpu && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/datasets:/usr/src/datasets $t From 6ea81bb3a9bb1701bc0aa9ccca546368ce1fa400 Mon Sep 17 00:00:00 2001 From: Zengyf-CVer <41098760+Zengyf-CVer@users.noreply.github.com> Date: Thu, 21 Apr 2022 09:44:52 +0800 Subject: [PATCH 1051/1976] Add yesqa to precommit checks (#7511) * Update .pre-commit-config.yaml * Update .pre-commit-config.yaml --- .pre-commit-config.yaml | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index ae61892b68b2..bff7f8a40093 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -52,11 +52,10 @@ repos: # - mdformat-black # - mdformat_frontmatter - # TODO - #- repo: https://github.com/asottile/yesqa - # rev: v1.2.3 - # hooks: - # - id: yesqa + - repo: https://github.com/asottile/yesqa + rev: v1.3.0 + hooks: + - id: yesqa - repo: https://github.com/PyCQA/flake8 rev: 4.0.1 From 23718df1c6b546e525d06a6e2f6a4ebc9737bb4b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 21 Apr 2022 18:21:01 -0700 Subject: [PATCH 1052/1976] Fix val `plots=plots` (#7524) --- train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/train.py b/train.py index c774430df293..f6e66cb0ef09 100644 --- a/train.py +++ b/train.py @@ -461,7 +461,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio save_dir=save_dir, save_json=is_coco, verbose=True, - plots=True, + plots=plots, callbacks=callbacks, compute_loss=compute_loss) # val best model with plots if is_coco: From d2e698c75c4845757d31af4c9116f004624151e2 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 21 Apr 2022 20:06:57 -0700 Subject: [PATCH 1053/1976] Reduce val device transfers (#7525) --- val.py | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/val.py b/val.py index a773ff3e4fa3..b2b3bc75911e 100644 --- a/val.py +++ b/val.py @@ -220,14 +220,14 @@ def run( # Metrics for si, pred in enumerate(out): labels = targets[targets[:, 0] == si, 1:] - nl = len(labels) - tcls = labels[:, 0].tolist() if nl else [] # target class + nl, npr = labels.shape[0], pred.shape[0] # number of labels, predictions path, shape = Path(paths[si]), shapes[si][0] + correct = torch.zeros(npr, niou, dtype=torch.bool, device=device) # init seen += 1 - if len(pred) == 0: + if npr == 0: if nl: - stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls)) + stats.append((correct, *torch.zeros((3, 0)))) continue # Predictions @@ -244,9 +244,7 @@ def run( correct = 
process_batch(predn, labelsn, iouv) if plots: confusion_matrix.process_batch(predn, labelsn) - else: - correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool) - stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls)) # (correct, conf, pcls, tcls) + stats.append((correct, pred[:, 4], pred[:, 5], labels[:, 0])) # (correct, conf, pcls, tcls) # Save/log if save_txt: @@ -265,7 +263,7 @@ def run( callbacks.run('on_val_batch_end') # Compute metrics - stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy + stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)] # to numpy if len(stats) and stats[0].any(): tp, fp, p, r, f1, ap, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names) ap50, ap = ap[:, 0], ap.mean(1) # AP@0.5, AP@0.5:0.95 From b804b36bc4ea856ecec250add8ab39d4b5127eda Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 21 Apr 2022 20:31:26 -0700 Subject: [PATCH 1054/1976] Add Docker `--file` argument to build (#7527) --- utils/docker/Dockerfile | 2 +- utils/docker/Dockerfile-cpu | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index a2a0f0cd9c1a..9bb24bb6bf3e 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -32,7 +32,7 @@ ENV OMP_NUM_THREADS=8 # Usage Examples ------------------------------------------------------------------------------------------------------- # Build and Push -# t=ultralytics/yolov5:latest && sudo docker build -t $t . && sudo docker push $t +# t=ultralytics/yolov5:latest && sudo docker build -f utils/docker/Dockerfile -t $t . && sudo docker push $t # Pull and Run # t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all $t diff --git a/utils/docker/Dockerfile-cpu b/utils/docker/Dockerfile-cpu index 6e757baa3ef1..d30c07e81172 100644 --- a/utils/docker/Dockerfile-cpu +++ b/utils/docker/Dockerfile-cpu @@ -31,7 +31,7 @@ ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Aria # Usage Examples ------------------------------------------------------------------------------------------------------- # Build and Push -# t=ultralytics/yolov5:latest-cpu && sudo docker build -t $t . && sudo docker push $t +# t=ultralytics/yolov5:latest-cpu && sudo docker build -f utils/docker/Dockerfile-cpu -t $t . 
&& sudo docker push $t # Pull and Run # t=ultralytics/yolov5:latest-cpu && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/datasets:/usr/src/datasets $t From 813eba85b266fe46b0ac02a62fce8b25e3eeabac Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 22 Apr 2022 12:01:14 -0700 Subject: [PATCH 1055/1976] Empty val batch CUDA device fix (#7539) Verified fix for https://github.com/ultralytics/yolov5/pull/7525#issuecomment-1106081123 --- val.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/val.py b/val.py index b2b3bc75911e..58113f016a58 100644 --- a/val.py +++ b/val.py @@ -227,7 +227,7 @@ def run( if npr == 0: if nl: - stats.append((correct, *torch.zeros((3, 0)))) + stats.append((correct, *torch.zeros((3, 0), device=device))) continue # Predictions From cc1d7df03c7c3c37367e76b237ac4b087ea040d4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 22 Apr 2022 12:31:33 -0700 Subject: [PATCH 1056/1976] Autoinstall TensorRT if missing (#7537) * Autoinstall TensorRT if missing May resolve https://github.com/ultralytics/yolov5/issues/7464 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update export.py * Update export.py * Update export.py * Update export.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- export.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/export.py b/export.py index 2a5eff23c1a6..93d98c801d02 100644 --- a/export.py +++ b/export.py @@ -217,7 +217,15 @@ def export_coreml(model, im, file, int8, half, prefix=colorstr('CoreML:')): def export_engine(model, im, file, train, half, simplify, workspace=4, verbose=False, prefix=colorstr('TensorRT:')): # YOLOv5 TensorRT export https://developer.nvidia.com/tensorrt try: - import tensorrt as trt # pip install -U nvidia-tensorrt --index-url https://pypi.ngc.nvidia.com + assert im.device.type != 'cpu', 'export running on CPU but must be on GPU, i.e. `python export.py --device 0`' + try: + import tensorrt as trt + except Exception: + s = f"\n{prefix} tensorrt not found and is required by YOLOv5" + LOGGER.info(f"{s}, attempting auto-update...") + r = '-U nvidia-tensorrt --index-url https://pypi.ngc.nvidia.com' + LOGGER.info(subprocess.check_output(f"pip install {r}", shell=True).decode()) + import tensorrt as trt if trt.__version__[0] == '7': # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012 grid = model.model[-1].anchor_grid @@ -230,7 +238,6 @@ def export_engine(model, im, file, train, half, simplify, workspace=4, verbose=F onnx = file.with_suffix('.onnx') LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...') - assert im.device.type != 'cpu', 'export running on CPU but must be on GPU, i.e. 
`python export.py --device 0`' assert onnx.exists(), f'failed to export ONNX file: {onnx}' f = file.with_suffix('.engine') # TensorRT engine file logger = trt.Logger(trt.Logger.INFO) From c264795f50b685a8bef7f0d740482b0265ae4898 Mon Sep 17 00:00:00 2001 From: Zengyf-CVer <41098760+Zengyf-CVer@users.noreply.github.com> Date: Sat, 23 Apr 2022 04:36:27 +0800 Subject: [PATCH 1057/1976] Add mdformat to precommit checks and update other version (#7529) * Update .pre-commit-config.yaml * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update .pre-commit-config.yaml * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update CONTRIBUTING.md * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update README.md * Update README.md * Update README.md Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- .github/CODE_OF_CONDUCT.md | 24 ++++---- .pre-commit-config.yaml | 24 ++++---- CONTRIBUTING.md | 18 +++--- README.md | 53 ++++++++--------- utils/loggers/wandb/README.md | 106 +++++++++++++++++++--------------- 5 files changed, 119 insertions(+), 106 deletions(-) diff --git a/.github/CODE_OF_CONDUCT.md b/.github/CODE_OF_CONDUCT.md index ef10b05fc88e..27e59e9aab38 100644 --- a/.github/CODE_OF_CONDUCT.md +++ b/.github/CODE_OF_CONDUCT.md @@ -17,23 +17,23 @@ diverse, inclusive, and healthy community. Examples of behavior that contributes to a positive environment for our community include: -* Demonstrating empathy and kindness toward other people -* Being respectful of differing opinions, viewpoints, and experiences -* Giving and gracefully accepting constructive feedback -* Accepting responsibility and apologizing to those affected by our mistakes, +- Demonstrating empathy and kindness toward other people +- Being respectful of differing opinions, viewpoints, and experiences +- Giving and gracefully accepting constructive feedback +- Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience -* Focusing on what is best not just for us as individuals, but for the +- Focusing on what is best not just for us as individuals, but for the overall community Examples of unacceptable behavior include: -* The use of sexualized language or imagery, and sexual attention or +- The use of sexualized language or imagery, and sexual attention or advances of any kind -* Trolling, insulting or derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or email +- Trolling, insulting or derogatory comments, and personal or political attacks +- Public or private harassment +- Publishing others' private information, such as a physical or email address, without their explicit permission -* Other conduct which could reasonably be considered inappropriate in a +- Other conduct which could reasonably be considered inappropriate in a professional setting ## Enforcement Responsibilities @@ -121,8 +121,8 @@ https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder](https://github.com/mozilla/diversity). -[homepage]: https://www.contributor-covenant.org - For answers to common questions about this code of conduct, see the FAQ at https://www.contributor-covenant.org/faq. 
Translations are available at https://www.contributor-covenant.org/translations. + +[homepage]: https://www.contributor-covenant.org diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index bff7f8a40093..924c940f2c1a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -13,7 +13,7 @@ ci: repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.1.0 + rev: v4.2.0 hooks: - id: end-of-file-fixer - id: trailing-whitespace @@ -24,7 +24,7 @@ repos: - id: check-docstring-first - repo: https://github.com/asottile/pyupgrade - rev: v2.31.1 + rev: v2.32.0 hooks: - id: pyupgrade args: [--py36-plus] @@ -42,15 +42,17 @@ repos: - id: yapf name: YAPF formatting - # TODO - #- repo: https://github.com/executablebooks/mdformat - # rev: 0.7.7 - # hooks: - # - id: mdformat - # additional_dependencies: - # - mdformat-gfm - # - mdformat-black - # - mdformat_frontmatter + - repo: https://github.com/executablebooks/mdformat + rev: 0.7.14 + hooks: + - id: mdformat + additional_dependencies: + - mdformat-gfm + - mdformat-black + exclude: | + (?x)^( + README.md + )$ - repo: https://github.com/asottile/yesqa rev: v1.3.0 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index ebde03a562a0..13b9b73b50cc 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -18,16 +18,19 @@ Submitting a PR is easy! This example shows how to submit a PR for updating `req ### 1. Select File to Update Select `requirements.txt` to update by clicking on it in GitHub. +

PR_step1

### 2. Click 'Edit this file'

Button is in top-right corner.
+

PR_step2

### 3. Make Changes

Change `matplotlib` version from `3.2.2` to `3.3`.
+

PR_step3

### 4. Preview Changes and Submit PR
@@ -35,6 +38,7 @@ Change `matplotlib` version from `3.2.2` to `3.3`.

Click on the **Preview changes** tab to verify your updates. At the bottom of the screen select 'Create a **new branch** for this commit', assign your branch a descriptive name such as `fix/matplotlib_version` and click the green **Propose changes** button. All done, your PR is now submitted to YOLOv5 for review and approval 😃!
+

PR_step4

### PR recommendations @@ -70,21 +74,21 @@ understand and use to **reproduce** the problem. This is referred to by communit a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example). Your code that reproduces the problem should be: -* ✅ **Minimal** – Use as little code as possible that still produces the same problem -* ✅ **Complete** – Provide **all** parts someone else needs to reproduce your problem in the question itself -* ✅ **Reproducible** – Test the code you're about to provide to make sure it reproduces the problem +- ✅ **Minimal** – Use as little code as possible that still produces the same problem +- ✅ **Complete** – Provide **all** parts someone else needs to reproduce your problem in the question itself +- ✅ **Reproducible** – Test the code you're about to provide to make sure it reproduces the problem In addition to the above requirements, for [Ultralytics](https://ultralytics.com/) to provide assistance your code should be: -* ✅ **Current** – Verify that your code is up-to-date with current +- ✅ **Current** – Verify that your code is up-to-date with current GitHub [master](https://github.com/ultralytics/yolov5/tree/master), and if necessary `git pull` or `git clone` a new copy to ensure your problem has not already been resolved by previous commits. -* ✅ **Unmodified** – Your problem must be reproducible without any modifications to the codebase in this +- ✅ **Unmodified** – Your problem must be reproducible without any modifications to the codebase in this repository. [Ultralytics](https://ultralytics.com/) does not provide support for custom code ⚠️. -If you believe your problem meets all of the above criteria, please close this issue and raise a new one using the 🐛 ** -Bug Report** [template](https://github.com/ultralytics/yolov5/issues/new/choose) and providing +If you believe your problem meets all of the above criteria, please close this issue and raise a new one using the 🐛 +**Bug Report** [template](https://github.com/ultralytics/yolov5/issues/new/choose) and providing a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example) to help us better understand and diagnose your problem. diff --git a/README.md b/README.md index 54c5cbd83f5b..f1dd65b0a4b1 100644 --- a/README.md +++ b/README.md @@ -103,8 +103,6 @@ results.print() # or .show(), .save(), .crop(), .pandas(), etc.
- -
Inference with detect.py @@ -149,20 +147,20 @@ python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 12
Tutorials -* [Train Custom Data](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data)  🚀 RECOMMENDED -* [Tips for Best Training Results](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)  ☘️ +- [Train Custom Data](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data)  🚀 RECOMMENDED +- [Tips for Best Training Results](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)  ☘️ RECOMMENDED -* [Weights & Biases Logging](https://github.com/ultralytics/yolov5/issues/1289)  🌟 NEW -* [Roboflow for Datasets, Labeling, and Active Learning](https://github.com/ultralytics/yolov5/issues/4975)  🌟 NEW -* [Multi-GPU Training](https://github.com/ultralytics/yolov5/issues/475) -* [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36)  ⭐ NEW -* [TFLite, ONNX, CoreML, TensorRT Export](https://github.com/ultralytics/yolov5/issues/251) 🚀 -* [Test-Time Augmentation (TTA)](https://github.com/ultralytics/yolov5/issues/303) -* [Model Ensembling](https://github.com/ultralytics/yolov5/issues/318) -* [Model Pruning/Sparsity](https://github.com/ultralytics/yolov5/issues/304) -* [Hyperparameter Evolution](https://github.com/ultralytics/yolov5/issues/607) -* [Transfer Learning with Frozen Layers](https://github.com/ultralytics/yolov5/issues/1314)  ⭐ NEW -* [Architecture Summary](https://github.com/ultralytics/yolov5/issues/6998)  ⭐ NEW +- [Weights & Biases Logging](https://github.com/ultralytics/yolov5/issues/1289)  🌟 NEW +- [Roboflow for Datasets, Labeling, and Active Learning](https://github.com/ultralytics/yolov5/issues/4975)  🌟 NEW +- [Multi-GPU Training](https://github.com/ultralytics/yolov5/issues/475) +- [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36)  ⭐ NEW +- [TFLite, ONNX, CoreML, TensorRT Export](https://github.com/ultralytics/yolov5/issues/251) 🚀 +- [Test-Time Augmentation (TTA)](https://github.com/ultralytics/yolov5/issues/303) +- [Model Ensembling](https://github.com/ultralytics/yolov5/issues/318) +- [Model Pruning/Sparsity](https://github.com/ultralytics/yolov5/issues/304) +- [Hyperparameter Evolution](https://github.com/ultralytics/yolov5/issues/607) +- [Transfer Learning with Frozen Layers](https://github.com/ultralytics/yolov5/issues/1314)  ⭐ NEW +- [Architecture Summary](https://github.com/ultralytics/yolov5/issues/6998)  ⭐ NEW
@@ -203,7 +201,6 @@ Get started in seconds with our verified environments. Click each icon below for |:-:|:-:| |Automatically track and visualize all your YOLOv5 training runs in the cloud with [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme)|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) | - + + + +##
文件
+ +请参阅[YOLOv5 Docs](https://docs.ultralytics.com),了解有关培训、测试和部署的完整文件。 + +##
快速开始案例
+ +
+安装 + +在[**Python>=3.7.0**](https://www.python.org/) 的环境中克隆版本仓并安装 [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt),包括[**PyTorch>=1.7**](https://pytorch.org/get-started/locally/)。 +```bash +git clone https://github.com/ultralytics/yolov5 # 克隆 +cd yolov5 +pip install -r requirements.txt # 安装 +``` + +
+ +
+推断 + +YOLOv5 [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36) 推断. [模型](https://github.com/ultralytics/yolov5/tree/master/models) 自动从最新YOLOv5 [版本](https://github.com/ultralytics/yolov5/releases)下载。 + +```python +import torch + +# 模型 +model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # or yolov5n - yolov5x6, custom + +# 图像 +img = 'https://ultralytics.com/images/zidane.jpg' # or file, Path, PIL, OpenCV, numpy, list + +# 推论 +results = model(img) + +# 结果 +results.print() # or .show(), .save(), .crop(), .pandas(), etc. +``` + +
+ +
+用 detect.py 进行推断 + +`detect.py` 在各种资源上运行推理, 从最新的YOLOv5 [版本](https://github.com/ultralytics/yolov5/releases) 中自动下载 [模型](https://github.com/ultralytics/yolov5/tree/master/models) 并保存结果来运行/检测。 + +```bash +python detect.py --source 0 # 网络摄像头 + img.jpg # 图像 + vid.mp4 # 视频 + path/ # 文件夹 + path/*.jpg # glob + 'https://youtu.be/Zgi9g1ksQHc' # YouTube + 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP 流 +``` + +
+ +
+训练 + +以下指令再现了YOLOv5 [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) +数据集结果. [模型](https://github.com/ultralytics/yolov5/tree/master/models) 和 [数据集](https://github.com/ultralytics/yolov5/tree/master/data) 自动从最新的YOLOv5 [版本](https://github.com/ultralytics/yolov5/releases)中下载。YOLOv5n/s/m/l/x的训练时间在V100 GPU上是1/2/4/6/8天(多GPU倍速). 尽可能使用最大的 `--batch-size`, 或通过 `--batch-size -1` 来实现 YOLOv5 [自动批处理](https://github.com/ultralytics/yolov5/pull/5092). 批量大小显示为V100-16GB。 + +```bash +python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 128 + yolov5s 64 + yolov5m 40 + yolov5l 24 + yolov5x 16 +``` + + + +
+ +
+教程 + +- [训练自定义数据](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data) 🚀 推荐 +- [获得最佳训练效果的技巧](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results) ☘️ 推荐 +- [Weights & Biases 登陆](https://github.com/ultralytics/yolov5/issues/1289) 🌟 新 +- [Roboflow:数据集、标签和主动学习](https://github.com/ultralytics/yolov5/issues/4975) 🌟 新 +- [多GPU训练](https://github.com/ultralytics/yolov5/issues/475) +- [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36) ⭐ 新 +- [TFLite, ONNX, CoreML, TensorRT 导出](https://github.com/ultralytics/yolov5/issues/251) 🚀 +- [测试时数据增强 (TTA)](https://github.com/ultralytics/yolov5/issues/303) +- [模型组合](https://github.com/ultralytics/yolov5/issues/318) +- [模型剪枝/稀疏性](https://github.com/ultralytics/yolov5/issues/304) +- [超参数进化](https://github.com/ultralytics/yolov5/issues/607) +- [带有冻结层的迁移学习](https://github.com/ultralytics/yolov5/issues/1314) ⭐ 新 +- [架构概要](https://github.com/ultralytics/yolov5/issues/6998) ⭐ 新 + +
+ +##
环境
+ +使用经过我们验证的环境,几秒钟就可以开始。点击下面的每个图标了解详情。 + + + +##
一体化
+ + + +|Weights and Biases|Roboflow ⭐ 新| +|:-:|:-:| +|通过 [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme) 自动跟踪和可视化你在云端的所有YOLOv5训练运行状态。|标记并将您的自定义数据集直接导出到YOLOv5,以便用 [Roboflow](https://roboflow.com/?ref=ultralytics) 进行训练。 | + + + +##
为什么是 YOLOv5
+ +

+
+ YOLOv5-P5 640 图像 (点击扩展) + +

+
+
+ 图片注释 (点击扩展) + +- **COCO AP val** 表示 mAP@0.5:0.95 在5000张图像的[COCO val2017](http://cocodataset.org)数据集上,在256到1536的不同推理大小上测量的指标。 +- **GPU Speed** 衡量的是在 [COCO val2017](http://cocodataset.org) 数据集上使用 [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) V100实例在批量大小为32时每张图像的平均推理时间。 +- **EfficientDet** 数据来自 [google/automl](https://github.com/google/automl) ,批量大小为 8。 +- **重制** 于 `python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt` + +
+ +### 预训练检查点 + +|Model |size
(pixels) |mAPval
0.5:0.95 |mAPval
0.5 |Speed
CPU b1
(ms) |Speed
V100 b1
(ms) |Speed
V100 b32
(ms) |params
(M) |FLOPs
@640 (B) +|--- |--- |--- |--- |--- |--- |--- |--- |--- +|[YOLOv5n][assets] |640 |28.0 |45.7 |**45** |**6.3**|**0.6**|**1.9**|**4.5** +|[YOLOv5s][assets] |640 |37.4 |56.8 |98 |6.4 |0.9 |7.2 |16.5 +|[YOLOv5m][assets] |640 |45.4 |64.1 |224 |8.2 |1.7 |21.2 |49.0 +|[YOLOv5l][assets] |640 |49.0 |67.3 |430 |10.1 |2.7 |46.5 |109.1 +|[YOLOv5x][assets] |640 |50.7 |68.9 |766 |12.1 |4.8 |86.7 |205.7 +| | | | | | | | | +|[YOLOv5n6][assets] |1280 |36.0 |54.4 |153 |8.1 |2.1 |3.2 |4.6 +|[YOLOv5s6][assets] |1280 |44.8 |63.7 |385 |8.2 |3.6 |12.6 |16.8 +|[YOLOv5m6][assets] |1280 |51.3 |69.3 |887 |11.1 |6.8 |35.7 |50.0 +|[YOLOv5l6][assets] |1280 |53.7 |71.3 |1784 |15.8 |10.5 |76.8 |111.4 +|[YOLOv5x6][assets]
+ [TTA][TTA]|1280
1536 |55.0
**55.8** |72.7
**72.7** |3136
- |26.2
- |19.4
- |140.7
- |209.8
- + +
+ 表格注释 (点击扩展) + +- 所有检查点都以默认设置训练到300个时期. Nano和Small模型用 [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml) hyps, 其他模型使用 [hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml). +- **mAPval** 值是 [COCO val2017](http://cocodataset.org) 数据集上的单模型单尺度的值。 +
重制于 `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` +- 使用 [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) 实例对COCO val图像的平均速度。不包括NMS时间(~1 ms/img) +
重制于`python val.py --data coco.yaml --img 640 --task speed --batch 1` +- **TTA** [测试时数据增强](https://github.com/ultralytics/yolov5/issues/303) 包括反射和比例增强. +
重制于 `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment` + +
+ +##
贡献
+ +我们重视您的意见! 我们希望大家对YOLOv5的贡献尽可能的简单和透明。开始之前请先点击并查看我们的 [贡献指南](CONTRIBUTING.md),填写[YOLOv5调查问卷](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) 来向我们发送您的经验反馈。真诚感谢我们所有的贡献者! + + +##
联系
+ +关于YOLOv5的漏洞和功能问题,请访问 [GitHub Issues](https://github.com/ultralytics/yolov5/issues)。业务咨询或技术支持服务请访问[https://ultralytics.com/contact](https://ultralytics.com/contact)。 + +
+ + + +[assets]: https://github.com/ultralytics/yolov5/releases +[tta]: https://github.com/ultralytics/yolov5/issues/303 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 850527491859..0c24b1ee2a06 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -50,10 +50,7 @@ repos: additional_dependencies: - mdformat-gfm - mdformat-black - exclude: | - (?x)^( - README.md - )$ + exclude: "README.md|README_cn.md" - repo: https://github.com/asottile/yesqa rev: v1.3.0 diff --git a/README.md b/README.md index 953761229f77..b0ea0a5d814c 100644 --- a/README.md +++ b/README.md @@ -3,6 +3,8 @@

+ +English | [简体中文](.github/README_cn.md)
CI CPU testing From 0537e8dd13859c4b44db3bf6f39b9ff20eaf163b Mon Sep 17 00:00:00 2001 From: Nicholas Zolton <78943323+NicholasZolton@users.noreply.github.com> Date: Sun, 26 Jun 2022 17:04:11 -0500 Subject: [PATCH 1171/1976] Allow detect.py to use video size for initial window size (#8330) * fixed initial window size of detect.py being tiny * cleanup Co-authored-by: Glenn Jocher --- detect.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/detect.py b/detect.py index 9d92e4c169e4..bb09ce171a96 100644 --- a/detect.py +++ b/detect.py @@ -106,7 +106,7 @@ def run( # Run inference model.warmup(imgsz=(1 if pt else bs, 3, *imgsz)) # warmup - dt, seen = [0.0, 0.0, 0.0], 0 + seen, windows, dt = 0, [], [0.0, 0.0, 0.0] for path, im, im0s, vid_cap, s in dataset: t1 = time_sync() im = torch.from_numpy(im).to(device) @@ -173,7 +173,10 @@ def run( # Stream results im0 = annotator.result() if view_img: - cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux) + if p not in windows: + windows.append(p) + cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux) + cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0]) cv2.imshow(str(p), im0) cv2.waitKey(1) # 1 millisecond From b0814c95214b7fd0464310b1cf151fd5c1337c6d Mon Sep 17 00:00:00 2001 From: Zhiqiang Wang Date: Mon, 27 Jun 2022 19:10:30 +0800 Subject: [PATCH 1172/1976] Revamp Chinese docs (#8350) Revamp Chines docs --- .github/README_cn.md | 38 +++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/.github/README_cn.md b/.github/README_cn.md index 78719509ad85..7e90336d5157 100644 --- a/.github/README_cn.md +++ b/.github/README_cn.md @@ -60,7 +60,7 @@ YOLOv5🚀是一个在COCO数据集上预训练的物体检测架构和模型系 ##
Documentation
-See the [YOLOv5 Docs](https://docs.ultralytics.com) for full documentation on training (培训, a vocational-training term), testing and deployment.
+See the [YOLOv5 Docs](https://docs.ultralytics.com) for full documentation on training (训练, the standard model-training term), testing and deployment.

##
Quick Start Examples
@@ -77,9 +77,9 @@ pip install -r requirements.txt # 安装
-Inference (推断)
+Inference (推理)

-YOLOv5 [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36) inference (推断). [Models](https://github.com/ultralytics/yolov5/tree/master/models) download automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases).
+YOLOv5 [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36) inference (推理). [Models](https://github.com/ultralytics/yolov5/tree/master/models) download automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases).

```python
import torch

@@ -90,7 +90,7 @@ model = torch.hub.load('ultralytics/yolov5', 'yolov5s')  # or yolov5n - yolov5x6

# Image (图像)
img = 'https://ultralytics.com/images/zidane.jpg'  # or file, Path, PIL, OpenCV, numpy, list

-# Inference (推论)
+# Inference (推理)
results = model(img)

# Results (结果)
results.print()  # or .show(), .save(), .crop(), .pandas(), etc.
```
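Beyond `results.print()`, the returned Detections object can also be inspected programmatically; a minimal sketch continuing the hub example above (the `.pandas()` accessor is part of YOLOv5's hub API):

```python
# Continue from the hub example: one DataFrame per input image,
# with columns xmin, ymin, xmax, ymax, confidence, class, name
df = results.pandas().xyxy[0]
people = df[df['name'] == 'person']        # filter detections by class name
print(f'{len(people)} person detections')  # e.g. count people in the image
```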
-Inference with detect.py (推断)
+Inference with detect.py (推理)

-`detect.py` runs inference on a variety of sources (资源), automatically downloading [models](https://github.com/ultralytics/yolov5/tree/master/models) from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) and saving results to runs/detect.
+`detect.py` runs inference on a variety of data sources (数据源), automatically downloading [models](https://github.com/ultralytics/yolov5/tree/master/models) from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) and saving detection results to the `runs/detect` directory.

```bash
python detect.py --source 0  # webcam
```
Training

The commands below reproduce YOLOv5 [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) dataset results. [Models](https://github.com/ultralytics/yolov5/tree/master/models) and [datasets](https://github.com/ultralytics/yolov5/tree/master/data) download automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). Training times for YOLOv5n/s/m/l/x are 1/2/4/6/8 days on a V100 GPU (multi-GPU times faster). Use the largest `--batch-size` possible, or pass `--batch-size -1` for YOLOv5 [AutoBatch](https://github.com/ultralytics/yolov5/pull/5092). Batch sizes shown are for V100-16GB. (The original -/+ pair here differs only in CJK spacing, so one translation covers both.)

```bash
python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 128
```

@@ -139,13 +139,13 @@ python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 12

- [Train Custom Data](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data) 🚀 RECOMMENDED
- [Tips for Best Training Results](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results) ☘️ RECOMMENDED
-- [Weights & Biases login (登陆)](https://github.com/ultralytics/yolov5/issues/1289) 🌟 NEW
+- [Logging experiments with Weights & Biases (记录实验)](https://github.com/ultralytics/yolov5/issues/1289) 🌟 NEW
- [Roboflow: datasets, labeling and active learning](https://github.com/ultralytics/yolov5/issues/4975) 🌟 NEW
- [Multi-GPU training](https://github.com/ultralytics/yolov5/issues/475)
- [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36) ⭐ NEW
- [TFLite, ONNX, CoreML, TensorRT export](https://github.com/ultralytics/yolov5/issues/251) 🚀
- [Test-Time Augmentation (TTA)](https://github.com/ultralytics/yolov5/issues/303)
-- [Model combination (模型组合)](https://github.com/ultralytics/yolov5/issues/318)
+- [Model ensembling (模型集成)](https://github.com/ultralytics/yolov5/issues/318)
- [Model pruning/sparsity](https://github.com/ultralytics/yolov5/issues/304)
- [Hyperparameter evolution](https://github.com/ultralytics/yolov5/issues/607)
- [Transfer learning with frozen layers](https://github.com/ultralytics/yolov5/issues/1314) ⭐ NEW

@@ -175,7 +175,7 @@ python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 12

-##
Integrations
+##
How to integrate with third parties
@@ -199,7 +199,7 @@ We are super excited about our first-ever Ultralytics YOLOv5 🚀 EXPORT Competi

--> -##
Why YOLOv5
+##
Why choose YOLOv5

@@ -212,8 +212,8 @@ We are super excited about our first-ever Ultralytics YOLOv5 🚀 EXPORT Competi

- **COCO AP val** denotes mAP@0.5:0.95 measured on the 5000-image [COCO val2017](http://cocodataset.org) dataset over various inference sizes from 256 to 1536.
- **GPU Speed** measures the average inference time per image on the [COCO val2017](http://cocodataset.org) dataset using an [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) V100 instance at batch size 32.
-- **EfficientDet** data from [google/automl](https://github.com/google/automl) at batch size 8 (批量大小为 8).
-- **Reproduced at (重制于)** `python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt`
+- **EfficientDet** data from [google/automl](https://github.com/google/automl) with batch size set to 8 (批量大小设置为 8).
+- To reproduce mAP (复现 mAP 方法): `python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt`
@@ -238,22 +238,22 @@ We are super excited about our first-ever Ultralytics YOLOv5 🚀 EXPORT Competi

- All checkpoints are trained to 300 epochs with default settings. Nano and Small models use [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml) hyps, all other models use [hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml).
- **mAPval** values are for single-model single-scale on the [COCO val2017](http://cocodataset.org) dataset.
-
Reproduced at (重制于) `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65`
+
Reproduce with (复现方法:) `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65`
- Speed averaged over COCO val images using an [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) instance. NMS time (~1 ms/img) not included.
-
Reproduced at (重制于) `python val.py --data coco.yaml --img 640 --task speed --batch 1`
+
Reproduce with (复现方法:) `python val.py --data coco.yaml --img 640 --task speed --batch 1`
- **TTA** [Test Time Augmentation](https://github.com/ultralytics/yolov5/issues/303) includes reflection and scale augmentations.
-
Reproduced at (重制于) `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`
+
Reproduce with (复现方法:) `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`
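The TTA flag above also exists on the PyTorch Hub side; a minimal sketch, where `augment=True` is the hub counterpart of the `--augment` CLI flag:

```python
import torch

# Minimal sketch: Test-Time Augmentation via PyTorch Hub
model = torch.hub.load('ultralytics/yolov5', 'yolov5s')
results = model('https://ultralytics.com/images/zidane.jpg', augment=True)  # reflection + scale TTA
results.print()  # TTA runs extra scales/flips, so inference is slower
```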
##
Contributing
-We value your input! We want everyone's contributions to YOLOv5 to be as easy and transparent as possible (我们希望大家对YOLOv5的贡献尽可能的简单和透明). Before you begin, please read our [Contributing Guide](CONTRIBUTING.md) and fill out the [YOLOv5 Survey](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experience. Sincere thanks to all of our contributors!
+We value your input! We want to provide everyone with the easiest, most transparent way to contribute to YOLOv5 (我们希望给大家提供尽可能的简单和透明的方式对 YOLOv5 做出贡献). Before you begin, please read our [Contributing Guide](CONTRIBUTING.md) and fill out the [YOLOv5 Survey](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experience. Sincere thanks to all of our contributors!

##
Contact
-For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues). For business (业务) inquiries or professional support requests please visit [https://ultralytics.com/contact](https://ultralytics.com/contact).
+For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues). For commercial (商业) inquiries or professional support requests please visit [https://ultralytics.com/contact](https://ultralytics.com/contact).
From 8ebf569d14aca4f0e5b1f730501ac73644d71ae0 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 27 Jun 2022 16:11:24 +0200 Subject: [PATCH 1173/1976] Fix bias warmup LR init (#8356) Per https://github.com/ultralytics/yolov5/issues/8352 --- train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/train.py b/train.py index a06ad5a418f8..e1393213bb4b 100644 --- a/train.py +++ b/train.py @@ -335,7 +335,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round()) for j, x in enumerate(optimizer.param_groups): # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0 - x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch)]) + x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 0 else 0.0, x['initial_lr'] * lf(epoch)]) if 'momentum' in x: x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']]) From 34df5032a7d2e83fe3d16770a03bd129b115d184 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 27 Jun 2022 17:46:49 +0200 Subject: [PATCH 1174/1976] Add File Size (MB) column to benchmarks (#8359) * Add filesize to benchmarks.py * Add filesize to benchmarks.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/benchmarks.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/utils/benchmarks.py b/utils/benchmarks.py index d0f2a2529c5d..69d653a20916 100644 --- a/utils/benchmarks.py +++ b/utils/benchmarks.py @@ -41,7 +41,7 @@ import export import val from utils import notebook_init -from utils.general import LOGGER, check_yaml, print_args +from utils.general import LOGGER, check_yaml, file_size, print_args from utils.torch_utils import select_device @@ -75,10 +75,10 @@ def run( result = val.run(data, w, batch_size, imgsz, plots=False, device=device, task='benchmark', half=half) metrics = result[0] # metrics (mp, mr, map50, map, *losses(box, obj, cls)) speeds = result[2] # times (preprocess, inference, postprocess) - y.append([name, round(metrics[3], 4), round(speeds[1], 2)]) # mAP, t_inference + y.append([name, round(file_size(w), 1), round(metrics[3], 4), round(speeds[1], 2)]) # MB, mAP, t_inference except Exception as e: LOGGER.warning(f'WARNING: Benchmark failure for {name}: {e}') - y.append([name, None, None]) # mAP, t_inference + y.append([name, None, None, None]) # mAP, t_inference if pt_only and i == 0: break # break after PyTorch @@ -86,7 +86,8 @@ def run( LOGGER.info('\n') parse_opt() notebook_init() # print system info - py = pd.DataFrame(y, columns=['Format', 'mAP@0.5:0.95', 'Inference time (ms)'] if map else ['Format', 'Export', '']) + c = ['Format', 'Size (MB)', 'mAP@0.5:0.95', 'Inference time (ms)'] if map else ['Format', 'Export', '', ''] + py = pd.DataFrame(y, columns=c) LOGGER.info(f'\nBenchmarks complete ({time.time() - t:.2f}s)') LOGGER.info(str(py if map else py.iloc[:, :2])) return py From 50ff6eee31c72fe88bdd35fc7299b201cce0e9a6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 28 Jun 2022 12:04:58 +0200 Subject: [PATCH 1175/1976] Update protobuf requirement from <=3.20.1 to <4.21.3 (#8346) Updates the requirements on [protobuf](https://github.com/protocolbuffers/protobuf) to permit the latest version. 
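To make the warmup fix above (PATCH 1173) concrete, here is a minimal sketch of the `np.interp` ramp with illustrative toy numbers; the group indices follow the comment in that diff, where the bias group is now index 0:

```python
import numpy as np

# Toy warmup schedule: bias lr falls from warmup_bias_lr to lr0,
# all other groups rise from 0.0 to lr0 (values are illustrative only).
nw, lr0, warmup_bias_lr = 100, 0.01, 0.1
for ni in (0, 50, 100):                # integrated batch number
    for j in range(3):                 # param groups; j == 0 is now the bias group
        start = warmup_bias_lr if j == 0 else 0.0
        lr = np.interp(ni, [0, nw], [start, lr0])
        print(f'ni={ni} group={j} lr={lr:.4f}')
```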
- [Release notes](https://github.com/protocolbuffers/protobuf/releases) - [Changelog](https://github.com/protocolbuffers/protobuf/blob/main/generate_changelog.py) - [Commits](https://github.com/protocolbuffers/protobuf/commits) --- updated-dependencies: - dependency-name: protobuf dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) mode change 100755 => 100644 requirements.txt diff --git a/requirements.txt b/requirements.txt old mode 100755 new mode 100644 index 1937b93b5dda..332a0b81c45b --- a/requirements.txt +++ b/requirements.txt @@ -12,7 +12,7 @@ scipy>=1.4.1 # Google Colab version torch>=1.7.0 torchvision>=0.8.1 tqdm>=4.41.0 -protobuf<=3.20.1 # https://github.com/ultralytics/yolov5/issues/8012 +protobuf<4.21.3 # https://github.com/ultralytics/yolov5/issues/8012 # Logging ------------------------------------- tensorboard>=2.4.1 From 0c1324067c348c985b0c689a1e71cd9ba01513e0 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 28 Jun 2022 15:22:15 +0200 Subject: [PATCH 1176/1976] Fix ONNX `--dynamic` export on GPU (#8378) * Fix ONNX `--dynamic` export on GPU Patch forces --dynamic export model and image to CPU. Resolves bug raised in https://github.com/ultralytics/yolov5/issues/8377 * Update export.py --- export.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/export.py b/export.py index 72e170a30bf2..9daf39f871c2 100644 --- a/export.py +++ b/export.py @@ -119,8 +119,8 @@ def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorst f = file.with_suffix('.onnx') torch.onnx.export( - model, - im, + model.cpu() if dynamic else model, # --dynamic only compatible with cpu + im.cpu() if dynamic else im, f, verbose=False, opset_version=opset, @@ -499,8 +499,6 @@ def run( im = torch.zeros(batch_size, 3, *imgsz).to(device) # image size(1,3,320,192) BCHW iDetection # Update model - if half and not coreml and not xml: - im, model = im.half(), model.half() # to FP16 model.train() if train else model.eval() # training mode = no Detect() layer grid construction for k, m in model.named_modules(): if isinstance(m, Detect): @@ -510,6 +508,8 @@ def run( for _ in range(2): y = model(im) # dry runs + if half and not coreml: + im, model = im.half(), model.half() # to FP16 shape = tuple(y[0].shape) # model output shape LOGGER.info(f"\n{colorstr('PyTorch:')} starting from {file} with output shape {shape} ({file_size(file):.1f} MB)") From f76a78e7078185ecdc67470d8658103cf2067c81 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 28 Jun 2022 17:34:24 +0200 Subject: [PATCH 1177/1976] Update tutorial.ipynb (#8380) --- tutorial.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 664cbc156082..7cd9a2d17e94 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -369,7 +369,7 @@ "colab_type": "text" }, "source": [ - "\"Open" + "\"Open" ] }, { From 6935a54e603d634f6b0a9026604dc5875d1ca990 Mon Sep 17 00:00:00 2001 From: Giacomo Guiduzzi <10937563+giacomoguiduzzi@users.noreply.github.com> Date: Wed, 29 Jun 2022 12:41:46 +0200 Subject: [PATCH 1178/1976] Implementation of Early Stopping for DDP training (#8345) * Implementation of Early Stopping for DDP training This edit correctly uses the broadcast_object_list() function to send slave processes a boolean so to end the training phase if the variable is True, thus 
allowing the master process to destroy the process group and terminate. * Update train.py * Update train.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update train.py * Update train.py * Update train.py * Further cleanup This cleans up the definition of broadcast_list and removes the requirement for clear() afterward. Co-authored-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- train.py | 24 ++++++++++-------------- 1 file changed, 10 insertions(+), 14 deletions(-) diff --git a/train.py b/train.py index e1393213bb4b..dd5eeb600a76 100644 --- a/train.py +++ b/train.py @@ -294,7 +294,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio results = (0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls) scheduler.last_epoch = start_epoch - 1 # do not move scaler = torch.cuda.amp.GradScaler(enabled=amp) - stopper = EarlyStopping(patience=opt.patience) + stopper, stop = EarlyStopping(patience=opt.patience), False compute_loss = ComputeLoss(model) # init loss class callbacks.run('on_train_start') LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n' @@ -402,6 +402,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio # Update best mAP fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95] + stop = stopper(epoch=epoch, fitness=fi) # early stop check if fi > best_fitness: best_fitness = fi log_vals = list(mloss) + list(results) + lr @@ -428,19 +429,14 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio del ckpt callbacks.run('on_model_save', last, epoch, final_epoch, best_fitness, fi) - # Stop Single-GPU - if RANK == -1 and stopper(epoch=epoch, fitness=fi): - break - - # Stop DDP TODO: known issues shttps://github.com/ultralytics/yolov5/pull/4576 - # stop = stopper(epoch=epoch, fitness=fi) - # if RANK == 0: - # dist.broadcast_object_list([stop], 0) # broadcast 'stop' to all ranks - - # Stop DPP - # with torch_distributed_zero_first(RANK): - # if stop: - # break # must break all DDP ranks + # EarlyStopping + if RANK != -1: # if DDP training + broadcast_list = [stop if RANK == 0 else None] + dist.broadcast_object_list(broadcast_list, 0) # broadcast 'stop' to all ranks + if RANK != 0: + stop = broadcast_list[0] + if stop: + break # must break all DDP ranks # end epoch ---------------------------------------------------------------------------------------------------- # end training ----------------------------------------------------------------------------------------------------- From e50dc38d3687d18cd932aa342bca03ca7125bbe0 Mon Sep 17 00:00:00 2001 From: Amir Pourmand Date: Thu, 30 Jun 2022 17:31:31 +0430 Subject: [PATCH 1179/1976] Improve `--local_rank` arg comment (#8409) * add more docs * add more docs * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update train.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/train.py b/train.py index dd5eeb600a76..3161159ba44d 100644 --- a/train.py +++ b/train.py @@ -504,7 +504,7 @@ def parse_opt(known=False): parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)') parser.add_argument('--freeze', nargs='+', type=int, 
default=[0], help='Freeze layers: backbone=10, first3=0 1 2') parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)') - parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify') + parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify') # Weights & Biases arguments parser.add_argument('--entity', default=None, help='W&B: Entity') From 898332433a71b8846b15daa276a8ac45c9efa98b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 30 Jun 2022 16:19:22 +0200 Subject: [PATCH 1180/1976] Update cache comments (#8414) * Update cache comments For better readability * Update dataloaders.py --- utils/dataloaders.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 51d1612d3d5d..5d4dfc6e4d14 100755 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -449,10 +449,10 @@ def __init__(self, cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') try: cache, exists = np.load(cache_path, allow_pickle=True).item(), True # load dict - assert cache['version'] == self.cache_version # same version - assert cache['hash'] == get_hash(self.label_files + self.im_files) # same hash + assert cache['version'] == self.cache_version # matches current version + assert cache['hash'] == get_hash(self.label_files + self.im_files) # identical hash except Exception: - cache, exists = self.cache_labels(cache_path, prefix), False # cache + cache, exists = self.cache_labels(cache_path, prefix), False # run cache ops # Display cache nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupt, total From d94b4705a65e751a8238696704a6300df4ac33db Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 1 Jul 2022 15:41:14 +0200 Subject: [PATCH 1181/1976] TRT `--half` fix autocast images to FP16 (#8435) * TRT `--half` fix autocast images to FP16 Resolves bug raised in https://github.com/ultralytics/yolov5/issues/7822 * Update common.py --- models/common.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/models/common.py b/models/common.py index 7690f714def8..a6488dd85648 100644 --- a/models/common.py +++ b/models/common.py @@ -441,6 +441,9 @@ def wrap_frozen_graph(gd, inputs, outputs): def forward(self, im, augment=False, visualize=False, val=False): # YOLOv5 MultiBackend inference b, ch, h, w = im.shape # batch, channel, height, width + if self.fp16 and im.dtype != torch.float16: + im = im.half() # to FP16 + if self.pt: # PyTorch y = self.model(im, augment=augment, visualize=visualize)[0] elif self.jit: # TorchScript From da2ee3934e2572d700000cc1e5fdac615ba4dd79 Mon Sep 17 00:00:00 2001 From: Colin Wong Date: Fri, 1 Jul 2022 15:15:09 -0500 Subject: [PATCH 1182/1976] Expose OpenVINO `batch_size` similarly to TensorRT (#8437) --- models/common.py | 1 + 1 file changed, 1 insertion(+) diff --git a/models/common.py b/models/common.py index a6488dd85648..a40207fd4d7b 100644 --- a/models/common.py +++ b/models/common.py @@ -366,6 +366,7 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, if not Path(w).is_file(): # if not *.xml w = next(Path(w).glob('*.xml')) # get *.xml file from *_openvino_model dir network = ie.read_model(model=w, weights=Path(w).with_suffix('.bin')) + batch_size = network.batch_size executable_network = ie.compile_model(network, device_name="CPU") # device_name="MYRIAD" for Intel NCS2 output_layer = 
next(iter(executable_network.outputs)) meta = Path(w).with_suffix('.yaml') From 29d79a6360d8c7da8875284246847db3312e270a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 2 Jul 2022 18:35:45 +0200 Subject: [PATCH 1183/1976] Do not prefer Apple MPS (#8446) Require explicit request for MPS, i.e. ```bash python detect.py --device mps ``` Reverts https://github.com/ultralytics/yolov5/pull/8210 for preferring MPS if available. Note that torch MPS is experiencing ongoing compatibility issues in https://github.com/pytorch/pytorch/issues/77886 --- utils/torch_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index b1b107ee4f1b..c21dc6658c1e 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -62,7 +62,7 @@ def select_device(device='', batch_size=0, newline=True): assert torch.cuda.is_available() and torch.cuda.device_count() >= len(device.replace(',', '')), \ f"Invalid CUDA '--device {device}' requested, use '--device cpu' or pass valid CUDA device(s)" - if not cpu and torch.cuda.is_available(): # prefer GPU if available + if not (cpu or mps) and torch.cuda.is_available(): # prefer GPU if available devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e. 0,1,6,7 n = len(devices) # device count if n > 1 and batch_size > 0: # check batch_size is divisible by device_count @@ -72,7 +72,7 @@ def select_device(device='', batch_size=0, newline=True): p = torch.cuda.get_device_properties(i) s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / (1 << 20):.0f}MiB)\n" # bytes to MB arg = 'cuda:0' - elif not cpu and getattr(torch, 'has_mps', False) and torch.backends.mps.is_available(): # prefer MPS if available + elif mps and getattr(torch, 'has_mps', False) and torch.backends.mps.is_available(): # prefer MPS if available s += 'MPS\n' arg = 'mps' else: # revert to CPU From c7689198bc66023378f71aa80c0829a763a928bd Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 4 Jul 2022 15:01:11 +0200 Subject: [PATCH 1184/1976] Update stale.yml (#8465) --- .github/workflows/stale.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index ee08510b4a30..03d99790a4a7 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -32,7 +32,9 @@ jobs: Thank you for your contributions to YOLOv5 🚀 and Vision AI ⭐! stale-pr-message: 'This pull request has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions YOLOv5 🚀 and Vision AI ⭐.' - days-before-stale: 30 - days-before-close: 5 + days-before-issue-stale: 30 + days-before-issue-close: 10 + days-before-pr-stale: 90 + days-before-pr-close: 30 exempt-issue-labels: 'documentation,tutorial,TODO' operations-per-run: 300 # The maximum number of operations per run, used to control rate limiting. 
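Tying the MPS change above (PATCH 1183) together: device selection now prefers CUDA and falls back to CPU unless MPS is asked for by name. Below is a minimal sketch of that order using a hypothetical helper, not the actual `select_device()`:

```python
import torch

def pick_device(device: str = '') -> torch.device:
    """Hypothetical helper mirroring the selection order after PATCH 1183."""
    device = device.strip().lower()
    if device == 'cpu':
        return torch.device('cpu')
    if device == 'mps':
        # MPS only on explicit request, e.g. `python detect.py --device mps`
        if getattr(torch, 'has_mps', False) and torch.backends.mps.is_available():
            return torch.device('mps')
        return torch.device('cpu')  # requested but unavailable: fall back
    if torch.cuda.is_available():
        return torch.device('cuda:0')  # default preference
    return torch.device('cpu')

print(pick_device())       # cuda:0 or cpu, never silently mps
print(pick_device('mps'))  # mps only when explicitly requested and supported
```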
From fdc9d9198e0dea90d0536f63b6408b97b1399cc1 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 4 Jul 2022 22:09:24 +0200 Subject: [PATCH 1185/1976] [pre-commit.ci] pre-commit suggestions (#8470) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/pre-commit/pre-commit-hooks: v4.2.0 → v4.3.0](https://github.com/pre-commit/pre-commit-hooks/compare/v4.2.0...v4.3.0) - [github.com/asottile/pyupgrade: v2.32.1 → v2.34.0](https://github.com/asottile/pyupgrade/compare/v2.32.1...v2.34.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 0c24b1ee2a06..9b8f28c77506 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -13,7 +13,7 @@ ci: repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.2.0 + rev: v4.3.0 hooks: - id: end-of-file-fixer - id: trailing-whitespace @@ -24,7 +24,7 @@ repos: - id: check-docstring-first - repo: https://github.com/asottile/pyupgrade - rev: v2.32.1 + rev: v2.34.0 hooks: - id: pyupgrade name: Upgrade code From 1ab23fc67f52d44d5f8ce67a895e73c7cbd7aec5 Mon Sep 17 00:00:00 2001 From: Junya Morioka <77187490+mjun0812@users.noreply.github.com> Date: Thu, 7 Jul 2022 02:32:58 +0900 Subject: [PATCH 1186/1976] Exclude torch==1.12.0, torchvision==0.13.0 (Fix #8395) (#8497) Exclude torch==1.12.0, torchvision==0.13.0 --- requirements.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index 332a0b81c45b..ad3fd49691d4 100644 --- a/requirements.txt +++ b/requirements.txt @@ -9,8 +9,8 @@ Pillow>=7.1.2 PyYAML>=5.3.1 requests>=2.23.0 scipy>=1.4.1 # Google Colab version -torch>=1.7.0 -torchvision>=0.8.1 +torch>=1.7.0,!=1.12.0 # https://github.com/ultralytics/yolov5/issues/8395 +torchvision>=0.8.1,!=0.13.0 # https://github.com/ultralytics/yolov5/issues/8395 tqdm>=4.41.0 protobuf<4.21.3 # https://github.com/ultralytics/yolov5/issues/8012 From 36f64a981d08c1fc34c50ae2ff8a15769ee6b49b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 7 Jul 2022 12:34:01 +0200 Subject: [PATCH 1187/1976] Update tutorial.ipynb (#8507) --- tutorial.ipynb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 7cd9a2d17e94..bdfba399a883 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -1105,8 +1105,8 @@ "# TensorRT \n", "# https://docs.nvidia.com/deeplearning/tensorrt/install-guide/index.html#installing-pip\n", "!pip install -U nvidia-tensorrt --index-url https://pypi.ngc.nvidia.com # install\n", - "!python export.py --weights yolov5s.pt --include engine --imgsz 640 640 --device 0 # export\n", - "!python detect.py --weights yolov5s.engine --imgsz 640 640 --device 0 # inference" + "!python export.py --weights yolov5s.pt --include engine --imgsz 640 --device 0 # export\n", + "!python detect.py --weights yolov5s.engine --imgsz 640 --device 0 # inference" ], "execution_count": null, "outputs": [] From 27d831b6e4ae4b0286ba0159f5c8542e052cd3c9 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Thu, 7 Jul 2022 18:09:29 +0530 Subject: [PATCH 1188/1976] Training reproducibility improvements (#8213) * attempt at reproducibility * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * use deterministic algs * fix 
everything :) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * revert dataloader changes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * process_batch as np * remove newline * Remove dataloader init fcn * Update val.py * Update train.py * revert additional changes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update train.py * Add --seed arg * Update general.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update train.py * Update train.py * Update val.py * Update train.py * Update general.py * Update general.py * Add deterministic argument to init_seeds() Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- train.py | 3 ++- utils/general.py | 10 +++++++++- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/train.py b/train.py index 3161159ba44d..bf5b4c69d74c 100644 --- a/train.py +++ b/train.py @@ -101,7 +101,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio # Config plots = not evolve and not opt.noplots # create plots cuda = device.type != 'cpu' - init_seeds(1 + RANK) + init_seeds(opt.seed + 1 + RANK, deterministic=True) with torch_distributed_zero_first(LOCAL_RANK): data_dict = data_dict or check_dataset(data) # check if None train_path, val_path = data_dict['train'], data_dict['val'] @@ -504,6 +504,7 @@ def parse_opt(known=False): parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)') parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2') parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)') + parser.add_argument('--seed', type=int, default=0, help='Global training seed') parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify') # Weights & Biases arguments diff --git a/utils/general.py b/utils/general.py index a3e242d78a17..17b689010b39 100755 --- a/utils/general.py +++ b/utils/general.py @@ -195,14 +195,22 @@ def print_args(args: Optional[dict] = None, show_file=True, show_fcn=False): LOGGER.info(colorstr(s) + ', '.join(f'{k}={v}' for k, v in args.items())) -def init_seeds(seed=0): +def init_seeds(seed=0, deterministic=False): # Initialize random number generator (RNG) seeds https://pytorch.org/docs/stable/notes/randomness.html # cudnn seed 0 settings are slower and more reproducible, else faster and less reproducible import torch.backends.cudnn as cudnn + + if deterministic and check_version(torch.__version__, '1.12.0'): # https://github.com/ultralytics/yolov5/pull/8213 + torch.use_deterministic_algorithms(True) + os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8' + # os.environ['PYTHONHASHSEED'] = str(seed) + random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) cudnn.benchmark, cudnn.deterministic = (False, True) if seed == 0 else (True, False) + # torch.cuda.manual_seed(seed) + # torch.cuda.manual_seed_all(seed) # for multi GPU, exception safe def intersect_dicts(da, db, exclude=()): From 9d7bc06ae7ea59eeb09be14a42cc4530cdb97a22 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 7 Jul 2022 20:13:42 +0200 Subject: [PATCH 1189/1976] Revert "Expose OpenVINO `batch_size` similarly to TensorRT" 
(#8510) Revert "Expose OpenVINO `batch_size` similarly to TensorRT (#8437)" This reverts commit da2ee3934e2572d700000cc1e5fdac615ba4dd79. --- models/common.py | 1 - 1 file changed, 1 deletion(-) diff --git a/models/common.py b/models/common.py index a40207fd4d7b..a6488dd85648 100644 --- a/models/common.py +++ b/models/common.py @@ -366,7 +366,6 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, if not Path(w).is_file(): # if not *.xml w = next(Path(w).glob('*.xml')) # get *.xml file from *_openvino_model dir network = ie.read_model(model=w, weights=Path(w).with_suffix('.bin')) - batch_size = network.batch_size executable_network = ie.compile_model(network, device_name="CPU") # device_name="MYRIAD" for Intel NCS2 output_layer = next(iter(executable_network.outputs)) meta = Path(w).with_suffix('.yaml') From dd28df98c2307abfe13f8857110bfcd6b5c4eb4b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 7 Jul 2022 20:36:23 +0200 Subject: [PATCH 1190/1976] Avoid FP64 ops for MPS support in train.py (#8511) Avoid FP64 ops for MPS support Resolves https://github.com/ultralytics/yolov5/pull/7878#issuecomment-1177952614 --- utils/general.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/utils/general.py b/utils/general.py index 17b689010b39..a85a2915a31a 100755 --- a/utils/general.py +++ b/utils/general.py @@ -644,7 +644,7 @@ def labels_to_class_weights(labels, nc=80): return torch.Tensor() labels = np.concatenate(labels, 0) # labels.shape = (866643, 5) for COCO - classes = labels[:, 0].astype(np.int) # labels = [class xywh] + classes = labels[:, 0].astype(int) # labels = [class xywh] weights = np.bincount(classes, minlength=nc) # occurrences per class # Prepend gridpoint count (for uCE training) @@ -654,13 +654,13 @@ def labels_to_class_weights(labels, nc=80): weights[weights == 0] = 1 # replace empty bins with 1 weights = 1 / weights # number of targets per class weights /= weights.sum() # normalize - return torch.from_numpy(weights) + return torch.from_numpy(weights).float() def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)): # Produces image weights based on class_weights and image contents # Usage: index = random.choices(range(n), weights=image_weights, k=1) # weighted image sample - class_counts = np.array([np.bincount(x[:, 0].astype(np.int), minlength=nc) for x in labels]) + class_counts = np.array([np.bincount(x[:, 0].astype(int), minlength=nc) for x in labels]) return (class_weights.reshape(1, nc) * class_counts).sum(1) From 39d7a93619083cb8e37f5ef7708cf50b34e20ee1 Mon Sep 17 00:00:00 2001 From: UnglvKitDe <100289696+UnglvKitDe@users.noreply.github.com> Date: Thu, 7 Jul 2022 20:42:09 +0200 Subject: [PATCH 1191/1976] Fix AP calculation bug #8464 (#8484) Co-authored-by: Glenn Jocher --- val.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/val.py b/val.py index f4f4bab7e92d..77f6bbf5b7c2 100644 --- a/val.py +++ b/val.py @@ -227,7 +227,7 @@ def run( if npr == 0: if nl: - stats.append((correct, *torch.zeros((3, 0), device=device))) + stats.append((correct, *torch.zeros((2, 0), device=device), labels[:, 0])) continue # Predictions From 3e54651fcaee59561a405b00458bf95df1c8b82e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 7 Jul 2022 23:41:34 +0200 Subject: [PATCH 1192/1976] Add `--hard-fail` argument to benchmarks for CI errors (#8513) * Add `--hard-fail` list argument to benchmarks for CI Will cause CI to fail on a benchmark failure for given indices. 
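Returning to the FP64 fix above (PATCH 1190): the class-weight math itself is simple, and the final FP32 cast is what keeps FP64 tensors off MPS. A minimal sketch with toy labels (values are illustrative only):

```python
import numpy as np
import torch

# Toy labels in YOLO format: [class, x, y, w, h] per object
labels = np.array([[0, .5, .5, .2, .2], [1, .3, .3, .1, .1], [1, .7, .7, .1, .1]])
nc = 2
classes = labels[:, 0].astype(int)            # plain int, not the removed np.int alias
weights = np.bincount(classes, minlength=nc)  # occurrences per class -> [1, 2]
weights[weights == 0] = 1                     # replace empty bins to avoid divide-by-zero
weights = 1 / weights                         # rarer classes weigh more
weights /= weights.sum()                      # normalize
w = torch.from_numpy(weights).float()         # cast FP64 -> FP32 for MPS compatibility
print(w)                                      # tensor([0.6667, 0.3333])
```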
* Update ci-testing.yml * Attempt Failure (CI should fail) * Update benchmarks.py * Update export.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update benchmarks.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update ci-testing.yml * Update benchmarks.py * Update benchmarks.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/workflows/ci-testing.yml | 2 +- export.py | 24 ++++++++++++------------ utils/benchmarks.py | 16 ++++++++++++---- 3 files changed, 25 insertions(+), 17 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 4083ac354c46..f3e36675f49d 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -39,7 +39,7 @@ jobs: pip list - name: Run benchmarks run: | - python utils/benchmarks.py --weights ${{ matrix.model }}.pt --img 320 + python utils/benchmarks.py --weights ${{ matrix.model }}.pt --img 320 --hard-fail Tests: timeout-minutes: 60 diff --git a/export.py b/export.py index 9daf39f871c2..1d8f07fc9e2f 100644 --- a/export.py +++ b/export.py @@ -75,18 +75,18 @@ def export_formats(): # YOLOv5 export formats x = [ - ['PyTorch', '-', '.pt', True], - ['TorchScript', 'torchscript', '.torchscript', True], - ['ONNX', 'onnx', '.onnx', True], - ['OpenVINO', 'openvino', '_openvino_model', False], - ['TensorRT', 'engine', '.engine', True], - ['CoreML', 'coreml', '.mlmodel', False], - ['TensorFlow SavedModel', 'saved_model', '_saved_model', True], - ['TensorFlow GraphDef', 'pb', '.pb', True], - ['TensorFlow Lite', 'tflite', '.tflite', False], - ['TensorFlow Edge TPU', 'edgetpu', '_edgetpu.tflite', False], - ['TensorFlow.js', 'tfjs', '_web_model', False],] - return pd.DataFrame(x, columns=['Format', 'Argument', 'Suffix', 'GPU']) + ['PyTorch', '-', '.pt', True, True], + ['TorchScript', 'torchscript', '.torchscript', True, True], + ['ONNX', 'onnx', '.onnx', True, True], + ['OpenVINO', 'openvino', '_openvino_model', True, False], + ['TensorRT', 'engine', '.engine', False, True], + ['CoreML', 'coreml', '.mlmodel', True, False], + ['TensorFlow SavedModel', 'saved_model', '_saved_model', True, True], + ['TensorFlow GraphDef', 'pb', '.pb', True, True], + ['TensorFlow Lite', 'tflite', '.tflite', True, False], + ['TensorFlow Edge TPU', 'edgetpu', '_edgetpu.tflite', False, False], + ['TensorFlow.js', 'tfjs', '_web_model', False, False],] + return pd.DataFrame(x, columns=['Format', 'Argument', 'Suffix', 'CPU', 'GPU']) def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:')): diff --git a/utils/benchmarks.py b/utils/benchmarks.py index 69d653a20916..03bab9b6ded2 100644 --- a/utils/benchmarks.py +++ b/utils/benchmarks.py @@ -26,6 +26,7 @@ """ import argparse +import platform import sys import time from pathlib import Path @@ -54,14 +55,17 @@ def run( half=False, # use FP16 half-precision inference test=False, # test exports only pt_only=False, # test PyTorch only + hard_fail=False, # throw error on benchmark failure ): y, t = [], time.time() device = select_device(device) - for i, (name, f, suffix, gpu) in export.export_formats().iterrows(): # index, (name, file, suffix, gpu-capable) + for i, (name, f, suffix, cpu, gpu) in export.export_formats().iterrows(): # index, (name, file, suffix, CPU, GPU) try: - assert i != 9, 'Edge TPU not supported' - assert i != 10, 'TF.js not supported' - if device.type != 'cpu': + assert i not in (9, 
10), f'{name} inference not supported' # Edge TPU and TF.js are unsupported + assert i != 5 or platform.system() == 'Darwin', f'{name} inference only supported on macOS>=10.13' + if 'cpu' in device.type: + assert cpu, f'{name} inference not supported on CPU' + if 'cuda' in device.type: assert gpu, f'{name} inference not supported on GPU' # Export @@ -77,6 +81,8 @@ def run( speeds = result[2] # times (preprocess, inference, postprocess) y.append([name, round(file_size(w), 1), round(metrics[3], 4), round(speeds[1], 2)]) # MB, mAP, t_inference except Exception as e: + if hard_fail: + assert type(e) is AssertionError, f'Benchmark --hard-fail for {name}: {e}' LOGGER.warning(f'WARNING: Benchmark failure for {name}: {e}') y.append([name, None, None, None]) # mAP, t_inference if pt_only and i == 0: @@ -102,6 +108,7 @@ def test( half=False, # use FP16 half-precision inference test=False, # test exports only pt_only=False, # test PyTorch only + hard_fail=False, # throw error on benchmark failure ): y, t = [], time.time() device = select_device(device) @@ -134,6 +141,7 @@ def parse_opt(): parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') parser.add_argument('--test', action='store_true', help='test exports only') parser.add_argument('--pt-only', action='store_true', help='test PyTorch only') + parser.add_argument('--hard-fail', action='store_true', help='throw error on benchmark failure') opt = parser.parse_args() opt.data = check_yaml(opt.data) # check YAML print_args(vars(opt)) From f17444abcd647a299f23fe2bf6324b8947cdee22 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 7 Jul 2022 23:46:55 +0200 Subject: [PATCH 1193/1976] Simplify benchmarks.py assertions (#8515) --- utils/benchmarks.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/utils/benchmarks.py b/utils/benchmarks.py index 03bab9b6ded2..d412653c866f 100644 --- a/utils/benchmarks.py +++ b/utils/benchmarks.py @@ -61,12 +61,12 @@ def run( device = select_device(device) for i, (name, f, suffix, cpu, gpu) in export.export_formats().iterrows(): # index, (name, file, suffix, CPU, GPU) try: - assert i not in (9, 10), f'{name} inference not supported' # Edge TPU and TF.js are unsupported - assert i != 5 or platform.system() == 'Darwin', f'{name} inference only supported on macOS>=10.13' + assert i not in (9, 10), 'inference not supported' # Edge TPU and TF.js are unsupported + assert i != 5 or platform.system() == 'Darwin', 'inference only supported on macOS>=10.13' # CoreML if 'cpu' in device.type: - assert cpu, f'{name} inference not supported on CPU' + assert cpu, 'inference not supported on CPU' if 'cuda' in device.type: - assert gpu, f'{name} inference not supported on GPU' + assert gpu, 'inference not supported on GPU' # Export if f == '-': From be42a24d2376d997a98d10433373af84fa85917b Mon Sep 17 00:00:00 2001 From: Colin Wong Date: Thu, 7 Jul 2022 16:53:09 -0500 Subject: [PATCH 1194/1976] Properly expose `batch_size` from OpenVINO similarly to TensorRT (#8514) Properly expose `batch_size` from OpenVINO Co-authored-by: Glenn Jocher --- models/common.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/models/common.py b/models/common.py index a6488dd85648..61e94296b6d0 100644 --- a/models/common.py +++ b/models/common.py @@ -361,11 +361,16 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, elif xml: # OpenVINO LOGGER.info(f'Loading {w} for OpenVINO inference...') check_requirements(('openvino',)) # requires 
openvino-dev: https://pypi.org/project/openvino-dev/ - from openvino.runtime import Core + from openvino.runtime import Core, Layout, get_batch ie = Core() if not Path(w).is_file(): # if not *.xml w = next(Path(w).glob('*.xml')) # get *.xml file from *_openvino_model dir network = ie.read_model(model=w, weights=Path(w).with_suffix('.bin')) + if network.get_parameters()[0].get_layout().empty: + network.get_parameters()[0].set_layout(Layout("NCHW")) + batch_dim = get_batch(network) + if batch_dim.is_static: + batch_size = batch_dim.get_length() executable_network = ie.compile_model(network, device_name="CPU") # device_name="MYRIAD" for Intel NCS2 output_layer = next(iter(executable_network.outputs)) meta = Path(w).with_suffix('.yaml') From 63ba0cb18a59e882d7e50ba01b934178b0e4bc5a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 8 Jul 2022 00:46:56 +0200 Subject: [PATCH 1195/1976] Add `--half` arguments to export.py Usage examples (#8516) --- export.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/export.py b/export.py index 1d8f07fc9e2f..ec9024484a3d 100644 --- a/export.py +++ b/export.py @@ -555,11 +555,12 @@ def run( # Finish f = [str(x) for x in f if x] # filter out '' and None if any(f): + h = '--half' if half else '' # --half FP16 inference arg LOGGER.info(f'\nExport complete ({time.time() - t:.2f}s)' f"\nResults saved to {colorstr('bold', file.parent.resolve())}" - f"\nDetect: python detect.py --weights {f[-1]}" + f"\nDetect: python detect.py --weights {f[-1]} {h}" + f"\nValidate: python val.py --weights {f[-1]} {h}" f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{f[-1]}')" - f"\nValidate: python val.py --weights {f[-1]}" f"\nVisualize: https://netron.app") return f # return list of exported files/dirs From c215e87393977cc5dd5381a82c63fddb6a8d0428 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 8 Jul 2022 13:49:20 +0200 Subject: [PATCH 1196/1976] XML export `--half` fix (#8522) Improved error reporting for https://github.com/ultralytics/yolov5/issues/8519 --- export.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/export.py b/export.py index ec9024484a3d..623844ff3531 100644 --- a/export.py +++ b/export.py @@ -484,7 +484,7 @@ def run( # Load PyTorch model device = select_device(device) if half: - assert device.type != 'cpu' or coreml or xml, '--half only compatible with GPU export, i.e. use --device 0' + assert device.type != 'cpu' or coreml, '--half only compatible with GPU export, i.e. use --device 0' assert not dynamic, '--half not compatible with --dynamic, i.e. use either --half or --dynamic but not both' model = attempt_load(weights, device=device, inplace=True, fuse=True) # load FP32 model nc, names = model.nc, model.names # number of classes, class names From 526e650553819dbff67897b9c752c4072e989823 Mon Sep 17 00:00:00 2001 From: Colin Wong Date: Fri, 8 Jul 2022 07:32:40 -0500 Subject: [PATCH 1197/1976] Fix `LoadImages()` with dataset YAML lists (#8517) * Fix LoadImages with dataset yaml lists * Update dataloaders.py * Update dataloaders.py * Simplify/refactor PR * Update dataloaders.py Co-authored-by: Colin Wong Co-authored-by: Glenn Jocher --- utils/dataloaders.py | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 5d4dfc6e4d14..4f1c98fd880d 100755 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -176,15 +176,17 @@ def __iter__(self): class LoadImages: # YOLOv5 image/video dataloader, i.e. 
`python detect.py --source image.jpg/vid.mp4` def __init__(self, path, img_size=640, stride=32, auto=True): - p = str(Path(path).resolve()) # os-agnostic absolute path - if '*' in p: - files = sorted(glob.glob(p, recursive=True)) # glob - elif os.path.isdir(p): - files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir - elif os.path.isfile(p): - files = [p] # files - else: - raise Exception(f'ERROR: {p} does not exist') + files = [] + for p in sorted(path) if isinstance(path, (list, tuple)) else [path]: + p = str(Path(p).resolve()) + if '*' in p: + files.extend(sorted(glob.glob(p, recursive=True))) # glob + elif os.path.isdir(p): + files.extend(sorted(glob.glob(os.path.join(p, '*.*')))) # dir + elif os.path.isfile(p): + files.append(p) # files + else: + raise FileNotFoundError(f'{p} does not exist') images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS] videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS] @@ -437,7 +439,7 @@ def __init__(self, f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path # f += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib) else: - raise Exception(f'{prefix}{p} does not exist') + raise FileNotFoundError(f'{prefix}{p} does not exist') self.im_files = sorted(x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS) # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS]) # pathlib assert self.im_files, f'{prefix}No images found' From 7dafd1cb297869032d98406afc9f3e74f68b5bcd Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 11 Jul 2022 15:09:42 +0200 Subject: [PATCH 1198/1976] val.py `assert ncm == nc` fix (#8545) --- val.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/val.py b/val.py index 77f6bbf5b7c2..b0cc8e7f1577 100644 --- a/val.py +++ b/val.py @@ -164,7 +164,7 @@ def run( if not training: if pt and not single_cls: # check --weights are trained on --data ncm = model.model.nc - assert ncm == nc, f'{weights[0]} ({ncm} classes) trained on different --data than what you passed ({nc} ' \ + assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \ f'classes). Pass correct combination of --weights and --data that are trained together.' 
model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz)) # warmup pad = 0.0 if task in ('speed', 'benchmark') else 0.5 From a84cd02387d70fb5a6287682a221e8cd46dca87a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 11 Jul 2022 16:07:11 +0200 Subject: [PATCH 1199/1976] CIoU protected divides (#8546) Protected divides in IOU function to resolve https://github.com/ultralytics/yolov5/issues/8539 --- utils/metrics.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/utils/metrics.py b/utils/metrics.py index e17747b703fa..858af23efadb 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -225,8 +225,8 @@ def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7 else: # x1, y1, x2, y2 = box1 b1_x1, b1_y1, b1_x2, b1_y2 = box1.chunk(4, 1) b2_x1, b2_y1, b2_x2, b2_y2 = box2.chunk(4, 1) - w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps - w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps + w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 # Intersection area inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \ @@ -244,7 +244,7 @@ def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7 c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center dist ** 2 if CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 - v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2) + v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / (h2 + eps)) - torch.atan(w1 / (h1 + eps)), 2) with torch.no_grad(): alpha = v / (v - iou + (1 + eps)) return iou - (rho2 / c2 + v * alpha) # CIoU From 04146371b9940e144080430eb5e28b828d2f9c3a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 12 Jul 2022 01:58:25 +0200 Subject: [PATCH 1200/1976] Update metrics.py with IoU protected divides (#8550) --- utils/metrics.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/utils/metrics.py b/utils/metrics.py index 858af23efadb..6bba4cfe2a42 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -259,7 +259,7 @@ def box_area(box): return (box[2] - box[0]) * (box[3] - box[1]) -def box_iou(box1, box2): +def box_iou(box1, box2, eps=1e-7): # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py """ Return intersection-over-union (Jaccard index) of boxes. @@ -277,10 +277,10 @@ def box_iou(box1, box2): inter = (torch.min(a2, b2) - torch.max(a1, b1)).clamp(0).prod(2) # IoU = inter / (area1 + area2 - inter) - return inter / (box_area(box1.T)[:, None] + box_area(box2.T) - inter) + return inter / (box_area(box1.T)[:, None] + box_area(box2.T) - inter + eps) -def bbox_ioa(box1, box2, eps=1E-7): +def bbox_ioa(box1, box2, eps=1e-7): """ Returns the intersection over box2 area given box1, box2. Boxes are x1y1x2y2 box1: np.array of shape(4) box2: np.array of shape(nx4) @@ -302,12 +302,12 @@ def bbox_ioa(box1, box2, eps=1E-7): return inter_area / box2_area -def wh_iou(wh1, wh2): +def wh_iou(wh1, wh2, eps=1e-7): # Returns the nxm IoU matrix. 
wh1 is nx2, wh2 is mx2 wh1 = wh1[:, None] # [N,1,2] wh2 = wh2[None] # [1,M,2] inter = torch.min(wh1, wh2).prod(2) # [N,M] - return inter / (wh1.prod(2) + wh2.prod(2) - inter) # iou = inter / (area1 + area2 - inter) + return inter / (wh1.prod(2) + wh2.prod(2) - inter + eps) # iou = inter / (area1 + area2 - inter) # Plots ---------------------------------------------------------------------------------------------------------------- From fbd30205257d956f6c9840e9e9863e4bb7e1f3aa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9B=BE=E9=80=B8=E5=A4=AB=EF=BC=88Zeng=20Yifu=EF=BC=89?= <41098760+Zengyf-CVer@users.noreply.github.com> Date: Tue, 12 Jul 2022 19:19:25 +0800 Subject: [PATCH 1201/1976] Add TensorRT dependencies (#8553) Update requirements.txt --- requirements.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/requirements.txt b/requirements.txt index ad3fd49691d4..931f93646b73 100644 --- a/requirements.txt +++ b/requirements.txt @@ -26,6 +26,8 @@ seaborn>=0.11.0 # coremltools>=4.1 # CoreML export # onnx>=1.9.0 # ONNX export # onnx-simplifier>=0.3.6 # ONNX simplifier +# nvidia-pyindex # TensorRT export +# nvidia-tensorrt # TensorRT export # scikit-learn==0.19.2 # CoreML quantization # tensorflow>=2.4.1 # TFLite export # tensorflowjs>=3.9.0 # TF.js export From 574ceedfc5f171a89417175bfb14fda6a2646603 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 12 Jul 2022 14:49:54 +0200 Subject: [PATCH 1202/1976] Add `thop>=0.1.0` (#8558) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 931f93646b73..4a4f68539cad 100644 --- a/requirements.txt +++ b/requirements.txt @@ -36,7 +36,7 @@ seaborn>=0.11.0 # Extras -------------------------------------- ipython # interactive notebook psutil # system utilization -thop # FLOPs computation +thop>=0.1.0 # FLOPs computation # albumentations>=1.0.3 # pycocotools>=2.0 # COCO mAP # roboflow From f8722b4429e80f96be04b36e4efd84ce6583bfa1 Mon Sep 17 00:00:00 2001 From: Colin Wong Date: Wed, 13 Jul 2022 04:13:01 -0500 Subject: [PATCH 1203/1976] Raise error on suffix-less model path (#8561) Raise error on invalid model --- models/common.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/models/common.py b/models/common.py index 61e94296b6d0..fb5ac3a6f5a4 100644 --- a/models/common.py +++ b/models/common.py @@ -441,6 +441,8 @@ def wrap_frozen_graph(gd, inputs, outputs): output_details = interpreter.get_output_details() # outputs elif tfjs: raise Exception('ERROR: YOLOv5 TF.js inference is not supported') + else: + raise Exception(f'ERROR: {w} is not a supported format') self.__dict__.update(locals()) # assign all variables to self def forward(self, im, augment=False, visualize=False, val=False): From f4b05680f89795658e1c898a28ff51edbf22a63b Mon Sep 17 00:00:00 2001 From: Colin Wong Date: Fri, 15 Jul 2022 09:01:01 -0500 Subject: [PATCH 1204/1976] Assert `--optimize` not used with cuda device (#8569) --- export.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/export.py b/export.py index 623844ff3531..9868fcae95c3 100644 --- a/export.py +++ b/export.py @@ -492,6 +492,8 @@ def run( # Checks imgsz *= 2 if len(imgsz) == 1 else 1 # expand assert nc == len(names), f'Model class count {nc} != len(names) {len(names)}' + if optimize: + assert device.type != 'cuda', '--optimize not compatible with cuda devices, i.e. 
use --device cpu' # Input gs = int(max(model.stride)) # grid size (max stride) From 72a81e7a1c13cd3ae4675037f217d0ed3db9bc20 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 15 Jul 2022 16:01:29 +0200 Subject: [PATCH 1205/1976] Update requirements.txt comment spacing (#8562) --- requirements.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/requirements.txt b/requirements.txt index 4a4f68539cad..c0f12ccdd018 100644 --- a/requirements.txt +++ b/requirements.txt @@ -10,7 +10,7 @@ PyYAML>=5.3.1 requests>=2.23.0 scipy>=1.4.1 # Google Colab version torch>=1.7.0,!=1.12.0 # https://github.com/ultralytics/yolov5/issues/8395 -torchvision>=0.8.1,!=0.13.0 # https://github.com/ultralytics/yolov5/issues/8395 +torchvision>=0.8.1,!=0.13.0 # https://github.com/ultralytics/yolov5/issues/8395 tqdm>=4.41.0 protobuf<4.21.3 # https://github.com/ultralytics/yolov5/issues/8012 @@ -26,8 +26,8 @@ seaborn>=0.11.0 # coremltools>=4.1 # CoreML export # onnx>=1.9.0 # ONNX export # onnx-simplifier>=0.3.6 # ONNX simplifier -# nvidia-pyindex # TensorRT export -# nvidia-tensorrt # TensorRT export +# nvidia-pyindex # TensorRT export +# nvidia-tensorrt # TensorRT export # scikit-learn==0.19.2 # CoreML quantization # tensorflow>=2.4.1 # TFLite export # tensorflowjs>=3.9.0 # TF.js export From 7204c1ca25fa69a911802edab719b4cc323103f4 Mon Sep 17 00:00:00 2001 From: Yonghye Kwon Date: Sat, 16 Jul 2022 22:51:48 +0900 Subject: [PATCH 1206/1976] Explicitly set `weight_decay` value (#8592) * explicitly set weight_decay value The default weight_decay value of AdamW is 1e-2, so we should set it to zero. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Cleanup Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- train.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/train.py b/train.py index bf5b4c69d74c..ff13f1e256ec 100644 --- a/train.py +++ b/train.py @@ -163,12 +163,12 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio if opt.optimizer == 'Adam': optimizer = Adam(g[2], lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum elif opt.optimizer == 'AdamW': - optimizer = AdamW(g[2], lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum + optimizer = AdamW(g[2], lr=hyp['lr0'], betas=(hyp['momentum'], 0.999), weight_decay=0.0) else: optimizer = SGD(g[2], lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True) optimizer.add_param_group({'params': g[0], 'weight_decay': hyp['weight_decay']}) # add g0 with weight_decay - optimizer.add_param_group({'params': g[1]}) # add g1 (BatchNorm2d weights) + optimizer.add_param_group({'params': g[1], 'weight_decay': 0.0}) # add g1 (BatchNorm2d weights) LOGGER.info(f"{colorstr('optimizer:')} {type(optimizer).__name__} with parameter groups " f"{len(g[1])} weight (no decay), {len(g[0])} weight, {len(g[2])} bias") del g From cf28dda3660fcda0bac56a9ca75ca3c8749d1baf Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 16 Jul 2022 15:54:34 +0200 Subject: [PATCH 1207/1976] Update `scipy>=1.7.3` (#8595) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index c0f12ccdd018..f5ae6175b6f1 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,7 +8,7 @@ opencv-python>=4.1.1 Pillow>=7.1.2 PyYAML>=5.3.1 requests>=2.23.0 -scipy>=1.4.1 # Google Colab version +scipy>=1.7.3 # Google Colab version 
torch>=1.7.0,!=1.12.0 # https://github.com/ultralytics/yolov5/issues/8395 torchvision>=0.8.1,!=0.13.0 # https://github.com/ultralytics/yolov5/issues/8395 tqdm>=4.41.0 From 5c45a4b13d1782a8ad9cb993a1d22430540bd197 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 16 Jul 2022 16:14:16 +0200 Subject: [PATCH 1208/1976] Update `tqdm>=4.64.0` and `thop>=0.1.1` (#8596) * Update `tqdm>=4.64.0` and `thop>=0.1.1` * Update requirements.txt --- requirements.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/requirements.txt b/requirements.txt index f5ae6175b6f1..4550fc771b04 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,10 +8,10 @@ opencv-python>=4.1.1 Pillow>=7.1.2 PyYAML>=5.3.1 requests>=2.23.0 -scipy>=1.7.3 # Google Colab version +scipy>=1.4.1 torch>=1.7.0,!=1.12.0 # https://github.com/ultralytics/yolov5/issues/8395 torchvision>=0.8.1,!=0.13.0 # https://github.com/ultralytics/yolov5/issues/8395 -tqdm>=4.41.0 +tqdm>=4.64.0 protobuf<4.21.3 # https://github.com/ultralytics/yolov5/issues/8012 # Logging ------------------------------------- @@ -36,7 +36,7 @@ seaborn>=0.11.0 # Extras -------------------------------------- ipython # interactive notebook psutil # system utilization -thop>=0.1.0 # FLOPs computation +thop>=0.1.1 # FLOPs computation # albumentations>=1.0.3 # pycocotools>=2.0 # COCO mAP # roboflow From 6e86af3de85c449fa2574c2461d8919d86620e6c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 16 Jul 2022 16:41:43 +0200 Subject: [PATCH 1209/1976] Add `pip install wheel` to avoid legacy `setup.py install` (#8597) * Update ci-testing with `pip install wheel` * Update ci-testing.yml * Update dockerfiles --- .github/workflows/ci-testing.yml | 4 ++-- utils/docker/Dockerfile | 2 +- utils/docker/Dockerfile-arm64 | 2 +- utils/docker/Dockerfile-cpu | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index f3e36675f49d..e3359cd3a283 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -32,7 +32,7 @@ jobs: # restore-keys: ${{ runner.os }}-Benchmarks- - name: Install requirements run: | - python -m pip install --upgrade pip + python -m pip install --upgrade pip wheel pip install -r requirements.txt coremltools openvino-dev tensorflow-cpu --extra-index-url https://download.pytorch.org/whl/cpu python --version pip --version @@ -77,7 +77,7 @@ jobs: restore-keys: ${{ runner.os }}-${{ matrix.python-version }}-pip- - name: Install requirements run: | - python -m pip install --upgrade pip + python -m pip install --upgrade pip wheel pip install -r requirements.txt --extra-index-url https://download.pytorch.org/whl/cpu python --version pip --version diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index a5fc7cbd6c45..1a4b66b106b2 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -14,7 +14,7 @@ RUN apt update && apt install --no-install-recommends -y zip htop screen libgl1- # Install pip packages COPY requirements.txt . 
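thop, bumped to >=0.1.1 in patch 1208 above, is the profiler the repo uses for its FLOPs summaries. A hedged usage sketch (the toy model and input shape are illustrative, and thop must be installed):

```
import torch
import torch.nn as nn
import thop  # assumes thop>=0.1.1 is available

model = nn.Conv2d(3, 16, 3, padding=1)
im = torch.zeros(1, 3, 640, 640)  # BCHW input at YOLOv5's default 640 resolution
flops, params = thop.profile(model, inputs=(im,), verbose=False)
print(f'{flops / 1e9:.1f} GFLOPs, {params / 1e6:.3f}M params')
```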
-RUN python -m pip install --upgrade pip +RUN python -m pip install --upgrade pip wheel RUN pip uninstall -y torch torchvision torchtext Pillow RUN pip install --no-cache -r requirements.txt albumentations wandb gsutil notebook Pillow>=9.1.0 \ 'opencv-python<4.6.0.66' \ diff --git a/utils/docker/Dockerfile-arm64 b/utils/docker/Dockerfile-arm64 index 2e261051dedd..bca161e67a37 100644 --- a/utils/docker/Dockerfile-arm64 +++ b/utils/docker/Dockerfile-arm64 @@ -17,7 +17,7 @@ RUN apt install --no-install-recommends -y python3-pip git zip curl htop gcc \ # Install pip packages COPY requirements.txt . -RUN python3 -m pip install --upgrade pip +RUN python3 -m pip install --upgrade pip wheel RUN pip install --no-cache -r requirements.txt gsutil notebook \ tensorflow-aarch64 # tensorflowjs \ diff --git a/utils/docker/Dockerfile-cpu b/utils/docker/Dockerfile-cpu index c8aa8c6a48c6..f05e920ad53f 100644 --- a/utils/docker/Dockerfile-cpu +++ b/utils/docker/Dockerfile-cpu @@ -16,7 +16,7 @@ RUN apt install --no-install-recommends -y python3-pip git zip curl htop libgl1- # Install pip packages COPY requirements.txt . -RUN python3 -m pip install --upgrade pip +RUN python3 -m pip install --upgrade pip wheel RUN pip install --no-cache -r requirements.txt albumentations gsutil notebook \ coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu tensorflowjs \ --extra-index-url https://download.pytorch.org/whl/cpu From a34b376d0fb90076e698b1b95df55c9cafba899a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 16 Jul 2022 23:46:23 +0200 Subject: [PATCH 1210/1976] Link fuse() to AutoShape() for Hub models (#8599) --- hubconf.py | 3 +-- models/common.py | 4 ++-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/hubconf.py b/hubconf.py index df585f8cb411..6bb9484a856d 100644 --- a/hubconf.py +++ b/hubconf.py @@ -36,7 +36,6 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo if not verbose: LOGGER.setLevel(logging.WARNING) - check_requirements(exclude=('tensorboard', 'thop', 'opencv-python')) name = Path(name) path = name.with_suffix('.pt') if name.suffix == '' and not name.is_dir() else name # checkpoint path @@ -44,7 +43,7 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo device = select_device(device) if pretrained and channels == 3 and classes == 80: - model = DetectMultiBackend(path, device=device) # download/load FP32 model + model = DetectMultiBackend(path, device=device, fuse=autoshape) # download/load FP32 model # model = models.experimental.attempt_load(path, map_location=device) # download/load FP32 model else: cfg = list((Path(__file__).parent / 'models').rglob(f'{path.stem}.yaml'))[0] # model.yaml path diff --git a/models/common.py b/models/common.py index fb5ac3a6f5a4..5ea1c307f034 100644 --- a/models/common.py +++ b/models/common.py @@ -305,7 +305,7 @@ def forward(self, x): class DetectMultiBackend(nn.Module): # YOLOv5 MultiBackend class for python inference on various backends - def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, data=None, fp16=False): + def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, data=None, fp16=False, fuse=True): # Usage: # PyTorch: weights = *.pt # TorchScript: *.torchscript @@ -331,7 +331,7 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, names = yaml.safe_load(f)['names'] if pt: # PyTorch - model = attempt_load(weights if isinstance(weights, list) else w, device=device) + model = 
attempt_load(weights if isinstance(weights, list) else w, device=device, inplace=True, fuse=fuse) stride = max(int(model.stride.max()), 32) # model stride names = model.module.names if hasattr(model, 'module') else model.names # get class names model.half() if fp16 else model.float() From 24305787ae32b7e04f52a971a5865c461842662e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 17 Jul 2022 00:55:55 +0200 Subject: [PATCH 1211/1976] FROM nvcr.io/nvidia/pytorch:22.06-py3 (#8600) --- utils/docker/Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index 1a4b66b106b2..312d169d1a76 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -3,7 +3,7 @@ # Image is CUDA-optimized for YOLOv5 single/multi-GPU training and inference # Start FROM NVIDIA PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch -FROM nvcr.io/nvidia/pytorch:22.05-py3 +FROM nvcr.io/nvidia/pytorch:22.06-py3 RUN rm -rf /opt/pytorch # remove 1.2GB dir # Downloads to user config dir @@ -15,7 +15,7 @@ RUN apt update && apt install --no-install-recommends -y zip htop screen libgl1- # Install pip packages COPY requirements.txt . RUN python -m pip install --upgrade pip wheel -RUN pip uninstall -y torch torchvision torchtext Pillow +RUN pip uninstall -y Pillow torchtext # torch torchvision RUN pip install --no-cache -r requirements.txt albumentations wandb gsutil notebook Pillow>=9.1.0 \ 'opencv-python<4.6.0.66' \ --extra-index-url https://download.pytorch.org/whl/cu113 From 51fb467b63191b5f0ff8391608bb96b5deb8c3ea Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 17 Jul 2022 11:43:52 +0200 Subject: [PATCH 1212/1976] Refactor optimizer initialization (#8607) * Refactor optimizer initialization * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update train.py * Update train.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- train.py | 29 ++++------------------------- utils/torch_utils.py | 32 +++++++++++++++++++++++++++++++- 2 files changed, 35 insertions(+), 26 deletions(-) diff --git a/train.py b/train.py index ff13f1e256ec..6b463bf56423 100644 --- a/train.py +++ b/train.py @@ -28,7 +28,7 @@ import torch.nn as nn import yaml from torch.nn.parallel import DistributedDataParallel as DDP -from torch.optim import SGD, Adam, AdamW, lr_scheduler +from torch.optim import lr_scheduler from tqdm import tqdm FILE = Path(__file__).resolve() @@ -54,7 +54,8 @@ from utils.loss import ComputeLoss from utils.metrics import fitness from utils.plots import plot_evolve, plot_labels -from utils.torch_utils import EarlyStopping, ModelEMA, de_parallel, select_device, torch_distributed_zero_first +from utils.torch_utils import (EarlyStopping, ModelEMA, de_parallel, select_device, smart_optimizer, + torch_distributed_zero_first) LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html RANK = int(os.getenv('RANK', -1)) @@ -149,29 +150,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio accumulate = max(round(nbs / batch_size), 1) # accumulate loss before optimizing hyp['weight_decay'] *= batch_size * accumulate / nbs # scale weight_decay LOGGER.info(f"Scaled weight_decay = {hyp['weight_decay']}") - - g = [], [], [] # optimizer parameter groups - bn = tuple(v for k, v in nn.__dict__.items() if 'Norm' in k) # normalization layers, i.e. 
BatchNorm2d() - for v in model.modules(): - if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter): # bias - g[2].append(v.bias) - if isinstance(v, bn): # weight (no decay) - g[1].append(v.weight) - elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter): # weight (with decay) - g[0].append(v.weight) - - if opt.optimizer == 'Adam': - optimizer = Adam(g[2], lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum - elif opt.optimizer == 'AdamW': - optimizer = AdamW(g[2], lr=hyp['lr0'], betas=(hyp['momentum'], 0.999), weight_decay=0.0) - else: - optimizer = SGD(g[2], lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True) - - optimizer.add_param_group({'params': g[0], 'weight_decay': hyp['weight_decay']}) # add g0 with weight_decay - optimizer.add_param_group({'params': g[1], 'weight_decay': 0.0}) # add g1 (BatchNorm2d weights) - LOGGER.info(f"{colorstr('optimizer:')} {type(optimizer).__name__} with parameter groups " - f"{len(g[1])} weight (no decay), {len(g[0])} weight, {len(g[2])} bias") - del g + optimizer = smart_optimizer(model, opt.optimizer, hyp['lr0'], hyp['momentum'], hyp['weight_decay']) # Scheduler if opt.cos_lr: diff --git a/utils/torch_utils.py b/utils/torch_utils.py index c21dc6658c1e..d82368dc6271 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -18,7 +18,7 @@ import torch.nn as nn import torch.nn.functional as F -from utils.general import LOGGER, file_date, git_describe +from utils.general import LOGGER, colorstr, file_date, git_describe try: import thop # for FLOPs computation @@ -260,6 +260,36 @@ def copy_attr(a, b, include=(), exclude=()): setattr(a, k, v) +def smart_optimizer(model, name='Adam', lr=0.001, momentum=0.9, weight_decay=1e-5): + # YOLOv5 3-param group optimizer: 0) weights with decay, 1) weights no decay, 2) biases no decay + g = [], [], [] # optimizer parameter groups + bn = tuple(v for k, v in nn.__dict__.items() if 'Norm' in k) # normalization layers, i.e. 
BatchNorm2d() + for v in model.modules(): + if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter): # bias (no decay) + g[2].append(v.bias) + if isinstance(v, bn): # weight (no decay) + g[1].append(v.weight) + elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter): # weight (with decay) + g[0].append(v.weight) + + if name == 'Adam': + optimizer = torch.optim.Adam(g[2], lr=lr, betas=(momentum, 0.999)) # adjust beta1 to momentum + elif name == 'AdamW': + optimizer = torch.optim.AdamW(g[2], lr=lr, betas=(momentum, 0.999), weight_decay=0.0) + elif name == 'RMSProp': + optimizer = torch.optim.RMSprop(g[2], lr=lr, momentum=momentum) + elif name == 'SGD': + optimizer = torch.optim.SGD(g[2], lr=lr, momentum=momentum, nesterov=True) + else: + raise NotImplementedError(f'Optimizer {name} not implemented.') + + optimizer.add_param_group({'params': g[0], 'weight_decay': weight_decay}) # add g0 with weight_decay + optimizer.add_param_group({'params': g[1], 'weight_decay': 0.0}) # add g1 (BatchNorm2d weights) + LOGGER.info(f"{colorstr('optimizer:')} {type(optimizer).__name__} with parameter groups " + f"{len(g[1])} weight (no decay), {len(g[0])} weight, {len(g[2])} bias") + return optimizer + + class EarlyStopping: # YOLOv5 simple early stopper def __init__(self, patience=30): From 9cf5fd5ac33c096ae06f60667dd6582ddb84aa4c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 18 Jul 2022 15:05:58 +0200 Subject: [PATCH 1213/1976] assert torch!=1.12.0 for DDP training (#8621) * assert torch!=1.12.0 for DDP training * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- requirements.txt | 4 ++-- train.py | 14 +++++--------- utils/torch_utils.py | 18 +++++++++++++++++- 3 files changed, 24 insertions(+), 12 deletions(-) diff --git a/requirements.txt b/requirements.txt index 4550fc771b04..a3284d6529eb 100644 --- a/requirements.txt +++ b/requirements.txt @@ -9,8 +9,8 @@ Pillow>=7.1.2 PyYAML>=5.3.1 requests>=2.23.0 scipy>=1.4.1 -torch>=1.7.0,!=1.12.0 # https://github.com/ultralytics/yolov5/issues/8395 -torchvision>=0.8.1,!=0.13.0 # https://github.com/ultralytics/yolov5/issues/8395 +torch>=1.7.0 +torchvision>=0.8.1 tqdm>=4.64.0 protobuf<4.21.3 # https://github.com/ultralytics/yolov5/issues/8012 diff --git a/train.py b/train.py index 6b463bf56423..c298692b7335 100644 --- a/train.py +++ b/train.py @@ -27,7 +27,6 @@ import torch.distributed as dist import torch.nn as nn import yaml -from torch.nn.parallel import DistributedDataParallel as DDP from torch.optim import lr_scheduler from tqdm import tqdm @@ -46,15 +45,15 @@ from utils.dataloaders import create_dataloader from utils.downloads import attempt_download from utils.general import (LOGGER, check_amp, check_dataset, check_file, check_git_status, check_img_size, - check_requirements, check_suffix, check_version, check_yaml, colorstr, get_latest_run, - increment_path, init_seeds, intersect_dicts, labels_to_class_weights, - labels_to_image_weights, methods, one_cycle, print_args, print_mutation, strip_optimizer) + check_requirements, check_suffix, check_yaml, colorstr, get_latest_run, increment_path, + init_seeds, intersect_dicts, labels_to_class_weights, labels_to_image_weights, methods, + one_cycle, print_args, print_mutation, strip_optimizer) from utils.loggers import Loggers from utils.loggers.wandb.wandb_utils import check_wandb_resume from utils.loss import ComputeLoss from utils.metrics import fitness 
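With smart_optimizer() factored out in patch 1212 above, the train.py call site collapses to a single line. A usage sketch with illustrative values:

```
import torch.nn as nn
from utils.torch_utils import smart_optimizer  # helper added in patch 1212

model = nn.Sequential(nn.Conv2d(3, 16, 3), nn.BatchNorm2d(16))  # stand-in for a YOLOv5 model
optimizer = smart_optimizer(model, name='SGD', lr=0.01, momentum=0.937, weight_decay=5e-4)
# 'Adam', 'AdamW' and 'RMSProp' are also accepted; any other name raises NotImplementedError
```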
from utils.plots import plot_evolve, plot_labels -from utils.torch_utils import (EarlyStopping, ModelEMA, de_parallel, select_device, smart_optimizer, +from utils.torch_utils import (EarlyStopping, ModelEMA, de_parallel, select_device, smart_DDP, smart_optimizer, torch_distributed_zero_first) LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html @@ -248,10 +247,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio # DDP mode if cuda and RANK != -1: - if check_version(torch.__version__, '1.11.0'): - model = DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK, static_graph=True) - else: - model = DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK) + model = smart_DDP(model) # Model attributes nl = de_parallel(model).model[-1].nl # number of detection layers (to scale hyps) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index d82368dc6271..5f2a22c36f1a 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -17,8 +17,13 @@ import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F +from torch.nn.parallel import DistributedDataParallel as DDP -from utils.general import LOGGER, colorstr, file_date, git_describe +from utils.general import LOGGER, check_version, colorstr, file_date, git_describe + +LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html +RANK = int(os.getenv('RANK', -1)) +WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) try: import thop # for FLOPs computation @@ -29,6 +34,17 @@ warnings.filterwarnings('ignore', message='User provided device_type of \'cuda\', but CUDA is not available. Disabling') +def smart_DDP(model): + # Model DDP creation with checks + assert not check_version(torch.__version__, '1.12.0', pinned=True), \ + 'torch==1.12.0 torchvision==0.13.0 DDP training is not supported due to a known issue. ' \ + 'Please upgrade or downgrade torch to use DDP. 
See https://github.com/ultralytics/yolov5/issues/8395' + if check_version(torch.__version__, '1.11.0'): + return DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK, static_graph=True) + else: + return DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK) + + @contextmanager def torch_distributed_zero_first(local_rank: int): # Decorator to make all processes in distributed training wait for each local_master to do something From fbe67e465375231474a2ad80a4389efc77ecff99 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 18 Jul 2022 17:53:30 +0200 Subject: [PATCH 1214/1976] Fix `OMP_NUM_THREADS=1` for macOS (#8624) Resolves https://github.com/ultralytics/yolov5/issues/8623 --- utils/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index a85a2915a31a..cb5ca500b9f3 100755 --- a/utils/general.py +++ b/utils/general.py @@ -52,7 +52,7 @@ pd.options.display.max_columns = 10 cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader) os.environ['NUMEXPR_MAX_THREADS'] = str(NUM_THREADS) # NumExpr max threads -os.environ['OMP_NUM_THREADS'] = str(NUM_THREADS) # OpenMP max threads (PyTorch and SciPy) +os.environ['OMP_NUM_THREADS'] = '1' if platform.system() == 'darwin' else str(NUM_THREADS) # OpenMP (PyTorch and SciPy) def is_kaggle(): From 92e47b85d952274480c8c5efa5900e686241a96b Mon Sep 17 00:00:00 2001 From: daquexian Date: Wed, 20 Jul 2022 01:01:24 +0800 Subject: [PATCH 1215/1976] Upgrade onnxsim to v0.4.1 (#8632) * upgrade onnxsim to v0.4.1 Signed-off-by: daquexian * Update export.py * Update export.py * Update export.py * Update export.py * Update export.py Co-authored-by: Glenn Jocher --- export.py | 9 ++++----- requirements.txt | 2 +- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/export.py b/export.py index 9868fcae95c3..3629915f028d 100644 --- a/export.py +++ b/export.py @@ -152,13 +152,12 @@ def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorst # Simplify if simplify: try: - check_requirements(('onnx-simplifier',)) + cuda = torch.cuda.is_available() + check_requirements(('onnxruntime-gpu' if cuda else 'onnxruntime', 'onnx-simplifier>=0.4.1')) import onnxsim LOGGER.info(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...') - model_onnx, check = onnxsim.simplify(model_onnx, - dynamic_input_shape=dynamic, - input_shapes={'images': list(im.shape)} if dynamic else None) + model_onnx, check = onnxsim.simplify(model_onnx) assert check, 'assert check failed' onnx.save(model_onnx, f) except Exception as e: @@ -493,7 +492,7 @@ def run( imgsz *= 2 if len(imgsz) == 1 else 1 # expand assert nc == len(names), f'Model class count {nc} != len(names) {len(names)}' if optimize: - assert device.type != 'cuda', '--optimize not compatible with cuda devices, i.e. use --device cpu' + assert device.type == 'cpu', '--optimize not compatible with cuda devices, i.e. 
use --device cpu' # Input gs = int(max(model.stride)) # grid size (max stride) diff --git a/requirements.txt b/requirements.txt index a3284d6529eb..8548f67b5a48 100644 --- a/requirements.txt +++ b/requirements.txt @@ -25,7 +25,7 @@ seaborn>=0.11.0 # Export -------------------------------------- # coremltools>=4.1 # CoreML export # onnx>=1.9.0 # ONNX export -# onnx-simplifier>=0.3.6 # ONNX simplifier +# onnx-simplifier>=0.4.1 # ONNX simplifier # nvidia-pyindex # TensorRT export # nvidia-tensorrt # TensorRT export # scikit-learn==0.19.2 # CoreML quantization From 602d7ffb0e8667c63bd0007ecf3cfd29a46f9cc4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=BCdiger=20Busche?= Date: Thu, 21 Jul 2022 17:40:53 +0200 Subject: [PATCH 1216/1976] Check TensorBoard logger before adding graph (#8664) Otherwise, an error is thrown if the tensorboard logger is not included. --- utils/loggers/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 42b696ba644f..88bdb0521619 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -102,7 +102,7 @@ def on_train_batch_end(self, ni, model, imgs, targets, paths, plots): # Callback runs on train batch end if plots: if ni == 0: - if not self.opt.sync_bn: # --sync known issue https://github.com/ultralytics/yolov5/issues/3754 + if self.tb and not self.opt.sync_bn: # --sync known issue https://github.com/ultralytics/yolov5/issues/3754 with warnings.catch_warnings(): warnings.simplefilter('ignore') # suppress jit trace warning self.tb.add_graph(torch.jit.trace(de_parallel(model), imgs[0:1], strict=False), []) From 4c1784bd158d3215aa7170b33578e1032442a160 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 21 Jul 2022 23:12:49 +0200 Subject: [PATCH 1217/1976] Use contextlib's suppress method to silence an error (#8668) --- models/yolo.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/models/yolo.py b/models/yolo.py index 02660e6c4130..56846815e08a 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -7,6 +7,7 @@ """ import argparse +import contextlib import os import platform import sys @@ -259,10 +260,8 @@ def parse_model(d, ch): # model_dict, input_channels(3) for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args m = eval(m) if isinstance(m, str) else m # eval strings for j, a in enumerate(args): - try: + with contextlib.suppress(NameError): args[j] = eval(a) if isinstance(a, str) else a # eval strings - except NameError: - pass n = n_ = max(round(n * gd), 1) if n > 1 else n # depth gain if m in (Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv, MixConv2d, Focus, CrossConv, From 38721de7ef6923f52c1ce1eb00a765a447c27d3c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 22 Jul 2022 11:54:31 +0200 Subject: [PATCH 1218/1976] Update hubconf.py to reset LOGGER.level after load (#8674) Resolves silent outputs after model load --- hubconf.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/hubconf.py b/hubconf.py index 6bb9484a856d..8748279e027a 100644 --- a/hubconf.py +++ b/hubconf.py @@ -34,6 +34,7 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo from utils.general import LOGGER, check_requirements, intersect_dicts, logging from utils.torch_utils import select_device + level = LOGGER.level if not verbose: LOGGER.setLevel(logging.WARNING) check_requirements(exclude=('tensorboard', 'thop', 'opencv-python')) @@ -57,6 +58,7 @@ def _create(name, pretrained=True, 
channels=3, classes=80, autoshape=True, verbo model.names = ckpt['model'].names # set class names attribute if autoshape: model = AutoShape(model) # for file/URI/PIL/cv2/np inputs and NMS + LOGGER.setLevel(level) return model.to(device) except Exception as e: From b17629e54f5a392c8e32219ba03b06b7eb11a48a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 22 Jul 2022 15:23:22 +0200 Subject: [PATCH 1219/1976] Update warning emojis (#8678) --- utils/general.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/general.py b/utils/general.py index cb5ca500b9f3..925f7fbf0ecb 100755 --- a/utils/general.py +++ b/utils/general.py @@ -474,7 +474,7 @@ def check_dataset(data, autodownload=True): for k in 'train', 'val', 'nc': assert k in data, emojis(f"data.yaml '{k}:' field missing ❌") if 'names' not in data: - LOGGER.warning(emojis("data.yaml 'names:' field missing ⚠, assigning default names 'class0', 'class1', etc.")) + LOGGER.warning(emojis("data.yaml 'names:' field missing ⚠️, assigning default names 'class0', 'class1', etc.")) data['names'] = [f'class{i}' for i in range(data['nc'])] # default names # Resolve paths @@ -490,7 +490,7 @@ def check_dataset(data, autodownload=True): if val: val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path if not all(x.exists() for x in val): - LOGGER.info(emojis('\nDataset not found ⚠, missing paths %s' % [str(x) for x in val if not x.exists()])) + LOGGER.info(emojis('\nDataset not found ⚠️, missing paths %s' % [str(x) for x in val if not x.exists()])) if not s or not autodownload: raise Exception(emojis('Dataset not found ❌')) t = time.time() From b92430a83bfe11dd3be74e486c37b836be46bc98 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 22 Jul 2022 19:01:16 +0200 Subject: [PATCH 1220/1976] Update hubconf.py to reset logging level to INFO (#8680) --- hubconf.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hubconf.py b/hubconf.py index 8748279e027a..25f9d1b82c14 100644 --- a/hubconf.py +++ b/hubconf.py @@ -34,7 +34,6 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo from utils.general import LOGGER, check_requirements, intersect_dicts, logging from utils.torch_utils import select_device - level = LOGGER.level if not verbose: LOGGER.setLevel(logging.WARNING) check_requirements(exclude=('tensorboard', 'thop', 'opencv-python')) @@ -58,7 +57,8 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo model.names = ckpt['model'].names # set class names attribute if autoshape: model = AutoShape(model) # for file/URI/PIL/cv2/np inputs and NMS - LOGGER.setLevel(level) + if not verbose: + LOGGER.setLevel(logging.INFO) # reset to default return model.to(device) except Exception as e: From 1c5e92aba11f0dd007716821e7cd151d532342a8 Mon Sep 17 00:00:00 2001 From: UnglvKitDe <100289696+UnglvKitDe@users.noreply.github.com> Date: Sat, 23 Jul 2022 01:25:17 +0200 Subject: [PATCH 1221/1976] Add generator and worker seed (#8602) * Add generator and worker seed * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dataloaders.py * Update dataloaders.py * Update dataloaders.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- utils/dataloaders.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 4f1c98fd880d..85a39ab52f82 100755 
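Patches 1218 and 1220 above wrestle with the same problem: temporarily silencing LOGGER during a hub model load and restoring output afterwards (1218 restores the saved level, 1220 resets to INFO). A context-manager sketch of the save-and-restore variant — not the hubconf.py code itself:

```
import logging
from contextlib import contextmanager

@contextmanager
def quiet(logger, level=logging.WARNING):
    old = logger.level        # save the current threshold
    logger.setLevel(level)    # suppress INFO messages
    try:
        yield logger
    finally:
        logger.setLevel(old)  # always restore, even if the body raises

LOGGER = logging.getLogger('yolov5')
with quiet(LOGGER):
    pass  # model load would run here without INFO chatter
```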
--- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -91,6 +91,13 @@ def exif_transpose(image): return image +def seed_worker(worker_id): + # Set dataloader worker seed https://pytorch.org/docs/stable/notes/randomness.html#dataloader + worker_seed = torch.initial_seed() % 2 ** 32 + np.random.seed(worker_seed) + random.seed(worker_seed) + + def create_dataloader(path, imgsz, batch_size, @@ -130,13 +137,17 @@ def create_dataloader(path, nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) # number of workers sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates + generator = torch.Generator() + generator.manual_seed(0) return loader(dataset, batch_size=batch_size, shuffle=shuffle and sampler is None, num_workers=nw, sampler=sampler, pin_memory=True, - collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn), dataset + collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn, + worker_init_fn=seed_worker, + generator=generator), dataset class InfiniteDataLoader(dataloader.DataLoader): From 7f7bd6fbcd214886aa2a275500eb5e05933bea05 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 23 Jul 2022 18:24:14 +0200 Subject: [PATCH 1222/1976] Set `torch.cuda.manual_seed_all()` for DDP (#8688) * Set `torch.cuda.manual_seed_all()` for DDP * Update general.py * Update general.py --- utils/general.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/utils/general.py b/utils/general.py index 925f7fbf0ecb..b049ce469a71 100755 --- a/utils/general.py +++ b/utils/general.py @@ -203,14 +203,14 @@ def init_seeds(seed=0, deterministic=False): if deterministic and check_version(torch.__version__, '1.12.0'): # https://github.com/ultralytics/yolov5/pull/8213 torch.use_deterministic_algorithms(True) os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8' - # os.environ['PYTHONHASHSEED'] = str(seed) + os.environ['PYTHONHASHSEED'] = str(seed) random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) cudnn.benchmark, cudnn.deterministic = (False, True) if seed == 0 else (True, False) - # torch.cuda.manual_seed(seed) - # torch.cuda.manual_seed_all(seed) # for multi GPU, exception safe + torch.cuda.manual_seed(seed) + torch.cuda.manual_seed_all(seed) # for Multi-GPU, exception safe def intersect_dicts(da, db, exclude=()): From b510957650c890dee876146c43dcda1fdfc279d6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 23 Jul 2022 18:50:19 +0200 Subject: [PATCH 1223/1976] Move .dockerignore to root (#8690) --- utils/docker/.dockerignore => .dockerignore | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename utils/docker/.dockerignore => .dockerignore (100%) diff --git a/utils/docker/.dockerignore b/.dockerignore similarity index 100% rename from utils/docker/.dockerignore rename to .dockerignore From 916bdb1d61f23de92833bd491df54cda5c3ef0cc Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 23 Jul 2022 23:30:30 +0200 Subject: [PATCH 1224/1976] Faster crop saving (#8696) Faster crops Following https://github.com/ultralytics/yolov5/issues/8641#issuecomment-1193190325 --- utils/plots.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/plots.py b/utils/plots.py index 1bbb9c09c33a..53e326c23f6e 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -484,6 +484,6 @@ def save_one_box(xyxy, im, file=Path('im.jpg'), gain=1.02, pad=10, 
square=False, if save: file.parent.mkdir(parents=True, exist_ok=True) # make directory f = str(increment_path(file).with_suffix('.jpg')) - # cv2.imwrite(f, crop) # https://github.com/ultralytics/yolov5/issues/7007 chroma subsampling issue - Image.fromarray(cv2.cvtColor(crop, cv2.COLOR_BGR2RGB)).save(f, quality=95, subsampling=0) + # cv2.imwrite(f, crop) # save BGR, https://github.com/ultralytics/yolov5/issues/7007 chroma subsampling issue + Image.fromarray(crop[..., ::-1]).save(f, quality=95, subsampling=0) # save RGB return crop From 0ab303b04499b6b912d8212a4fa10fe3fcb78efa Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 25 Jul 2022 00:02:09 +0200 Subject: [PATCH 1225/1976] Remove `else:` from load_image() (#8692) --- utils/dataloaders.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 85a39ab52f82..36610c88980a 100755 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -682,8 +682,7 @@ def load_image(self, i): interp = cv2.INTER_LINEAR if (self.augment or r > 1) else cv2.INTER_AREA im = cv2.resize(im, (int(w0 * r), int(h0 * r)), interpolation=interp) return im, (h0, w0), im.shape[:2] # im, hw_original, hw_resized - else: - return self.ims[i], self.im_hw0[i], self.im_hw[i] # im, hw_original, hw_resized + return self.ims[i], self.im_hw0[i], self.im_hw[i] # im, hw_original, hw_resized def cache_images_to_disk(self, i): # Saves an image as an *.npy file for faster loading From 7215a0fb41a90d8a0bf259fa708dff608a1f0262 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 25 Jul 2022 13:57:05 +0200 Subject: [PATCH 1226/1976] Avoid cv2 window init code on Windows (#8712) Resolves https://github.com/ultralytics/yolov5/issues/8642 --- detect.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/detect.py b/detect.py index bb09ce171a96..01ad797ae6f1 100644 --- a/detect.py +++ b/detect.py @@ -26,6 +26,7 @@ import argparse import os +import platform import sys from pathlib import Path @@ -173,7 +174,7 @@ def run( # Stream results im0 = annotator.result() if view_img: - if p not in windows: + if platform.system() == 'Linux' and p not in windows: windows.append(p) cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux) cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0]) From a6f197ae79d546efd58e4a4f206621196ab5cacd Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 25 Jul 2022 16:52:28 +0200 Subject: [PATCH 1227/1976] Update dataloaders.py (#8714) * Update dataloaders.py * Update dataloaders.py --- utils/dataloaders.py | 36 ++++++++++++++---------------------- 1 file changed, 14 insertions(+), 22 deletions(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 36610c88980a..c32f60fe4ec7 100755 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -3,6 +3,7 @@ Dataloaders and dataset utils """ +import contextlib import glob import hashlib import json @@ -55,13 +56,10 @@ def get_hash(paths): def exif_size(img): # Returns exif-corrected PIL size s = img.size # (width, height) - try: + with contextlib.suppress(Exception): rotation = dict(img._getexif().items())[orientation] if rotation in [6, 8]: # rotation 270 or 90 s = (s[1], s[0]) - except Exception: - pass - return s @@ -859,18 +857,13 @@ def collate_fn4(batch): # Ancillary functions -------------------------------------------------------------------------------------------------- -def create_folder(path='./new'): - # Create folder - if os.path.exists(path): - shutil.rmtree(path) # delete output 
folder - os.makedirs(path) # make new output folder - - def flatten_recursive(path=DATASETS_DIR / 'coco128'): # Flatten a recursive directory by bringing all files to top level - new_path = Path(str(path) + '_flat') - create_folder(new_path) - for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)): + new_path = Path(f'{str(path)}_flat') + if os.path.exists(new_path): + shutil.rmtree(new_path) # delete output folder + os.makedirs(new_path) # make new output folder + for file in tqdm(glob.glob(f'{str(Path(path))}/**/*.*', recursive=True)): shutil.copyfile(file, new_path / Path(file).name) @@ -929,7 +922,7 @@ def autosplit(path=DATASETS_DIR / 'coco128/images', weights=(0.9, 0.1, 0.0), ann for i, img in tqdm(zip(indices, files), total=n): if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label with open(path.parent / txt[i], 'a') as f: - f.write('./' + img.relative_to(path.parent).as_posix() + '\n') # add image to txt file + f.write(f'./{img.relative_to(path.parent).as_posix()}' + '\n') # add image to txt file def verify_image_label(args): @@ -1011,14 +1004,13 @@ def _find_yaml(dir): def _unzip(path): # Unzip data.zip - if str(path).endswith('.zip'): # path is data.zip - assert Path(path).is_file(), f'Error unzipping {path}, file not found' - ZipFile(path).extractall(path=path.parent) # unzip - dir = path.with_suffix('') # dataset directory == zip name - assert dir.is_dir(), f'Error unzipping {path}, {dir} not found. path/to/abc.zip MUST unzip to path/to/abc/' - return True, str(dir), _find_yaml(dir) # zipped, data_dir, yaml_path - else: # path is data.yaml + if not str(path).endswith('.zip'): # path is data.yaml return False, None, path + assert Path(path).is_file(), f'Error unzipping {path}, file not found' + ZipFile(path).extractall(path=path.parent) # unzip + dir = path.with_suffix('') # dataset directory == zip name + assert dir.is_dir(), f'Error unzipping {path}, {dir} not found. 
path/to/abc.zip MUST unzip to path/to/abc/' + return True, str(dir), _find_yaml(dir) # zipped, data_dir, yaml_path def _hub_ops(f, max_dim=1920): # HUB ops for 1 image 'f': resize and save at reduced quality in /dataset-hub for web/app viewing From b367860196a2590a5f44c9b18401dedfc0543077 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 25 Jul 2022 18:20:01 +0200 Subject: [PATCH 1228/1976] New `HUBDatasetStats()` class (#8716) * New `HUBDatasetStats()` class Usage examples: ``` from utils.dataloaders import * stats = HUBDatasetStats('coco128.yaml', autodownload=True) # method 1 stats = HUBDatasetStats('path/to/coco128_with_yaml.zip') # method 1 stats.get_json(save=False) stats.process_images() ``` @kalenmike * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dataloaders.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dataloaders.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dataloaders.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dataloaders.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/dataloaders.py | 146 +++++++++++++++++++++---------------------- 1 file changed, 70 insertions(+), 76 deletions(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index c32f60fe4ec7..9ccfe2545d75 100755 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -977,21 +977,35 @@ def verify_image_label(args): return [None, None, None, None, nm, nf, ne, nc, msg] -def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False, profile=False, hub=False): +class HUBDatasetStats(): """ Return dataset statistics dictionary with images and instances counts per split per class To run in parent directory: export PYTHONPATH="$PWD/yolov5" - Usage1: from utils.dataloaders import *; dataset_stats('coco128.yaml', autodownload=True) - Usage2: from utils.dataloaders import *; dataset_stats('path/to/coco128_with_yaml.zip') + Usage1: from utils.dataloaders import *; HUBDatasetStats('coco128.yaml', autodownload=True) + Usage2: from utils.dataloaders import *; HUBDatasetStats('path/to/coco128_with_yaml.zip') Arguments path: Path to data.yaml or data.zip (with data.yaml inside data.zip) autodownload: Attempt to download dataset if not found locally - verbose: Print stats dictionary """ - def _round_labels(labels): - # Update labels to integer class and 6 decimal place floats - return [[int(c), *(round(x, 4) for x in points)] for c, *points in labels] + def __init__(self, path='coco128.yaml', autodownload=False): + # Initialize class + zipped, data_dir, yaml_path = self._unzip(Path(path)) + try: + with open(check_yaml(yaml_path), errors='ignore') as f: + data = yaml.safe_load(f) # data dict + if zipped: + data['path'] = data_dir + except Exception as e: + raise Exception("error/HUB/dataset_stats/yaml_load") from e + + check_dataset(data, autodownload) # download dataset if missing + self.hub_dir = Path(data['path'] + '-hub') + self.im_dir = self.hub_dir / 'images' + self.im_dir.mkdir(parents=True, exist_ok=True) # makes /images + self.stats = {'nc': data['nc'], 'names': data['names']} # statistics dictionary + self.data = data + @staticmethod def _find_yaml(dir): # Return data.yaml file 
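The _hub_ops resize rule (function form in patch 1227, method form in patch 1228) downscales each image so its longer side is at most max_dim while preserving aspect ratio, preferring PIL with an OpenCV fallback. A standalone sketch of the PIL path; the JPEG quality value is an assumption, since the save call is elided from the hunk:

```
from PIL import Image

def downscale(src, dst, max_dim=1920, quality=75):  # quality=75 is assumed, not shown in the diff
    im = Image.open(src)
    r = max_dim / max(im.height, im.width)  # ratio that caps the longer side
    if r < 1.0:  # only shrink, never enlarge
        im = im.resize((round(im.width * r), round(im.height * r)))
    im.save(dst, 'JPEG', quality=quality)
```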
files = list(dir.glob('*.yaml')) or list(dir.rglob('*.yaml')) # try root level first and then recursive @@ -1002,7 +1016,7 @@ def _find_yaml(dir): assert len(files) == 1, f'Multiple *.yaml files found: {files}, only 1 *.yaml file allowed in {dir}' return files[0] - def _unzip(path): + def _unzip(self, path): # Unzip data.zip if not str(path).endswith('.zip'): # path is data.yaml return False, None, path @@ -1010,11 +1024,11 @@ def _unzip(path): ZipFile(path).extractall(path=path.parent) # unzip dir = path.with_suffix('') # dataset directory == zip name assert dir.is_dir(), f'Error unzipping {path}, {dir} not found. path/to/abc.zip MUST unzip to path/to/abc/' - return True, str(dir), _find_yaml(dir) # zipped, data_dir, yaml_path + return True, str(dir), self._find_yaml(dir) # zipped, data_dir, yaml_path - def _hub_ops(f, max_dim=1920): + def _hub_ops(self, f, max_dim=1920): # HUB ops for 1 image 'f': resize and save at reduced quality in /dataset-hub for web/app viewing - f_new = im_dir / Path(f).name # dataset-hub image filename + f_new = self.im_dir / Path(f).name # dataset-hub image filename try: # use PIL im = Image.open(f) r = max_dim / max(im.height, im.width) # ratio @@ -1030,69 +1044,49 @@ def _hub_ops(f, max_dim=1920): im = cv2.resize(im, (int(im_width * r), int(im_height * r)), interpolation=cv2.INTER_AREA) cv2.imwrite(str(f_new), im) - zipped, data_dir, yaml_path = _unzip(Path(path)) - try: - with open(check_yaml(yaml_path), errors='ignore') as f: - data = yaml.safe_load(f) # data dict - if zipped: - data['path'] = data_dir # TODO: should this be dir.resolve()?` - except Exception: - raise Exception("error/HUB/dataset_stats/yaml_load") - - check_dataset(data, autodownload) # download dataset if missing - hub_dir = Path(data['path'] + ('-hub' if hub else '')) - stats = {'nc': data['nc'], 'names': data['names']} # statistics dictionary - for split in 'train', 'val', 'test': - if data.get(split) is None: - stats[split] = None # i.e. no test set - continue - x = [] - dataset = LoadImagesAndLabels(data[split]) # load dataset - for label in tqdm(dataset.labels, total=dataset.n, desc='Statistics'): - x.append(np.bincount(label[:, 0].astype(int), minlength=data['nc'])) - x = np.array(x) # shape(128x80) - stats[split] = { - 'instance_stats': { - 'total': int(x.sum()), - 'per_class': x.sum(0).tolist()}, - 'image_stats': { - 'total': dataset.n, - 'unlabelled': int(np.all(x == 0, 1).sum()), - 'per_class': (x > 0).sum(0).tolist()}, - 'labels': [{ - str(Path(k).name): _round_labels(v.tolist())} for k, v in zip(dataset.im_files, dataset.labels)]} - - if hub: - im_dir = hub_dir / 'images' - im_dir.mkdir(parents=True, exist_ok=True) - for _ in tqdm(ThreadPool(NUM_THREADS).imap(_hub_ops, dataset.im_files), total=dataset.n, desc='HUB Ops'): + def get_json(self, save=False, verbose=False): + # Return dataset JSON for Ultralytics HUB + def _round(labels): + # Update labels to integer class and 6 decimal place floats + return [[int(c), *(round(x, 4) for x in points)] for c, *points in labels] + + for split in 'train', 'val', 'test': + if self.data.get(split) is None: + self.stats[split] = None # i.e. 
no test set + continue + dataset = LoadImagesAndLabels(self.data[split]) # load dataset + x = np.array([ + np.bincount(label[:, 0].astype(int), minlength=self.data['nc']) + for label in tqdm(dataset.labels, total=dataset.n, desc='Statistics')]) # shape(128x80) + self.stats[split] = { + 'instance_stats': { + 'total': int(x.sum()), + 'per_class': x.sum(0).tolist()}, + 'image_stats': { + 'total': dataset.n, + 'unlabelled': int(np.all(x == 0, 1).sum()), + 'per_class': (x > 0).sum(0).tolist()}, + 'labels': [{ + str(Path(k).name): _round(v.tolist())} for k, v in zip(dataset.im_files, dataset.labels)]} + + # Save, print and return + if save: + stats_path = self.hub_dir / 'stats.json' + print(f'Saving {stats_path.resolve()}...') + with open(stats_path, 'w') as f: + json.dump(self.stats, f) # save stats.json + if verbose: + print(json.dumps(self.stats, indent=2, sort_keys=False)) + return self.stats + + def process_images(self): + # Compress images for Ultralytics HUB + for split in 'train', 'val', 'test': + if self.data.get(split) is None: + continue + dataset = LoadImagesAndLabels(self.data[split]) # load dataset + desc = f'{split} images' + for _ in tqdm(ThreadPool(NUM_THREADS).imap(self._hub_ops, dataset.im_files), total=dataset.n, desc=desc): pass - - # Profile - stats_path = hub_dir / 'stats.json' - if profile: - for _ in range(1): - file = stats_path.with_suffix('.npy') - t1 = time.time() - np.save(file, stats) - t2 = time.time() - x = np.load(file, allow_pickle=True) - print(f'stats.npy times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write') - - file = stats_path.with_suffix('.json') - t1 = time.time() - with open(file, 'w') as f: - json.dump(stats, f) # save stats *.json - t2 = time.time() - with open(file) as f: - x = json.load(f) # load hyps dict - print(f'stats.json times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write') - - # Save, print and return - if hub: - print(f'Saving {stats_path.resolve()}...') - with open(stats_path, 'w') as f: - json.dump(stats, f) # save stats.json - if verbose: - print(json.dumps(stats, indent=2, sort_keys=False)) - return stats + print(f'Done. 
All images saved to {self.im_dir}') + return self.im_dir From 2e1291fdce26b3cff213e9e7ee8c196fa263b688 Mon Sep 17 00:00:00 2001 From: UnglvKitDe <100289696+UnglvKitDe@users.noreply.github.com> Date: Tue, 26 Jul 2022 13:52:56 +0200 Subject: [PATCH 1229/1976] Fix BGR->RGB Bug in albumentations #8641 (#8695) * Fix BGR->RGB Bug in albumentations https://github.com/ultralytics/yolov5/issues/8641 * Change transform methode from cv2 to numpy * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Simplify * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update augmentations.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- utils/augmentations.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/utils/augmentations.py b/utils/augmentations.py index 3f764c06ae3b..97506ae25123 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -39,8 +39,9 @@ def __init__(self): def __call__(self, im, labels, p=1.0): if self.transform and random.random() < p: - new = self.transform(image=im, bboxes=labels[:, 1:], class_labels=labels[:, 0]) # transformed - im, labels = new['image'], np.array([[c, *b] for c, b in zip(new['class_labels'], new['bboxes'])]) + new = self.transform(image=im[..., ::-1], bboxes=labels[:, 1:], class_labels=labels[:, 0]) # transformed + im = new['image'][..., ::-1] # RGB to BGR + labels = np.array([[c, *b] for c, b in zip(new['class_labels'], new['bboxes'])]) return im, labels From d5116bbe9c9411b7c0c969fce32b86abd74c6d4a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 26 Jul 2022 17:50:49 +0200 Subject: [PATCH 1230/1976] coremltools>=5.2 for CoreML export (#8725) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 8548f67b5a48..de3239cbdd42 100644 --- a/requirements.txt +++ b/requirements.txt @@ -23,7 +23,7 @@ pandas>=1.1.4 seaborn>=0.11.0 # Export -------------------------------------- -# coremltools>=4.1 # CoreML export +# coremltools>=5.2 # CoreML export # onnx>=1.9.0 # ONNX export # onnx-simplifier>=0.4.1 # ONNX simplifier # nvidia-pyindex # TensorRT export From c775a296a7db2e523a230b2a0900ecd12845ecde Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 26 Jul 2022 19:00:48 +0200 Subject: [PATCH 1231/1976] Revert "Fix BGR->RGB Bug in albumentations #8641" (#8727) Revert "Fix BGR->RGB Bug in albumentations #8641 (#8695)" This reverts commit 2e1291fdce26b3cff213e9e7ee8c196fa263b688. 
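The crop-saving change in patch 1224 and the albumentations fix-and-revert pair (1229/1231) all turn on the same detail: OpenCV arrays are BGR while PIL and albumentations expect RGB, and `[..., ::-1]` flips the channel axis as a zero-copy numpy view. A small demonstration:

```
import numpy as np

bgr = np.zeros((4, 4, 3), dtype=np.uint8)
bgr[..., 0] = 255  # channel 0 is Blue in BGR layout

rgb = bgr[..., ::-1]  # negative-stride view of the same buffer, no copy
assert rgb[0, 0].tolist() == [0, 0, 255]  # blue now sits in the last channel, as RGB expects
assert rgb.base is bgr  # confirms a view, not a copy
```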
--- utils/augmentations.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/utils/augmentations.py b/utils/augmentations.py index 97506ae25123..3f764c06ae3b 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -39,9 +39,8 @@ def __init__(self): def __call__(self, im, labels, p=1.0): if self.transform and random.random() < p: - new = self.transform(image=im[..., ::-1], bboxes=labels[:, 1:], class_labels=labels[:, 0]) # transformed - im = new['image'][..., ::-1] # RGB to BGR - labels = np.array([[c, *b] for c, b in zip(new['class_labels'], new['bboxes'])]) + new = self.transform(image=im, bboxes=labels[:, 1:], class_labels=labels[:, 0]) # transformed + im, labels = new['image'], np.array([[c, *b] for c, b in zip(new['class_labels'], new['bboxes'])]) return im, labels From 0b5ac224aef287ac3ac9ebf70ade60159450a0b1 Mon Sep 17 00:00:00 2001 From: Max Strobel Date: Tue, 26 Jul 2022 18:02:44 +0100 Subject: [PATCH 1232/1976] fix: broken ``is_docker`` check (#8711) Checking if ``/workspace`` exists is not a reliable method to check if the process runs in a docker container. Reusing the logic from the npm "is-docker" package to check if the process runs in a container. References: https://github.com/sindresorhus/is-docker/blob/main/index.js Fixes #8710. Co-authored-by: Maximilian Strobel Co-authored-by: Glenn Jocher --- utils/general.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/utils/general.py b/utils/general.py index b049ce469a71..67078338d762 100755 --- a/utils/general.py +++ b/utils/general.py @@ -224,9 +224,15 @@ def get_latest_run(search_dir='.'): return max(last_list, key=os.path.getctime) if last_list else '' -def is_docker(): - # Is environment a Docker container? - return Path('/workspace').exists() # or Path('/.dockerenv').exists() +def is_docker() -> bool: + """Check if the process runs inside a docker container.""" + if Path("/.dockerenv").exists(): + return True + try: # check if docker is in control groups + with open("/proc/self/cgroup") as file: + return any("docker" in line for line in file) + except OSError: + return False def is_colab(): From 3e858633b283767f038b4cab910a95e40fe8577b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 27 Jul 2022 17:27:44 +0200 Subject: [PATCH 1233/1976] Revert protobuf<=3.20.1 (#8742) Resolve #8012 (again) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index de3239cbdd42..6313cecee578 100644 --- a/requirements.txt +++ b/requirements.txt @@ -12,7 +12,7 @@ scipy>=1.4.1 torch>=1.7.0 torchvision>=0.8.1 tqdm>=4.64.0 -protobuf<4.21.3 # https://github.com/ultralytics/yolov5/issues/8012 +protobuf<=3.20.1 # https://github.com/ultralytics/yolov5/issues/8012 # Logging ------------------------------------- tensorboard>=2.4.1 From 587a3a37c57661c3a0ef710d2b309199fad632d2 Mon Sep 17 00:00:00 2001 From: Colin Wong Date: Fri, 29 Jul 2022 06:51:16 -0500 Subject: [PATCH 1234/1976] Dynamic batch size support for TensorRT (#8526) * Dynamic batch size support for TensorRT * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update export.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix optimization profile when batch size is 1 * Warn users if they use batch-size=1 with dynamic * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * More descriptive 
assertion error * Fix syntax * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * pre-commit formatting sucked * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update export.py Co-authored-by: Colin Wong Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- export.py | 21 +++++++++++++++------ models/common.py | 22 ++++++++++++++++------ 2 files changed, 31 insertions(+), 12 deletions(-) diff --git a/export.py b/export.py index 3629915f028d..4846624541e4 100644 --- a/export.py +++ b/export.py @@ -216,8 +216,9 @@ def export_coreml(model, im, file, int8, half, prefix=colorstr('CoreML:')): return None, None -def export_engine(model, im, file, train, half, simplify, workspace=4, verbose=False, prefix=colorstr('TensorRT:')): +def export_engine(model, im, file, train, half, dynamic, simplify, workspace=4, verbose=False): # YOLOv5 TensorRT export https://developer.nvidia.com/tensorrt + prefix = colorstr('TensorRT:') try: assert im.device.type != 'cpu', 'export running on CPU but must be on GPU, i.e. `python export.py --device 0`' try: @@ -230,11 +231,11 @@ def export_engine(model, im, file, train, half, simplify, workspace=4, verbose=F if trt.__version__[0] == '7': # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012 grid = model.model[-1].anchor_grid model.model[-1].anchor_grid = [a[..., :1, :1, :] for a in grid] - export_onnx(model, im, file, 12, train, False, simplify) # opset 12 + export_onnx(model, im, file, 12, train, dynamic, simplify) # opset 12 model.model[-1].anchor_grid = grid else: # TensorRT >= 8 check_version(trt.__version__, '8.0.0', hard=True) # require tensorrt>=8.0.0 - export_onnx(model, im, file, 13, train, False, simplify) # opset 13 + export_onnx(model, im, file, 13, train, dynamic, simplify) # opset 13 onnx = file.with_suffix('.onnx') LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...') @@ -263,6 +264,14 @@ def export_engine(model, im, file, train, half, simplify, workspace=4, verbose=F for out in outputs: LOGGER.info(f'{prefix}\toutput "{out.name}" with shape {out.shape} and dtype {out.dtype}') + if dynamic: + if im.shape[0] <= 1: + LOGGER.warning(f"{prefix}WARNING: --dynamic model requires maximum --batch-size argument") + profile = builder.create_optimization_profile() + for inp in inputs: + profile.set_shape(inp.name, (1, *im.shape[1:]), (max(1, im.shape[0] // 2), *im.shape[1:]), im.shape) + config.add_optimization_profile(profile) + LOGGER.info(f'{prefix} building FP{16 if builder.platform_has_fast_fp16 and half else 32} engine in {f}') if builder.platform_has_fast_fp16 and half: config.set_flag(trt.BuilderFlag.FP16) @@ -460,7 +469,7 @@ def run( keras=False, # use Keras optimize=False, # TorchScript: optimize for mobile int8=False, # CoreML/TF INT8 quantization - dynamic=False, # ONNX/TF: dynamic axes + dynamic=False, # ONNX/TF/TensorRT: dynamic axes simplify=False, # ONNX: simplify model opset=12, # ONNX: opset version verbose=False, # TensorRT: verbose log @@ -520,7 +529,7 @@ def run( if jit: f[0] = export_torchscript(model, im, file, optimize) if engine: # TensorRT required before ONNX - f[1] = export_engine(model, im, file, train, half, simplify, workspace, verbose) + f[1] = export_engine(model, im, file, train, half, dynamic, simplify, workspace, verbose) if onnx or xml: # OpenVINO requires ONNX f[2] = export_onnx(model, im, file, opset, train, 
dynamic, simplify) if xml: # OpenVINO @@ -579,7 +588,7 @@ def parse_opt(): parser.add_argument('--keras', action='store_true', help='TF: use Keras') parser.add_argument('--optimize', action='store_true', help='TorchScript: optimize for mobile') parser.add_argument('--int8', action='store_true', help='CoreML/TF INT8 quantization') - parser.add_argument('--dynamic', action='store_true', help='ONNX/TF: dynamic axes') + parser.add_argument('--dynamic', action='store_true', help='ONNX/TF/TensorRT: dynamic axes') parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model') parser.add_argument('--opset', type=int, default=12, help='ONNX: opset version') parser.add_argument('--verbose', action='store_true', help='TensorRT: verbose log') diff --git a/models/common.py b/models/common.py index 5ea1c307f034..959c965e6002 100644 --- a/models/common.py +++ b/models/common.py @@ -384,19 +384,24 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, logger = trt.Logger(trt.Logger.INFO) with open(w, 'rb') as f, trt.Runtime(logger) as runtime: model = runtime.deserialize_cuda_engine(f.read()) + context = model.create_execution_context() bindings = OrderedDict() fp16 = False # default updated below + dynamic_input = False for index in range(model.num_bindings): name = model.get_binding_name(index) dtype = trt.nptype(model.get_binding_dtype(index)) - shape = tuple(model.get_binding_shape(index)) + if model.binding_is_input(index): + if -1 in tuple(model.get_binding_shape(index)): # dynamic + dynamic_input = True + context.set_binding_shape(index, tuple(model.get_profile_shape(0, index)[2])) + if dtype == np.float16: + fp16 = True + shape = tuple(context.get_binding_shape(index)) data = torch.from_numpy(np.empty(shape, dtype=np.dtype(dtype))).to(device) bindings[name] = Binding(name, dtype, shape, data, int(data.data_ptr())) - if model.binding_is_input(index) and dtype == np.float16: - fp16 = True binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items()) - context = model.create_execution_context() - batch_size = bindings['images'].shape[0] + batch_size = bindings['images'].shape[0] # if dynamic, this is instead max batch size elif coreml: # CoreML LOGGER.info(f'Loading {w} for CoreML inference...') import coremltools as ct @@ -466,7 +471,12 @@ def forward(self, im, augment=False, visualize=False, val=False): im = im.cpu().numpy() # FP32 y = self.executable_network([im])[self.output_layer] elif self.engine: # TensorRT - assert im.shape == self.bindings['images'].shape, (im.shape, self.bindings['images'].shape) + if im.shape != self.bindings['images'].shape and self.dynamic_input: + self.context.set_binding_shape(self.model.get_binding_index('images'), im.shape) # reshape if dynamic + self.bindings['images'] = self.bindings['images']._replace(shape=im.shape) + assert im.shape == self.bindings['images'].shape, ( + f"image shape {im.shape} exceeds model max shape {self.bindings['images'].shape}" if self.dynamic_input + else f"image shape {im.shape} does not match model shape {self.bindings['images'].shape}") self.binding_addrs['images'] = int(im.data_ptr()) self.context.execute_v2(list(self.binding_addrs.values())) y = self.bindings['output'].data From 567397d67ae173fb82e06672a763cc28c5cfeb2b Mon Sep 17 00:00:00 2001 From: jbutle55 Date: Fri, 29 Jul 2022 06:06:23 -0600 Subject: [PATCH 1235/1976] Fix confusion matrix update when no predictions are made (#8748) * Fix confusion matrix update when no predictions are made * Update metrics.py * Simply 
confusion matrix changes * Simply confusion matrix fix Co-authored-by: Glenn Jocher --- utils/metrics.py | 6 ++++++ val.py | 2 ++ 2 files changed, 8 insertions(+) diff --git a/utils/metrics.py b/utils/metrics.py index 6bba4cfe2a42..9bf084c78854 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -139,6 +139,12 @@ def process_batch(self, detections, labels): Returns: None, updates confusion matrix accordingly """ + if detections is None: + gt_classes = labels.int() + for i, gc in enumerate(gt_classes): + self.matrix[self.nc, gc] += 1 # background FN + return + detections = detections[detections[:, 4] > self.conf] gt_classes = labels[:, 0].int() detection_classes = detections[:, 5].int() diff --git a/val.py b/val.py index b0cc8e7f1577..48207a1130a6 100644 --- a/val.py +++ b/val.py @@ -228,6 +228,8 @@ def run( if npr == 0: if nl: stats.append((correct, *torch.zeros((2, 0), device=device), labels[:, 0])) + if plots: + confusion_matrix.process_batch(detections=None, labels=labels[:, 0]) continue # Predictions From e309a855860bc3f618c3541909c515a65ffc35b0 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 29 Jul 2022 14:45:29 +0200 Subject: [PATCH 1236/1976] Add val.py no label warning (#8782) Help resolve confusion around zero-metrics val.py results when no labels are found in https://github.com/ultralytics/yolov5/issues/8753 --- val.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/val.py b/val.py index 48207a1130a6..006ade37d03e 100644 --- a/val.py +++ b/val.py @@ -275,6 +275,8 @@ def run( # Print results pf = '%20s' + '%11i' * 2 + '%11.3g' * 4 # print format LOGGER.info(pf % ('all', seen, nt.sum(), mp, mr, map50, map)) + if nt.sum() == 0: + LOGGER.warning(emojis(f'WARNING: no labels found in {task} set, can not compute metrics without labels ⚠️')) # Print results per class if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats): From 52d3a9aee1016604652898fed679e55783e264ed Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 29 Jul 2022 17:07:24 +0200 Subject: [PATCH 1237/1976] Fix `detect.py --update` list bug (#8783) Fix detect.py --update Resolves https://github.com/ultralytics/yolov5/issues/8776 --- detect.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/detect.py b/detect.py index 01ad797ae6f1..8741e7f7fd62 100644 --- a/detect.py +++ b/detect.py @@ -210,7 +210,7 @@ def run( s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") if update: - strip_optimizer(weights) # update model (to fix SourceChangeWarning) + strip_optimizer(weights[0]) # update model (to fix SourceChangeWarning) def parse_opt(): From e34ae8837b652a35f115d3e780c18abae4bb89ce Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 30 Jul 2022 14:04:03 +0200 Subject: [PATCH 1238/1976] ci-testing.yml Windows-friendly ENV variables (#8794) Per https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions --- .github/workflows/ci-testing.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index e3359cd3a283..61a527e62ecf 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -85,12 +85,12 @@ jobs: - name: Check environment run: | python -c "import utils; utils.notebook_init()" - echo "RUNNER_OS is $RUNNER_OS" - echo "GITHUB_EVENT_NAME is $GITHUB_EVENT_NAME" - echo "GITHUB_WORKFLOW is $GITHUB_WORKFLOW" - 
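A small illustration of the confusion-matrix fix above (class ids are made up; the calls mirror val.py's new `process_batch(detections=None, ...)` path). With zero predictions, every ground-truth class is now tallied as a background false negative instead of being skipped:

import torch
from utils.metrics import ConfusionMatrix

cm = ConfusionMatrix(nc=3)  # hypothetical 3-class problem
cm.process_batch(detections=None, labels=torch.tensor([0, 2]))  # two GT boxes, no predictions
print(cm.matrix[3, 0], cm.matrix[3, 2])  # 1.0 1.0, background-FN row for classes 0 and 2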
echo "GITHUB_ACTOR is $GITHUB_ACTOR" - echo "GITHUB_REPOSITORY is $GITHUB_REPOSITORY" - echo "GITHUB_REPOSITORY_OWNER is $GITHUB_REPOSITORY_OWNER" + echo "RUNNER_OS is ${{ runner.os }}" + echo "GITHUB_EVENT_NAME is ${{ github.event_name }}" + echo "GITHUB_WORKFLOW is ${{ github.workflow }}" + echo "GITHUB_ACTOR is ${{ github.actor }}" + echo "GITHUB_REPOSITORY is ${{ github.repository }}" + echo "GITHUB_REPOSITORY_OWNER is ${{ github.repository_owner }}" - name: Run tests shell: bash run: | From 9111246208a6f7ada69f2cdc1d5832f22486620a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 30 Jul 2022 21:00:28 +0200 Subject: [PATCH 1239/1976] Add hubconf.py argparser (#8799) * Add hubconf.py argparser * Add hubconf.py argparser --- .github/workflows/ci-testing.yml | 2 +- hubconf.py | 19 ++++++++++++++----- 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 61a527e62ecf..5b492009d503 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -106,7 +106,7 @@ jobs: # Detect python detect.py --weights $model.pt --device $d python detect.py --weights $best --device $d - python hubconf.py # hub + python hubconf.py --model $model # hub # Export # python models/tf.py --weights $model.pt # build TF model python models/yolo.py --cfg $model.yaml # build PyTorch model diff --git a/hubconf.py b/hubconf.py index 25f9d1b82c14..f579c6471b20 100644 --- a/hubconf.py +++ b/hubconf.py @@ -41,7 +41,6 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo path = name.with_suffix('.pt') if name.suffix == '' and not name.is_dir() else name # checkpoint path try: device = select_device(device) - if pretrained and channels == 3 and classes == 80: model = DetectMultiBackend(path, device=device, fuse=autoshape) # download/load FP32 model # model = models.experimental.attempt_load(path, map_location=device) # download/load FP32 model @@ -123,10 +122,7 @@ def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=T if __name__ == '__main__': - model = _create(name='yolov5s', pretrained=True, channels=3, classes=80, autoshape=True, verbose=True) - # model = custom(path='path/to/model.pt') # custom - - # Verify inference + import argparse from pathlib import Path import numpy as np @@ -134,6 +130,16 @@ def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=T from utils.general import cv2 + # Argparser + parser = argparse.ArgumentParser() + parser.add_argument('--model', type=str, default='yolov5s', help='model name') + opt = parser.parse_args() + + # Model + model = _create(name=opt.model, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True) + # model = custom(path='path/to/model.pt') # custom + + # Images imgs = [ 'data/images/zidane.jpg', # filename Path('data/images/zidane.jpg'), # Path @@ -142,6 +148,9 @@ def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=T Image.open('data/images/bus.jpg'), # PIL np.zeros((320, 640, 3))] # numpy + # Inference results = model(imgs, size=320) # batched inference + + # Results results.print() results.save() From 56f5cb5a28ac8fb5afc49392633763203f37e9bb Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 30 Jul 2022 21:02:26 +0200 Subject: [PATCH 1240/1976] Print hubconf.py args (#8800) --- hubconf.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hubconf.py b/hubconf.py index f579c6471b20..08122eaca9dc 100644 --- a/hubconf.py +++ 
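The new argparser only changes how hubconf.py behaves when run as a script; the usual programmatic route is unchanged. A minimal sketch via torch.hub (network access and the default model name are assumed):

import torch

model = torch.hub.load('ultralytics/yolov5', 'yolov5s')  # resolves to _create('yolov5s', ...)
results = model('https://ultralytics.com/images/zidane.jpg', size=320)
results.print()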
b/hubconf.py @@ -128,12 +128,13 @@ def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=T import numpy as np from PIL import Image - from utils.general import cv2 + from utils.general import cv2, print_args # Argparser parser = argparse.ArgumentParser() parser.add_argument('--model', type=str, default='yolov5s', help='model name') opt = parser.parse_args() + print_args(vars(opt)) # Model model = _create(name=opt.model, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True) From ec4de43a8aabe497ade56de67bec2b86a22a9c61 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 30 Jul 2022 21:11:19 +0200 Subject: [PATCH 1241/1976] Update Colab Notebook CI (#8798) * Update Colab Notebook CI * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Created using Colaboratory * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update tutorial.ipynb Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- tutorial.ipynb | 38 ++++++++++++++++++-------------------- 1 file changed, 18 insertions(+), 20 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index bdfba399a883..dcb1162b40af 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -414,7 +414,7 @@ "import utils\n", "display = utils.notebook_init() # checks" ], - "execution_count": 1, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -466,7 +466,7 @@ "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images\n", "display.Image(filename='runs/detect/exp/zidane.jpg', width=600)" ], - "execution_count": 2, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -546,7 +546,7 @@ "torch.hub.download_url_to_file('https://ultralytics.com/assets/coco2017val.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../datasets && rm tmp.zip" ], - "execution_count": 3, + "execution_count": null, "outputs": [ { "output_type": "display_data", @@ -577,7 +577,7 @@ "# Run YOLOv5x on COCO val\n", "!python val.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65 --half" ], - "execution_count": 4, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -737,7 +737,7 @@ "# Train YOLOv5s on COCO128 for 3 epochs\n", "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache" ], - "execution_count": 7, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -1032,24 +1032,22 @@ "id": "FGH0ZjkGjejy" }, "source": [ - "# CI Checks\n", + "# YOLOv5 CI\n", "%%shell\n", - "export PYTHONPATH=\"$PWD\" # to run *.py. 
files in subdirectories\n", "rm -rf runs # remove runs/\n", - "for m in yolov5n; do # models\n", - " python train.py --img 64 --batch 32 --weights $m.pt --epochs 1 --device 0 # train pretrained\n", - " python train.py --img 64 --batch 32 --weights '' --cfg $m.yaml --epochs 1 --device 0 # train scratch\n", - " for d in 0 cpu; do # devices\n", - " python val.py --weights $m.pt --device $d # val official\n", - " python val.py --weights runs/train/exp/weights/best.pt --device $d # val custom\n", - " python detect.py --weights $m.pt --device $d # detect official\n", - " python detect.py --weights runs/train/exp/weights/best.pt --device $d # detect custom\n", + "m=yolov5n # official weights\n", + "b=runs/train/exp/weights/best # best.pt checkpoint\n", + "python train.py --imgsz 64 --batch 32 --weights $m.pt --cfg $m.yaml --epochs 1 --device 0 # train\n", + "for d in 0 cpu; do # devices\n", + " for w in $m $b; do # weights\n", + " python val.py --imgsz 64 --batch 32 --weights $w.pt --device $d # val\n", + " python detect.py --imgsz 64 --weights $w.pt --device $d # detect\n", " done\n", - " python hubconf.py # hub\n", - " python models/yolo.py --cfg $m.yaml # build PyTorch model\n", - " python models/tf.py --weights $m.pt # build TensorFlow model\n", - " python export.py --img 64 --batch 1 --weights $m.pt --include torchscript onnx # export\n", - "done" + "done\n", + "python hubconf.py --model $m # hub\n", + "python models/tf.py --weights $m.pt # build TF model\n", + "python models/yolo.py --cfg $m.yaml # build PyTorch model\n", + "python export.py --weights $m.pt --img 64 --include torchscript # export" ], "execution_count": null, "outputs": [] From 7921351b4e4030a2db9e1488f8ef5a166abff17d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 30 Jul 2022 21:25:16 +0200 Subject: [PATCH 1242/1976] Deprecate torch 1.6.0 `compat _non_persistent_buffers_set` (#8797) Deprecate torch 1.6.0 compat _non_persistent_buffers_set --- models/experimental.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/models/experimental.py b/models/experimental.py index db8e5b8e1dfd..0317c7526c99 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -89,8 +89,6 @@ def attempt_load(weights, device=None, inplace=True, fuse=True): if t is Detect and not isinstance(m.anchor_grid, list): delattr(m, 'anchor_grid') setattr(m, 'anchor_grid', [torch.zeros(1)] * m.nl) - elif t is Conv: - m._non_persistent_buffers_set = set() # torch 1.6.0 compatibility elif t is nn.Upsample and not hasattr(m, 'recompute_scale_factor'): m.recompute_scale_factor = None # torch 1.11.0 compatibility From 1e89807d9a208727e3f0e9bf26a1e286d0ce416b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 30 Jul 2022 22:19:40 +0200 Subject: [PATCH 1243/1976] `Detect.inplace=False` for multithread-safe inference (#8801) Detect.inplace=False for safe multithread inference --- hubconf.py | 1 + models/yolo.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/hubconf.py b/hubconf.py index 08122eaca9dc..5bb629005597 100644 --- a/hubconf.py +++ b/hubconf.py @@ -55,6 +55,7 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo if len(ckpt['model'].names) == classes: model.names = ckpt['model'].names # set class names attribute if autoshape: + model.model.model[-1].inplace = False # Detect.inplace=False for safe multithread inference model = AutoShape(model) # for file/URI/PIL/cv2/np inputs and NMS if not verbose: LOGGER.setLevel(logging.INFO) # reset to default diff --git a/models/yolo.py 
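For context on the `inplace` change: with AutoShape wrapping and `Detect.inplace=False`, one hub model can serve concurrent threads; the companion models/yolo.py hunk follows below. A rough sketch (URLs illustrative, and the point is correctness of concurrent use, not throughput):

import threading
import torch

model = torch.hub.load('ultralytics/yolov5', 'yolov5s')  # AutoShape sets Detect.inplace=False
urls = ['https://ultralytics.com/images/zidane.jpg', 'https://ultralytics.com/images/bus.jpg']
threads = [threading.Thread(target=model, args=(u, 320)) for u in urls]  # model(imgs, size)
for t in threads:
    t.start()
for t in threads:
    t.join()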
b/models/yolo.py index 56846815e08a..bc1893ccbc48 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -50,7 +50,7 @@ def __init__(self, nc=80, anchors=(), ch=(), inplace=True): # detection layer self.anchor_grid = [torch.zeros(1)] * self.nl # init anchor grid self.register_buffer('anchors', torch.tensor(anchors).float().view(self.nl, -1, 2)) # shape(nl,na,2) self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv - self.inplace = inplace # use in-place ops (e.g. slice assignment) + self.inplace = inplace # use inplace ops (e.g. slice assignment) def forward(self, x): z = [] # inference output From 59595c136581142766313c25d4fccd09c15a45b2 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 31 Jul 2022 04:17:39 +0200 Subject: [PATCH 1244/1976] Update train.py for `val.run(half=amp)` (#8804) Disable FP16 validation if AMP checks fail or amp=False. --- train.py | 1 + 1 file changed, 1 insertion(+) diff --git a/train.py b/train.py index c298692b7335..dc93c22d621a 100644 --- a/train.py +++ b/train.py @@ -367,6 +367,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio results, maps, _ = val.run(data_dict, batch_size=batch_size // WORLD_SIZE * 2, imgsz=imgsz, + half=amp, model=ema.ema, single_cls=single_cls, dataloader=val_loader, From 34cb277dc5316d8c41cbc7e2020ccf9be5c7dd84 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 31 Jul 2022 14:17:23 +0200 Subject: [PATCH 1245/1976] Fix val.py 'no labels found bug' (#8806) Resolves https://github.com/ultralytics/yolov5/issues/8791 Bug first introduced in #8782 --- val.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/val.py b/val.py index 006ade37d03e..851d679d269b 100644 --- a/val.py +++ b/val.py @@ -182,7 +182,7 @@ def run( seen = 0 confusion_matrix = ConfusionMatrix(nc=nc) - names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)} + names = dict(enumerate(model.names if hasattr(model, 'names') else model.module.names)) class_map = coco80_to_coco91_class() if is_coco else list(range(1000)) s = ('%20s' + '%11s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95') dt, p, r, f1, mp, mr, map50, map = [0.0, 0.0, 0.0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 @@ -250,7 +250,7 @@ def run( # Save/log if save_txt: - save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / (path.stem + '.txt')) + save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt') if save_json: save_one_json(predn, jdict, path, class_map) # append to COCO-JSON dictionary callbacks.run('on_val_image_end', pred, predn, path, names, im[si]) @@ -268,9 +268,7 @@ def run( tp, fp, p, r, f1, ap, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names) ap50, ap = ap[:, 0], ap.mean(1) # AP@0.5, AP@0.5:0.95 mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean() - nt = np.bincount(stats[3].astype(int), minlength=nc) # number of targets per class - else: - nt = torch.zeros(1) + nt = np.bincount(stats[3].astype(int), minlength=nc) # number of targets per class # Print results pf = '%20s' + '%11i' * 2 + '%11.3g' * 4 # print format From 9559601b9a24812dc6ae7f3d88a47febef5d0757 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 31 Jul 2022 14:54:55 +0200 Subject: [PATCH 1246/1976] Update requirements.txt with tf-cpu and tf-aarch64 (#8807) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 
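How the reworked val.py tail behaves, in a sketch with made-up class ids: `nt` is now always a bincount array, so the zero-label case flows into the new warning instead of the old `torch.zeros(1)` special case:

import numpy as np

target_cls = np.array([0., 0., 2., 5.])  # stats[3]: class id of every ground-truth label
nt = np.bincount(target_cls.astype(int), minlength=7)  # nc=7 assumed
print(nt, nt.sum())  # [2 0 1 0 0 1 0] 4; nt.sum()==0 would emit the no-labels warning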
6313cecee578..a7c567a67edf 100644 --- a/requirements.txt +++ b/requirements.txt @@ -29,7 +29,7 @@ seaborn>=0.11.0 # nvidia-pyindex # TensorRT export # nvidia-tensorrt # TensorRT export # scikit-learn==0.19.2 # CoreML quantization -# tensorflow>=2.4.1 # TFLite export +# tensorflow>=2.4.1 # TFLite export (or tensorflow-cpu, tensorflow-aarch64) # tensorflowjs>=3.9.0 # TF.js export # openvino-dev # OpenVINO export From 555976b346b33483984dcd8ff05276bf1107dfc8 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 31 Jul 2022 15:23:57 +0200 Subject: [PATCH 1247/1976] FROM nvcr.io/nvidia/pytorch:22.07-py3 (#8808) --- utils/docker/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index 312d169d1a76..0e0d82225bc4 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -3,7 +3,7 @@ # Image is CUDA-optimized for YOLOv5 single/multi-GPU training and inference # Start FROM NVIDIA PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch -FROM nvcr.io/nvidia/pytorch:22.06-py3 +FROM nvcr.io/nvidia/pytorch:22.07-py3 RUN rm -rf /opt/pytorch # remove 1.2GB dir # Downloads to user config dir From 7b72d9a6071cb39a578362175903f3db00ebcc7a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 31 Jul 2022 16:12:32 +0200 Subject: [PATCH 1248/1976] Update ci-testing.yml streamlined tests (#8809) * Update ci-testing.yml streamlined tests * Update ci-testing.yml * Update ci-testing.yml --- .github/workflows/ci-testing.yml | 37 ++++++++++++++------------------ 1 file changed, 16 insertions(+), 21 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 5b492009d503..444bab75bbbc 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -95,27 +95,22 @@ jobs: shell: bash run: | # export PYTHONPATH="$PWD" # to run '$ python *.py' files in subdirectories - d=cpu # device - model=${{ matrix.model }} - best=runs/train/exp/weights/best.pt - # Train - python train.py --img 64 --batch 32 --weights $model.pt --cfg $model.yaml --epochs 1 --device $d - # Val - python val.py --img 64 --batch 32 --weights $model.pt --device $d - python val.py --img 64 --batch 32 --weights $best --device $d - # Detect - python detect.py --weights $model.pt --device $d - python detect.py --weights $best --device $d - python hubconf.py --model $model # hub - # Export - # python models/tf.py --weights $model.pt # build TF model - python models/yolo.py --cfg $model.yaml # build PyTorch model - python export.py --weights $model.pt --img 64 --include torchscript # export - # Python + m=${{ matrix.model }} # official weights + b=runs/train/exp/weights/best # best.pt checkpoint + python train.py --imgsz 64 --batch 32 --weights $m.pt --cfg $m.yaml --epochs 1 --device cpu # train + for d in cpu; do # devices + for w in $m $b; do # weights + python val.py --imgsz 64 --batch 32 --weights $w.pt --device $d # val + python detect.py --imgsz 64 --weights $w.pt --device $d # detect + done + done + python hubconf.py --model $m # hub + # python models/tf.py --weights $m.pt # build TF model + python models/yolo.py --cfg $m.yaml # build PyTorch model + python export.py --weights $m.pt --img 64 --include torchscript # export python - < Date: Sun, 31 Jul 2022 20:47:38 +0430 Subject: [PATCH 1249/1976] Check git status on upstream `ultralytics` or `origin` dynamically (#8694) * Add remote ultralytics and check git status with that * Simplify * Update general.py * Update general.py * s fix 
Co-authored-by: Glenn Jocher --- utils/general.py | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/utils/general.py b/utils/general.py index 67078338d762..bab0a5d9ab34 100755 --- a/utils/general.py +++ b/utils/general.py @@ -310,20 +310,28 @@ def git_describe(path=ROOT): # path must be a directory @try_except @WorkingDirectory(ROOT) -def check_git_status(): - # Recommend 'git pull' if code is out of date - msg = ', for updates see https://github.com/ultralytics/yolov5' +def check_git_status(repo='ultralytics/yolov5'): + # YOLOv5 status check, recommend 'git pull' if code is out of date + url = f'https://github.com/{repo}' + msg = f', for updates see {url}' s = colorstr('github: ') # string assert Path('.git').exists(), s + 'skipping check (not a git repository)' + msg assert not is_docker(), s + 'skipping check (Docker image)' + msg assert check_online(), s + 'skipping check (offline)' + msg - cmd = 'git fetch && git config --get remote.origin.url' - url = check_output(cmd, shell=True, timeout=5).decode().strip().rstrip('.git') # git fetch + splits = re.split(pattern=r'\s', string=check_output('git remote -v', shell=True).decode()) + matches = [repo in s for s in splits] + if any(matches): + remote = splits[matches.index(True) - 1] + else: + remote = 'ultralytics' + check_output(f'git remote add {remote} {url}', shell=True) + check_output(f'git fetch {remote}', shell=True, timeout=5) # git fetch branch = check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out - n = int(check_output(f'git rev-list {branch}..origin/master --count', shell=True)) # commits behind + n = int(check_output(f'git rev-list {branch}..{remote}/master --count', shell=True)) # commits behind if n > 0: - s += f"⚠️ YOLOv5 is out of date by {n} commit{'s' * (n > 1)}. Use `git pull` or `git clone {url}` to update." + pull = 'git pull' if remote == 'origin' else f'git pull {remote} master' + s += f"⚠️ YOLOv5 is out of date by {n} commit{'s' * (n > 1)}. Use `{pull}` or `git clone {url}` to update." 
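What the new remote matching does, run on canned `git remote -v` output (the fork URL is invented; the closing else-branch of the hunk continues below):

import re

out = ('origin\thttps://github.com/someuser/yolov5 (fetch)\n'
       'upstream\thttps://github.com/ultralytics/yolov5 (fetch)\n')
splits = re.split(pattern=r'\s', string=out)
matches = ['ultralytics/yolov5' in s for s in splits]
remote = splits[matches.index(True) - 1] if any(matches) else 'ultralytics'
print(remote)  # 'upstream', so the recommendation becomes `git pull upstream master`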
else: s += f'up to date with {url} ✅' LOGGER.info(emojis(s)) # emoji-safe From 40c41e42692011f32ce952b44b4bcb4f06e9e0b0 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 31 Jul 2022 19:57:40 +0200 Subject: [PATCH 1250/1976] Fix Colab-update pre-commit EOF bug (#8810) --- .pre-commit-config.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 9b8f28c77506..97da994e2917 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,6 +16,7 @@ repos: rev: v4.3.0 hooks: - id: end-of-file-fixer + stages: [commit] # avoid Colab update EOF issues - id: trailing-whitespace - id: check-case-conflict - id: check-yaml From 685332ede482488cec13a3d6c429d4f1e9b34960 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 31 Jul 2022 20:06:35 +0200 Subject: [PATCH 1251/1976] Update .pre-commit-config.yaml (#8811) --- .pre-commit-config.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 97da994e2917..fe26ed5a93a5 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -15,8 +15,9 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.3.0 hooks: + - id: no-commit-to-branch + args: ['--branch', 'master'] - id: end-of-file-fixer - stages: [commit] # avoid Colab update EOF issues - id: trailing-whitespace - id: check-case-conflict - id: check-yaml From 0e165c50f79a8ac4286d1920ca7a48220dc5a9db Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 31 Jul 2022 20:34:03 +0200 Subject: [PATCH 1252/1976] Created using Colaboratory --- tutorial.ipynb | 314 +++++++++++++++++++++++++------------------------ 1 file changed, 160 insertions(+), 154 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index dcb1162b40af..b5cb4964aa6b 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -16,7 +16,7 @@ "accelerator": "GPU", "widgets": { "application/vnd.jupyter.widget-state+json": { - "572de771c7b34c1481def33bd5ed690d": { + "c79427d84662495db06b89a791d61f31": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "model_module_version": "1.5.0", @@ -31,14 +31,14 @@ "_view_name": "HBoxView", "box_style": "", "children": [ - "IPY_MODEL_20c89dc0d82a4bdf8756bf5e34152292", - "IPY_MODEL_61026f684725441db2a640e531807675", - "IPY_MODEL_8d2e16d90e13449598d7b3fac75f78a3" + "IPY_MODEL_469c8e5ae4d64adea773341ec22d5851", + "IPY_MODEL_2435573a321341878622d79e1f48f3db", + "IPY_MODEL_a4dcb697b08b4b70ab3ef3ffa54c28e4" ], - "layout": "IPY_MODEL_a09d90f1bd374ece9a29bc6cfe07c072" + "layout": "IPY_MODEL_87495c10d22c4b82bd724a4d7c300df3" } }, - "20c89dc0d82a4bdf8756bf5e34152292": { + "469c8e5ae4d64adea773341ec22d5851": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", @@ -53,13 +53,13 @@ "_view_name": "HTMLView", "description": "", "description_tooltip": null, - "layout": "IPY_MODEL_801e720897804703b4d32f99f84cc3b8", + "layout": "IPY_MODEL_098c321358c24cdbb50f6c0e6623bf6c", "placeholder": "​", - "style": "IPY_MODEL_c9fb2e268cc94d508d909b3b72ac9df3", + "style": "IPY_MODEL_20184030ca9d4aef9dac0a149b89e4d3", "value": "100%" } }, - "61026f684725441db2a640e531807675": { + "2435573a321341878622d79e1f48f3db": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "model_module_version": "1.5.0", @@ -75,15 +75,15 @@ "bar_style": "success", "description": "", "description_tooltip": null, - "layout": "IPY_MODEL_bfbc16e88df24fae93e8c80538e78273", + "layout": 
"IPY_MODEL_790808c9b4fb448aa136cc1ade0f95b5", "max": 818322941, "min": 0, "orientation": "horizontal", - "style": "IPY_MODEL_d9ffa50bddb7455ca4d67ec220c4a10c", + "style": "IPY_MODEL_99b822fd56b749318b38d8ccbc4ac469", "value": 818322941 } }, - "8d2e16d90e13449598d7b3fac75f78a3": { + "a4dcb697b08b4b70ab3ef3ffa54c28e4": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", @@ -98,13 +98,13 @@ "_view_name": "HTMLView", "description": "", "description_tooltip": null, - "layout": "IPY_MODEL_8be83ee30f804775aa55aeb021bf515b", + "layout": "IPY_MODEL_d542739146774953954e92db1666b951", "placeholder": "​", - "style": "IPY_MODEL_78e5b8dba72942bfacfee54ceec53784", - "value": " 780M/780M [01:28<00:00, 9.08MB/s]" + "style": "IPY_MODEL_e11f3a2c51204778832631a5f150b21d", + "value": " 780M/780M [02:31<00:00, 4.89MB/s]" } }, - "a09d90f1bd374ece9a29bc6cfe07c072": { + "87495c10d22c4b82bd724a4d7c300df3": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -156,7 +156,7 @@ "width": null } }, - "801e720897804703b4d32f99f84cc3b8": { + "098c321358c24cdbb50f6c0e6623bf6c": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -208,7 +208,7 @@ "width": null } }, - "c9fb2e268cc94d508d909b3b72ac9df3": { + "20184030ca9d4aef9dac0a149b89e4d3": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", @@ -223,7 +223,7 @@ "description_width": "" } }, - "bfbc16e88df24fae93e8c80538e78273": { + "790808c9b4fb448aa136cc1ade0f95b5": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -275,7 +275,7 @@ "width": null } }, - "d9ffa50bddb7455ca4d67ec220c4a10c": { + "99b822fd56b749318b38d8ccbc4ac469": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "model_module_version": "1.5.0", @@ -291,7 +291,7 @@ "description_width": "" } }, - "8be83ee30f804775aa55aeb021bf515b": { + "d542739146774953954e92db1666b951": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -343,7 +343,7 @@ "width": null } }, - "78e5b8dba72942bfacfee54ceec53784": { + "e11f3a2c51204778832631a5f150b21d": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", @@ -403,7 +403,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "4bf03330-c2e8-43ec-c5da-b7f5e0b2b123" + "outputId": "7728cbd8-6240-4814-e8fe-a223b9e57ed9" }, "source": [ "!git clone https://github.com/ultralytics/yolov5 # clone\n", @@ -414,20 +414,20 @@ "import utils\n", "display = utils.notebook_init() # checks" ], - "execution_count": null, + "execution_count": 1, "outputs": [ { "output_type": "stream", "name": "stderr", "text": [ - "YOLOv5 🚀 v6.1-257-g669f707 Python-3.7.13 torch-1.11.0+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n" + "YOLOv5 🚀 v6.1-343-g685332e Python-3.7.13 torch-1.12.0+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ - "Setup complete ✅ (8 CPUs, 51.0 GB RAM, 38.8/166.8 GB disk)\n" + "Setup complete ✅ (8 CPUs, 51.0 GB RAM, 38.6/166.8 GB disk)\n" ] } ] @@ -460,29 +460,29 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "1d1bb361-c8f3-4ddd-8a19-864bb993e7ac" + "outputId": "2d81665e-a0c4-489a-c92e-fe815223adfb" }, "source": [ "!python detect.py --weights yolov5s.pt --img 
640 --conf 0.25 --source data/images\n", - "display.Image(filename='runs/detect/exp/zidane.jpg', width=600)" + "#display.Image(filename='runs/detect/exp/zidane.jpg', width=600)" ], - "execution_count": null, + "execution_count": 2, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "\u001b[34m\u001b[1mdetect: \u001b[0mweights=['yolov5s.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/detect, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False\n", - "YOLOv5 🚀 v6.1-257-g669f707 Python-3.7.13 torch-1.11.0+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", + "YOLOv5 🚀 v6.1-343-g685332e Python-3.7.13 torch-1.12.0+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", "\n", "Downloading https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5s.pt to yolov5s.pt...\n", - "100% 14.1M/14.1M [00:00<00:00, 225MB/s]\n", + "100% 14.1M/14.1M [00:02<00:00, 6.87MB/s]\n", "\n", "Fusing layers... \n", "YOLOv5s summary: 213 layers, 7225885 parameters, 0 gradients\n", - "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, Done. (0.013s)\n", - "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, Done. (0.015s)\n", - "Speed: 0.6ms pre-process, 14.1ms inference, 23.9ms NMS per image at shape (1, 3, 640, 640)\n", + "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, Done. (0.014s)\n", + "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, Done. (0.019s)\n", + "Speed: 0.5ms pre-process, 16.3ms inference, 22.1ms NMS per image at shape (1, 3, 640, 640)\n", "Results saved to \u001b[1mruns/detect/exp\u001b[0m\n" ] } @@ -526,27 +526,27 @@ "base_uri": "https://localhost:8080/", "height": 49, "referenced_widgets": [ - "572de771c7b34c1481def33bd5ed690d", - "20c89dc0d82a4bdf8756bf5e34152292", - "61026f684725441db2a640e531807675", - "8d2e16d90e13449598d7b3fac75f78a3", - "a09d90f1bd374ece9a29bc6cfe07c072", - "801e720897804703b4d32f99f84cc3b8", - "c9fb2e268cc94d508d909b3b72ac9df3", - "bfbc16e88df24fae93e8c80538e78273", - "d9ffa50bddb7455ca4d67ec220c4a10c", - "8be83ee30f804775aa55aeb021bf515b", - "78e5b8dba72942bfacfee54ceec53784" + "c79427d84662495db06b89a791d61f31", + "469c8e5ae4d64adea773341ec22d5851", + "2435573a321341878622d79e1f48f3db", + "a4dcb697b08b4b70ab3ef3ffa54c28e4", + "87495c10d22c4b82bd724a4d7c300df3", + "098c321358c24cdbb50f6c0e6623bf6c", + "20184030ca9d4aef9dac0a149b89e4d3", + "790808c9b4fb448aa136cc1ade0f95b5", + "99b822fd56b749318b38d8ccbc4ac469", + "d542739146774953954e92db1666b951", + "e11f3a2c51204778832631a5f150b21d" ] }, - "outputId": "47c358af-138d-42d9-ca89-4364283df9e3" + "outputId": "d880071b-84ce-4567-9e42-a3c3a78bff73" }, "source": [ "# Download COCO val\n", "torch.hub.download_url_to_file('https://ultralytics.com/assets/coco2017val.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../datasets && rm tmp.zip" ], - "execution_count": null, + "execution_count": 3, "outputs": [ { "output_type": "display_data", @@ -557,7 +557,7 @@ "application/vnd.jupyter.widget-view+json": { "version_major": 2, "version_minor": 0, - "model_id": "572de771c7b34c1481def33bd5ed690d" + "model_id": "c79427d84662495db06b89a791d61f31" } }, "metadata": {} @@ -571,53 +571,53 @@ "colab": { "base_uri": "https://localhost:8080/" }, - 
"outputId": "979fe4c2-a058-44de-b401-3cb67878a1b9" + "outputId": "da9456fa-6663-44a8-975b-c99e89d0eb06" }, "source": [ "# Run YOLOv5x on COCO val\n", "!python val.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65 --half" ], - "execution_count": null, + "execution_count": 4, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "\u001b[34m\u001b[1mval: \u001b[0mdata=/content/yolov5/data/coco.yaml, weights=['yolov5x.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.65, task=val, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=True, project=runs/val, name=exp, exist_ok=False, half=True, dnn=False\n", - "YOLOv5 🚀 v6.1-257-g669f707 Python-3.7.13 torch-1.11.0+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", + "YOLOv5 🚀 v6.1-343-g685332e Python-3.7.13 torch-1.12.0+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", "\n", "Downloading https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5x.pt to yolov5x.pt...\n", - "100% 166M/166M [00:04<00:00, 39.4MB/s]\n", + "100% 166M/166M [00:16<00:00, 10.3MB/s]\n", "\n", "Fusing layers... \n", "YOLOv5x summary: 444 layers, 86705005 parameters, 0 gradients\n", "Downloading https://ultralytics.com/assets/Arial.ttf to /root/.config/Ultralytics/Arial.ttf...\n", - "100% 755k/755k [00:00<00:00, 47.9MB/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupt: 100% 5000/5000 [00:00<00:00, 8742.34it/s]\n", + "100% 755k/755k [00:00<00:00, 14.8MB/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupt: 100% 5000/5000 [00:00<00:00, 11214.34it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: /content/datasets/coco/val2017.cache\n", - " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:11<00:00, 2.21it/s]\n", + " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:05<00:00, 2.39it/s]\n", " all 5000 36335 0.743 0.625 0.683 0.504\n", - "Speed: 0.1ms pre-process, 4.9ms inference, 1.2ms NMS per image at shape (32, 3, 640, 640)\n", + "Speed: 0.1ms pre-process, 4.7ms inference, 1.1ms NMS per image at shape (32, 3, 640, 640)\n", "\n", "Evaluating pycocotools mAP... 
saving runs/val/exp/yolov5x_predictions.json...\n", "loading annotations into memory...\n", - "Done (t=0.42s)\n", + "Done (t=0.38s)\n", "creating index...\n", "index created!\n", "Loading and preparing results...\n", - "DONE (t=4.91s)\n", + "DONE (t=5.39s)\n", "creating index...\n", "index created!\n", "Running per image evaluation...\n", "Evaluate annotation type *bbox*\n", - "DONE (t=77.89s).\n", + "DONE (t=71.33s).\n", "Accumulating evaluation results...\n", - "DONE (t=15.36s).\n", + "DONE (t=12.45s).\n", " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.506\n", " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.688\n", " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.549\n", " Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.340\n", - " Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.557\n", + " Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.558\n", " Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.651\n", " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.382\n", " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.631\n", @@ -731,26 +731,31 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "be9424b5-34d6-4de0-e951-2c5ae334721e" + "outputId": "9fe5caba-6b0f-4b6e-93a8-4075dae0ee35" }, "source": [ "# Train YOLOv5s on COCO128 for 3 epochs\n", "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache" ], - "execution_count": null, + "execution_count": 5, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ - "\u001b[34m\u001b[1mtrain: \u001b[0mweights=yolov5s.pt, cfg=, data=coco128.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=-1, artifact_alias=latest\n", - "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 🚀 v6.1-257-g669f707 Python-3.7.13 torch-1.11.0+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mweights=yolov5s.pt, cfg=, data=coco128.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=-1, artifact_alias=latest\n", + "\u001b[34m\u001b[1mgithub: \u001b[0mskipping check (Docker image), for updates see https://github.com/ultralytics/yolov5\n", + "YOLOv5 🚀 v6.1-343-g685332e Python-3.7.13 torch-1.12.0+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", "\n", "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, 
box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n", "\u001b[34m\u001b[1mWeights & Biases: \u001b[0mrun 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs (RECOMMENDED)\n", "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n", "\n", + "Dataset not found ⚠️, missing paths ['/content/datasets/coco128/images/train2017']\n", + "Downloading https://ultralytics.com/assets/coco128.zip to coco128.zip...\n", + "100% 6.66M/6.66M [00:00<00:00, 31.8MB/s]\n", + "Dataset download success ✅ (1.5s), saved to \u001b[1m/content/datasets\u001b[0m\n", + "\n", " from n params module arguments \n", " 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2] \n", " 1 -1 1 18560 models.common.Conv [32, 64, 3, 2] \n", @@ -777,17 +782,18 @@ " 22 [-1, 10] 1 0 models.common.Concat [1] \n", " 23 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n", " 24 [17, 20, 23] 1 229245 models.yolo.Detect [80, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n", - "Model summary: 270 layers, 7235389 parameters, 7235389 gradients\n", + "Model summary: 270 layers, 7235389 parameters, 7235389 gradients, 16.6 GFLOPs\n", "\n", "Transferred 349/349 items from yolov5s.pt\n", "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n", "Scaled weight_decay = 0.0005\n", "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD with parameter groups 57 weight (no decay), 60 weight, 60 bias\n", "\u001b[34m\u001b[1malbumentations: \u001b[0mversion 1.0.3 required by YOLOv5, but version 0.1.12 is currently installed\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '/content/datasets/coco128/labels/train2017.cache' images and labels... 
128 found, 0 missing, 2 empty, 0 corrupt: 100% 128/128 [00:00 Date: Mon, 1 Aug 2022 02:09:36 +0200 Subject: [PATCH 1253/1976] Update .pre-commit-config.yaml (#8812) * Update .pre-commit-config.yaml Comment EOF fixer * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index fe26ed5a93a5..76716d160ac1 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -15,9 +15,7 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.3.0 hooks: - - id: no-commit-to-branch - args: ['--branch', 'master'] - - id: end-of-file-fixer + # - id: end-of-file-fixer - id: trailing-whitespace - id: check-case-conflict - id: check-yaml From 39ce8ca19a1b97e36c73d86ecc70c2c3e42ac5c0 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 1 Aug 2022 03:01:44 +0200 Subject: [PATCH 1254/1976] Remove `assert not is_docker()` from GitHub checks (#8813) * Update * Update --- utils/general.py | 1 - 1 file changed, 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index bab0a5d9ab34..22181d3faeb9 100755 --- a/utils/general.py +++ b/utils/general.py @@ -316,7 +316,6 @@ def check_git_status(repo='ultralytics/yolov5'): msg = f', for updates see {url}' s = colorstr('github: ') # string assert Path('.git').exists(), s + 'skipping check (not a git repository)' + msg - assert not is_docker(), s + 'skipping check (Docker image)' + msg assert check_online(), s + 'skipping check (offline)' + msg splits = re.split(pattern=r'\s', string=check_output('git remote -v', shell=True).decode()) From 7b9cc3205ae2cd9fdb0a56ca2818e17c5ae8346e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 1 Aug 2022 03:33:28 +0200 Subject: [PATCH 1255/1976] Add .git to .dockerignore (#8815) --- .dockerignore | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.dockerignore b/.dockerignore index af51ccc3d8df..3b669254e779 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,5 +1,5 @@ # Repo-specific DockerIgnore ------------------------------------------------------------------------------------------- -#.git +.git .cache .idea runs From 0669f1b27bbdcbdbb0e2baf4e9f09c6fc8337ec7 Mon Sep 17 00:00:00 2001 From: UnglvKitDe <100289696+UnglvKitDe@users.noreply.github.com> Date: Mon, 1 Aug 2022 12:08:46 +0200 Subject: [PATCH 1256/1976] Add tensor hooks and 10.0 gradient clipping (#8598) * Add tensor hooks and gradient clipping https://github.com/ultralytics/yolov5/issues/8578 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Remove retain_grad(), because its not necessary * Update train.py * Simplify * Update train.py * Update train.py * Update train.py * Update train.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- train.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/train.py b/train.py index dc93c22d621a..6ada2a2f121b 100644 --- a/train.py +++ b/train.py @@ -131,6 +131,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio freeze = [f'model.{x}.' 
for x in (freeze if len(freeze) > 1 else range(freeze[0]))] # layers to freeze for k, v in model.named_parameters(): v.requires_grad = True # train all layers + v.register_hook(lambda x: torch.nan_to_num(x)) # NaN to 0.0 if any(x in k for x in freeze): LOGGER.info(f'freezing {k}') v.requires_grad = False @@ -334,8 +335,10 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio # Backward scaler.scale(loss).backward() - # Optimize + # Optimize - https://pytorch.org/docs/master/notes/amp_examples.html if ni - last_opt_step >= accumulate: + scaler.unscale_(optimizer) # unscale gradients + torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0) # clip gradients scaler.step(optimizer) # optimizer.step scaler.update() optimizer.zero_grad() From 59578f2782cfbf4fe2b270a1c533f45b7cbbd56f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 1 Aug 2022 20:28:24 +0200 Subject: [PATCH 1257/1976] Update README.md with contributors.png (#8820) * Update README.md with contributors.png Replace dynamic svg from opencollective with static png for improved stability and lighter size (400kB vs 2MB). @AyushExel * Update README.md * Update README.md * Update README_cn.md * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/README_cn.md | 4 +++- README.md | 3 ++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/.github/README_cn.md b/.github/README_cn.md index 7e90336d5157..b653d435cfd1 100644 --- a/.github/README_cn.md +++ b/.github/README_cn.md @@ -249,7 +249,9 @@ We are super excited about our first-ever Ultralytics YOLOv5 🚀 EXPORT Competi ##
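Back to the AMP change in #8598 above: the unscale-then-clip ordering follows the linked PyTorch AMP examples. A self-contained sketch of the same pattern on a toy model (CUDA assumed):

import torch

model = torch.nn.Linear(10, 2).cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
scaler = torch.cuda.amp.GradScaler()
x, y = torch.randn(4, 10, device='cuda'), torch.randn(4, 2, device='cuda')
with torch.cuda.amp.autocast():
    loss = torch.nn.functional.mse_loss(model(x), y)
scaler.scale(loss).backward()
scaler.unscale_(optimizer)  # bring gradients back to true scale before measuring norms
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0)  # same 10.0 cap as train.py
scaler.step(optimizer)  # skipped internally if inf/NaN gradients remain
scaler.update()
optimizer.zero_grad()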
贡献
我们重视您的意见! 我们希望给大家提供尽可能的简单和透明的方式对 YOLOv5 做出贡献。开始之前请先点击并查看我们的 [贡献指南](CONTRIBUTING.md),填写[YOLOv5调查问卷](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) 来向我们发送您的经验反馈。真诚感谢我们所有的贡献者! - + + + ##
联系
diff --git a/README.md b/README.md index b0ea0a5d814c..b959871211e5 100644 --- a/README.md +++ b/README.md @@ -259,7 +259,8 @@ We are super excited about our first-ever Ultralytics YOLOv5 🚀 EXPORT Competi We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible. Please see our [Contributing Guide](CONTRIBUTING.md) to get started, and fill out the [YOLOv5 Survey](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experiences. Thank you to all our contributors! - + + ##
Contact
From f3c78a387e9a344b903fbd7bd12bfab2ea292351 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 1 Aug 2022 21:39:04 +0200 Subject: [PATCH 1258/1976] Remove hook `torch.nan_to_num(x)` (#8826) * Remove hook `torch.nan_to_num(x)` Observed erratic training behavior (green line) with the nan_to_num hook in classifier branch. I'm going to remove it from master. * Update train.py --- train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/train.py b/train.py index 6ada2a2f121b..20fef265110c 100644 --- a/train.py +++ b/train.py @@ -131,7 +131,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio freeze = [f'model.{x}.' for x in (freeze if len(freeze) > 1 else range(freeze[0]))] # layers to freeze for k, v in model.named_parameters(): v.requires_grad = True # train all layers - v.register_hook(lambda x: torch.nan_to_num(x)) # NaN to 0.0 + # v.register_hook(lambda x: torch.nan_to_num(x)) # NaN to 0 (commented for erratic training results) if any(x in k for x in freeze): LOGGER.info(f'freezing {k}') v.requires_grad = False From ba140e568555503c54a66c974e15922da9422f1a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 1 Aug 2022 21:45:31 +0200 Subject: [PATCH 1259/1976] RUN git clone instead of COPY to `/usr/src/app` (#8827) Update --- utils/docker/Dockerfile | 4 ++-- utils/docker/Dockerfile-arm64 | 4 ++-- utils/docker/Dockerfile-cpu | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index 0e0d82225bc4..2280f209e6a1 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -25,8 +25,8 @@ RUN mkdir -p /usr/src/app WORKDIR /usr/src/app # Copy contents -COPY . /usr/src/app -RUN git clone https://github.com/ultralytics/yolov5 /usr/src/yolov5 +# COPY . /usr/src/app (issues as not a .git directory) +RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app # Set environment variables ENV OMP_NUM_THREADS=8 diff --git a/utils/docker/Dockerfile-arm64 b/utils/docker/Dockerfile-arm64 index bca161e67a37..fe92c8d56146 100644 --- a/utils/docker/Dockerfile-arm64 +++ b/utils/docker/Dockerfile-arm64 @@ -29,8 +29,8 @@ RUN mkdir -p /usr/src/app WORKDIR /usr/src/app # Copy contents -COPY . /usr/src/app -RUN git clone https://github.com/ultralytics/yolov5 /usr/src/yolov5 +# COPY . /usr/src/app (issues as not a .git directory) +RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app # Usage Examples ------------------------------------------------------------------------------------------------------- diff --git a/utils/docker/Dockerfile-cpu b/utils/docker/Dockerfile-cpu index f05e920ad53f..d61dfeffe22c 100644 --- a/utils/docker/Dockerfile-cpu +++ b/utils/docker/Dockerfile-cpu @@ -26,8 +26,8 @@ RUN mkdir -p /usr/src/app WORKDIR /usr/src/app # Copy contents -COPY . /usr/src/app -RUN git clone https://github.com/ultralytics/yolov5 /usr/src/yolov5 +# COPY . 
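What the now-commented hook did, reduced to a toy tensor (a sketch only; the hook rewrote NaN gradients to 0.0 before they reached the optimizer):

import torch

x = torch.tensor(1.0, requires_grad=True)
x.register_hook(torch.nan_to_num)  # the removed per-parameter hook
(x * float('nan')).backward()
print(x.grad)  # tensor(0.) instead of tensor(nan)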
/usr/src/app (issues as not a .git directory) +RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app # Usage Examples ------------------------------------------------------------------------------------------------------- From b7635efb6ee953615b4ca7d13017d79511ccd3be Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 1 Aug 2022 21:48:59 +0200 Subject: [PATCH 1260/1976] [pre-commit.ci] pre-commit suggestions (#8828) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/asottile/pyupgrade: v2.34.0 → v2.37.3](https://github.com/asottile/pyupgrade/compare/v2.34.0...v2.37.3) - [github.com/PyCQA/flake8: 4.0.1 → 5.0.2](https://github.com/PyCQA/flake8/compare/4.0.1...5.0.2) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 76716d160ac1..43aca019feb1 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -24,7 +24,7 @@ repos: - id: check-docstring-first - repo: https://github.com/asottile/pyupgrade - rev: v2.34.0 + rev: v2.37.3 hooks: - id: pyupgrade name: Upgrade code @@ -58,7 +58,7 @@ repos: - id: yesqa - repo: https://github.com/PyCQA/flake8 - rev: 4.0.1 + rev: 5.0.2 hooks: - id: flake8 name: PEP8 From 2e10909905b1e0e7eb7bac086600fe7ee2c0e6a5 Mon Sep 17 00:00:00 2001 From: Jackson Argo Date: Mon, 1 Aug 2022 19:46:08 -0400 Subject: [PATCH 1261/1976] Fix missing attr model.model when loading custom yolov model (#8830) * Update hubconf.py Loading a custom yolov model causes this line to fail. Adding a test to check if the model actually has a model.model field. With this check, I'm able to load the model no prob. 
Loading model via ```py model = torch.hub.load( 'ultralytics/yolov5', 'custom', 'models/frozen_backbone_coco_unlabeled_best.onnx', autoshape=True, force_reload=False ) ``` Causes traceback: ``` Traceback (most recent call last): File "/Users/jackson/Documents/GitHub/w210-capstone/.venv/lib/python3.10/site-packages/flask/app.py", line 2077, in wsgi_app response = self.full_dispatch_request() File "/Users/jackson/Documents/GitHub/w210-capstone/.venv/lib/python3.10/site-packages/flask/app.py", line 1525, in full_dispatch_request rv = self.handle_user_exception(e) File "/Users/jackson/Documents/GitHub/w210-capstone/.venv/lib/python3.10/site-packages/flask/app.py", line 1523, in full_dispatch_request rv = self.dispatch_request() File "/Users/jackson/Documents/GitHub/w210-capstone/.venv/lib/python3.10/site-packages/flask/app.py", line 1509, in dispatch_request return self.ensure_sync(self.view_functions[rule.endpoint])(**req.view_args) File "/Users/jackson/Documents/GitHub/w210-capstone/api/endpoints/predictions.py", line 26, in post_predictions yolov_predictions = predict_bounding_boxes_for_collection(collection_id) File "/Users/jackson/Documents/GitHub/w210-capstone/api/predictions/predict_bounding_boxes.py", line 43, in predict_bounding_boxes_for_collection model = torch.hub.load( File "/Users/jackson/Documents/GitHub/w210-capstone/.venv/lib/python3.10/site-packages/torch/hub.py", line 404, in load model = _load_local(repo_or_dir, model, *args, **kwargs) File "/Users/jackson/Documents/GitHub/w210-capstone/.venv/lib/python3.10/site-packages/torch/hub.py", line 433, in _load_local model = entry(*args, **kwargs) File "/Users/jackson/.cache/torch/hub/ultralytics_yolov5_master/hubconf.py", line 72, in custom return _create(path, autoshape=autoshape, verbose=_verbose, device=device) File "/Users/jackson/.cache/torch/hub/ultralytics_yolov5_master/hubconf.py", line 67, in _create raise Exception(s) from e Exception: 'DetectMultiBackend' object has no attribute 'model'. Cache may be out of date, try `force_reload=True` or see https://github.com/ultralytics/yolov5/issues/36 for help. 
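With the fix shown just below, this same `torch.hub.load(..., 'custom', ...)` call succeeds: `_create()` now wraps DetectMultiBackend in a try/except with an `attempt_load()` fallback, and AutoShape only touches the Detect layer for PyTorch checkpoints. The reporter's ONNX path is kept purely as an illustration:

import torch

model = torch.hub.load(
    'ultralytics/yolov5', 'custom',
    'models/frozen_backbone_coco_unlabeled_best.onnx',  # any custom weights file
    autoshape=True, force_reload=False)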
Exception on /api/v1/predictions [POST] Traceback (most recent call last): File "/Users/jackson/.cache/torch/hub/ultralytics_yolov5_master/hubconf.py", line 58, in _create model.model.model[-1].inplace = False # Detect.inplace=False for safe multithread inference File "/Users/jackson/Documents/GitHub/w210-capstone/.venv/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1185, in __getattr__ raise AttributeError("'{}' object has no attribute '{}'".format( AttributeError: 'DetectMultiBackend' object has no attribute 'model' ``` * Update hubconf.py * Update common.py Co-authored-by: Glenn Jocher --- hubconf.py | 12 +++++++----- models/common.py | 3 +++ 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/hubconf.py b/hubconf.py index 5bb629005597..011eaa57ff34 100644 --- a/hubconf.py +++ b/hubconf.py @@ -29,6 +29,7 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo from pathlib import Path from models.common import AutoShape, DetectMultiBackend + from models.experimental import attempt_load from models.yolo import Model from utils.downloads import attempt_download from utils.general import LOGGER, check_requirements, intersect_dicts, logging @@ -42,8 +43,12 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo try: device = select_device(device) if pretrained and channels == 3 and classes == 80: - model = DetectMultiBackend(path, device=device, fuse=autoshape) # download/load FP32 model - # model = models.experimental.attempt_load(path, map_location=device) # download/load FP32 model + try: + model = DetectMultiBackend(path, device=device, fuse=autoshape) # detection model + if autoshape: + model = AutoShape(model) # for file/URI/PIL/cv2/np inputs and NMS + except Exception: + model = attempt_load(path, device=device, fuse=False) # arbitrary model else: cfg = list((Path(__file__).parent / 'models').rglob(f'{path.stem}.yaml'))[0] # model.yaml path model = Model(cfg, channels, classes) # create model @@ -54,9 +59,6 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo model.load_state_dict(csd, strict=False) # load if len(ckpt['model'].names) == classes: model.names = ckpt['model'].names # set class names attribute - if autoshape: - model.model.model[-1].inplace = False # Detect.inplace=False for safe multithread inference - model = AutoShape(model) # for file/URI/PIL/cv2/np inputs and NMS if not verbose: LOGGER.setLevel(logging.INFO) # reset to default return model.to(device) diff --git a/models/common.py b/models/common.py index 959c965e6002..c898d94a921a 100644 --- a/models/common.py +++ b/models/common.py @@ -562,6 +562,9 @@ def __init__(self, model, verbose=True): self.dmb = isinstance(model, DetectMultiBackend) # DetectMultiBackend() instance self.pt = not self.dmb or model.pt # PyTorch model self.model = model.eval() + if self.pt: + m = self.model.model.model[-1] if self.dmb else self.model.model[-1] # Detect() + m.inplace = False # Detect.inplace=False for safe multithread inference def _apply(self, fn): # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers From 08c8c3e00a1b0fc7f03a7e76ca3cbf7a0d8542ae Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 2 Aug 2022 15:13:58 +0200 Subject: [PATCH 1262/1976] New `smart_resume()` (#8838) * New `smart_resume()` * Update torch_utils.py * Update torch_utils.py * Update torch_utils.py * fix --- train.py | 33 ++++++--------------------------- utils/torch_utils.py | 19 
+++++++++++++++++++ 2 files changed, 25 insertions(+), 27 deletions(-) diff --git a/train.py b/train.py index 20fef265110c..99a43f8614c4 100644 --- a/train.py +++ b/train.py @@ -54,7 +54,7 @@ from utils.metrics import fitness from utils.plots import plot_evolve, plot_labels from utils.torch_utils import (EarlyStopping, ModelEMA, de_parallel, select_device, smart_DDP, smart_optimizer, - torch_distributed_zero_first) + smart_resume, torch_distributed_zero_first) LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html RANK = int(os.getenv('RANK', -1)) @@ -163,26 +163,9 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio ema = ModelEMA(model) if RANK in {-1, 0} else None # Resume - start_epoch, best_fitness = 0, 0.0 + best_fitness, start_epoch = 0.0, 0 if pretrained: - # Optimizer - if ckpt['optimizer'] is not None: - optimizer.load_state_dict(ckpt['optimizer']) - best_fitness = ckpt['best_fitness'] - - # EMA - if ema and ckpt.get('ema'): - ema.ema.load_state_dict(ckpt['ema'].float().state_dict()) - ema.updates = ckpt['updates'] - - # Epochs - start_epoch = ckpt['epoch'] + 1 - if resume: - assert start_epoch > 0, f'{weights} training to {epochs} epochs is finished, nothing to resume.' - if epochs < start_epoch: - LOGGER.info(f"{weights} has been trained for {ckpt['epoch']} epochs. Fine-tuning for {epochs} more epochs.") - epochs += ckpt['epoch'] # finetune additional epochs - + best_fitness, start_epoch, epochs = smart_resume(ckpt, optimizer, ema, weights, epochs, resume) del ckpt, csd # DP mode @@ -212,8 +195,8 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio quad=opt.quad, prefix=colorstr('train: '), shuffle=True) - mlc = int(np.concatenate(dataset.labels, 0)[:, 0].max()) # max label class - nb = len(train_loader) # number of batches + labels = np.concatenate(dataset.labels, 0) + mlc = int(labels[:, 0].max()) # max label class assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. Possible class labels are 0-{nc - 1}' # Process 0 @@ -232,10 +215,6 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio prefix=colorstr('val: '))[0] if not resume: - labels = np.concatenate(dataset.labels, 0) - # c = torch.tensor(labels[:, 0]) # classes - # cf = torch.bincount(c.long(), minlength=nc) + 1. 
# frequency - # model._initialize_biases(cf.to(device)) if plots: plot_labels(labels, names, save_dir) @@ -263,6 +242,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio # Start training t0 = time.time() + nb = len(train_loader) # number of batches nw = max(round(hyp['warmup_epochs'] * nb), 100) # number of warmup iterations, max(3 epochs, 100 iterations) # nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training last_opt_step = -1 @@ -510,7 +490,6 @@ def main(opt, callbacks=Callbacks()): with open(Path(ckpt).parent.parent / 'opt.yaml', errors='ignore') as f: opt = argparse.Namespace(**yaml.safe_load(f)) # replace opt.cfg, opt.weights, opt.resume = '', ckpt, True # reinstate - LOGGER.info(f'Resuming training from {ckpt}') else: opt.data, opt.cfg, opt.hyp, opt.weights, opt.project = \ check_file(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp), str(opt.weights), str(opt.project) # checks diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 5f2a22c36f1a..391ddead2985 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -306,6 +306,25 @@ def smart_optimizer(model, name='Adam', lr=0.001, momentum=0.9, weight_decay=1e- return optimizer +def smart_resume(ckpt, optimizer, ema=None, weights='yolov5s.pt', epochs=300, resume=True): + # Resume training from a partially trained checkpoint + best_fitness = 0.0 + start_epoch = ckpt['epoch'] + 1 + if ckpt['optimizer'] is not None: + optimizer.load_state_dict(ckpt['optimizer']) # optimizer + best_fitness = ckpt['best_fitness'] + if ema and ckpt.get('ema'): + ema.ema.load_state_dict(ckpt['ema'].float().state_dict()) # EMA + ema.updates = ckpt['updates'] + if resume: + assert start_epoch > 0, f'{weights} training to {epochs} epochs is finished, nothing to resume.' + LOGGER.info(f'Resuming training from {weights} for {epochs - start_epoch} more epochs to {epochs} total epochs') + if epochs < start_epoch: + LOGGER.info(f"{weights} has been trained for {ckpt['epoch']} epochs. 
Fine-tuning for {epochs} more epochs.") + epochs += ckpt['epoch'] # finetune additional epochs + return best_fitness, start_epoch, epochs + + class EarlyStopping: # YOLOv5 simple early stopper def __init__(self, patience=30): From e5991c986725d1229b6d1f5b1533e10f9b41c850 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 3 Aug 2022 00:57:40 +0200 Subject: [PATCH 1263/1976] Created using Colaboratory --- tutorial.ipynb | 30 +++++++++++++----------------- 1 file changed, 13 insertions(+), 17 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index b5cb4964aa6b..83be1039f22f 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -414,7 +414,7 @@ "import utils\n", "display = utils.notebook_init() # checks" ], - "execution_count": 1, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -466,7 +466,7 @@ "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images\n", "#display.Image(filename='runs/detect/exp/zidane.jpg', width=600)" ], - "execution_count": 2, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -546,7 +546,7 @@ "torch.hub.download_url_to_file('https://ultralytics.com/assets/coco2017val.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../datasets && rm tmp.zip" ], - "execution_count": 3, + "execution_count": null, "outputs": [ { "output_type": "display_data", @@ -577,7 +577,7 @@ "# Run YOLOv5x on COCO val\n", "!python val.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65 --half" ], - "execution_count": 4, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -737,7 +737,7 @@ "# Train YOLOv5s on COCO128 for 3 epochs\n", "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache" ], - "execution_count": 5, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -917,13 +917,14 @@ "id": "DLI1JmHU7B0l" }, "source": [ - "## Weights & Biases Logging 🌟 NEW\n", + "## Weights & Biases Logging\n", "\n", - "[Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_notebook) (W&B) is now integrated with YOLOv5 for real-time visualization and cloud logging of training runs. This allows for better run comparison and introspection, as well improved visibility and collaboration for teams. To enable W&B `pip install wandb`, and then train normally (you will be guided through setup on first use). \n", + "[Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_notebook) (W&B) is integrated with YOLOv5 for real-time visualization and cloud logging of training runs. This allows for better run comparison and introspection, as well as improved visibility and collaboration for teams. To enable W&B `pip install wandb`, and then train normally (you will be guided through setup on first use). \n", "\n", - "During training you will see live updates at [https://wandb.ai/home](https://wandb.ai/home?utm_campaign=repo_yolo_notebook), and you can create and share detailed [Reports](https://wandb.ai/glenn-jocher/yolov5_tutorial/reports/YOLOv5-COCO128-Tutorial-Results--VmlldzozMDI5OTY) of your results.
For more information see the [YOLOv5 Weights & Biases Tutorial](https://github.com/ultralytics/yolov5/issues/1289). \n", "\n", - "

\"Weights

" + "\n", + "\"Weights" ] }, { @@ -934,16 +935,11 @@ "source": [ "## Local Logging\n", "\n", - "All results are logged by default to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc. View train and val jpgs to see mosaics, labels, predictions and augmentation effects. Note an Ultralytics **Mosaic Dataloader** is used for training (shown below), which combines 4 images into 1 mosaic during training.\n", - "\n", - "> \n", - "`train_batch0.jpg` shows train batch 0 mosaics and labels\n", + "All results are logged by default to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc. View train and val statistics, mosaics, labels, predictions and augmentations, as well as metrics and charts including Precision-Recall curves and Confusion Matrices. \n", "\n", - "> \n", - "`test_batch0_labels.jpg` shows val batch 0 labels\n", + "A **Mosaic Dataloader** is used for training (shown in train*.jpg images), which combines 4 images into 1 mosaic during training.\n", "\n", - "> \n", - "`test_batch0_pred.jpg` shows val batch 0 _predictions_\n", + "\"Local\n", "\n", "Training results are automatically logged to [Tensorboard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) as `results.csv`, which is plotted as `results.png` (below) after training completes. You can also plot any `results.csv` file manually:\n", "\n", From 4d8d84b0ea7147aca64e7c38ce1bdb5fbb9c5a53 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 3 Aug 2022 12:49:02 +0200 Subject: [PATCH 1264/1976] Created using Colaboratory --- tutorial.ipynb | 17 +++++------------ 1 file changed, 5 insertions(+), 12 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 83be1039f22f..2aaa93b53df6 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -686,6 +686,8 @@ "- **Training Results** are saved to `runs/train/` with incrementing run directories, i.e. `runs/train/exp2`, `runs/train/exp3` etc.\n", "

\n", "\n", + "A **Mosaic Dataloader** is used for training which combines 4 images into 1 mosaic.\n", + "\n", "## Train on Custom Data with Roboflow 🌟 NEW\n", "\n", "[Roboflow](https://roboflow.com/?ref=ultralytics) enables you to easily **organize, label, and prepare** a high quality dataset with your own custom data. Roboflow also makes it easy to establish an active learning pipeline, collaborate with your team on dataset improvement, and integrate directly into your model building workflow with the `roboflow` pip package.\n", @@ -935,20 +937,11 @@ "source": [ "## Local Logging\n", "\n", - "All results are logged by default to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc. View train and val statistics, mosaics, labels, predictions and augmentations, as well as metrics and charts including Precision-Recall curves and Confusion Matrices. \n", - "\n", - "A **Mosaic Dataloader** is used for training (shown in train*.jpg images), which combines 4 images into 1 mosaic during training.\n", - "\n", - "\"Local\n", - "\n", - "Training results are automatically logged to [Tensorboard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) as `results.csv`, which is plotted as `results.png` (below) after training completes. You can also plot any `results.csv` file manually:\n", + "Training results are automatically logged with [Tensorboard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) loggers to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc.\n", "\n", - "```python\n", - "from utils.plots import plot_results \n", - "plot_results('path/to/results.csv') # plot 'results.csv' as 'results.png'\n", - "```\n", + "This directory contains train and val statistics, mosaics, labels, predictions and augmentated mosaics, as well as metrics and charts including precision-recall (PR) curves and confusion matrices. 
\n", "\n", - "\"COCO128" + "\"Local\n" ] }, { From a75a1105a1eced888e4b327048775f121436a725 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 3 Aug 2022 21:28:22 +0200 Subject: [PATCH 1265/1976] Self-contained checkpoint `--resume` (#8839) * Single checkpoint resume * Update train.py * Add hyp * Add hyp * Add hyp * FIX * avoid resume on url data * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * avoid resume on url data * avoid resume on url data * Update Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- train.py | 27 ++++++++++++++++++--------- utils/downloads.py | 10 ++++++---- utils/torch_utils.py | 5 +++-- 3 files changed, 27 insertions(+), 15 deletions(-) diff --git a/train.py b/train.py index 99a43f8614c4..17d16dba1531 100644 --- a/train.py +++ b/train.py @@ -43,7 +43,7 @@ from utils.autobatch import check_train_batch_size from utils.callbacks import Callbacks from utils.dataloaders import create_dataloader -from utils.downloads import attempt_download +from utils.downloads import attempt_download, is_url from utils.general import (LOGGER, check_amp, check_dataset, check_file, check_git_status, check_img_size, check_requirements, check_suffix, check_yaml, colorstr, get_latest_run, increment_path, init_seeds, intersect_dicts, labels_to_class_weights, labels_to_image_weights, methods, @@ -77,6 +77,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio with open(hyp, errors='ignore') as f: hyp = yaml.safe_load(f) # load hyps dict LOGGER.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items())) + opt.hyp = hyp.copy() # for saving hyps to checkpoints # Save run settings if not evolve: @@ -377,6 +378,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio 'updates': ema.updates, 'optimizer': optimizer.state_dict(), 'wandb_id': loggers.wandb.wandb_run.id if loggers.wandb else None, + 'opt': vars(opt), 'date': datetime.now().isoformat()} # Save last, best and delete @@ -472,8 +474,7 @@ def parse_opt(known=False): parser.add_argument('--bbox_interval', type=int, default=-1, help='W&B: Set bounding-box image logging interval') parser.add_argument('--artifact_alias', type=str, default='latest', help='W&B: Version of dataset artifact to use') - opt = parser.parse_known_args()[0] if known else parser.parse_args() - return opt + return parser.parse_known_args()[0] if known else parser.parse_args() def main(opt, callbacks=Callbacks()): @@ -484,12 +485,20 @@ def main(opt, callbacks=Callbacks()): check_requirements(exclude=['thop']) # Resume - if opt.resume and not check_wandb_resume(opt) and not opt.evolve: # resume an interrupted run - ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run() # specified or most recent path - assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist' - with open(Path(ckpt).parent.parent / 'opt.yaml', errors='ignore') as f: - opt = argparse.Namespace(**yaml.safe_load(f)) # replace - opt.cfg, opt.weights, opt.resume = '', ckpt, True # reinstate + if opt.resume and not (check_wandb_resume(opt) or opt.evolve): # resume an interrupted run + last = Path(opt.resume if isinstance(opt.resume, str) else get_latest_run()) # specified or most recent last.pt + assert last.is_file(), f'ERROR: --resume checkpoint {last} does not exist' + opt_yaml = last.parent.parent / 'opt.yaml' # train options yaml + opt_data = opt.data # original dataset + if opt_yaml.is_file(): + 
with open(opt_yaml, errors='ignore') as f: + d = yaml.safe_load(f) + else: + d = torch.load(last, map_location='cpu')['opt'] + opt = argparse.Namespace(**d) # replace + opt.cfg, opt.weights, opt.resume = '', str(last), True # reinstate + if is_url(opt.data): + opt.data = str(opt_data) # avoid HUB resume auth timeout else: opt.data, opt.cfg, opt.hyp, opt.weights, opt.project = \ check_file(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp), str(opt.weights), str(opt.project) # checks diff --git a/utils/downloads.py b/utils/downloads.py index ebe5bd36e8ff..9d4780ad28b1 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -16,12 +16,14 @@ import torch -def is_url(url): +def is_url(url, check_online=True): # Check if online file exists try: - r = urllib.request.urlopen(url) # response - return r.getcode() == 200 - except urllib.request.HTTPError: + url = str(url) + result = urllib.parse.urlparse(url) + assert all([result.scheme, result.netloc, result.path]) # check if is url + return (urllib.request.urlopen(url).getcode() == 200) if check_online else True # check if exists online + except (AssertionError, urllib.request.HTTPError): return False diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 391ddead2985..d5615c263e43 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -317,8 +317,9 @@ def smart_resume(ckpt, optimizer, ema=None, weights='yolov5s.pt', epochs=300, re ema.ema.load_state_dict(ckpt['ema'].float().state_dict()) # EMA ema.updates = ckpt['updates'] if resume: - assert start_epoch > 0, f'{weights} training to {epochs} epochs is finished, nothing to resume.' - LOGGER.info(f'Resuming training from {weights} for {epochs - start_epoch} more epochs to {epochs} total epochs') + assert start_epoch > 0, f'{weights} training to {epochs} epochs is finished, nothing to resume.\n' \ + f"Start a new training without --resume, i.e. 'python train.py --weights {weights}'" + LOGGER.info(f'Resuming training from {weights} from epoch {start_epoch} to {epochs} total epochs') if epochs < start_epoch: LOGGER.info(f"{weights} has been trained for {ckpt['epoch']} epochs. Fine-tuning for {epochs} more epochs.") epochs += ckpt['epoch'] # finetune additional epochs From 6884da3a32e97fafcaae5caaddfd13de773cd2dc Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 3 Aug 2022 23:32:31 +0200 Subject: [PATCH 1266/1976] Add check_file(data) i.e. `--data coco128.yaml` (#8851) * Add check_file(data) i.e. 
`--data coco128.yaml` * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- export.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/export.py b/export.py index 4846624541e4..e3f6af93d1cc 100644 --- a/export.py +++ b/export.py @@ -67,8 +67,8 @@ from models.experimental import attempt_load from models.yolo import Detect from utils.dataloaders import LoadImages -from utils.general import (LOGGER, check_dataset, check_img_size, check_requirements, check_version, colorstr, - file_size, print_args, url2file) +from utils.general import (LOGGER, check_dataset, check_file, check_img_size, check_requirements, check_version, + colorstr, file_size, print_args, url2file) from utils.torch_utils import select_device @@ -371,7 +371,7 @@ def export_tflite(keras_model, im, file, int8, data, nms, agnostic_nms, prefix=c converter.optimizations = [tf.lite.Optimize.DEFAULT] if int8: from models.tf import representative_dataset_gen - dataset = LoadImages(check_dataset(data)['train'], img_size=imgsz, auto=False) # representative data + dataset = LoadImages(check_dataset(check_file(data))['train'], img_size=imgsz, auto=False) converter.representative_dataset = lambda: representative_dataset_gen(dataset, ncalib=100) converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] converter.target_spec.supported_types = [] From 628c05ca6ff1d7f79d1fc63c298008a1341ba99c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 3 Aug 2022 23:38:36 +0200 Subject: [PATCH 1267/1976] export.py replace `check_file` -> `check_yaml` (#8852) * export.py replace `check_file` -> `check_yaml` * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- export.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/export.py b/export.py index e3f6af93d1cc..546087a4026c 100644 --- a/export.py +++ b/export.py @@ -67,7 +67,7 @@ from models.experimental import attempt_load from models.yolo import Detect from utils.dataloaders import LoadImages -from utils.general import (LOGGER, check_dataset, check_file, check_img_size, check_requirements, check_version, +from utils.general import (LOGGER, check_dataset, check_img_size, check_requirements, check_version, check_yaml, colorstr, file_size, print_args, url2file) from utils.torch_utils import select_device @@ -371,7 +371,7 @@ def export_tflite(keras_model, im, file, int8, data, nms, agnostic_nms, prefix=c converter.optimizations = [tf.lite.Optimize.DEFAULT] if int8: from models.tf import representative_dataset_gen - dataset = LoadImages(check_dataset(check_file(data))['train'], img_size=imgsz, auto=False) + dataset = LoadImages(check_dataset(check_yaml(data))['train'], img_size=imgsz, auto=False) converter.representative_dataset = lambda: representative_dataset_gen(dataset, ncalib=100) converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] converter.target_spec.supported_types = [] From 84e7748564f83ba04601770f17a38cc55e6be661 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 4 Aug 2022 17:06:08 +0200 Subject: [PATCH 1268/1976] Update dataloaders.py remove `float64` shapes (#8865) May help https://github.com/ultralytics/yolov5/issues/8862 --- utils/dataloaders.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/utils/dataloaders.py b/utils/dataloaders.py index 9ccfe2545d75..71e7428d4dc1 100755 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -478,7 +478,7 @@ def __init__(self, [cache.pop(k) for k in ('hash', 'version', 'msgs')] # remove items labels, shapes, self.segments = zip(*cache.values()) self.labels = list(labels) - self.shapes = np.array(shapes, dtype=np.float64) + self.shapes = np.array(shapes) self.im_files = list(cache.keys()) # update self.label_files = img2label_paths(cache.keys()) # update n = len(shapes) # number of images From 38a6eb6e99b9e832e7de4a4a57c7b7e4e080fb44 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 4 Aug 2022 23:26:30 +0200 Subject: [PATCH 1269/1976] Fix TensorRT --dynamic excess outputs bug (#8869) * Fix TensorRT --dynamic excess outputs bug Potential fix for https://github.com/ultralytics/yolov5/issues/8790 * Cleanup * Update common.py * Update common.py * New fix --- models/common.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/models/common.py b/models/common.py index c898d94a921a..cfa688ba940b 100644 --- a/models/common.py +++ b/models/common.py @@ -387,13 +387,13 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, context = model.create_execution_context() bindings = OrderedDict() fp16 = False # default updated below - dynamic_input = False + dynamic = False for index in range(model.num_bindings): name = model.get_binding_name(index) dtype = trt.nptype(model.get_binding_dtype(index)) if model.binding_is_input(index): if -1 in tuple(model.get_binding_shape(index)): # dynamic - dynamic_input = True + dynamic = True context.set_binding_shape(index, tuple(model.get_profile_shape(0, index)[2])) if dtype == np.float16: fp16 = True @@ -471,12 +471,14 @@ def forward(self, im, augment=False, visualize=False, val=False): im = im.cpu().numpy() # FP32 y = self.executable_network([im])[self.output_layer] elif self.engine: # TensorRT - if im.shape != self.bindings['images'].shape and self.dynamic_input: - self.context.set_binding_shape(self.model.get_binding_index('images'), im.shape) # reshape if dynamic + if self.dynamic and im.shape != self.bindings['images'].shape: + i_in, i_out = (self.model.get_binding_index(x) for x in ('images', 'output')) + self.context.set_binding_shape(i_in, im.shape) # reshape if dynamic self.bindings['images'] = self.bindings['images']._replace(shape=im.shape) - assert im.shape == self.bindings['images'].shape, ( - f"image shape {im.shape} exceeds model max shape {self.bindings['images'].shape}" if self.dynamic_input - else f"image shape {im.shape} does not match model shape {self.bindings['images'].shape}") + self.bindings['output'].data.resize_(tuple(self.context.get_binding_shape(i_out))) + s = self.bindings['images'].shape + assert im.shape == s, f"image shape {im.shape} " + \ + f"exceeds model max shape {s}" if self.dynamic else f"does not match model shape {s}" self.binding_addrs['images'] = int(im.data_ptr()) self.context.execute_v2(list(self.binding_addrs.values())) y = self.bindings['output'].data From 731a2f8c1ff060bda5e84e34c7cbdd637cfe4d75 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 4 Aug 2022 23:34:15 +0200 Subject: [PATCH 1270/1976] Single-line TRT dynamic assertion (#8871) --- models/common.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/models/common.py b/models/common.py index cfa688ba940b..a1269c5f3372 100644 --- a/models/common.py +++ b/models/common.py @@ -477,8 +477,7 @@ def forward(self, im, 
augment=False, visualize=False, val=False): self.bindings['images'] = self.bindings['images']._replace(shape=im.shape) self.bindings['output'].data.resize_(tuple(self.context.get_binding_shape(i_out))) s = self.bindings['images'].shape - assert im.shape == s, f"image shape {im.shape} " + \ - f"exceeds model max shape {s}" if self.dynamic else f"does not match model shape {s}" + assert im.shape == s, f"input size {im.shape} {'>' if self.dynamic else 'not equal to'} max model size {s}" self.binding_addrs['images'] = int(im.data_ptr()) self.context.execute_v2(list(self.binding_addrs.values())) y = self.bindings['output'].data From bc9fcb176734e63d02a1a677c9b2e66f08a2a040 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 5 Aug 2022 14:45:41 +0200 Subject: [PATCH 1271/1976] HUBDatasetStats() preview images to 50 quality (#8880) @kalenmike should represent a 30% filesize reduction vs 75 quality --- utils/dataloaders.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 71e7428d4dc1..00f6413df7ad 100755 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -1034,7 +1034,7 @@ def _hub_ops(self, f, max_dim=1920): r = max_dim / max(im.height, im.width) # ratio if r < 1.0: # image too large im = im.resize((int(im.width * r), int(im.height * r))) - im.save(f_new, 'JPEG', quality=75, optimize=True) # save + im.save(f_new, 'JPEG', quality=50, optimize=True) # save except Exception as e: # use OpenCV print(f'WARNING: HUB ops PIL failure {f}: {e}') im = cv2.imread(f) From e073658e119dac7bd0bdb209ababc90121c6450d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 5 Aug 2022 16:27:28 +0200 Subject: [PATCH 1272/1976] `--resume` training from URL weights (#8882) @kalenmike --- train.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/train.py b/train.py index 17d16dba1531..023a39b6c579 100644 --- a/train.py +++ b/train.py @@ -485,9 +485,8 @@ def main(opt, callbacks=Callbacks()): check_requirements(exclude=['thop']) # Resume - if opt.resume and not (check_wandb_resume(opt) or opt.evolve): # resume an interrupted run - last = Path(opt.resume if isinstance(opt.resume, str) else get_latest_run()) # specified or most recent last.pt - assert last.is_file(), f'ERROR: --resume checkpoint {last} does not exist' + if opt.resume and not (check_wandb_resume(opt) or opt.evolve): # resume from specified or most recent last.pt + last = Path(check_file(opt.resume) if isinstance(opt.resume, str) else get_latest_run()) opt_yaml = last.parent.parent / 'opt.yaml' # train options yaml opt_data = opt.data # original dataset if opt_yaml.is_file(): From daed7a844e7f2445b382ca77b0cc5ec84761389b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 5 Aug 2022 16:42:10 +0200 Subject: [PATCH 1273/1976] `--resume` training from URL weights fix (#8884) --resume training from URL weights fix @kalenmike should fix data error on HUB resume --- train.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/train.py b/train.py index 023a39b6c579..c2f487afe8b0 100644 --- a/train.py +++ b/train.py @@ -496,8 +496,8 @@ def main(opt, callbacks=Callbacks()): d = torch.load(last, map_location='cpu')['opt'] opt = argparse.Namespace(**d) # replace opt.cfg, opt.weights, opt.resume = '', str(last), True # reinstate - if is_url(opt.data): - opt.data = str(opt_data) # avoid HUB resume auth timeout + if is_url(opt_data): + opt.data = check_file(opt_data) # avoid HUB resume auth timeout else: opt.data, opt.cfg, opt.hyp, opt.weights, opt.project 
= \ check_file(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp), str(opt.weights), str(opt.project) # checks From 2794483e091d50416289614a1a35f158fd25bee2 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 5 Aug 2022 17:10:44 +0200 Subject: [PATCH 1274/1976] Update CI to default Python 3.10 (#8883) * Update CI to default Python 3.10 * Update ci-testing.yml * Update ci-testing.yml --- .github/workflows/ci-testing.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 444bab75bbbc..0b7fd824d7ea 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -17,7 +17,7 @@ jobs: strategy: matrix: os: [ubuntu-latest] - python-version: [3.9] + python-version: ['3.9'] # requires python<=3.9 model: [yolov5n] steps: - uses: actions/checkout@v3 @@ -48,7 +48,7 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest, macos-latest, windows-latest] - python-version: [3.9] + python-version: ['3.10'] model: [yolov5n] include: - os: ubuntu-latest @@ -58,7 +58,7 @@ jobs: python-version: '3.8' model: yolov5n - os: ubuntu-latest - python-version: '3.10' + python-version: '3.9' model: yolov5n steps: - uses: actions/checkout@v3 From 378bde4bba56b70954d1aa1c75d876164da50d2a Mon Sep 17 00:00:00 2001 From: Victor Sonck Date: Fri, 5 Aug 2022 20:50:49 +0200 Subject: [PATCH 1275/1976] ClearML experiment tracking integration (#8620) * Add titles to matplotlib plots * Add ClearML Experiment Tracking integration. * Add ClearML Data Version Management automatic download when requested * Add ClearML Hyperparameter Optimization * ClearML save period integration * Fix wandb breaking when used with ClearML dataset * Fix wandb breaking when used with ClearML resume and dataset * Add ClearML documentation * fixed small bug in clearml integration that misreports epoch number * Final ClearMl additions before refactor * Add correct epoch reporting * Add remote execution and autoscaling docs for ClearML integration * Added images to clearml integration docs * fixed logo alignment bug and added hpo screenshot clearml * Fixed small epoch number bug in clearml integration * Remove saved model flush clearml * Cleanup clearml readme section * Cleaned up clearml logger docstring * Remove resume readme section clearml * Clearml integration cleanup * Updated ClearML documentation * Added dark vs light icons ClearML Readme * Clearml Readme styling * Add better gifs * Fixed gif file size * Add better images in tutorial notebook * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Addressed comments in PR #8620 * Fixed circular import * Fixed circular import * Update tutorial.ipynb * Update tutorial.ipynb * Inline comment * Restructured tutorial notebook * Add correct ClearML link to README * Update tutorial.ipynb * Update general.py * Update __init__.py * Update __init__.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update __init__.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update __init__.py * Update README.md * Update __init__.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * spelling * Update tutorial.ipynb * notebook cutt.ly links * Update README.md * Update README.md * cutt.ly links in tutorial * Removed labels as they show up on last subplot only Co-authored-by: pre-commit-ci[bot] 
<66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- README.md | 21 ++- requirements.txt | 1 + train.py | 2 + tutorial.ipynb | 27 ++- utils/general.py | 4 + utils/loggers/__init__.py | 70 ++++++-- utils/loggers/clearml/README.md | 222 +++++++++++++++++++++++++ utils/loggers/clearml/__init__.py | 0 utils/loggers/clearml/clearml_utils.py | 150 +++++++++++++++++ utils/loggers/clearml/hpo.py | 84 ++++++++++ utils/loggers/wandb/wandb_utils.py | 11 +- utils/metrics.py | 3 + utils/plots.py | 1 + 13 files changed, 575 insertions(+), 21 deletions(-) create mode 100644 utils/loggers/clearml/README.md create mode 100644 utils/loggers/clearml/__init__.py create mode 100644 utils/loggers/clearml/clearml_utils.py create mode 100644 utils/loggers/clearml/hpo.py diff --git a/README.md b/README.md index b959871211e5..5bc3c1c41b93 100644 --- a/README.md +++ b/README.md @@ -151,7 +151,8 @@ python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 12 - [Train Custom Data](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data)  🚀 RECOMMENDED - [Tips for Best Training Results](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)  ☘️ RECOMMENDED -- [Weights & Biases Logging](https://github.com/ultralytics/yolov5/issues/1289)  🌟 NEW +- [ClearML Logging](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) 🌟 NEW +- [Weights & Biases Logging](https://github.com/ultralytics/yolov5/issues/1289) - [Roboflow for Datasets, Labeling, and Active Learning](https://github.com/ultralytics/yolov5/issues/4975)  🌟 NEW - [Multi-GPU Training](https://github.com/ultralytics/yolov5/issues/475) - [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36)  ⭐ NEW @@ -190,17 +191,23 @@ Get started in seconds with our verified environments. Click each icon below for ##
Integrations
-|Weights and Biases|Roboflow ⭐ NEW| -|:-:|:-:| -|Automatically track and visualize all your YOLOv5 training runs in the cloud with [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme)|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) | +|ClearML ⭐ NEW|Roboflow|Weights and Biases +|:-:|:-:|:-:| +|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) |Automatically track and visualize all your YOLOv5 training runs in the cloud with [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme) ##
Why YOLOv5
@@ -239,6 +236,84 @@ We are super excited about our first-ever Ultralytics YOLOv5 🚀 EXPORT Competi
+ +##
Classification ⭐ NEW
+ +YOLOv5 [release v6.2](https://github.com/ultralytics/yolov5/releases) brings support for classification model training, validation, prediction and export! We've made training classifier models super simple. Click below to get started. + +
+ Classification Checkpoints (click to expand) + +
+ +We trained YOLOv5-cls classification models on ImageNet for 90 epochs using a 4xA100 instance, and we trained ResNet and EfficientNet models alongside with the same default training settings to compare. We exported all models to ONNX FP32 for CPU speed tests and to TensorRT FP16 for GPU speed tests. We ran all speed tests on Google [Colab Pro](https://colab.research.google.com/signup) for easy reproducibility. + +| Model | size
(pixels) | acc
top1 | acc
top5 | Training
90 epochs
4xA100 (hours) | Speed
ONNX CPU
(ms) | Speed
TensorRT V100
(ms) | params
(M) | FLOPs
@224 (B) | +|----------------------------------------------------------------------------------------------------|-----------------------|------------------|------------------|----------------------------------------------|--------------------------------|-------------------------------------|--------------------|------------------------| +| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** | +| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 | +| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 | +| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 | +| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 | +| | +| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 | +| [ResNet34](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 | +| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 | +| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 | +| | +| [EfficientNet_b0](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 | +| [EfficientNet_b1](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 | +| [EfficientNet_b2](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 | +| [EfficientNet_b3](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 | + +
+ Table Notes (click to expand) + +- All checkpoints are trained to 90 epochs with SGD optimizer with lr0=0.001 at image size 224 and all default settings. Runs logged to https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2. +- **Accuracy** values are for single-model single-scale on [ImageNet-1k](https://www.image-net.org/index.php) dataset.
Reproduce by `python classify/val.py --data ../datasets/imagenet --img 224` +- **Speed** averaged over 100 inference images using a Google [Colab Pro](https://colab.research.google.com/signup) V100 High-RAM instance.
Reproduce by `python classify/val.py --data ../datasets/imagenet --img 224 --batch 1` +- **Export** to ONNX at FP32 and TensorRT at FP16 done with `export.py`.
Reproduce by `python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224` +
+
+ +
+ Classification Usage Examples (click to expand) + +### Train +YOLOv5 classification training supports auto-download of MNIST, Fashion-MNIST, CIFAR10, CIFAR100, Imagenette, Imagewoof, and ImageNet datasets with the `--data` argument. To start training on MNIST, for example, use `--data mnist`. + +```bash +# Single-GPU +python classify/train.py --model yolov5s-cls.pt --data cifar100 --epochs 5 --img 224 --batch 128 + +# Multi-GPU DDP +python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3 +``` + +### Val +Validate accuracy on a pretrained model. To validate YOLOv5s-cls accuracy on ImageNet: +```bash +bash data/scripts/get_imagenet.sh --val # download ImageNet val split (6.3G, 50000 images) +python classify/val.py --weights yolov5s-cls.pt --data ../datasets/imagenet --img 224 +``` + +### Predict +Run a classification prediction on an image (a fuller inference sketch follows this section). +```bash +python classify/predict.py --weights yolov5s-cls.pt --source data/images/bus.jpg +``` +```python +model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s-cls.pt') # load from PyTorch Hub +``` + +### Export +Export a group of trained YOLOv5-cls, ResNet and EfficientNet models to ONNX and TensorRT. +```bash +python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --include onnx engine --img 224 +``` +
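For readers who want the Predict step without the CLI, the sketch below condenses the inference path used by `classify/predict.py` (preprocess, forward, softmax, top-5). It is a minimal illustration, assuming a local YOLOv5 checkout on `PYTHONPATH` (so that `DetectMultiBackend` and `classify_transforms` are importable) and the v6.2 `yolov5s-cls.pt` weights on disk; treat it as a sketch rather than a supported entry point.

```python
# Minimal sketch of the classify/predict.py inference path; assumes a local
# YOLOv5 checkout so the repo's own modules are importable.
import cv2
import torch
import torch.nn.functional as F

from models.common import DetectMultiBackend
from utils.augmentations import classify_transforms

device = torch.device('cpu')
model = DetectMultiBackend('yolov5s-cls.pt', device=device)  # classification checkpoint
im = cv2.cvtColor(cv2.imread('data/images/bus.jpg'), cv2.COLOR_BGR2RGB)  # BGR -> RGB
im = classify_transforms(224)(im).unsqueeze(0).to(device)  # 1x3x224x224 normalized tensor
p = F.softmax(model(im), dim=1)  # class probabilities
top5 = p.argsort(1, descending=True)[:, :5].squeeze()  # top-5 class indices
print(', '.join(f'{model.names[int(j)]} {p[0, j]:.2f}' for j in top5))
```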
+ + ##
Contribute
We value your input! We want to make contributing to YOLOv5 as easy and transparent as possible. Before you begin, please review our [Contributing Guide](CONTRIBUTING.md) and fill out the [YOLOv5 Survey](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experiences. Sincere thanks to all of our contributors! diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 31d38ead530f..aa797c44d487 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -5,9 +5,9 @@ name: YOLOv5 CI on: push: - branches: [master] + branches: [ master ] pull_request: - branches: [master] + branches: [ master ] schedule: - cron: '0 0 * * *' # runs at 00:00 UTC every day @@ -16,9 +16,9 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-latest] - python-version: ['3.9'] # requires python<=3.9 - model: [yolov5n] + os: [ ubuntu-latest ] + python-version: [ '3.9' ] # requires python<=3.9 + model: [ yolov5n ] steps: - uses: actions/checkout@v3 - uses: actions/setup-python@v4 @@ -47,9 +47,9 @@ jobs: strategy: fail-fast: false matrix: - os: [ubuntu-latest, macos-latest, windows-latest] - python-version: ['3.10'] - model: [yolov5n] + os: [ ubuntu-latest, macos-latest, windows-latest ] + python-version: [ '3.10' ] + model: [ yolov5n ] include: - os: ubuntu-latest python-version: '3.7' # '3.6.8' min @@ -87,7 +87,7 @@ jobs: else pip install -r requirements.txt --extra-index-url https://download.pytorch.org/whl/cpu fi - shell: bash # required for Windows compatibility + shell: bash # for Windows compatibility - name: Check environment run: | python -c "import utils; utils.notebook_init()" @@ -100,8 +100,8 @@ jobs: python --version pip --version pip list - - name: Run tests - shell: bash + - name: Test detection + shell: bash # for Windows compatibility run: | # export PYTHONPATH="$PWD" # to run '$ python *.py' files in subdirectories m=${{ matrix.model }} # official weights @@ -123,3 +123,13 @@ jobs: model = torch.hub.load('.', 'custom', path=path, source='local') print(model('data/images/bus.jpg')) EOF + - name: Test classification + shell: bash # for Windows compatibility + run: | + m=${{ matrix.model }}-cls.pt # official weights + b=runs/train-cls/exp/weights/best.pt # best.pt checkpoint + python classify/train.py --imgsz 32 --model $m --data mnist2560 --epochs 1 # train + python classify/val.py --imgsz 32 --weights $b --data ../datasets/mnist2560 # val + python classify/predict.py --imgsz 32 --weights $b --source ../datasets/mnist2560/test/7/60.png # predict + python classify/predict.py --imgsz 32 --weights $m --source data/images/bus.jpg # predict + python export.py --weights $b --img 64 --imgsz 224 --include torchscript # export diff --git a/README.md b/README.md index 62c7ed4f53e6..b368d1d6e264 100644 --- a/README.md +++ b/README.md @@ -201,14 +201,6 @@ Get started in seconds with our verified environments. Click each icon below for |:-:|:-:|:-:|:-:| |Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) |Automatically track and visualize all your YOLOv5 training runs in the cloud with [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme) - ##
Why YOLOv5
@@ -254,6 +246,83 @@ We are super excited about our first-ever Ultralytics YOLOv5 🚀 EXPORT Competi
+##
Classification ⭐ NEW
+ +YOLOv5 [release v6.2](https://github.com/ultralytics/yolov5/releases) brings support for classification model training, validation, prediction and export! We've made training classifier models super simple. Click below to get started. + +
+ Classification Checkpoints (click to expand) + +
+ +We trained YOLOv5-cls classification models on ImageNet for 90 epochs using a 4xA100 instance, and we trained ResNet and EfficientNet models alongside with the same default training settings to compare. We exported all models to ONNX FP32 for CPU speed tests and to TensorRT FP16 for GPU speed tests. We ran all speed tests on Google [Colab Pro](https://colab.research.google.com/signup) for easy reproducibility. + +| Model | size
(pixels) | acc
top1 | acc
top5 | Training
90 epochs
4xA100 (hours) | Speed
ONNX CPU
(ms) | Speed
TensorRT V100
(ms) | params
(M) | FLOPs
@224 (B) | +|----------------------------------------------------------------------------------------------------|-----------------------|------------------|------------------|----------------------------------------------|--------------------------------|-------------------------------------|--------------------|------------------------| +| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** | +| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 | +| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 | +| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 | +| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 | +| | +| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 | +| [ResNet34](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 | +| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 | +| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 | +| | +| [EfficientNet_b0](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 | +| [EfficientNet_b1](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 | +| [EfficientNet_b2](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 | +| [EfficientNet_b3](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 | + +
+ Table Notes (click to expand) + +- All checkpoints are trained to 90 epochs with SGD optimizer with lr0=0.001 at image size 224 and all default settings. Runs logged to https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2. +- **Accuracy** values are for single-model single-scale on [ImageNet-1k](https://www.image-net.org/index.php) dataset.
Reproduce by `python classify/val.py --data ../datasets/imagenet --img 224` +- **Speed** averaged over 100 inference images using a Google [Colab Pro](https://colab.research.google.com/signup) V100 High-RAM instance.
Reproduce by `python classify/val.py --data ../datasets/imagenet --img 224 --batch 1` +- **Export** to ONNX at FP32 and TensorRT at FP16 done with `export.py`.
Reproduce by `python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224` +
+
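As a reference for the `acc top1`/`acc top5` columns in the table above: a prediction counts as correct at top-k if the true class appears among the k highest-scoring classes. The snippet below is an illustrative, self-contained way to compute both numbers from a batch of logits (random stand-in data; not the repo's `classify/val.py`):

```python
# Illustrative top-1 / top-5 accuracy computation from class logits.
import torch

def topk_accuracy(logits, targets, ks=(1, 5)):
    # logits: (N, C) class scores, targets: (N,) ground-truth class indices
    pred = logits.topk(max(ks), dim=1).indices  # (N, max_k) classes, best first
    hits = pred.eq(targets.view(-1, 1))  # (N, max_k) boolean hit matrix
    return [hits[:, :k].any(1).float().mean().item() for k in ks]

logits = torch.randn(8, 1000)  # dummy batch over 1000 ImageNet classes
targets = torch.randint(0, 1000, (8,))  # dummy ground-truth labels
top1, top5 = topk_accuracy(logits, targets)
print(f'top1 {top1:.3f}, top5 {top5:.3f}')
```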
+ +
+ Classification Usage Examples (click to expand) + +### Train +YOLOv5 classification training supports auto-download of MNIST, Fashion-MNIST, CIFAR10, CIFAR100, Imagenette, Imagewoof, and ImageNet datasets with the `--data` argument. To start training on MNIST, for example, use `--data mnist`. + +```bash +# Single-GPU +python classify/train.py --model yolov5s-cls.pt --data cifar100 --epochs 5 --img 224 --batch 128 + +# Multi-GPU DDP +python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3 +``` + +### Val +Validate accuracy on a pretrained model. To validate YOLOv5s-cls accuracy on ImageNet: +```bash +bash data/scripts/get_imagenet.sh --val # download ImageNet val split (6.3G, 50000 images) +python classify/val.py --weights yolov5s-cls.pt --data ../datasets/imagenet --img 224 +``` + +### Predict +Run a classification prediction on an image. +```bash +python classify/predict.py --weights yolov5s-cls.pt --source data/images/bus.jpg +``` +```python +model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s-cls.pt') # load from PyTorch Hub +``` + +### Export +Export a group of trained YOLOv5-cls, ResNet and EfficientNet models to ONNX and TensorRT (an ONNX Runtime sanity check follows this section). +```bash +python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --include onnx engine --img 224 +``` +
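To sanity-check an exported classifier outside PyTorch, ONNX Runtime can run the ONNX file directly. This is a hedged sketch: it assumes the export command above produced `yolov5s-cls.onnx`, that `onnxruntime` is installed, and it feeds a zero tensor in place of a properly preprocessed image, so only the plumbing is demonstrated.

```python
# Minimal sketch: run an exported YOLOv5-cls ONNX model with ONNX Runtime.
import numpy as np
import onnxruntime as ort

session = ort.InferenceSession('yolov5s-cls.onnx', providers=['CPUExecutionProvider'])
name = session.get_inputs()[0].name  # input name as recorded in the ONNX graph
im = np.zeros((1, 3, 224, 224), dtype=np.float32)  # placeholder normalized RGB batch
logits = session.run(None, {name: im})[0]  # (1, num_classes) class scores
print(int(np.argmax(logits)))  # predicted class index
```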
+ + ##
Contribute
We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible. Please see our [Contributing Guide](CONTRIBUTING.md) to get started, and fill out the [YOLOv5 Survey](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experiences. Thank you to all our contributors! diff --git a/classify/predict.py b/classify/predict.py new file mode 100644 index 000000000000..419830d43952 --- /dev/null +++ b/classify/predict.py @@ -0,0 +1,109 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Run classification inference on images + +Usage: + $ python classify/predict.py --weights yolov5s-cls.pt --source im.jpg +""" + +import argparse +import os +import sys +from pathlib import Path + +import cv2 +import torch.nn.functional as F + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[1] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative + +from classify.train import imshow_cls +from models.common import DetectMultiBackend +from utils.augmentations import classify_transforms +from utils.general import LOGGER, check_requirements, colorstr, increment_path, print_args +from utils.torch_utils import select_device, smart_inference_mode, time_sync + + +@smart_inference_mode() +def run( + weights=ROOT / 'yolov5s-cls.pt', # model.pt path(s) + source=ROOT / 'data/images/bus.jpg', # file/dir/URL/glob, 0 for webcam + imgsz=224, # inference size + device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu + half=False, # use FP16 half-precision inference + dnn=False, # use OpenCV DNN for ONNX inference + show=True, + project=ROOT / 'runs/predict-cls', # save to project/name + name='exp', # save to project/name + exist_ok=False, # existing project/name ok, do not increment +): + file = str(source) + seen, dt = 1, [0.0, 0.0, 0.0] + device = select_device(device) + + # Directories + save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run + save_dir.mkdir(parents=True, exist_ok=True) # make dir + + # Transforms + transforms = classify_transforms(imgsz) + + # Load model + model = DetectMultiBackend(weights, device=device, dnn=dnn, fp16=half) + model.warmup(imgsz=(1, 3, imgsz, imgsz)) # warmup + + # Image + t1 = time_sync() + im = cv2.cvtColor(cv2.imread(file), cv2.COLOR_BGR2RGB) + im = transforms(im).unsqueeze(0).to(device) + im = im.half() if model.fp16 else im.float() + t2 = time_sync() + dt[0] += t2 - t1 + + # Inference + results = model(im) + t3 = time_sync() + dt[1] += t3 - t2 + + p = F.softmax(results, dim=1) # probabilities + i = p.argsort(1, descending=True)[:, :5].squeeze() # top 5 indices + dt[2] += time_sync() - t3 + LOGGER.info(f"image 1/1 {file}: {imgsz}x{imgsz} {', '.join(f'{model.names[j]} {p[0, j]:.2f}' for j in i)}") + + # Print results + t = tuple(x / seen * 1E3 for x in dt) # speeds per image + shape = (1, 3, imgsz, imgsz) + LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms post-process per image at shape {shape}' % t) + if show: + imshow_cls(im, f=save_dir / Path(file).name, verbose=True) + LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}") + return p + + +def parse_opt(): + parser = argparse.ArgumentParser() + parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-cls.pt', help='model path(s)') + parser.add_argument('--source', type=str, default=ROOT / 'data/images/bus.jpg', help='file') + parser.add_argument('--imgsz', '--img', 
'--img-size', type=int, default=224, help='train, val image size (pixels)') + parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') + parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') + parser.add_argument('--project', default=ROOT / 'runs/predict-cls', help='save to project/name') + parser.add_argument('--name', default='exp', help='save to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + opt = parser.parse_args() + print_args(vars(opt)) + return opt + + +def main(opt): + check_requirements(exclude=('tensorboard', 'thop')) + run(**vars(opt)) + + +if __name__ == "__main__": + opt = parse_opt() + main(opt) diff --git a/classify/train.py b/classify/train.py new file mode 100644 index 000000000000..f2b465567446 --- /dev/null +++ b/classify/train.py @@ -0,0 +1,325 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Train a YOLOv5 classifier model on a classification dataset +Datasets: --data mnist, fashion-mnist, cifar10, cifar100, imagenette, imagewoof, imagenet, or 'path/to/custom/dataset' + +Usage: + $ python classify/train.py --model yolov5s-cls.pt --data cifar100 --epochs 5 --img 128 + $ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3 +""" + +import argparse +import os +import subprocess +import sys +import time +from copy import deepcopy +from datetime import datetime +from pathlib import Path + +import torch +import torch.distributed as dist +import torch.hub as hub +import torch.optim.lr_scheduler as lr_scheduler +import torchvision +from torch.cuda import amp +from tqdm import tqdm + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[1] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative + +from classify import val as validate +from models.experimental import attempt_load +from models.yolo import ClassificationModel, DetectionModel +from utils.dataloaders import create_classification_dataloader +from utils.general import (DATASETS_DIR, LOGGER, WorkingDirectory, check_git_status, check_requirements, colorstr, + download, increment_path, init_seeds, print_args, yaml_save) +from utils.loggers import GenericLogger +from utils.plots import imshow_cls +from utils.torch_utils import (ModelEMA, model_info, reshape_classifier_output, select_device, smart_DDP, + smart_optimizer, smartCrossEntropyLoss, torch_distributed_zero_first) + +LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html +RANK = int(os.getenv('RANK', -1)) +WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) + + +def train(opt, device): + init_seeds(opt.seed + 1 + RANK, deterministic=True) + save_dir, data, bs, epochs, nw, imgsz, pretrained = \ + opt.save_dir, Path(opt.data), opt.batch_size, opt.epochs, min(os.cpu_count() - 1, opt.workers), \ + opt.imgsz, str(opt.pretrained).lower() == 'true' + cuda = device.type != 'cpu' + + # Directories + wdir = save_dir / 'weights' + wdir.mkdir(parents=True, exist_ok=True) # make dir + last, best = wdir / 'last.pt', wdir / 'best.pt' + + # Save run settings + yaml_save(save_dir / 'opt.yaml', vars(opt)) + + # Logger + logger = GenericLogger(opt=opt, console_logger=LOGGER) if RANK in {-1, 0} 
else None + + # Download Dataset + with torch_distributed_zero_first(LOCAL_RANK), WorkingDirectory(ROOT): + data_dir = data if data.is_dir() else (DATASETS_DIR / data) + if not data_dir.is_dir(): + LOGGER.info(f'\nDataset not found ⚠️, missing path {data_dir}, attempting download...') + t = time.time() + if str(data) == 'imagenet': + subprocess.run(f"bash {ROOT / 'data/scripts/get_imagenet.sh'}", shell=True, check=True) + else: + url = f'https://github.com/ultralytics/yolov5/releases/download/v1.0/{data}.zip' + download(url, dir=data_dir.parent) + s = f"Dataset download success ✅ ({time.time() - t:.1f}s), saved to {colorstr('bold', data_dir)}\n" + LOGGER.info(s) + + # Dataloaders + nc = len([x for x in (data_dir / 'train').glob('*') if x.is_dir()]) # number of classes + trainloader = create_classification_dataloader(path=data_dir / 'train', + imgsz=imgsz, + batch_size=bs // WORLD_SIZE, + augment=True, + cache=opt.cache, + rank=LOCAL_RANK, + workers=nw) + + test_dir = data_dir / 'test' if (data_dir / 'test').exists() else data_dir / 'val' # data/test or data/val + if RANK in {-1, 0}: + testloader = create_classification_dataloader(path=test_dir, + imgsz=imgsz, + batch_size=bs // WORLD_SIZE * 2, + augment=False, + cache=opt.cache, + rank=-1, + workers=nw) + + # Model + with torch_distributed_zero_first(LOCAL_RANK), WorkingDirectory(ROOT): + if Path(opt.model).is_file() or opt.model.endswith('.pt'): + model = attempt_load(opt.model, device='cpu', fuse=False) + elif opt.model in torchvision.models.__dict__: # TorchVision models i.e. resnet50, efficientnet_b0 + model = torchvision.models.__dict__[opt.model](weights='IMAGENET1K_V1' if pretrained else None) + else: + m = hub.list('ultralytics/yolov5') # + hub.list('pytorch/vision') # models + raise ModuleNotFoundError(f'--model {opt.model} not found. Available models are: \n' + '\n'.join(m)) + if isinstance(model, DetectionModel): + LOGGER.warning("WARNING: pass YOLOv5 classifier model with '-cls' suffix, i.e. 
'--model yolov5s-cls.pt'") + model = ClassificationModel(model=model, nc=nc, cutoff=opt.cutoff or 10) # convert to classification model + reshape_classifier_output(model, nc) # update class count + for p in model.parameters(): + p.requires_grad = True # for training + for m in model.modules(): + if not pretrained and hasattr(m, 'reset_parameters'): + m.reset_parameters() + if isinstance(m, torch.nn.Dropout) and opt.dropout is not None: + m.p = opt.dropout # set dropout + model = model.to(device) + names = trainloader.dataset.classes # class names + model.names = names # attach class names + + # Info + if RANK in {-1, 0}: + model_info(model) + if opt.verbose: + LOGGER.info(model) + images, labels = next(iter(trainloader)) + file = imshow_cls(images[:25], labels[:25], names=names, f=save_dir / 'train_images.jpg') + logger.log_images(file, name='Train Examples') + logger.log_graph(model, imgsz) # log model + + # Optimizer + optimizer = smart_optimizer(model, opt.optimizer, opt.lr0, momentum=0.9, decay=5e-5) + + # Scheduler + lrf = 0.01 # final lr (fraction of lr0) + # lf = lambda x: ((1 + math.cos(x * math.pi / epochs)) / 2) * (1 - lrf) + lrf # cosine + lf = lambda x: (1 - x / epochs) * (1 - lrf) + lrf # linear + scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) + # scheduler = lr_scheduler.OneCycleLR(optimizer, max_lr=lr0, total_steps=epochs, pct_start=0.1, + # final_div_factor=1 / 25 / lrf) + + # EMA + ema = ModelEMA(model) if RANK in {-1, 0} else None + + # DDP mode + if cuda and RANK != -1: + model = smart_DDP(model) + + # Train + t0 = time.time() + criterion = smartCrossEntropyLoss(label_smoothing=opt.label_smoothing) # loss function + best_fitness = 0.0 + scaler = amp.GradScaler(enabled=cuda) + val = test_dir.stem # 'val' or 'test' + LOGGER.info(f'Image sizes {imgsz} train, {imgsz} test\n' + f'Using {nw * WORLD_SIZE} dataloader workers\n' + f"Logging results to {colorstr('bold', save_dir)}\n" + f'Starting {opt.model} training on {data} dataset with {nc} classes for {epochs} epochs...\n\n' + f"{'Epoch':>10}{'GPU_mem':>10}{'train_loss':>12}{f'{val}_loss':>12}{'top1_acc':>12}{'top5_acc':>12}") + for epoch in range(epochs): # loop over the dataset multiple times + tloss, vloss, fitness = 0.0, 0.0, 0.0 # train loss, val loss, fitness + model.train() + if RANK != -1: + trainloader.sampler.set_epoch(epoch) + pbar = enumerate(trainloader) + if RANK in {-1, 0}: + pbar = tqdm(enumerate(trainloader), total=len(trainloader), bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') + for i, (images, labels) in pbar: # progress bar + images, labels = images.to(device, non_blocking=True), labels.to(device) + + # Forward + with amp.autocast(enabled=cuda): # stability issues when enabled + loss = criterion(model(images), labels) + + # Backward + scaler.scale(loss).backward() + + # Optimize + scaler.unscale_(optimizer) # unscale gradients + torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0) # clip gradients + scaler.step(optimizer) + scaler.update() + optimizer.zero_grad() + if ema: + ema.update(model) + + if RANK in {-1, 0}: + # Print + tloss = (tloss * i + loss.item()) / (i + 1) # update mean losses + mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0) # (GB) + pbar.desc = f"{f'{epoch + 1}/{epochs}':>10}{mem:>10}{tloss:>12.3g}" + ' ' * 36 + + # Test + if i == len(pbar) - 1: # last batch + top1, top5, vloss = validate.run(model=ema.ema, + dataloader=testloader, + criterion=criterion, + pbar=pbar) # test accuracy, loss + fitness = top1 # define fitness 
as top1 accuracy + + # Scheduler + scheduler.step() + + # Log metrics + if RANK in {-1, 0}: + # Best fitness + if fitness > best_fitness: + best_fitness = fitness + + # Log + metrics = { + "train/loss": tloss, + f"{val}/loss": vloss, + "metrics/accuracy_top1": top1, + "metrics/accuracy_top5": top5, + "lr/0": optimizer.param_groups[0]['lr']} # learning rate + logger.log_metrics(metrics, epoch) + + # Save model + final_epoch = epoch + 1 == epochs + if (not opt.nosave) or final_epoch: + ckpt = { + 'epoch': epoch, + 'best_fitness': best_fitness, + 'model': deepcopy(ema.ema).half(), # deepcopy(de_parallel(model)).half(), + 'ema': None, # deepcopy(ema.ema).half(), + 'updates': ema.updates, + 'optimizer': None, # optimizer.state_dict(), + 'opt': vars(opt), + 'date': datetime.now().isoformat()} + + # Save last, best and delete + torch.save(ckpt, last) + if best_fitness == fitness: + torch.save(ckpt, best) + del ckpt + + # Train complete + if RANK in {-1, 0} and final_epoch: + LOGGER.info(f'\nTraining complete ({(time.time() - t0) / 3600:.3f} hours)' + f"\nResults saved to {colorstr('bold', save_dir)}" + f"\nPredict: python classify/predict.py --weights {best} --source im.jpg" + f"\nValidate: python classify/val.py --weights {best} --data {data_dir}" + f"\nExport: python export.py --weights {best} --include onnx" + f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{best}')" + f"\nVisualize: https://netron.app\n") + + # Plot examples + images, labels = (x[:25] for x in next(iter(testloader))) # first 25 images and labels + pred = torch.max(ema.ema((images.half() if cuda else images.float()).to(device)), 1)[1] + file = imshow_cls(images, labels, pred, names, verbose=False, f=save_dir / 'test_images.jpg') + + # Log results + meta = {"epochs": epochs, "top1_acc": best_fitness, "date": datetime.now().isoformat()} + logger.log_images(file, name='Test Examples (true-predicted)', epoch=epoch) + logger.log_model(best, epochs, metadata=meta) + + +def parse_opt(known=False): + parser = argparse.ArgumentParser() + parser.add_argument('--model', type=str, default='yolov5s-cls.pt', help='initial weights path') + parser.add_argument('--data', type=str, default='mnist', help='cifar10, cifar100, mnist, imagenet, etc.') + parser.add_argument('--epochs', type=int, default=10) + parser.add_argument('--batch-size', type=int, default=64, help='total batch size for all GPUs') + parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=128, help='train, val image size (pixels)') + parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') + parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"') + parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') + parser.add_argument('--project', default=ROOT / 'runs/train-cls', help='save to project/name') + parser.add_argument('--name', default='exp', help='save to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--pretrained', nargs='?', const=True, default=True, help='start from i.e. 
--pretrained False') + parser.add_argument('--optimizer', choices=['SGD', 'Adam', 'AdamW', 'RMSProp'], default='Adam', help='optimizer') + parser.add_argument('--lr0', type=float, default=0.001, help='initial learning rate') + parser.add_argument('--label-smoothing', type=float, default=0.1, help='Label smoothing epsilon') + parser.add_argument('--cutoff', type=int, default=None, help='Model layer cutoff index for Classify() head') + parser.add_argument('--dropout', type=float, default=None, help='Dropout (fraction)') + parser.add_argument('--verbose', action='store_true', help='Verbose mode') + parser.add_argument('--seed', type=int, default=0, help='Global training seed') + parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify') + return parser.parse_known_args()[0] if known else parser.parse_args() + + +def main(opt): + # Checks + if RANK in {-1, 0}: + print_args(vars(opt)) + check_git_status() + check_requirements() + + # DDP mode + device = select_device(opt.device, batch_size=opt.batch_size) + if LOCAL_RANK != -1: + assert opt.batch_size != -1, 'AutoBatch is coming soon for classification, please pass a valid --batch-size' + assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE' + assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' + torch.cuda.set_device(LOCAL_RANK) + device = torch.device('cuda', LOCAL_RANK) + dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") + + # Parameters + opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok) # increment run + + # Train + train(opt, device) + + +def run(**kwargs): + # Usage: from yolov5 import classify; classify.train.run(data=mnist, imgsz=320, model='yolov5m') + opt = parse_opt(True) + for k, v in kwargs.items(): + setattr(opt, k, v) + main(opt) + return opt + + +if __name__ == "__main__": + opt = parse_opt() + main(opt) diff --git a/classify/val.py b/classify/val.py new file mode 100644 index 000000000000..0930ba8c9c51 --- /dev/null +++ b/classify/val.py @@ -0,0 +1,158 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Validate a classification model on a dataset + +Usage: + $ python classify/val.py --weights yolov5s-cls.pt --data ../datasets/imagenet +""" + +import argparse +import os +import sys +from pathlib import Path + +import torch +from tqdm import tqdm + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[1] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative + +from models.common import DetectMultiBackend +from utils.dataloaders import create_classification_dataloader +from utils.general import LOGGER, check_img_size, check_requirements, colorstr, increment_path, print_args +from utils.torch_utils import select_device, smart_inference_mode, time_sync + + +@smart_inference_mode() +def run( + data=ROOT / '../datasets/mnist', # dataset dir + weights=ROOT / 'yolov5s-cls.pt', # model.pt path(s) + batch_size=128, # batch size + imgsz=224, # inference size (pixels) + device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu + workers=8, # max dataloader workers (per RANK in DDP mode) + verbose=False, # verbose output + project=ROOT / 'runs/val-cls', # save to project/name + name='exp', # save to project/name + exist_ok=False, # existing project/name ok, do not increment + half=True, # use FP16 half-precision inference + dnn=False, # use OpenCV DNN for ONNX inference + model=None, + dataloader=None, + criterion=None, + pbar=None, +): + # Initialize/load model and set device + training = model is not None + if training: # called by train.py + device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model + half &= device.type != 'cpu' # half precision only supported on CUDA + model.half() if half else model.float() + else: # called directly + device = select_device(device, batch_size=batch_size) + + # Directories + save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run + save_dir.mkdir(parents=True, exist_ok=True) # make dir + + # Load model + model = DetectMultiBackend(weights, device=device, dnn=dnn, fp16=half) + stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine + imgsz = check_img_size(imgsz, s=stride) # check image size + half = model.fp16 # FP16 supported on limited backends with CUDA + if engine: + batch_size = model.batch_size + else: + device = model.device + if not (pt or jit): + batch_size = 1 # export.py models default to batch-size 1 + LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models') + + # Dataloader + data = Path(data) + test_dir = data / 'test' if (data / 'test').exists() else data / 'val' # data/test or data/val + dataloader = create_classification_dataloader(path=test_dir, + imgsz=imgsz, + batch_size=batch_size, + augment=False, + rank=-1, + workers=workers) + + model.eval() + pred, targets, loss, dt = [], [], 0, [0.0, 0.0, 0.0] + n = len(dataloader) # number of batches + action = 'validating' if dataloader.dataset.root.stem == 'val' else 'testing' + desc = f"{pbar.desc[:-36]}{action:>36}" if pbar else f"{action}" + bar = tqdm(dataloader, desc, n, not training, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}', position=0) + with torch.cuda.amp.autocast(enabled=device.type != 'cpu'): + for images, labels in bar: + t1 = time_sync() + images, labels = images.to(device, non_blocking=True), labels.to(device) + t2 = time_sync() + dt[0] += t2 - t1 + + y = model(images) + t3 = time_sync() + dt[1] += t3 - t2 + + pred.append(y.argsort(1, descending=True)[:, :5]) + targets.append(labels) + if criterion: + loss += criterion(y, labels) + dt[2] += time_sync() - t3 + + loss /= n + pred, targets = torch.cat(pred), torch.cat(targets) + correct = (targets[:, None] == pred).float() + acc = torch.stack((correct[:, 0], correct.max(1).values), dim=1) # (top1, top5) accuracy + top1, top5 = acc.mean(0).tolist() + + if pbar: + pbar.desc = f"{pbar.desc[:-36]}{loss:>12.3g}{top1:>12.3g}{top5:>12.3g}" + if verbose: # all classes + LOGGER.info(f"{'Class':>24}{'Images':>12}{'top1_acc':>12}{'top5_acc':>12}") + LOGGER.info(f"{'all':>24}{targets.shape[0]:>12}{top1:>12.3g}{top5:>12.3g}") + for i, c in enumerate(model.names): + aci = acc[targets == i] + top1i, top5i = aci.mean(0).tolist() + LOGGER.info(f"{c:>24}{aci.shape[0]:>12}{top1i:>12.3g}{top5i:>12.3g}") + + # Print results + t = tuple(x / len(dataloader.dataset.samples) * 1E3 for x in dt) # speeds per image + shape = (1, 3, imgsz, imgsz) + LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms 
post-process per image at shape {shape}' % t) + LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}") + + return top1, top5, loss + + +def parse_opt(): + parser = argparse.ArgumentParser() + parser.add_argument('--data', type=str, default=ROOT / '../datasets/mnist', help='dataset path') + parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-cls.pt', help='model.pt path(s)') + parser.add_argument('--batch-size', type=int, default=128, help='batch size') + parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=224, help='inference size (pixels)') + parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') + parser.add_argument('--verbose', nargs='?', const=True, default=True, help='verbose output') + parser.add_argument('--project', default=ROOT / 'runs/val-cls', help='save to project/name') + parser.add_argument('--name', default='exp', help='save to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') + parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') + opt = parser.parse_args() + print_args(vars(opt)) + return opt + + +def main(opt): + check_requirements(exclude=('tensorboard', 'thop')) + run(**vars(opt)) + + +if __name__ == "__main__": + opt = parse_opt() + main(opt) diff --git a/data/ImageNet.yaml b/data/ImageNet.yaml new file mode 100644 index 000000000000..9f89b4268aff --- /dev/null +++ b/data/ImageNet.yaml @@ -0,0 +1,156 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# ImageNet-1k dataset https://www.image-net.org/index.php by Stanford University +# Simplified class names from https://github.com/anishathalye/imagenet-simple-labels +# Example usage: python classify/train.py --data imagenet +# parent +# ├── yolov5 +# └── datasets +# └── imagenet ← downloads here (144 GB) + + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
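+# Usage sketch (assumes the yaml_load() helper added to utils/general.py in this patch):
+#   from utils.general import yaml_load
+#   d = yaml_load('data/ImageNet.yaml')  # dict with path, train, val, test, nc, names keys
+#   assert len(d['names']) == d['nc'] == 1000  # 1000 human-readable class names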
+path: ../datasets/imagenet # dataset root dir +train: train # train images (relative to 'path') 1281167 images +val: val # val images (relative to 'path') 50000 images +test: # test images (optional) + +# Classes +nc: 1000 # number of classes +names: ['tench', 'goldfish', 'great white shark', 'tiger shark', 'hammerhead shark', 'electric ray', 'stingray', 'cock', + 'hen', 'ostrich', 'brambling', 'goldfinch', 'house finch', 'junco', 'indigo bunting', 'American robin', + 'bulbul', 'jay', 'magpie', 'chickadee', 'American dipper', 'kite', 'bald eagle', 'vulture', 'great grey owl', + 'fire salamander', 'smooth newt', 'newt', 'spotted salamander', 'axolotl', 'American bullfrog', 'tree frog', + 'tailed frog', 'loggerhead sea turtle', 'leatherback sea turtle', 'mud turtle', 'terrapin', 'box turtle', + 'banded gecko', 'green iguana', 'Carolina anole', 'desert grassland whiptail lizard', 'agama', + 'frilled-necked lizard', 'alligator lizard', 'Gila monster', 'European green lizard', 'chameleon', + 'Komodo dragon', 'Nile crocodile', 'American alligator', 'triceratops', 'worm snake', 'ring-necked snake', + 'eastern hog-nosed snake', 'smooth green snake', 'kingsnake', 'garter snake', 'water snake', 'vine snake', + 'night snake', 'boa constrictor', 'African rock python', 'Indian cobra', 'green mamba', 'sea snake', + 'Saharan horned viper', 'eastern diamondback rattlesnake', 'sidewinder', 'trilobite', 'harvestman', 'scorpion', + 'yellow garden spider', 'barn spider', 'European garden spider', 'southern black widow', 'tarantula', + 'wolf spider', 'tick', 'centipede', 'black grouse', 'ptarmigan', 'ruffed grouse', 'prairie grouse', 'peacock', + 'quail', 'partridge', 'grey parrot', 'macaw', 'sulphur-crested cockatoo', 'lorikeet', 'coucal', 'bee eater', + 'hornbill', 'hummingbird', 'jacamar', 'toucan', 'duck', 'red-breasted merganser', 'goose', 'black swan', + 'tusker', 'echidna', 'platypus', 'wallaby', 'koala', 'wombat', 'jellyfish', 'sea anemone', 'brain coral', + 'flatworm', 'nematode', 'conch', 'snail', 'slug', 'sea slug', 'chiton', 'chambered nautilus', 'Dungeness crab', + 'rock crab', 'fiddler crab', 'red king crab', 'American lobster', 'spiny lobster', 'crayfish', 'hermit crab', + 'isopod', 'white stork', 'black stork', 'spoonbill', 'flamingo', 'little blue heron', 'great egret', 'bittern', + 'crane (bird)', 'limpkin', 'common gallinule', 'American coot', 'bustard', 'ruddy turnstone', 'dunlin', + 'common redshank', 'dowitcher', 'oystercatcher', 'pelican', 'king penguin', 'albatross', 'grey whale', + 'killer whale', 'dugong', 'sea lion', 'Chihuahua', 'Japanese Chin', 'Maltese', 'Pekingese', 'Shih Tzu', + 'King Charles Spaniel', 'Papillon', 'toy terrier', 'Rhodesian Ridgeback', 'Afghan Hound', 'Basset Hound', + 'Beagle', 'Bloodhound', 'Bluetick Coonhound', 'Black and Tan Coonhound', 'Treeing Walker Coonhound', + 'English foxhound', 'Redbone Coonhound', 'borzoi', 'Irish Wolfhound', 'Italian Greyhound', 'Whippet', + 'Ibizan Hound', 'Norwegian Elkhound', 'Otterhound', 'Saluki', 'Scottish Deerhound', 'Weimaraner', + 'Staffordshire Bull Terrier', 'American Staffordshire Terrier', 'Bedlington Terrier', 'Border Terrier', + 'Kerry Blue Terrier', 'Irish Terrier', 'Norfolk Terrier', 'Norwich Terrier', 'Yorkshire Terrier', + 'Wire Fox Terrier', 'Lakeland Terrier', 'Sealyham Terrier', 'Airedale Terrier', 'Cairn Terrier', + 'Australian Terrier', 'Dandie Dinmont Terrier', 'Boston Terrier', 'Miniature Schnauzer', 'Giant Schnauzer', + 'Standard Schnauzer', 'Scottish Terrier', 'Tibetan Terrier', 'Australian Silky 
Terrier', + 'Soft-coated Wheaten Terrier', 'West Highland White Terrier', 'Lhasa Apso', 'Flat-Coated Retriever', + 'Curly-coated Retriever', 'Golden Retriever', 'Labrador Retriever', 'Chesapeake Bay Retriever', + 'German Shorthaired Pointer', 'Vizsla', 'English Setter', 'Irish Setter', 'Gordon Setter', 'Brittany', + 'Clumber Spaniel', 'English Springer Spaniel', 'Welsh Springer Spaniel', 'Cocker Spaniels', 'Sussex Spaniel', + 'Irish Water Spaniel', 'Kuvasz', 'Schipperke', 'Groenendael', 'Malinois', 'Briard', 'Australian Kelpie', + 'Komondor', 'Old English Sheepdog', 'Shetland Sheepdog', 'collie', 'Border Collie', 'Bouvier des Flandres', + 'Rottweiler', 'German Shepherd Dog', 'Dobermann', 'Miniature Pinscher', 'Greater Swiss Mountain Dog', + 'Bernese Mountain Dog', 'Appenzeller Sennenhund', 'Entlebucher Sennenhund', 'Boxer', 'Bullmastiff', + 'Tibetan Mastiff', 'French Bulldog', 'Great Dane', 'St. Bernard', 'husky', 'Alaskan Malamute', 'Siberian Husky', + 'Dalmatian', 'Affenpinscher', 'Basenji', 'pug', 'Leonberger', 'Newfoundland', 'Pyrenean Mountain Dog', + 'Samoyed', 'Pomeranian', 'Chow Chow', 'Keeshond', 'Griffon Bruxellois', 'Pembroke Welsh Corgi', + 'Cardigan Welsh Corgi', 'Toy Poodle', 'Miniature Poodle', 'Standard Poodle', 'Mexican hairless dog', + 'grey wolf', 'Alaskan tundra wolf', 'red wolf', 'coyote', 'dingo', 'dhole', 'African wild dog', 'hyena', + 'red fox', 'kit fox', 'Arctic fox', 'grey fox', 'tabby cat', 'tiger cat', 'Persian cat', 'Siamese cat', + 'Egyptian Mau', 'cougar', 'lynx', 'leopard', 'snow leopard', 'jaguar', 'lion', 'tiger', 'cheetah', 'brown bear', + 'American black bear', 'polar bear', 'sloth bear', 'mongoose', 'meerkat', 'tiger beetle', 'ladybug', + 'ground beetle', 'longhorn beetle', 'leaf beetle', 'dung beetle', 'rhinoceros beetle', 'weevil', 'fly', 'bee', + 'ant', 'grasshopper', 'cricket', 'stick insect', 'cockroach', 'mantis', 'cicada', 'leafhopper', 'lacewing', + 'dragonfly', 'damselfly', 'red admiral', 'ringlet', 'monarch butterfly', 'small white', 'sulphur butterfly', + 'gossamer-winged butterfly', 'starfish', 'sea urchin', 'sea cucumber', 'cottontail rabbit', 'hare', + 'Angora rabbit', 'hamster', 'porcupine', 'fox squirrel', 'marmot', 'beaver', 'guinea pig', 'common sorrel', + 'zebra', 'pig', 'wild boar', 'warthog', 'hippopotamus', 'ox', 'water buffalo', 'bison', 'ram', 'bighorn sheep', + 'Alpine ibex', 'hartebeest', 'impala', 'gazelle', 'dromedary', 'llama', 'weasel', 'mink', 'European polecat', + 'black-footed ferret', 'otter', 'skunk', 'badger', 'armadillo', 'three-toed sloth', 'orangutan', 'gorilla', + 'chimpanzee', 'gibbon', 'siamang', 'guenon', 'patas monkey', 'baboon', 'macaque', 'langur', + 'black-and-white colobus', 'proboscis monkey', 'marmoset', 'white-headed capuchin', 'howler monkey', 'titi', + "Geoffroy's spider monkey", 'common squirrel monkey', 'ring-tailed lemur', 'indri', 'Asian elephant', + 'African bush elephant', 'red panda', 'giant panda', 'snoek', 'eel', 'coho salmon', 'rock beauty', 'clownfish', + 'sturgeon', 'garfish', 'lionfish', 'pufferfish', 'abacus', 'abaya', 'academic gown', 'accordion', + 'acoustic guitar', 'aircraft carrier', 'airliner', 'airship', 'altar', 'ambulance', 'amphibious vehicle', + 'analog clock', 'apiary', 'apron', 'waste container', 'assault rifle', 'backpack', 'bakery', 'balance beam', + 'balloon', 'ballpoint pen', 'Band-Aid', 'banjo', 'baluster', 'barbell', 'barber chair', 'barbershop', 'barn', + 'barometer', 'barrel', 'wheelbarrow', 'baseball', 'basketball', 'bassinet', 'bassoon', 'swimming cap', + 'bath 
towel', 'bathtub', 'station wagon', 'lighthouse', 'beaker', 'military cap', 'beer bottle', 'beer glass', + 'bell-cot', 'bib', 'tandem bicycle', 'bikini', 'ring binder', 'binoculars', 'birdhouse', 'boathouse', + 'bobsleigh', 'bolo tie', 'poke bonnet', 'bookcase', 'bookstore', 'bottle cap', 'bow', 'bow tie', 'brass', 'bra', + 'breakwater', 'breastplate', 'broom', 'bucket', 'buckle', 'bulletproof vest', 'high-speed train', + 'butcher shop', 'taxicab', 'cauldron', 'candle', 'cannon', 'canoe', 'can opener', 'cardigan', 'car mirror', + 'carousel', 'tool kit', 'carton', 'car wheel', 'automated teller machine', 'cassette', 'cassette player', + 'castle', 'catamaran', 'CD player', 'cello', 'mobile phone', 'chain', 'chain-link fence', 'chain mail', + 'chainsaw', 'chest', 'chiffonier', 'chime', 'china cabinet', 'Christmas stocking', 'church', 'movie theater', + 'cleaver', 'cliff dwelling', 'cloak', 'clogs', 'cocktail shaker', 'coffee mug', 'coffeemaker', 'coil', + 'combination lock', 'computer keyboard', 'confectionery store', 'container ship', 'convertible', 'corkscrew', + 'cornet', 'cowboy boot', 'cowboy hat', 'cradle', 'crane (machine)', 'crash helmet', 'crate', 'infant bed', + 'Crock Pot', 'croquet ball', 'crutch', 'cuirass', 'dam', 'desk', 'desktop computer', 'rotary dial telephone', + 'diaper', 'digital clock', 'digital watch', 'dining table', 'dishcloth', 'dishwasher', 'disc brake', 'dock', + 'dog sled', 'dome', 'doormat', 'drilling rig', 'drum', 'drumstick', 'dumbbell', 'Dutch oven', 'electric fan', + 'electric guitar', 'electric locomotive', 'entertainment center', 'envelope', 'espresso machine', 'face powder', + 'feather boa', 'filing cabinet', 'fireboat', 'fire engine', 'fire screen sheet', 'flagpole', 'flute', + 'folding chair', 'football helmet', 'forklift', 'fountain', 'fountain pen', 'four-poster bed', 'freight car', + 'French horn', 'frying pan', 'fur coat', 'garbage truck', 'gas mask', 'gas pump', 'goblet', 'go-kart', + 'golf ball', 'golf cart', 'gondola', 'gong', 'gown', 'grand piano', 'greenhouse', 'grille', 'grocery store', + 'guillotine', 'barrette', 'hair spray', 'half-track', 'hammer', 'hamper', 'hair dryer', 'hand-held computer', + 'handkerchief', 'hard disk drive', 'harmonica', 'harp', 'harvester', 'hatchet', 'holster', 'home theater', + 'honeycomb', 'hook', 'hoop skirt', 'horizontal bar', 'horse-drawn vehicle', 'hourglass', 'iPod', 'clothes iron', + "jack-o'-lantern", 'jeans', 'jeep', 'T-shirt', 'jigsaw puzzle', 'pulled rickshaw', 'joystick', 'kimono', + 'knee pad', 'knot', 'lab coat', 'ladle', 'lampshade', 'laptop computer', 'lawn mower', 'lens cap', + 'paper knife', 'library', 'lifeboat', 'lighter', 'limousine', 'ocean liner', 'lipstick', 'slip-on shoe', + 'lotion', 'speaker', 'loupe', 'sawmill', 'magnetic compass', 'mail bag', 'mailbox', 'tights', 'tank suit', + 'manhole cover', 'maraca', 'marimba', 'mask', 'match', 'maypole', 'maze', 'measuring cup', 'medicine chest', + 'megalith', 'microphone', 'microwave oven', 'military uniform', 'milk can', 'minibus', 'miniskirt', 'minivan', + 'missile', 'mitten', 'mixing bowl', 'mobile home', 'Model T', 'modem', 'monastery', 'monitor', 'moped', + 'mortar', 'square academic cap', 'mosque', 'mosquito net', 'scooter', 'mountain bike', 'tent', 'computer mouse', + 'mousetrap', 'moving van', 'muzzle', 'nail', 'neck brace', 'necklace', 'nipple', 'notebook computer', 'obelisk', + 'oboe', 'ocarina', 'odometer', 'oil filter', 'organ', 'oscilloscope', 'overskirt', 'bullock cart', + 'oxygen mask', 'packet', 'paddle', 'paddle wheel', 'padlock', 
'paintbrush', 'pajamas', 'palace', 'pan flute', + 'paper towel', 'parachute', 'parallel bars', 'park bench', 'parking meter', 'passenger car', 'patio', + 'payphone', 'pedestal', 'pencil case', 'pencil sharpener', 'perfume', 'Petri dish', 'photocopier', 'plectrum', + 'Pickelhaube', 'picket fence', 'pickup truck', 'pier', 'piggy bank', 'pill bottle', 'pillow', 'ping-pong ball', + 'pinwheel', 'pirate ship', 'pitcher', 'hand plane', 'planetarium', 'plastic bag', 'plate rack', 'plow', + 'plunger', 'Polaroid camera', 'pole', 'police van', 'poncho', 'billiard table', 'soda bottle', 'pot', + "potter's wheel", 'power drill', 'prayer rug', 'printer', 'prison', 'projectile', 'projector', 'hockey puck', + 'punching bag', 'purse', 'quill', 'quilt', 'race car', 'racket', 'radiator', 'radio', 'radio telescope', + 'rain barrel', 'recreational vehicle', 'reel', 'reflex camera', 'refrigerator', 'remote control', 'restaurant', + 'revolver', 'rifle', 'rocking chair', 'rotisserie', 'eraser', 'rugby ball', 'ruler', 'running shoe', 'safe', + 'safety pin', 'salt shaker', 'sandal', 'sarong', 'saxophone', 'scabbard', 'weighing scale', 'school bus', + 'schooner', 'scoreboard', 'CRT screen', 'screw', 'screwdriver', 'seat belt', 'sewing machine', 'shield', + 'shoe store', 'shoji', 'shopping basket', 'shopping cart', 'shovel', 'shower cap', 'shower curtain', 'ski', + 'ski mask', 'sleeping bag', 'slide rule', 'sliding door', 'slot machine', 'snorkel', 'snowmobile', 'snowplow', + 'soap dispenser', 'soccer ball', 'sock', 'solar thermal collector', 'sombrero', 'soup bowl', 'space bar', + 'space heater', 'space shuttle', 'spatula', 'motorboat', 'spider web', 'spindle', 'sports car', 'spotlight', + 'stage', 'steam locomotive', 'through arch bridge', 'steel drum', 'stethoscope', 'scarf', 'stone wall', + 'stopwatch', 'stove', 'strainer', 'tram', 'stretcher', 'couch', 'stupa', 'submarine', 'suit', 'sundial', + 'sunglass', 'sunglasses', 'sunscreen', 'suspension bridge', 'mop', 'sweatshirt', 'swimsuit', 'swing', 'switch', + 'syringe', 'table lamp', 'tank', 'tape player', 'teapot', 'teddy bear', 'television', 'tennis ball', + 'thatched roof', 'front curtain', 'thimble', 'threshing machine', 'throne', 'tile roof', 'toaster', + 'tobacco shop', 'toilet seat', 'torch', 'totem pole', 'tow truck', 'toy store', 'tractor', 'semi-trailer truck', + 'tray', 'trench coat', 'tricycle', 'trimaran', 'tripod', 'triumphal arch', 'trolleybus', 'trombone', 'tub', + 'turnstile', 'typewriter keyboard', 'umbrella', 'unicycle', 'upright piano', 'vacuum cleaner', 'vase', 'vault', + 'velvet', 'vending machine', 'vestment', 'viaduct', 'violin', 'volleyball', 'waffle iron', 'wall clock', + 'wallet', 'wardrobe', 'military aircraft', 'sink', 'washing machine', 'water bottle', 'water jug', + 'water tower', 'whiskey jug', 'whistle', 'wig', 'window screen', 'window shade', 'Windsor tie', 'wine bottle', + 'wing', 'wok', 'wooden spoon', 'wool', 'split-rail fence', 'shipwreck', 'yawl', 'yurt', 'website', 'comic book', + 'crossword', 'traffic sign', 'traffic light', 'dust jacket', 'menu', 'plate', 'guacamole', 'consomme', + 'hot pot', 'trifle', 'ice cream', 'ice pop', 'baguette', 'bagel', 'pretzel', 'cheeseburger', 'hot dog', + 'mashed potato', 'cabbage', 'broccoli', 'cauliflower', 'zucchini', 'spaghetti squash', 'acorn squash', + 'butternut squash', 'cucumber', 'artichoke', 'bell pepper', 'cardoon', 'mushroom', 'Granny Smith', 'strawberry', + 'orange', 'lemon', 'fig', 'pineapple', 'banana', 'jackfruit', 'custard apple', 'pomegranate', 'hay', + 'carbonara', 
'chocolate syrup', 'dough', 'meatloaf', 'pizza', 'pot pie', 'burrito', 'red wine', 'espresso', + 'cup', 'eggnog', 'alp', 'bubble', 'cliff', 'coral reef', 'geyser', 'lakeshore', 'promontory', 'shoal', + 'seashore', 'valley', 'volcano', 'baseball player', 'bridegroom', 'scuba diver', 'rapeseed', 'daisy', + "yellow lady's slipper", 'corn', 'acorn', 'rose hip', 'horse chestnut seed', 'coral fungus', 'agaric', + 'gyromitra', 'stinkhorn mushroom', 'earth star', 'hen-of-the-woods', 'bolete', 'ear', + 'toilet paper'] # class names + +# Download script/URL (optional) +download: data/scripts/get_imagenet.sh diff --git a/data/scripts/download_weights.sh b/data/scripts/download_weights.sh index e9fa65394178..a4f3becfdbeb 100755 --- a/data/scripts/download_weights.sh +++ b/data/scripts/download_weights.sh @@ -1,7 +1,7 @@ #!/bin/bash # YOLOv5 🚀 by Ultralytics, GPL-3.0 license # Download latest models from https://github.com/ultralytics/yolov5/releases -# Example usage: bash path/to/download_weights.sh +# Example usage: bash data/scripts/download_weights.sh # parent # └── yolov5 # ├── yolov5s.pt ← downloads here @@ -11,10 +11,11 @@ python - <=7.0.0 + if device.type == 'cpu': + device = torch.device('cuda:0') Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr')) logger = trt.Logger(trt.Logger.INFO) with open(w, 'rb') as f, trt.Runtime(logger) as runtime: @@ -398,8 +396,8 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, if dtype == np.float16: fp16 = True shape = tuple(context.get_binding_shape(index)) - data = torch.from_numpy(np.empty(shape, dtype=np.dtype(dtype))).to(device) - bindings[name] = Binding(name, dtype, shape, data, int(data.data_ptr())) + im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device) + bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr())) binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items()) batch_size = bindings['images'].shape[0] # if dynamic, this is instead max batch size elif coreml: # CoreML @@ -445,9 +443,16 @@ def wrap_frozen_graph(gd, inputs, outputs): input_details = interpreter.get_input_details() # inputs output_details = interpreter.get_output_details() # outputs elif tfjs: - raise Exception('ERROR: YOLOv5 TF.js inference is not supported') + raise NotImplementedError('ERROR: YOLOv5 TF.js inference is not supported') else: - raise Exception(f'ERROR: {w} is not a supported format') + raise NotImplementedError(f'ERROR: {w} is not a supported format') + + # class names + if 'names' not in locals(): + names = yaml_load(data)['names'] if data else [f'class{i}' for i in range(999)] + if names[0] == 'n01440764' and len(names) == 1000: # ImageNet + names = yaml_load(ROOT / 'data/ImageNet.yaml')['names'] # human-readable names + self.__dict__.update(locals()) # assign all variables to self def forward(self, im, augment=False, visualize=False, val=False): @@ -457,7 +462,9 @@ def forward(self, im, augment=False, visualize=False, val=False): im = im.half() # to FP16 if self.pt: # PyTorch - y = self.model(im, augment=augment, visualize=visualize)[0] + y = self.model(im, augment=augment, visualize=visualize) if augment or visualize else self.model(im) + if isinstance(y, tuple): + y = y[0] elif self.jit: # TorchScript y = self.model(im)[0] elif self.dnn: # ONNX OpenCV DNN @@ -526,7 +533,7 @@ def warmup(self, imgsz=(1, 3, 640, 640)): self.forward(im) # warmup @staticmethod - def model_type(p='path/to/model.pt'): + def _model_type(p='path/to/model.pt'): # Return model type from model path, i.e. 
path='path/to/model.onnx' -> type=onnx from export import export_formats suffixes = list(export_formats().Suffix) + ['.xml'] # export suffixes @@ -540,8 +547,7 @@ def model_type(p='path/to/model.pt'): @staticmethod def _load_metadata(f='path/to/meta.yaml'): # Load metadata from meta.yaml if it exists - with open(f, errors='ignore') as f: - d = yaml.safe_load(f) + d = yaml_load(f) return d['stride'], d['names'] # assign stride, names @@ -753,10 +759,13 @@ class Classify(nn.Module): # Classification head, i.e. x(b,c1,20,20) to x(b,c2) def __init__(self, c1, c2, k=1, s=1, p=None, g=1): # ch_in, ch_out, kernel, stride, padding, groups super().__init__() - self.aap = nn.AdaptiveAvgPool2d(1) # to x(b,c1,1,1) - self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g) # to x(b,c2,1,1) - self.flat = nn.Flatten() + c_ = 1280 # efficientnet_b0 size + self.conv = Conv(c1, c_, k, s, autopad(k, p), g) + self.pool = nn.AdaptiveAvgPool2d(1) # to x(b,c_,1,1) + self.drop = nn.Dropout(p=0.0, inplace=True) + self.linear = nn.Linear(c_, c2) # to x(b,c2) def forward(self, x): - z = torch.cat([self.aap(y) for y in (x if isinstance(x, list) else [x])], 1) # cat if list - return self.flat(self.conv(z)) # flatten to x(b,c2) + if isinstance(x, list): + x = torch.cat(x, 1) + return self.linear(self.drop(self.pool(self.conv(x)).flatten(1))) diff --git a/models/experimental.py b/models/experimental.py index 0317c7526c99..cb32d01ba46a 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -79,7 +79,9 @@ def attempt_load(weights, device=None, inplace=True, fuse=True): for w in weights if isinstance(weights, list) else [weights]: ckpt = torch.load(attempt_download(w), map_location='cpu') # load ckpt = (ckpt.get('ema') or ckpt['model']).to(device).float() # FP32 model - model.append(ckpt.fuse().eval() if fuse else ckpt.eval()) # fused or un-fused model in eval mode + if not hasattr(ckpt, 'stride'): + ckpt.stride = torch.tensor([32.]) # compatibility update for ResNet etc. 
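+        # Note: classification checkpoints, e.g. a bare torchvision resnet50(), define
+        # neither a detection 'stride' attribute nor a fuse() method, hence the shim
+        # above and the hasattr(ckpt, 'fuse') guard below. Illustrative check:
+        #   import torchvision
+        #   m = torchvision.models.resnet50()
+        #   hasattr(m, 'stride'), hasattr(m, 'fuse')  # -> (False, False)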
+ model.append(ckpt.fuse().eval() if fuse and hasattr(ckpt, 'fuse') else ckpt.eval()) # model in eval mode # Compatibility updates for m in model.modules(): @@ -92,11 +94,14 @@ def attempt_load(weights, device=None, inplace=True, fuse=True): elif t is nn.Upsample and not hasattr(m, 'recompute_scale_factor'): m.recompute_scale_factor = None # torch 1.11.0 compatibility + # Return model if len(model) == 1: - return model[-1] # return model + return model[-1] + + # Return detection ensemble print(f'Ensemble created with {weights}\n') for k in 'names', 'nc', 'yaml': setattr(model, k, getattr(model[0], k)) model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride # max stride assert all(model[0].nc == m.nc for m in model), f'Models have different class counts: {[m.nc for m in model]}' - return model # return ensemble + return model diff --git a/models/yolo.py b/models/yolo.py index 307b74844ca0..df4209726e0d 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -90,8 +90,64 @@ def _make_grid(self, nx=20, ny=20, i=0, torch_1_10=check_version(torch.__version return grid, anchor_grid -class Model(nn.Module): - # YOLOv5 model +class BaseModel(nn.Module): + # YOLOv5 base model + def forward(self, x, profile=False, visualize=False): + return self._forward_once(x, profile, visualize) # single-scale inference, train + + def _forward_once(self, x, profile=False, visualize=False): + y, dt = [], [] # outputs + for m in self.model: + if m.f != -1: # if not from previous layer + x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers + if profile: + self._profile_one_layer(m, x, dt) + x = m(x) # run + y.append(x if m.i in self.save else None) # save output + if visualize: + feature_visualization(x, m.type, m.i, save_dir=visualize) + return x + + def _profile_one_layer(self, m, x, dt): + c = m == self.model[-1] # is final layer, copy input as inplace fix + o = thop.profile(m, inputs=(x.copy() if c else x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPs + t = time_sync() + for _ in range(10): + m(x.copy() if c else x) + dt.append((time_sync() - t) * 100) + if m == self.model[0]: + LOGGER.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s} module") + LOGGER.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f} {m.type}') + if c: + LOGGER.info(f"{sum(dt):10.2f} {'-':>10s} {'-':>10s} Total") + + def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers + LOGGER.info('Fusing layers... 
') + for m in self.model.modules(): + if isinstance(m, (Conv, DWConv)) and hasattr(m, 'bn'): + m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv + delattr(m, 'bn') # remove batchnorm + m.forward = m.forward_fuse # update forward + self.info() + return self + + def info(self, verbose=False, img_size=640): # print model information + model_info(self, verbose, img_size) + + def _apply(self, fn): + # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers + self = super()._apply(fn) + m = self.model[-1] # Detect() + if isinstance(m, Detect): + m.stride = fn(m.stride) + m.grid = list(map(fn, m.grid)) + if isinstance(m.anchor_grid, list): + m.anchor_grid = list(map(fn, m.anchor_grid)) + return self + + +class DetectionModel(BaseModel): + # YOLOv5 detection model def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, input channels, number of classes super().__init__() if isinstance(cfg, dict): @@ -149,19 +205,6 @@ def _forward_augment(self, x): y = self._clip_augmented(y) # clip augmented tails return torch.cat(y, 1), None # augmented inference, train - def _forward_once(self, x, profile=False, visualize=False): - y, dt = [], [] # outputs - for m in self.model: - if m.f != -1: # if not from previous layer - x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers - if profile: - self._profile_one_layer(m, x, dt) - x = m(x) # run - y.append(x if m.i in self.save else None) # save output - if visualize: - feature_visualization(x, m.type, m.i, save_dir=visualize) - return x - def _descale_pred(self, p, flips, scale, img_size): # de-scale predictions following augmented inference (inverse operation) if self.inplace: @@ -190,19 +233,6 @@ def _clip_augmented(self, y): y[-1] = y[-1][:, i:] # small return y - def _profile_one_layer(self, m, x, dt): - c = isinstance(m, Detect) # is final layer, copy input as inplace fix - o = thop.profile(m, inputs=(x.copy() if c else x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPs - t = time_sync() - for _ in range(10): - m(x.copy() if c else x) - dt.append((time_sync() - t) * 100) - if m == self.model[0]: - LOGGER.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s} module") - LOGGER.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f} {m.type}') - if c: - LOGGER.info(f"{sum(dt):10.2f} {'-':>10s} {'-':>10s} Total") - def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency # https://arxiv.org/abs/1708.02002 section 3.3 # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1. @@ -213,41 +243,34 @@ def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is b[:, 5:] += math.log(0.6 / (m.nc - 0.999999)) if cf is None else torch.log(cf / cf.sum()) # cls mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) - def _print_biases(self): - m = self.model[-1] # Detect() module - for mi in m.m: # from - b = mi.bias.detach().view(m.na, -1).T # conv.bias(255) to (3,85) - LOGGER.info( - ('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean())) - # def _print_weights(self): - # for m in self.model.modules(): - # if type(m) is Bottleneck: - # LOGGER.info('%10.3g' % (m.w.detach().sigmoid() * 2)) # shortcut weights +Model = DetectionModel # retain YOLOv5 'Model' class for backwards compatibility - def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers - LOGGER.info('Fusing layers... 
') - for m in self.model.modules(): - if isinstance(m, (Conv, DWConv)) and hasattr(m, 'bn'): - m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv - delattr(m, 'bn') # remove batchnorm - m.forward = m.forward_fuse # update forward - self.info() - return self - def info(self, verbose=False, img_size=640): # print model information - model_info(self, verbose, img_size) - - def _apply(self, fn): - # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers - self = super()._apply(fn) - m = self.model[-1] # Detect() - if isinstance(m, Detect): - m.stride = fn(m.stride) - m.grid = list(map(fn, m.grid)) - if isinstance(m.anchor_grid, list): - m.anchor_grid = list(map(fn, m.anchor_grid)) - return self +class ClassificationModel(BaseModel): + # YOLOv5 classification model + def __init__(self, cfg=None, model=None, nc=1000, cutoff=10): # yaml, model, number of classes, cutoff index + super().__init__() + self._from_detection_model(model, nc, cutoff) if model is not None else self._from_yaml(cfg) + + def _from_detection_model(self, model, nc=1000, cutoff=10): + # Create a YOLOv5 classification model from a YOLOv5 detection model + if isinstance(model, DetectMultiBackend): + model = model.model # unwrap DetectMultiBackend + model.model = model.model[:cutoff] # backbone + m = model.model[-1] # last layer + ch = m.conv.in_channels if hasattr(m, 'conv') else m.cv1.conv.in_channels # ch into module + c = Classify(ch, nc) # Classify() + c.i, c.f, c.type = m.i, m.f, 'models.common.Classify' # index, from, type + model.model[-1] = c # replace + self.model = model.model + self.stride = model.stride + self.save = [] + self.nc = nc + + def _from_yaml(self, cfg): + # Create a YOLOv5 classification model from a *.yaml file + self.model = None def parse_model(d, ch): # model_dict, input_channels(3) @@ -321,7 +344,7 @@ def parse_model(d, ch): # model_dict, input_channels(3) # Options if opt.line_profile: # profile layer by layer - _ = model(im, profile=True) + model(im, profile=True) elif opt.profile: # profile forward-backward results = profile(input=im, ops=[model], n=3) diff --git a/train.py b/train.py index d24ac57df23d..bbb26cdeafeb 100644 --- a/train.py +++ b/train.py @@ -47,7 +47,7 @@ from utils.general import (LOGGER, check_amp, check_dataset, check_file, check_git_status, check_img_size, check_requirements, check_suffix, check_yaml, colorstr, get_latest_run, increment_path, init_seeds, intersect_dicts, labels_to_class_weights, labels_to_image_weights, methods, - one_cycle, print_args, print_mutation, strip_optimizer) + one_cycle, print_args, print_mutation, strip_optimizer, yaml_save) from utils.loggers import Loggers from utils.loggers.wandb.wandb_utils import check_wandb_resume from utils.loss import ComputeLoss @@ -81,10 +81,8 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio # Save run settings if not evolve: - with open(save_dir / 'hyp.yaml', 'w') as f: - yaml.safe_dump(hyp, f, sort_keys=False) - with open(save_dir / 'opt.yaml', 'w') as f: - yaml.safe_dump(vars(opt), f, sort_keys=False) + yaml_save(save_dir / 'hyp.yaml', hyp) + yaml_save(save_dir / 'opt.yaml', vars(opt)) # Loggers data_dict = None @@ -484,7 +482,7 @@ def main(opt, callbacks=Callbacks()): if RANK in {-1, 0}: print_args(vars(opt)) check_git_status() - check_requirements(exclude=['thop']) + check_requirements() # Resume if opt.resume and not (check_wandb_resume(opt) or opt.evolve): # resume from specified or most recent last.pt diff --git a/utils/augmentations.py 
b/utils/augmentations.py
index 3f764c06ae3b..a55fefa68a76 100644
--- a/utils/augmentations.py
+++ b/utils/augmentations.py
@@ -8,15 +8,21 @@
 import cv2
 import numpy as np
+import torchvision.transforms as T
+import torchvision.transforms.functional as TF

 from utils.general import LOGGER, check_version, colorstr, resample_segments, segment2box
 from utils.metrics import bbox_ioa

+IMAGENET_MEAN = 0.485, 0.456, 0.406  # RGB mean
+IMAGENET_STD = 0.229, 0.224, 0.225  # RGB standard deviation
+

 class Albumentations:
     # YOLOv5 Albumentations class (optional, only used if package is installed)
     def __init__(self):
         self.transform = None
+        prefix = colorstr('albumentations: ')
         try:
             import albumentations as A
             check_version(A.__version__, '1.0.3', hard=True)  # version requirement
@@ -31,11 +37,11 @@ def __init__(self):
                 A.ImageCompression(quality_lower=75, p=0.0)]  # transforms
             self.transform = A.Compose(T, bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels']))

-            LOGGER.info(colorstr('albumentations: ') + ', '.join(f'{x}' for x in self.transform.transforms if x.p))
+            LOGGER.info(prefix + ', '.join(f'{x}'.replace('always_apply=False, ', '') for x in T if x.p))
         except ImportError:  # package not installed, skip
             pass
         except Exception as e:
-            LOGGER.info(colorstr('albumentations: ') + f'{e}')
+            LOGGER.info(f'{prefix}{e}')

     def __call__(self, im, labels, p=1.0):
         if self.transform and random.random() < p:
@@ -44,6 +50,18 @@ def __call__(self, im, labels, p=1.0):
         return im, labels


+def normalize(x, mean=IMAGENET_MEAN, std=IMAGENET_STD, inplace=False):
+    # Normalize RGB images x per ImageNet stats in BCHW format, i.e. x = (x - mean) / std
+    return TF.normalize(x, mean, std, inplace=inplace)
+
+
+def denormalize(x, mean=IMAGENET_MEAN, std=IMAGENET_STD):
+    # Denormalize RGB images x per ImageNet stats in BCHW format, i.e. x = x * std + mean
+    for i in range(3):
+        x[:, i] = x[:, i] * std[i] + mean[i]
+    return x
+
+
 def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5):
     # HSV color-space augmentation
     if hgain or sgain or vgain:
@@ -282,3 +300,48 @@ def box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16):
     w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
     ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps))  # aspect ratio
     return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr)  # candidates
+
+
+def classify_albumentations(augment=True,
+                            size=224,
+                            scale=(0.08, 1.0),
+                            hflip=0.5,
+                            vflip=0.0,
+                            jitter=0.4,
+                            mean=IMAGENET_MEAN,
+                            std=IMAGENET_STD,
+                            auto_aug=False):
+    # YOLOv5 classification Albumentations (optional, only used if package is installed)
+    prefix = colorstr('albumentations: ')
+    try:
+        import albumentations as A
+        from albumentations.pytorch import ToTensorV2
+        check_version(A.__version__, '1.0.3', hard=True)  # version requirement
+        if augment:  # Resize and crop
+            T = [A.RandomResizedCrop(height=size, width=size, scale=scale)]
+            if auto_aug:
+                # TODO: implement AugMix, AutoAug & RandAug in albumentations
+                LOGGER.info(f'{prefix}auto augmentations are currently not supported')
+            else:
+                if hflip > 0:
+                    T += [A.HorizontalFlip(p=hflip)]
+                if vflip > 0:
+                    T += [A.VerticalFlip(p=vflip)]
+                if jitter > 0:
+                    color_jitter = (float(jitter),) * 3  # repeat value for brightness, contrast, saturation, 0 hue
+                    T += [A.ColorJitter(*color_jitter, 0)]
+        else:  # Use fixed crop for eval set (reproducibility)
+            T = [A.SmallestMaxSize(max_size=size), A.CenterCrop(height=size, width=size)]
+        T += [A.Normalize(mean=mean, std=std), ToTensorV2()]  # Normalize and convert to Tensor
+        LOGGER.info(prefix + ', '.join(f'{x}'.replace('always_apply=False, ', '') for x in T if x.p))
+        return A.Compose(T)
+
+    except ImportError:  # package not installed, skip
+        pass
+    except Exception as e:
+        LOGGER.info(f'{prefix}{e}')
+
+
+def classify_transforms(size=224):
+    # Transforms to apply if albumentations not installed
+    return T.Compose([T.ToTensor(), T.Resize(size), T.CenterCrop(size), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)])
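# A minimal usage sketch for the transforms above (assumes a BCHW float tensor already
# scaled to [0, 1]; normalize() and denormalize() are inverses up to floating-point error):
#   import torch
#   from utils.augmentations import denormalize, normalize
#   x = torch.rand(2, 3, 224, 224)  # dummy RGB batch
#   assert torch.allclose(denormalize(normalize(x.clone())), x, atol=1e-6)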
diff --git a/utils/dataloaders.py b/utils/dataloaders.py
index 00f6413df7ad..2c04040bf25d 100755
--- a/utils/dataloaders.py
+++ b/utils/dataloaders.py
@@ -22,12 +22,14 @@
 import numpy as np
 import torch
 import torch.nn.functional as F
+import torchvision
 import yaml
 from PIL import ExifTags, Image, ImageOps
 from torch.utils.data import DataLoader, Dataset, dataloader, distributed
 from tqdm import tqdm

-from utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective
+from utils.augmentations import (Albumentations, augment_hsv, classify_albumentations, classify_transforms, copy_paste,
+                                 letterbox, mixup, random_perspective)
 from utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, check_dataset, check_requirements, check_yaml, clean_str,
                            cv2, is_colab, is_kaggle, segments2boxes, xyn2xy, xywh2xyxy, xywhn2xyxy, xyxy2xywhn)
 from utils.torch_utils import torch_distributed_zero_first
@@ -870,7 +872,7 @@ def flatten_recursive(path=DATASETS_DIR / 'coco128'):
 def extract_boxes(path=DATASETS_DIR / 'coco128'):  # from utils.dataloaders import *; extract_boxes()
     # Convert detection dataset into classification dataset, with one directory per class
     path = Path(path)  # images dir
-    shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None  # remove existing
+    shutil.rmtree(path / 'classification') if (path /
'classification').is_dir() else None # remove existing files = list(path.rglob('*.*')) n = len(files) # number of files for im_file in tqdm(files, total=n): @@ -1090,3 +1092,65 @@ def process_images(self): pass print(f'Done. All images saved to {self.im_dir}') return self.im_dir + + +# Classification dataloaders ------------------------------------------------------------------------------------------- +class ClassificationDataset(torchvision.datasets.ImageFolder): + """ + YOLOv5 Classification Dataset. + Arguments + root: Dataset path + transform: torchvision transforms, used by default + album_transform: Albumentations transforms, used if installed + """ + + def __init__(self, root, augment, imgsz, cache=False): + super().__init__(root=root) + self.torch_transforms = classify_transforms(imgsz) + self.album_transforms = classify_albumentations(augment, imgsz) if augment else None + self.cache_ram = cache is True or cache == 'ram' + self.cache_disk = cache == 'disk' + self.samples = [list(x) + [Path(x[0]).with_suffix('.npy'), None] for x in self.samples] # file, index, npy, im + + def __getitem__(self, i): + f, j, fn, im = self.samples[i] # filename, index, filename.with_suffix('.npy'), image + if self.album_transforms: + if self.cache_ram and im is None: + im = self.samples[i][3] = cv2.imread(f) + elif self.cache_disk: + if not fn.exists(): # load npy + np.save(fn.as_posix(), cv2.imread(f)) + im = np.load(fn) + else: # read image + im = cv2.imread(f) # BGR + sample = self.album_transforms(image=cv2.cvtColor(im, cv2.COLOR_BGR2RGB))["image"] + else: + sample = self.torch_transforms(self.loader(f)) + return sample, j + + +def create_classification_dataloader(path, + imgsz=224, + batch_size=16, + augment=True, + cache=False, + rank=-1, + workers=8, + shuffle=True): + # Returns Dataloader object to be used with YOLOv5 Classifier + with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP + dataset = ClassificationDataset(root=path, imgsz=imgsz, augment=augment, cache=cache) + batch_size = min(batch_size, len(dataset)) + nd = torch.cuda.device_count() + nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) + sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) + generator = torch.Generator() + generator.manual_seed(0) + return InfiniteDataLoader(dataset, + batch_size=batch_size, + shuffle=shuffle and sampler is None, + num_workers=nw, + sampler=sampler, + pin_memory=True, + worker_init_fn=seed_worker, + generator=generator) # or DataLoader(persistent_workers=True) diff --git a/utils/general.py b/utils/general.py index 2a3ce37cd853..1c525c45f649 100755 --- a/utils/general.py +++ b/utils/general.py @@ -217,7 +217,11 @@ def print_args(args: Optional[dict] = None, show_file=True, show_fcn=False): if args is None: # get args automatically args, _, _, frm = inspect.getargvalues(x) args = {k: v for k, v in frm.items() if k in args} - s = (f'{Path(file).stem}: ' if show_file else '') + (f'{fcn}: ' if show_fcn else '') + try: + file = Path(file).resolve().relative_to(ROOT).with_suffix('') + except ValueError: + file = Path(file).stem + s = (f'{file}: ' if show_file else '') + (f'{fcn}: ' if show_fcn else '') LOGGER.info(colorstr(s) + ', '.join(f'{k}={v}' for k, v in args.items())) @@ -345,7 +349,7 @@ def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=Fals @try_except def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True, cmds=()): - # Check installed 
dependencies meet requirements (pass *.txt file or list of packages) + # Check installed dependencies meet YOLOv5 requirements (pass *.txt file or list of packages) prefix = colorstr('red', 'bold', 'requirements:') check_python() # check python version if isinstance(requirements, (str, Path)): # requirements.txt file @@ -549,6 +553,18 @@ def amp_allclose(model, im): return False +def yaml_load(file='data.yaml'): + # Single-line safe yaml loading + with open(file, errors='ignore') as f: + return yaml.safe_load(f) + + +def yaml_save(file='data.yaml', data={}): + # Single-line safe yaml saving + with open(file, 'w') as f: + yaml.safe_dump({k: str(v) if isinstance(v, Path) else v for k, v in data.items()}, f, sort_keys=False) + + def url2file(url): # Convert URL to filename, i.e. https://url.com/file.txt?auth -> file.txt url = str(Path(url)).replace(':/', '://') # Pathlib turns :// -> :/ diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 0f3eceafd0db..8ec846f8cfac 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -5,6 +5,7 @@ import os import warnings +from pathlib import Path import pkg_resources as pkg import torch @@ -76,7 +77,7 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, self.logger.info(s) if not clearml: prefix = colorstr('ClearML: ') - s = f"{prefix}run 'pip install clearml' to automatically track, visualize and remotely train YOLOv5 🚀 runs in ClearML" + s = f"{prefix}run 'pip install clearml' to automatically track, visualize and remotely train YOLOv5 🚀 in ClearML" self.logger.info(s) # TensorBoard @@ -121,11 +122,8 @@ def on_train_batch_end(self, ni, model, imgs, targets, paths, plots): # Callback runs on train batch end # ni: number integrated batches (since train start) if plots: - if ni == 0: - if self.tb and not self.opt.sync_bn: # --sync known issue https://github.com/ultralytics/yolov5/issues/3754 - with warnings.catch_warnings(): - warnings.simplefilter('ignore') # suppress jit trace warning - self.tb.add_graph(torch.jit.trace(de_parallel(model), imgs[0:1], strict=False), []) + if ni == 0 and not self.opt.sync_bn and self.tb: + log_tensorboard_graph(self.tb, model, imgsz=list(imgs.shape[2:4])) if ni < 3: f = self.save_dir / f'train_batch{ni}.jpg' # filename plot_images(imgs, targets, paths, f) @@ -233,3 +231,78 @@ def on_params_update(self, params): # params: A dict containing {param: value} pairs if self.wandb: self.wandb.wandb_run.config.update(params, allow_val_change=True) + + +class GenericLogger: + """ + YOLOv5 General purpose logger for non-task specific logging + Usage: from utils.loggers import GenericLogger; logger = GenericLogger(...) 
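+        Example (illustrative values):
+            logger.log_metrics({'train/loss': 0.1}, epoch=0)  # scalar metrics keyed by epoch
+            logger.log_images('results.png')  # a path or list of image paths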
+ Arguments + opt: Run arguments + console_logger: Console logger + include: loggers to include + """ + + def __init__(self, opt, console_logger, include=('tb', 'wandb')): + # init default loggers + self.save_dir = opt.save_dir + self.include = include + self.console_logger = console_logger + if 'tb' in self.include: + prefix = colorstr('TensorBoard: ') + self.console_logger.info( + f"{prefix}Start with 'tensorboard --logdir {self.save_dir.parent}', view at http://localhost:6006/") + self.tb = SummaryWriter(str(self.save_dir)) + + if wandb and 'wandb' in self.include: + self.wandb = wandb.init(project="YOLOv5-Classifier" if opt.project == "runs/train" else opt.project, + name=None if opt.name == "exp" else opt.name, + config=opt) + else: + self.wandb = None + + def log_metrics(self, metrics_dict, epoch): + # Log metrics dictionary to all loggers + if self.tb: + for k, v in metrics_dict.items(): + self.tb.add_scalar(k, v, epoch) + + if self.wandb: + self.wandb.log(metrics_dict, step=epoch) + + def log_images(self, files, name='Images', epoch=0): + # Log images to all loggers + files = [Path(f) for f in (files if isinstance(files, (tuple, list)) else [files])] # to Path + files = [f for f in files if f.exists()] # filter by exists + + if self.tb: + for f in files: + self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats='HWC') + + if self.wandb: + self.wandb.log({name: [wandb.Image(str(f), caption=f.name) for f in files]}, step=epoch) + + def log_graph(self, model, imgsz=(640, 640)): + # Log model graph to all loggers + if self.tb: + log_tensorboard_graph(self.tb, model, imgsz) + + def log_model(self, model_path, epoch=0, metadata={}): + # Log model to all loggers + if self.wandb: + art = wandb.Artifact(name=f"run_{wandb.run.id}_model", type="model", metadata=metadata) + art.add_file(str(model_path)) + wandb.log_artifact(art) + + +def log_tensorboard_graph(tb, model, imgsz=(640, 640)): + # Log model graph to TensorBoard + try: + p = next(model.parameters()) # for device, type + imgsz = (imgsz, imgsz) if isinstance(imgsz, int) else imgsz # expand + im = torch.zeros((1, 3, *imgsz)).to(p.device).type_as(p) # input image + with warnings.catch_warnings(): + warnings.simplefilter('ignore') # suppress jit trace warning + tb.add_graph(torch.jit.trace(de_parallel(model), im, strict=False), []) + except Exception: + print('WARNING: TensorBoard graph visualization failure') diff --git a/utils/plots.py b/utils/plots.py index d050f5d36aba..7417308c4d82 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -388,6 +388,35 @@ def plot_labels(labels, names=(), save_dir=Path('')): plt.close() +def imshow_cls(im, labels=None, pred=None, names=None, nmax=25, verbose=False, f=Path('images.jpg')): + # Show classification image grid with labels (optional) and predictions (optional) + from utils.augmentations import denormalize + + names = names or [f'class{i}' for i in range(1000)] + blocks = torch.chunk(denormalize(im.clone()).cpu().float(), len(im), + dim=0) # select batch index 0, block by channels + n = min(len(blocks), nmax) # number of plots + m = min(8, round(n ** 0.5)) # 8 x 8 default + fig, ax = plt.subplots(math.ceil(n / m), m) # 8 rows x n/8 cols + ax = ax.ravel() if m > 1 else [ax] + # plt.subplots_adjust(wspace=0.05, hspace=0.05) + for i in range(n): + ax[i].imshow(blocks[i].squeeze().permute((1, 2, 0)).numpy().clip(0.0, 1.0)) + ax[i].axis('off') + if labels is not None: + s = names[labels[i]] + (f'—{names[pred[i]]}' if pred is not None else '') + ax[i].set_title(s, fontsize=8, 
verticalalignment='top') + plt.savefig(f, dpi=300, bbox_inches='tight') + plt.close() + if verbose: + LOGGER.info(f"Saving {f}") + if labels is not None: + LOGGER.info('True: ' + ' '.join(f'{names[i]:3s}' for i in labels[:nmax])) + if pred is not None: + LOGGER.info('Predicted:' + ' '.join(f'{names[i]:3s}' for i in pred[:nmax])) + return f + + def plot_evolve(evolve_csv='path/to/evolve.csv'): # from utils.plots import *; plot_evolve() # Plot evolve.csv hyp evolution results evolve_csv = Path(evolve_csv) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 1ceb0aa346e9..1cdbe20f8670 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -42,6 +42,16 @@ def decorate(fn): return decorate +def smartCrossEntropyLoss(label_smoothing=0.0): + # Returns nn.CrossEntropyLoss with label smoothing enabled for torch>=1.10.0 + if check_version(torch.__version__, '1.10.0'): + return nn.CrossEntropyLoss(label_smoothing=label_smoothing) # loss function + else: + if label_smoothing > 0: + LOGGER.warning(f'WARNING: label smoothing {label_smoothing} requires torch>=1.10.0') + return nn.CrossEntropyLoss() # loss function + + def smart_DDP(model): # Model DDP creation with checks assert not check_version(torch.__version__, '1.12.0', pinned=True), \ @@ -53,6 +63,28 @@ def smart_DDP(model): return DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK) +def reshape_classifier_output(model, n=1000): + # Update a TorchVision classification model to class count 'n' if required + from models.common import Classify + name, m = list((model.model if hasattr(model, 'model') else model).named_children())[-1] # last module + if isinstance(m, Classify): # YOLOv5 Classify() head + if m.linear.out_features != n: + m.linear = nn.Linear(m.linear.in_features, n) + elif isinstance(m, nn.Linear): # ResNet, EfficientNet + if m.out_features != n: + setattr(model, name, nn.Linear(m.in_features, n)) + elif isinstance(m, nn.Sequential): + types = [type(x) for x in m] + if nn.Linear in types: + i = types.index(nn.Linear) # nn.Linear index + if m[i].out_features != n: + m[i] = nn.Linear(m[i].in_features, n) + elif nn.Conv2d in types: + i = types.index(nn.Conv2d) # nn.Conv2d index + if m[i].out_channels != n: + m[i] = nn.Conv2d(m[i].in_channels, n, m[i].kernel_size, m[i].stride, bias=m[i].bias is not None) + + @contextmanager def torch_distributed_zero_first(local_rank: int): # Decorator to make all processes in distributed training wait for each local_master to do something @@ -117,14 +149,13 @@ def time_sync(): def profile(input, ops, n=10, device=None): - # YOLOv5 speed/memory/FLOPs profiler - # - # Usage: - # input = torch.randn(16, 3, 640, 640) - # m1 = lambda x: x * torch.sigmoid(x) - # m2 = nn.SiLU() - # profile(input, [m1, m2], n=100) # profile over 100 iterations - + """ YOLOv5 speed/memory/FLOPs profiler + Usage: + input = torch.randn(16, 3, 640, 640) + m1 = lambda x: x * torch.sigmoid(x) + m2 = nn.SiLU() + profile(input, [m1, m2], n=100) # profile over 100 iterations + """ results = [] if not isinstance(device, torch.device): device = select_device(device) @@ -313,6 +344,18 @@ def smart_optimizer(model, name='Adam', lr=0.001, momentum=0.9, decay=1e-5): return optimizer +def smart_hub_load(repo='ultralytics/yolov5', model='yolov5s', **kwargs): + # YOLOv5 torch.hub.load() wrapper with smart error/issue handling + if check_version(torch.__version__, '1.9.1'): + kwargs['skip_validation'] = True # validation causes GitHub API rate limit errors + if check_version(torch.__version__, '1.12.0'): + kwargs['trust_repo'] = True
# argument required starting in torch 1.12 + try: + return torch.hub.load(repo, model, **kwargs) + except Exception: + return torch.hub.load(repo, model, force_reload=True, **kwargs) + + def smart_resume(ckpt, optimizer, ema=None, weights='yolov5s.pt', epochs=300, resume=True): # Resume training from a partially trained checkpoint best_fitness = 0.0 From e61756910758f59406255269921e55992ca0b64b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 17 Aug 2022 15:33:37 +0200 Subject: [PATCH 1311/1976] Improve classification comments (#8997) --- .github/README_cn.md | 10 +++++----- README.md | 10 +++++----- classify/predict.py | 2 +- classify/train.py | 4 +++- classify/val.py | 3 ++- 5 files changed, 16 insertions(+), 13 deletions(-) diff --git a/.github/README_cn.md b/.github/README_cn.md index 86b502df61f7..816adf6b0449 100644 --- a/.github/README_cn.md +++ b/.github/README_cn.md @@ -269,7 +269,7 @@ We trained YOLOv5-cls classification models on ImageNet for 90 epochs using a 4x
Table Notes (click to expand) -- All checkpoints are trained to 90 epochs with SGD optimizer with lr0=0.001 at image size 224 and all default settings. Runs logged to https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2. +- All checkpoints are trained to 90 epochs with SGD optimizer with `lr0=0.001` and `weight_decay=5e-5` at image size 224 and all default settings.
Runs logged to https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2 - **Accuracy** values are for single-model single-scale on [ImageNet-1k](https://www.image-net.org/index.php) dataset.
Reproduce by `python classify/val.py --data ../datasets/imagenet --img 224` - **Speed** averaged over 100 inference images using a Google [Colab Pro](https://colab.research.google.com/signup) V100 High-RAM instance.
Reproduce by `python classify/val.py --data ../datasets/imagenet --img 224 --batch 1` - **Export** to ONNX at FP32 and TensorRT at FP16 done with `export.py`.
Reproduce by `python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224` @@ -291,14 +291,14 @@ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/trai ``` ### Val -Validate accuracy on a pretrained model. To validate YOLOv5s-cls accuracy on ImageNet. +Validate YOLOv5m-cls accuracy on ImageNet-1k dataset: ```bash bash data/scripts/get_imagenet.sh --val # download ImageNet val split (6.3G, 50000 images) -python classify/val.py --weights yolov5s-cls.pt --data ../datasets/imagenet --img 224 +python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224 # validate ``` ### Predict -Run a classification prediction on an image. +Use pretrained YOLOv5s-cls.pt to predict bus.jpg: ```bash python classify/predict.py --weights yolov5s-cls.pt --data data/images/bus.jpg ``` @@ -307,7 +307,7 @@ model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s-cls.pt') # load ``` ### Export -Export a group of trained YOLOv5-cls, ResNet and EfficientNet models to ONNX and TensorRT. +Export a group of trained YOLOv5s-cls, ResNet and EfficientNet models to ONNX and TensorRT: ```bash python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --include onnx engine --img 224 ``` diff --git a/README.md b/README.md index b368d1d6e264..7335394402da 100644 --- a/README.md +++ b/README.md @@ -278,7 +278,7 @@ We trained YOLOv5-cls classification models on ImageNet for 90 epochs using a 4x
Table Notes (click to expand) -- All checkpoints are trained to 90 epochs with SGD optimizer with lr0=0.001 at image size 224 and all default settings. Runs logged to https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2. +- All checkpoints are trained to 90 epochs with SGD optimizer with `lr0=0.001` and `weight_decay=5e-5` at image size 224 and all default settings.
Runs logged to https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2 - **Accuracy** values are for single-model single-scale on [ImageNet-1k](https://www.image-net.org/index.php) dataset.
Reproduce by `python classify/val.py --data ../datasets/imagenet --img 224` - **Speed** averaged over 100 inference images using a Google [Colab Pro](https://colab.research.google.com/signup) V100 High-RAM instance.
Reproduce by `python classify/val.py --data ../datasets/imagenet --img 224 --batch 1` - **Export** to ONNX at FP32 and TensorRT at FP16 done with `export.py`.
Reproduce by `python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224` @@ -300,14 +300,14 @@ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/trai ``` ### Val -Validate accuracy on a pretrained model. To validate YOLOv5s-cls accuracy on ImageNet. +Validate YOLOv5m-cls accuracy on ImageNet-1k dataset: ```bash bash data/scripts/get_imagenet.sh --val # download ImageNet val split (6.3G, 50000 images) -python classify/val.py --weights yolov5s-cls.pt --data ../datasets/imagenet --img 224 +python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224 # validate ``` ### Predict -Run a classification prediction on an image. +Use pretrained YOLOv5s-cls.pt to predict bus.jpg: ```bash python classify/predict.py --weights yolov5s-cls.pt --data data/images/bus.jpg ``` @@ -316,7 +316,7 @@ model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s-cls.pt') # load ``` ### Export -Export a group of trained YOLOv5-cls, ResNet and EfficientNet models to ONNX and TensorRT. +Export a group of trained YOLOv5s-cls, ResNet and EfficientNet models to ONNX and TensorRT: ```bash python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --include onnx engine --img 224 ``` diff --git a/classify/predict.py b/classify/predict.py index 419830d43952..4247e3c8e7fa 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -3,7 +3,7 @@ Run classification inference on images Usage: - $ python classify/predict.py --weights yolov5s-cls.pt --source im.jpg + $ python classify/predict.py --weights yolov5s-cls.pt --source data/images/bus.jpg """ import argparse diff --git a/classify/train.py b/classify/train.py index f2b465567446..b85f14236039 100644 --- a/classify/train.py +++ b/classify/train.py @@ -2,8 +2,10 @@ """ Train a YOLOv5 classifier model on a classification dataset Datasets: --data mnist, fashion-mnist, cifar10, cifar100, imagenette, imagewoof, imagenet, or 'path/to/custom/dataset' +YOLOv5-cls models: --model yolov5n-cls.pt, yolov5s-cls.pt, yolov5m-cls.pt, yolov5l-cls.pt, yolov5x-cls.pt +Torchvision models: --model resnet50, efficientnet_b0, etc. 
See https://pytorch.org/vision/stable/models.html -Usage: +Usage - Single-GPU and Multi-GPU DDP $ python classify/train.py --model yolov5s-cls.pt --data cifar100 --epochs 5 --img 128 $ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3 """ diff --git a/classify/val.py b/classify/val.py index 0930ba8c9c51..9d965d9f1fdc 100644 --- a/classify/val.py +++ b/classify/val.py @@ -3,7 +3,8 @@ Validate a classification model on a dataset Usage: - $ python classify/val.py --weights yolov5s-cls.pt --data ../datasets/imagenet + $ bash data/scripts/get_imagenet.sh --val # download ImageNet val split (6.3G, 50000 images) + $ python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224 # validate """ import argparse From 7c9486e16f6a2c35bf5cfca892898a11a81009fc Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 17 Aug 2022 15:48:17 +0200 Subject: [PATCH 1312/1976] Update `attempt_download(release='v6.2')` (#8998) * Update attempt_download(release='v6.2') Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- README.md | 20 ++++++++++---------- utils/downloads.py | 8 ++++---- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/README.md b/README.md index 7335394402da..89e4f1199cde 100644 --- a/README.md +++ b/README.md @@ -224,17 +224,17 @@ Get started in seconds with our verified environments. Click each icon below for | Model | size
(pixels) | mAPval
0.5:0.95 | mAPval
0.5 | Speed
CPU b1
(ms) | Speed
V100 b1
(ms) | Speed
V100 b32
(ms) | params
(M) | FLOPs
@640 (B) | |------------------------------------------------------------------------------------------------------|-----------------------|-------------------------|--------------------|------------------------------|-------------------------------|--------------------------------|--------------------|------------------------| -| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** | -| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 | -| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 | -| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 | -| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 | +| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** | +| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 | +| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 | +| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 | +| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 | | | | | | | | | | | -| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 | -| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | -| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | -| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | -| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5x6.pt)
+ [TTA][TTA] | 1280
1536 | 55.0
**55.8** | 72.7
**72.7** | 3136
- | 26.2
- | 19.4
- | 140.7
- | 209.8
- | +| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 | +| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | +| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | +| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | +| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x6.pt)
+ [TTA][TTA] | 1280
1536 | 55.0
**55.8** | 72.7
**72.7** | 3136
- | 26.2
- | 19.4
- | 140.7
- | 209.8
- |
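The `utils/downloads.py` hunk just below repoints the default `attempt_download()` release at the v6.2 assets linked in this table. A minimal sketch of what the new default means in practice (a hypothetical local call, not part of the patch):

```python
# Sketch: with release='v6.2' now the default, a weights file that is missing
# locally is fetched from the v6.2 GitHub release assets, which include the
# new classification checkpoints.
from utils.downloads import attempt_download

weights = attempt_download('yolov5s-cls.pt')  # downloads from release v6.2 if not found locally
```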
Table Notes (click to expand) diff --git a/utils/downloads.py b/utils/downloads.py index 9d4780ad28b1..c4d4a85c38ae 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -54,14 +54,14 @@ def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''): LOGGER.info('') -def attempt_download(file, repo='ultralytics/yolov5', release='v6.1'): - # Attempt file download from GitHub release assets if not found locally. release = 'latest', 'v6.1', etc. +def attempt_download(file, repo='ultralytics/yolov5', release='v6.2'): + # Attempt file download from GitHub release assets if not found locally. release = 'latest', 'v6.2', etc. from utils.general import LOGGER def github_assets(repository, version='latest'): - # Return GitHub repo tag (i.e. 'v6.1') and assets (i.e. ['yolov5s.pt', 'yolov5m.pt', ...]) + # Return GitHub repo tag (i.e. 'v6.2') and assets (i.e. ['yolov5s.pt', 'yolov5m.pt', ...]) if version != 'latest': - version = f'tags/{version}' # i.e. tags/v6.1 + version = f'tags/{version}' # i.e. tags/v6.2 response = requests.get(f'https://api.github.com/repos/{repository}/releases/{version}').json() # github api return response['tag_name'], [x['name'] for x in response['assets']] # tag, assets From fe809b8dad5236d86d5acbe047b5e0e6895b2b8a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 17 Aug 2022 16:18:06 +0200 Subject: [PATCH 1313/1976] Created using Colaboratory --- tutorial.ipynb | 304 ++++++++++++++++++++++++------------------------- 1 file changed, 152 insertions(+), 152 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 61641bab1833..1438924e4112 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -17,7 +17,7 @@ "accelerator": "GPU", "widgets": { "application/vnd.jupyter.widget-state+json": { - "c31d2039ccf74c22b67841f4877d1186": { + "57c562894aed45cd9a107d0455e3e3f4": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "model_module_version": "1.5.0", @@ -32,14 +32,14 @@ "_view_name": "HBoxView", "box_style": "", "children": [ - "IPY_MODEL_d4bba1727c714d94ad58a72bffa07c4c", - "IPY_MODEL_9aeff9f1780b45f892422fdc96e56913", - "IPY_MODEL_bf55a7c71d074d3fa88b10b997820825" + "IPY_MODEL_040d53c6cc924350bcb656cd21a7c713", + "IPY_MODEL_e029890942a74c098408ce5a9a566d51", + "IPY_MODEL_8fb991c03e434566a4297b6ab9446f89" ], - "layout": "IPY_MODEL_d8b66044e2fb4f5b916696834d880c81" + "layout": "IPY_MODEL_a9a376923a7742d89fb335db709c7a7e" } }, - "d4bba1727c714d94ad58a72bffa07c4c": { + "040d53c6cc924350bcb656cd21a7c713": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", @@ -54,13 +54,13 @@ "_view_name": "HTMLView", "description": "", "description_tooltip": null, - "layout": "IPY_MODEL_102e1deda239436fa72751c58202fa0f", + "layout": "IPY_MODEL_8b4276ac834c4735bf60ee9b761b9962", "placeholder": "​", - "style": "IPY_MODEL_4fd4431ced6c42368e18424912b877e4", + "style": "IPY_MODEL_52cc8da75b724198856617247541cb1e", "value": "100%" } }, - "9aeff9f1780b45f892422fdc96e56913": { + "e029890942a74c098408ce5a9a566d51": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "model_module_version": "1.5.0", @@ -76,15 +76,15 @@ "bar_style": "success", "description": "", "description_tooltip": null, - "layout": "IPY_MODEL_cdd709c4f40941bea1b2053523c9fac8", + "layout": "IPY_MODEL_b6652f46480243c4adf60e6440043d6f", "max": 818322941, "min": 0, "orientation": "horizontal", - "style": "IPY_MODEL_a1ef2d8de2b741c78ca5d938e2ddbcdf", + "style": "IPY_MODEL_e502754177ff4ea8abf82a6e9ac77a4a", "value": 
818322941 } }, - "bf55a7c71d074d3fa88b10b997820825": { + "8fb991c03e434566a4297b6ab9446f89": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", @@ -99,13 +99,13 @@ "_view_name": "HTMLView", "description": "", "description_tooltip": null, - "layout": "IPY_MODEL_0dbce99bb6184238842cbec0587d564a", + "layout": "IPY_MODEL_447398becdb04836b5ffb5915318db07", "placeholder": "​", - "style": "IPY_MODEL_91ff5f93f2a24c5790ab29e347965946", - "value": " 780M/780M [01:10<00:00, 10.5MB/s]" + "style": "IPY_MODEL_2fddcb27ad4a4caa81ff51111f8d0ed6", + "value": " 780M/780M [01:17<00:00, 12.3MB/s]" } }, - "d8b66044e2fb4f5b916696834d880c81": { + "a9a376923a7742d89fb335db709c7a7e": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -157,7 +157,7 @@ "width": null } }, - "102e1deda239436fa72751c58202fa0f": { + "8b4276ac834c4735bf60ee9b761b9962": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -209,7 +209,7 @@ "width": null } }, - "4fd4431ced6c42368e18424912b877e4": { + "52cc8da75b724198856617247541cb1e": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", @@ -224,7 +224,7 @@ "description_width": "" } }, - "cdd709c4f40941bea1b2053523c9fac8": { + "b6652f46480243c4adf60e6440043d6f": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -276,7 +276,7 @@ "width": null } }, - "a1ef2d8de2b741c78ca5d938e2ddbcdf": { + "e502754177ff4ea8abf82a6e9ac77a4a": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "model_module_version": "1.5.0", @@ -292,7 +292,7 @@ "description_width": "" } }, - "0dbce99bb6184238842cbec0587d564a": { + "447398becdb04836b5ffb5915318db07": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -344,7 +344,7 @@ "width": null } }, - "91ff5f93f2a24c5790ab29e347965946": { + "2fddcb27ad4a4caa81ff51111f8d0ed6": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", @@ -404,7 +404,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "185d0979-edcd-4860-e6fb-b8a27dbf5096" + "outputId": "e0f693e4-413b-4cc8-ae7e-91537da370b0" }, "source": [ "!git clone https://github.com/ultralytics/yolov5 # clone\n", @@ -415,13 +415,13 @@ "import utils\n", "display = utils.notebook_init() # checks" ], - "execution_count": null, + "execution_count": 1, "outputs": [ { "output_type": "stream", "name": "stderr", "text": [ - "YOLOv5 🚀 v6.1-370-g20f1b7e Python-3.7.13 torch-1.12.0+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n" + "YOLOv5 🚀 v6.2-2-g7c9486e Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n" ] }, { @@ -461,29 +461,29 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "4b13989f-32a4-4ef0-b403-06ff3aac255c" + "outputId": "941d625b-01a1-4f1b-dfd2-d9ef1c945715" }, "source": [ "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images\n", - "#display.Image(filename='runs/detect/exp/zidane.jpg', width=600)" + "# display.Image(filename='runs/detect/exp/zidane.jpg', width=600)" ], - "execution_count": null, + "execution_count": 2, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "\u001b[34m\u001b[1mdetect: \u001b[0mweights=['yolov5s.pt'], source=data/images, data=data/coco128.yaml, 
imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/detect, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False\n", - "YOLOv5 🚀 v6.1-370-g20f1b7e Python-3.7.13 torch-1.12.0+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", + "YOLOv5 🚀 v6.2-2-g7c9486e Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", "\n", - "Downloading https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5s.pt to yolov5s.pt...\n", - "100% 14.1M/14.1M [00:00<00:00, 53.9MB/s]\n", + "Downloading https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt to yolov5s.pt...\n", + "100% 14.1M/14.1M [00:00<00:00, 50.5MB/s]\n", "\n", "Fusing layers... \n", "YOLOv5s summary: 213 layers, 7225885 parameters, 0 gradients\n", - "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, Done. (0.016s)\n", - "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, Done. (0.021s)\n", - "Speed: 0.6ms pre-process, 18.6ms inference, 25.0ms NMS per image at shape (1, 3, 640, 640)\n", + "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, Done. (0.014s)\n", + "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, Done. (0.020s)\n", + "Speed: 0.6ms pre-process, 17.0ms inference, 20.2ms NMS per image at shape (1, 3, 640, 640)\n", "Results saved to \u001b[1mruns/detect/exp\u001b[0m\n" ] } @@ -527,27 +527,27 @@ "base_uri": "https://localhost:8080/", "height": 49, "referenced_widgets": [ - "c31d2039ccf74c22b67841f4877d1186", - "d4bba1727c714d94ad58a72bffa07c4c", - "9aeff9f1780b45f892422fdc96e56913", - "bf55a7c71d074d3fa88b10b997820825", - "d8b66044e2fb4f5b916696834d880c81", - "102e1deda239436fa72751c58202fa0f", - "4fd4431ced6c42368e18424912b877e4", - "cdd709c4f40941bea1b2053523c9fac8", - "a1ef2d8de2b741c78ca5d938e2ddbcdf", - "0dbce99bb6184238842cbec0587d564a", - "91ff5f93f2a24c5790ab29e347965946" + "57c562894aed45cd9a107d0455e3e3f4", + "040d53c6cc924350bcb656cd21a7c713", + "e029890942a74c098408ce5a9a566d51", + "8fb991c03e434566a4297b6ab9446f89", + "a9a376923a7742d89fb335db709c7a7e", + "8b4276ac834c4735bf60ee9b761b9962", + "52cc8da75b724198856617247541cb1e", + "b6652f46480243c4adf60e6440043d6f", + "e502754177ff4ea8abf82a6e9ac77a4a", + "447398becdb04836b5ffb5915318db07", + "2fddcb27ad4a4caa81ff51111f8d0ed6" ] }, - "outputId": "a9004b06-37a6-41ed-a1f2-ac956f3963b3" + "outputId": "d593b41a-55e7-48a5-e285-5df449edc8c0" }, "source": [ "# Download COCO val\n", "torch.hub.download_url_to_file('https://ultralytics.com/assets/coco2017val.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../datasets && rm tmp.zip" ], - "execution_count": null, + "execution_count": 3, "outputs": [ { "output_type": "display_data", @@ -558,7 +558,7 @@ "application/vnd.jupyter.widget-view+json": { "version_major": 2, "version_minor": 0, - "model_id": "c31d2039ccf74c22b67841f4877d1186" + "model_id": "57c562894aed45cd9a107d0455e3e3f4" } }, "metadata": {} @@ -572,48 +572,48 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "c0f29758-4ec8-4def-893d-0efd6ed5b7f4" + "outputId": "701132a6-9ca8-4e1f-c89f-5d38893a6fc4" }, "source": [ "# Run YOLOv5x on COCO val\n", "!python val.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65 --half" ], - "execution_count": null, + "execution_count": 4, 
"outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "\u001b[34m\u001b[1mval: \u001b[0mdata=/content/yolov5/data/coco.yaml, weights=['yolov5x.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.65, task=val, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=True, project=runs/val, name=exp, exist_ok=False, half=True, dnn=False\n", - "YOLOv5 🚀 v6.1-370-g20f1b7e Python-3.7.13 torch-1.12.0+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", + "YOLOv5 🚀 v6.2-2-g7c9486e Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", "\n", - "Downloading https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5x.pt to yolov5x.pt...\n", - "100% 166M/166M [00:35<00:00, 4.97MB/s]\n", + "Downloading https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x.pt to yolov5x.pt...\n", + "100% 166M/166M [00:11<00:00, 15.1MB/s]\n", "\n", "Fusing layers... \n", "YOLOv5x summary: 444 layers, 86705005 parameters, 0 gradients\n", "Downloading https://ultralytics.com/assets/Arial.ttf to /root/.config/Ultralytics/Arial.ttf...\n", - "100% 755k/755k [00:00<00:00, 49.4MB/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupt: 100% 5000/5000 [00:00<00:00, 10716.86it/s]\n", + "100% 755k/755k [00:00<00:00, 48.6MB/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupt: 100% 5000/5000 [00:00<00:00, 10889.87it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: /content/datasets/coco/val2017.cache\n", - " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:08<00:00, 2.28it/s]\n", + " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:05<00:00, 2.38it/s]\n", " all 5000 36335 0.743 0.625 0.683 0.504\n", - "Speed: 0.1ms pre-process, 4.6ms inference, 1.2ms NMS per image at shape (32, 3, 640, 640)\n", + "Speed: 0.1ms pre-process, 4.7ms inference, 1.0ms NMS per image at shape (32, 3, 640, 640)\n", "\n", "Evaluating pycocotools mAP... 
saving runs/val/exp/yolov5x_predictions.json...\n", "loading annotations into memory...\n", - "Done (t=0.41s)\n", + "Done (t=0.39s)\n", "creating index...\n", "index created!\n", "Loading and preparing results...\n", - "DONE (t=5.64s)\n", + "DONE (t=5.53s)\n", "creating index...\n", "index created!\n", "Running per image evaluation...\n", "Evaluate annotation type *bbox*\n", - "DONE (t=72.86s).\n", + "DONE (t=73.01s).\n", "Accumulating evaluation results...\n", - "DONE (t=14.20s).\n", + "DONE (t=15.27s).\n", " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.506\n", " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.688\n", " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.549\n", @@ -745,13 +745,13 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "bce1b4bd-1a14-4c07-aebd-6c11e91ad24b" + "outputId": "50a9318f-d438-41d5-db95-928f1842c057" }, "source": [ "# Train YOLOv5s on COCO128 for 3 epochs\n", "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache" ], - "execution_count": null, + "execution_count": 5, "outputs": [ { "output_type": "stream", @@ -759,17 +759,17 @@ "text": [ "\u001b[34m\u001b[1mtrain: \u001b[0mweights=yolov5s.pt, cfg=, data=coco128.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=-1, artifact_alias=latest\n", "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 🚀 v6.1-370-g20f1b7e Python-3.7.13 torch-1.12.0+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", + "YOLOv5 🚀 v6.2-2-g7c9486e Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", "\n", "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n", "\u001b[34m\u001b[1mWeights & Biases: \u001b[0mrun 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs in Weights & Biases\n", - "\u001b[34m\u001b[1mClearML: \u001b[0mrun 'pip install clearml' to automatically track, visualize and remotely train YOLOv5 🚀 runs in ClearML\n", + "\u001b[34m\u001b[1mClearML: \u001b[0mrun 'pip install clearml' to automatically track, visualize and remotely train YOLOv5 🚀 in ClearML\n", "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n", "\n", "Dataset not found ⚠️, missing paths ['/content/datasets/coco128/images/train2017']\n", "Downloading https://ultralytics.com/assets/coco128.zip to coco128.zip...\n", - "100% 6.66M/6.66M [00:00<00:00, 75.2MB/s]\n", - "Dataset download success ✅ (0.7s), saved to \u001b[1m/content/datasets\u001b[0m\n", + "100% 6.66M/6.66M [00:00<00:00, 12.4MB/s]\n", + "Dataset download success ✅ (1.3s), saved to 
\u001b[1m/content/datasets\u001b[0m\n", "\n", " from n params module arguments \n", " 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2] \n", @@ -802,12 +802,12 @@ "Transferred 349/349 items from yolov5s.pt\n", "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n", "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n", - "\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(always_apply=False, p=0.01, blur_limit=(3, 7)), MedianBlur(always_apply=False, p=0.01, blur_limit=(3, 7)), ToGray(always_apply=False, p=0.01), CLAHE(always_apply=False, p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '/content/datasets/coco128/labels/train2017' images and labels...128 found, 0 missing, 2 empty, 0 corrupt: 100% 128/128 [00:00<00:00, 7926.40it/s]\n", + "\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '/content/datasets/coco128/labels/train2017' images and labels...128 found, 0 missing, 2 empty, 0 corrupt: 100% 128/128 [00:00<00:00, 8516.89it/s]\n", "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /content/datasets/coco128/labels/train2017.cache\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 975.81it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 1043.44it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupt: 100% 128/128 [00:00 Date: Wed, 17 Aug 2022 17:50:32 +0200 Subject: [PATCH 1314/1976] Update README_cn.md (#9001) Includes v6.2 updates Signed-off-by: KieraMengru0907 <108015280+KieraMengru0907@users.noreply.github.com> Signed-off-by: KieraMengru0907 <108015280+KieraMengru0907@users.noreply.github.com> --- .github/README_cn.md | 65 +++++++++++++++++++++++--------------------- 1 file changed, 34 insertions(+), 31 deletions(-) diff --git a/.github/README_cn.md b/.github/README_cn.md index 816adf6b0449..46aafd86ec9b 100644 --- a/.github/README_cn.md +++ b/.github/README_cn.md @@ -130,19 +130,22 @@ python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 12
教程 -- [训练自定义数据](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data) 🚀 推荐 -- [获得最佳训练效果的技巧](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results) ☘️ 推荐 -- [使用 Weights & Biases 记录实验](https://github.com/ultralytics/yolov5/issues/1289) 🌟 新 -- [Roboflow:数据集、标签和主动学习](https://github.com/ultralytics/yolov5/issues/4975) 🌟 新 +- [训练自定义数据集](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data) 🚀 推荐 +- [获得最佳训练效果的技巧](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results) ☘️ + 推荐 - [多GPU训练](https://github.com/ultralytics/yolov5/issues/475) -- [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36) ⭐ 新 -- [TFLite, ONNX, CoreML, TensorRT 导出](https://github.com/ultralytics/yolov5/issues/251) 🚀 +- [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36) 🌟 新 +- [TFLite, ONNX, CoreML, TensorRT 输出](https://github.com/ultralytics/yolov5/issues/251) 🚀 - [测试时数据增强 (TTA)](https://github.com/ultralytics/yolov5/issues/303) - [模型集成](https://github.com/ultralytics/yolov5/issues/318) - [模型剪枝/稀疏性](https://github.com/ultralytics/yolov5/issues/304) - [超参数进化](https://github.com/ultralytics/yolov5/issues/607) -- [带有冻结层的迁移学习](https://github.com/ultralytics/yolov5/issues/1314) ⭐ 新 -- [架构概要](https://github.com/ultralytics/yolov5/issues/6998) ⭐ 新 +- [带有冻结层的迁移学习](https://github.com/ultralytics/yolov5/issues/1314) +- [架构概要](https://github.com/ultralytics/yolov5/issues/6998) 🌟 新 +- [使用Weights & Biases 记录实验](https://github.com/ultralytics/yolov5/issues/1289) +- [Roboflow:数据集,标签和主动学习](https://github.com/ultralytics/yolov5/issues/4975) 🌟 新 +- [使用ClearML 记录实验](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) 🌟 新 +- [Deci 平台](https://github.com/ultralytics/yolov5/wiki/Deci-Platform) 🌟 新
@@ -186,7 +189,7 @@ python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 12 |Deci ⭐ NEW|ClearML ⭐ NEW|Roboflow|Weights & Biases |:-:|:-:|:-:|:-:| -|Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) |Automatically track and visualize all your YOLOv5 training runs in the cloud with [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme) +|在[Deci](https://bit.ly/yolov5-deci-platform)一键自动编译和量化YOLOv5以提高推理性能|使用[ClearML](https://cutt.ly/yolov5-readme-clearml) (开源!)自动追踪,可视化,以及远程训练YOLOv5|标记并将您的自定义数据直接导出到YOLOv5后,用[Roboflow](https://roboflow.com/?ref=ultralytics)进行训练 |通过[Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme)自动跟踪以及可视化你在云端所有的YOLOv5训练运行情况 ##
为什么选择 YOLOv5
@@ -209,7 +212,7 @@ python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 12 ### 预训练检查点 -| Model | size
(pixels) | mAPval
0.5:0.95 | mAPval
0.5 | Speed
CPU b1
(ms) | Speed
V100 b1
(ms) | Speed
V100 b32
(ms) | params
(M) | FLOPs
@640 (B) | +| 模型 | 规模
(像素) | mAP验证
0.5:0.95 | mAP验证
0.5 | 速度
CPU b1
(ms) | 速度
V100 b1
(ms) | 速度
V100 b32
(ms) | 参数
(M) | 浮点运算
@640 (B) | |------------------------------------------------------------------------------------------------------|-----------------------|-------------------------|--------------------|------------------------------|-------------------------------|--------------------------------|--------------------|------------------------| | [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** | | [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 | @@ -237,18 +240,18 @@ python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 12
-##
Classification ⭐ NEW
+##
分类 ⭐ 新
-YOLOv5 [release v6.2](https://github.com/ultralytics/yolov5/releases) brings support for classification model training, validation, prediction and export! We've made training classifier models super simple. Click below to get started. +YOLOv5发布的[v6.2版本](https://github.com/ultralytics/yolov5/releases) 支持训练,验证,预测和输出分类模型!这使得训练分类器模型非常简单。点击下面开始尝试!
- Classification Checkpoints (click to expand) + 分类检查点 (点击展开)
-We trained YOLOv5-cls classification models on ImageNet for 90 epochs using a 4xA100 instance, and we trained ResNet and EfficientNet models alongside with the same default training settings to compare. We exported all models to ONNX FP32 for CPU speed tests and to TensorRT FP16 for GPU speed tests. We ran all speed tests on Google [Colab Pro](https://colab.research.google.com/signup) for easy reproducibility. +我们在ImageNet上使用了4xA100的实例训练YOLOv5-cls分类模型90个epochs,并以相同的默认设置同时训练了ResNet和EfficientNet模型来进行比较。我们将所有的模型导出到ONNX FP32进行CPU速度测试,又导出到TensorRT FP16进行GPU速度测试。最后,为了方便重现,我们在[Google Colab Pro](https://colab.research.google.com/signup)上进行了所有的速度测试。 -| Model | size
(pixels) | acc
top1 | acc
top5 | Training
90 epochs
4xA100 (hours) | Speed
ONNX CPU
(ms) | Speed
TensorRT V100
(ms) | params
(M) | FLOPs
@224 (B) | +| 模型 | 规模
(像素) | 准确度
第一 | 准确度
前五 | 训练
90 epochs
4xA100 (小时) | 速度
ONNX CPU
(ms) | 速度
TensorRT V100
(ms) | 参数
(M) | 浮点运算
@224 (B) | |----------------------------------------------------------------------------------------------------|-----------------------|------------------|------------------|----------------------------------------------|--------------------------------|-------------------------------------|--------------------|------------------------| | [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** | | [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 | @@ -267,38 +270,38 @@ We trained YOLOv5-cls classification models on ImageNet for 90 epochs using a 4x | [EfficientNet_b3](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 |
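The ResNet and EfficientNet rows above are stock TorchVision backbones; the `reshape_classifier_output()` helper added to `utils/torch_utils.py` earlier in this series exists precisely to re-head such models to a custom class count. A minimal sketch (assuming a recent torchvision with the `weights=` API):

```python
# Sketch: re-heading a TorchVision classifier. For resnet50 the last named
# child is the nn.Linear 'fc' module, so the isinstance(m, nn.Linear) branch
# replaces it with a fresh 10-class head.
import torchvision
from utils.torch_utils import reshape_classifier_output

model = torchvision.models.resnet50(weights=None)  # 1000-class ImageNet head
reshape_classifier_output(model, n=10)             # fc becomes nn.Linear(2048, 10)
assert model.fc.out_features == 10
```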
- Table Notes (click to expand) + 表格注释 (点击扩展) -- All checkpoints are trained to 90 epochs with SGD optimizer with `lr0=0.001` and `weight_decay=5e-5` at image size 224 and all default settings.
Runs logged to https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2 -- **Accuracy** values are for single-model single-scale on [ImageNet-1k](https://www.image-net.org/index.php) dataset.
Reproduce by `python classify/val.py --data ../datasets/imagenet --img 224` -- **Speed** averaged over 100 inference images using a Google [Colab Pro](https://colab.research.google.com/signup) V100 High-RAM instance.
Reproduce by `python classify/val.py --data ../datasets/imagenet --img 224 --batch 1` -- **Export** to ONNX at FP32 and TensorRT at FP16 done with `export.py`.
Reproduce by `python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224` +- 所有检查点都被SGD优化器训练到90 epochs, `lr0=0.001` 和 `weight_decay=5e-5`, 图像大小为224,全为默认设置。
运行数据记录于 https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2。 +- **准确度** 值为[ImageNet-1k](https://www.image-net.org/index.php)数据集上的单模型单尺度。
通过`python classify/val.py --data ../datasets/imagenet --img 224`进行复制。 +- 使用Google [Colab Pro](https://colab.research.google.com/signup) V100 High-RAM实例得出的100张推理图像的平均**速度**。
通过 `python classify/val.py --data ../datasets/imagenet --img 224 --batch 1`进行复制。 +- 用`export.py`**导出**到FP32的ONNX和FP16的TensorRT。
通过 `python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224`进行复制。
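The predict step in the usage examples just below reduces to a softmax followed by a top-5 argsort. A minimal sketch of that flow on dummy logits, mirroring the `classify/predict.py` hunks in this series, including the `.tolist()` indexing fix that lands in the next commit:

```python
# Sketch: the top-5 classification flow from classify/predict.py. Iterating
# over i.tolist() rather than the tensor itself is the fix applied below.
import torch
import torch.nn.functional as F

logits = torch.randn(1, 1000)                       # model output for one image
p = F.softmax(logits, dim=1)                        # class probabilities
i = p.argsort(1, descending=True)[:, :5].squeeze()  # top-5 class indices
print(', '.join(f'class{j} {p[0, j]:.2f}' for j in i.tolist()))
```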
- Classification Usage Examples (click to expand) + 分类使用实例 (点击展开) -### Train -YOLOv5 classification training supports auto-download of MNIST, Fashion-MNIST, CIFAR10, CIFAR100, Imagenette, Imagewoof, and ImageNet datasets with the `--data` argument. To start training on MNIST for example use `--data mnist`. +### 训练 +YOLOv5分类训练支持自动下载MNIST, Fashion-MNIST, CIFAR10, CIFAR100, Imagenette, Imagewoof和ImageNet数据集,并使用`--data` 参数. 打个比方,在MNIST上使用`--data mnist`开始训练。 ```bash -# Single-GPU +# 单GPU python classify/train.py --model yolov5s-cls.pt --data cifar100 --epochs 5 --img 224 --batch 128 -# Multi-GPU DDP +# 多-GPU DDP python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3 ``` -### Val -Validate YOLOv5m-cls accuracy on ImageNet-1k dataset: +### 验证 +在ImageNet-1k数据集上验证YOLOv5m-cl的准确性: ```bash bash data/scripts/get_imagenet.sh --val # download ImageNet val split (6.3G, 50000 images) python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224 # validate ``` -### Predict -Use pretrained YOLOv5s-cls.pt to predict bus.jpg: +### 预测 +用提前训练好的YOLOv5s-cls.pt去预测bus.jpg: ```bash python classify/predict.py --weights yolov5s-cls.pt --data data/images/bus.jpg ``` @@ -306,8 +309,8 @@ python classify/predict.py --weights yolov5s-cls.pt --data data/images/bus.jpg model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s-cls.pt') # load from PyTorch Hub ``` -### Export -Export a group of trained YOLOv5s-cls, ResNet and EfficientNet models to ONNX and TensorRT: +### 导出 +导出一组训练好的YOLOv5s-cls, ResNet和EfficientNet模型到ONNX和TensorRT: ```bash python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --include onnx engine --img 224 ``` From e83b422a69bbd69628687b2dc50102c08877505c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 17 Aug 2022 17:52:53 +0200 Subject: [PATCH 1315/1976] Update dataset `names` from array to dictionary (#9000) * Migrate dataset names to dictionary * fix check * backwards compat * predict fix * val fix * Keep dataset stats behavior identical Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- classify/predict.py | 2 +- data/Argoverse.yaml | 11 +- data/GlobalWheat2020.yaml | 4 +- data/ImageNet.yaml | 1138 ++++++++++++++++++++++++++++++++----- data/Objects365.yaml | 408 +++++++++++-- data/SKU-110K.yaml | 4 +- data/VOC.yaml | 24 +- data/VisDrone.yaml | 13 +- data/coco.yaml | 91 ++- data/coco128.yaml | 91 ++- data/xView.yaml | 71 ++- models/common.py | 2 +- utils/dataloaders.py | 2 +- utils/general.py | 8 +- val.py | 4 +- 15 files changed, 1646 insertions(+), 227 deletions(-) diff --git a/classify/predict.py b/classify/predict.py index 4247e3c8e7fa..87379e42159b 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -71,7 +71,7 @@ def run( p = F.softmax(results, dim=1) # probabilities i = p.argsort(1, descending=True)[:, :5].squeeze() # top 5 indices dt[2] += time_sync() - t3 - LOGGER.info(f"image 1/1 {file}: {imgsz}x{imgsz} {', '.join(f'{model.names[j]} {p[0, j]:.2f}' for j in i)}") + LOGGER.info(f"image 1/1 {file}: {imgsz}x{imgsz} {', '.join(f'{model.names[j]} {p[0, j]:.2f}' for j in i.tolist())}") # Print results t = tuple(x / seen * 1E3 for x in dt) # speeds per image diff --git a/data/Argoverse.yaml b/data/Argoverse.yaml index 9d21296e3291..e3e9ba161ed0 100644 --- a/data/Argoverse.yaml +++ b/data/Argoverse.yaml @@ -14,8 +14,15 @@ val: Argoverse-1.1/images/val/ # val images (relative to 'path') 15062 images test: 
Argoverse-1.1/images/test/ # test images (optional) https://eval.ai/web/challenges/challenge-page/800/overview # Classes -nc: 8 # number of classes -names: ['person', 'bicycle', 'car', 'motorcycle', 'bus', 'truck', 'traffic_light', 'stop_sign'] # class names +names: + 0: person + 1: bicycle + 2: car + 3: motorcycle + 4: bus + 5: truck + 6: traffic_light + 7: stop_sign # Download script/URL (optional) --------------------------------------------------------------------------------------- diff --git a/data/GlobalWheat2020.yaml b/data/GlobalWheat2020.yaml index 4c43693f1d82..01812d031bc5 100644 --- a/data/GlobalWheat2020.yaml +++ b/data/GlobalWheat2020.yaml @@ -26,8 +26,8 @@ test: # test images (optional) 1276 images - images/uq_1 # Classes -nc: 1 # number of classes -names: ['wheat_head'] # class names +names: + 0: wheat_head # Download script/URL (optional) --------------------------------------------------------------------------------------- diff --git a/data/ImageNet.yaml b/data/ImageNet.yaml index 9f89b4268aff..14f12950605f 100644 --- a/data/ImageNet.yaml +++ b/data/ImageNet.yaml @@ -15,142 +15,1008 @@ val: val # val images (relative to 'path') 50000 images test: # test images (optional) # Classes -nc: 1000 # number of classes -names: ['tench', 'goldfish', 'great white shark', 'tiger shark', 'hammerhead shark', 'electric ray', 'stingray', 'cock', - 'hen', 'ostrich', 'brambling', 'goldfinch', 'house finch', 'junco', 'indigo bunting', 'American robin', - 'bulbul', 'jay', 'magpie', 'chickadee', 'American dipper', 'kite', 'bald eagle', 'vulture', 'great grey owl', - 'fire salamander', 'smooth newt', 'newt', 'spotted salamander', 'axolotl', 'American bullfrog', 'tree frog', - 'tailed frog', 'loggerhead sea turtle', 'leatherback sea turtle', 'mud turtle', 'terrapin', 'box turtle', - 'banded gecko', 'green iguana', 'Carolina anole', 'desert grassland whiptail lizard', 'agama', - 'frilled-necked lizard', 'alligator lizard', 'Gila monster', 'European green lizard', 'chameleon', - 'Komodo dragon', 'Nile crocodile', 'American alligator', 'triceratops', 'worm snake', 'ring-necked snake', - 'eastern hog-nosed snake', 'smooth green snake', 'kingsnake', 'garter snake', 'water snake', 'vine snake', - 'night snake', 'boa constrictor', 'African rock python', 'Indian cobra', 'green mamba', 'sea snake', - 'Saharan horned viper', 'eastern diamondback rattlesnake', 'sidewinder', 'trilobite', 'harvestman', 'scorpion', - 'yellow garden spider', 'barn spider', 'European garden spider', 'southern black widow', 'tarantula', - 'wolf spider', 'tick', 'centipede', 'black grouse', 'ptarmigan', 'ruffed grouse', 'prairie grouse', 'peacock', - 'quail', 'partridge', 'grey parrot', 'macaw', 'sulphur-crested cockatoo', 'lorikeet', 'coucal', 'bee eater', - 'hornbill', 'hummingbird', 'jacamar', 'toucan', 'duck', 'red-breasted merganser', 'goose', 'black swan', - 'tusker', 'echidna', 'platypus', 'wallaby', 'koala', 'wombat', 'jellyfish', 'sea anemone', 'brain coral', - 'flatworm', 'nematode', 'conch', 'snail', 'slug', 'sea slug', 'chiton', 'chambered nautilus', 'Dungeness crab', - 'rock crab', 'fiddler crab', 'red king crab', 'American lobster', 'spiny lobster', 'crayfish', 'hermit crab', - 'isopod', 'white stork', 'black stork', 'spoonbill', 'flamingo', 'little blue heron', 'great egret', 'bittern', - 'crane (bird)', 'limpkin', 'common gallinule', 'American coot', 'bustard', 'ruddy turnstone', 'dunlin', - 'common redshank', 'dowitcher', 'oystercatcher', 'pelican', 'king penguin', 'albatross', 'grey whale', - 'killer whale', 
'dugong', 'sea lion', 'Chihuahua', 'Japanese Chin', 'Maltese', 'Pekingese', 'Shih Tzu', - 'King Charles Spaniel', 'Papillon', 'toy terrier', 'Rhodesian Ridgeback', 'Afghan Hound', 'Basset Hound', - 'Beagle', 'Bloodhound', 'Bluetick Coonhound', 'Black and Tan Coonhound', 'Treeing Walker Coonhound', - 'English foxhound', 'Redbone Coonhound', 'borzoi', 'Irish Wolfhound', 'Italian Greyhound', 'Whippet', - 'Ibizan Hound', 'Norwegian Elkhound', 'Otterhound', 'Saluki', 'Scottish Deerhound', 'Weimaraner', - 'Staffordshire Bull Terrier', 'American Staffordshire Terrier', 'Bedlington Terrier', 'Border Terrier', - 'Kerry Blue Terrier', 'Irish Terrier', 'Norfolk Terrier', 'Norwich Terrier', 'Yorkshire Terrier', - 'Wire Fox Terrier', 'Lakeland Terrier', 'Sealyham Terrier', 'Airedale Terrier', 'Cairn Terrier', - 'Australian Terrier', 'Dandie Dinmont Terrier', 'Boston Terrier', 'Miniature Schnauzer', 'Giant Schnauzer', - 'Standard Schnauzer', 'Scottish Terrier', 'Tibetan Terrier', 'Australian Silky Terrier', - 'Soft-coated Wheaten Terrier', 'West Highland White Terrier', 'Lhasa Apso', 'Flat-Coated Retriever', - 'Curly-coated Retriever', 'Golden Retriever', 'Labrador Retriever', 'Chesapeake Bay Retriever', - 'German Shorthaired Pointer', 'Vizsla', 'English Setter', 'Irish Setter', 'Gordon Setter', 'Brittany', - 'Clumber Spaniel', 'English Springer Spaniel', 'Welsh Springer Spaniel', 'Cocker Spaniels', 'Sussex Spaniel', - 'Irish Water Spaniel', 'Kuvasz', 'Schipperke', 'Groenendael', 'Malinois', 'Briard', 'Australian Kelpie', - 'Komondor', 'Old English Sheepdog', 'Shetland Sheepdog', 'collie', 'Border Collie', 'Bouvier des Flandres', - 'Rottweiler', 'German Shepherd Dog', 'Dobermann', 'Miniature Pinscher', 'Greater Swiss Mountain Dog', - 'Bernese Mountain Dog', 'Appenzeller Sennenhund', 'Entlebucher Sennenhund', 'Boxer', 'Bullmastiff', - 'Tibetan Mastiff', 'French Bulldog', 'Great Dane', 'St. 
Bernard', 'husky', 'Alaskan Malamute', 'Siberian Husky', - 'Dalmatian', 'Affenpinscher', 'Basenji', 'pug', 'Leonberger', 'Newfoundland', 'Pyrenean Mountain Dog', - 'Samoyed', 'Pomeranian', 'Chow Chow', 'Keeshond', 'Griffon Bruxellois', 'Pembroke Welsh Corgi', - 'Cardigan Welsh Corgi', 'Toy Poodle', 'Miniature Poodle', 'Standard Poodle', 'Mexican hairless dog', - 'grey wolf', 'Alaskan tundra wolf', 'red wolf', 'coyote', 'dingo', 'dhole', 'African wild dog', 'hyena', - 'red fox', 'kit fox', 'Arctic fox', 'grey fox', 'tabby cat', 'tiger cat', 'Persian cat', 'Siamese cat', - 'Egyptian Mau', 'cougar', 'lynx', 'leopard', 'snow leopard', 'jaguar', 'lion', 'tiger', 'cheetah', 'brown bear', - 'American black bear', 'polar bear', 'sloth bear', 'mongoose', 'meerkat', 'tiger beetle', 'ladybug', - 'ground beetle', 'longhorn beetle', 'leaf beetle', 'dung beetle', 'rhinoceros beetle', 'weevil', 'fly', 'bee', - 'ant', 'grasshopper', 'cricket', 'stick insect', 'cockroach', 'mantis', 'cicada', 'leafhopper', 'lacewing', - 'dragonfly', 'damselfly', 'red admiral', 'ringlet', 'monarch butterfly', 'small white', 'sulphur butterfly', - 'gossamer-winged butterfly', 'starfish', 'sea urchin', 'sea cucumber', 'cottontail rabbit', 'hare', - 'Angora rabbit', 'hamster', 'porcupine', 'fox squirrel', 'marmot', 'beaver', 'guinea pig', 'common sorrel', - 'zebra', 'pig', 'wild boar', 'warthog', 'hippopotamus', 'ox', 'water buffalo', 'bison', 'ram', 'bighorn sheep', - 'Alpine ibex', 'hartebeest', 'impala', 'gazelle', 'dromedary', 'llama', 'weasel', 'mink', 'European polecat', - 'black-footed ferret', 'otter', 'skunk', 'badger', 'armadillo', 'three-toed sloth', 'orangutan', 'gorilla', - 'chimpanzee', 'gibbon', 'siamang', 'guenon', 'patas monkey', 'baboon', 'macaque', 'langur', - 'black-and-white colobus', 'proboscis monkey', 'marmoset', 'white-headed capuchin', 'howler monkey', 'titi', - "Geoffroy's spider monkey", 'common squirrel monkey', 'ring-tailed lemur', 'indri', 'Asian elephant', - 'African bush elephant', 'red panda', 'giant panda', 'snoek', 'eel', 'coho salmon', 'rock beauty', 'clownfish', - 'sturgeon', 'garfish', 'lionfish', 'pufferfish', 'abacus', 'abaya', 'academic gown', 'accordion', - 'acoustic guitar', 'aircraft carrier', 'airliner', 'airship', 'altar', 'ambulance', 'amphibious vehicle', - 'analog clock', 'apiary', 'apron', 'waste container', 'assault rifle', 'backpack', 'bakery', 'balance beam', - 'balloon', 'ballpoint pen', 'Band-Aid', 'banjo', 'baluster', 'barbell', 'barber chair', 'barbershop', 'barn', - 'barometer', 'barrel', 'wheelbarrow', 'baseball', 'basketball', 'bassinet', 'bassoon', 'swimming cap', - 'bath towel', 'bathtub', 'station wagon', 'lighthouse', 'beaker', 'military cap', 'beer bottle', 'beer glass', - 'bell-cot', 'bib', 'tandem bicycle', 'bikini', 'ring binder', 'binoculars', 'birdhouse', 'boathouse', - 'bobsleigh', 'bolo tie', 'poke bonnet', 'bookcase', 'bookstore', 'bottle cap', 'bow', 'bow tie', 'brass', 'bra', - 'breakwater', 'breastplate', 'broom', 'bucket', 'buckle', 'bulletproof vest', 'high-speed train', - 'butcher shop', 'taxicab', 'cauldron', 'candle', 'cannon', 'canoe', 'can opener', 'cardigan', 'car mirror', - 'carousel', 'tool kit', 'carton', 'car wheel', 'automated teller machine', 'cassette', 'cassette player', - 'castle', 'catamaran', 'CD player', 'cello', 'mobile phone', 'chain', 'chain-link fence', 'chain mail', - 'chainsaw', 'chest', 'chiffonier', 'chime', 'china cabinet', 'Christmas stocking', 'church', 'movie theater', - 'cleaver', 'cliff dwelling', 'cloak', 'clogs', 
'cocktail shaker', 'coffee mug', 'coffeemaker', 'coil', - 'combination lock', 'computer keyboard', 'confectionery store', 'container ship', 'convertible', 'corkscrew', - 'cornet', 'cowboy boot', 'cowboy hat', 'cradle', 'crane (machine)', 'crash helmet', 'crate', 'infant bed', - 'Crock Pot', 'croquet ball', 'crutch', 'cuirass', 'dam', 'desk', 'desktop computer', 'rotary dial telephone', - 'diaper', 'digital clock', 'digital watch', 'dining table', 'dishcloth', 'dishwasher', 'disc brake', 'dock', - 'dog sled', 'dome', 'doormat', 'drilling rig', 'drum', 'drumstick', 'dumbbell', 'Dutch oven', 'electric fan', - 'electric guitar', 'electric locomotive', 'entertainment center', 'envelope', 'espresso machine', 'face powder', - 'feather boa', 'filing cabinet', 'fireboat', 'fire engine', 'fire screen sheet', 'flagpole', 'flute', - 'folding chair', 'football helmet', 'forklift', 'fountain', 'fountain pen', 'four-poster bed', 'freight car', - 'French horn', 'frying pan', 'fur coat', 'garbage truck', 'gas mask', 'gas pump', 'goblet', 'go-kart', - 'golf ball', 'golf cart', 'gondola', 'gong', 'gown', 'grand piano', 'greenhouse', 'grille', 'grocery store', - 'guillotine', 'barrette', 'hair spray', 'half-track', 'hammer', 'hamper', 'hair dryer', 'hand-held computer', - 'handkerchief', 'hard disk drive', 'harmonica', 'harp', 'harvester', 'hatchet', 'holster', 'home theater', - 'honeycomb', 'hook', 'hoop skirt', 'horizontal bar', 'horse-drawn vehicle', 'hourglass', 'iPod', 'clothes iron', - "jack-o'-lantern", 'jeans', 'jeep', 'T-shirt', 'jigsaw puzzle', 'pulled rickshaw', 'joystick', 'kimono', - 'knee pad', 'knot', 'lab coat', 'ladle', 'lampshade', 'laptop computer', 'lawn mower', 'lens cap', - 'paper knife', 'library', 'lifeboat', 'lighter', 'limousine', 'ocean liner', 'lipstick', 'slip-on shoe', - 'lotion', 'speaker', 'loupe', 'sawmill', 'magnetic compass', 'mail bag', 'mailbox', 'tights', 'tank suit', - 'manhole cover', 'maraca', 'marimba', 'mask', 'match', 'maypole', 'maze', 'measuring cup', 'medicine chest', - 'megalith', 'microphone', 'microwave oven', 'military uniform', 'milk can', 'minibus', 'miniskirt', 'minivan', - 'missile', 'mitten', 'mixing bowl', 'mobile home', 'Model T', 'modem', 'monastery', 'monitor', 'moped', - 'mortar', 'square academic cap', 'mosque', 'mosquito net', 'scooter', 'mountain bike', 'tent', 'computer mouse', - 'mousetrap', 'moving van', 'muzzle', 'nail', 'neck brace', 'necklace', 'nipple', 'notebook computer', 'obelisk', - 'oboe', 'ocarina', 'odometer', 'oil filter', 'organ', 'oscilloscope', 'overskirt', 'bullock cart', - 'oxygen mask', 'packet', 'paddle', 'paddle wheel', 'padlock', 'paintbrush', 'pajamas', 'palace', 'pan flute', - 'paper towel', 'parachute', 'parallel bars', 'park bench', 'parking meter', 'passenger car', 'patio', - 'payphone', 'pedestal', 'pencil case', 'pencil sharpener', 'perfume', 'Petri dish', 'photocopier', 'plectrum', - 'Pickelhaube', 'picket fence', 'pickup truck', 'pier', 'piggy bank', 'pill bottle', 'pillow', 'ping-pong ball', - 'pinwheel', 'pirate ship', 'pitcher', 'hand plane', 'planetarium', 'plastic bag', 'plate rack', 'plow', - 'plunger', 'Polaroid camera', 'pole', 'police van', 'poncho', 'billiard table', 'soda bottle', 'pot', - "potter's wheel", 'power drill', 'prayer rug', 'printer', 'prison', 'projectile', 'projector', 'hockey puck', - 'punching bag', 'purse', 'quill', 'quilt', 'race car', 'racket', 'radiator', 'radio', 'radio telescope', - 'rain barrel', 'recreational vehicle', 'reel', 'reflex camera', 'refrigerator', 'remote control', 
'restaurant', - 'revolver', 'rifle', 'rocking chair', 'rotisserie', 'eraser', 'rugby ball', 'ruler', 'running shoe', 'safe', - 'safety pin', 'salt shaker', 'sandal', 'sarong', 'saxophone', 'scabbard', 'weighing scale', 'school bus', - 'schooner', 'scoreboard', 'CRT screen', 'screw', 'screwdriver', 'seat belt', 'sewing machine', 'shield', - 'shoe store', 'shoji', 'shopping basket', 'shopping cart', 'shovel', 'shower cap', 'shower curtain', 'ski', - 'ski mask', 'sleeping bag', 'slide rule', 'sliding door', 'slot machine', 'snorkel', 'snowmobile', 'snowplow', - 'soap dispenser', 'soccer ball', 'sock', 'solar thermal collector', 'sombrero', 'soup bowl', 'space bar', - 'space heater', 'space shuttle', 'spatula', 'motorboat', 'spider web', 'spindle', 'sports car', 'spotlight', - 'stage', 'steam locomotive', 'through arch bridge', 'steel drum', 'stethoscope', 'scarf', 'stone wall', - 'stopwatch', 'stove', 'strainer', 'tram', 'stretcher', 'couch', 'stupa', 'submarine', 'suit', 'sundial', - 'sunglass', 'sunglasses', 'sunscreen', 'suspension bridge', 'mop', 'sweatshirt', 'swimsuit', 'swing', 'switch', - 'syringe', 'table lamp', 'tank', 'tape player', 'teapot', 'teddy bear', 'television', 'tennis ball', - 'thatched roof', 'front curtain', 'thimble', 'threshing machine', 'throne', 'tile roof', 'toaster', - 'tobacco shop', 'toilet seat', 'torch', 'totem pole', 'tow truck', 'toy store', 'tractor', 'semi-trailer truck', - 'tray', 'trench coat', 'tricycle', 'trimaran', 'tripod', 'triumphal arch', 'trolleybus', 'trombone', 'tub', - 'turnstile', 'typewriter keyboard', 'umbrella', 'unicycle', 'upright piano', 'vacuum cleaner', 'vase', 'vault', - 'velvet', 'vending machine', 'vestment', 'viaduct', 'violin', 'volleyball', 'waffle iron', 'wall clock', - 'wallet', 'wardrobe', 'military aircraft', 'sink', 'washing machine', 'water bottle', 'water jug', - 'water tower', 'whiskey jug', 'whistle', 'wig', 'window screen', 'window shade', 'Windsor tie', 'wine bottle', - 'wing', 'wok', 'wooden spoon', 'wool', 'split-rail fence', 'shipwreck', 'yawl', 'yurt', 'website', 'comic book', - 'crossword', 'traffic sign', 'traffic light', 'dust jacket', 'menu', 'plate', 'guacamole', 'consomme', - 'hot pot', 'trifle', 'ice cream', 'ice pop', 'baguette', 'bagel', 'pretzel', 'cheeseburger', 'hot dog', - 'mashed potato', 'cabbage', 'broccoli', 'cauliflower', 'zucchini', 'spaghetti squash', 'acorn squash', - 'butternut squash', 'cucumber', 'artichoke', 'bell pepper', 'cardoon', 'mushroom', 'Granny Smith', 'strawberry', - 'orange', 'lemon', 'fig', 'pineapple', 'banana', 'jackfruit', 'custard apple', 'pomegranate', 'hay', - 'carbonara', 'chocolate syrup', 'dough', 'meatloaf', 'pizza', 'pot pie', 'burrito', 'red wine', 'espresso', - 'cup', 'eggnog', 'alp', 'bubble', 'cliff', 'coral reef', 'geyser', 'lakeshore', 'promontory', 'shoal', - 'seashore', 'valley', 'volcano', 'baseball player', 'bridegroom', 'scuba diver', 'rapeseed', 'daisy', - "yellow lady's slipper", 'corn', 'acorn', 'rose hip', 'horse chestnut seed', 'coral fungus', 'agaric', - 'gyromitra', 'stinkhorn mushroom', 'earth star', 'hen-of-the-woods', 'bolete', 'ear', - 'toilet paper'] # class names +names: + 0: tench + 1: goldfish + 2: great white shark + 3: tiger shark + 4: hammerhead shark + 5: electric ray + 6: stingray + 7: cock + 8: hen + 9: ostrich + 10: brambling + 11: goldfinch + 12: house finch + 13: junco + 14: indigo bunting + 15: American robin + 16: bulbul + 17: jay + 18: magpie + 19: chickadee + 20: American dipper + 21: kite + 22: bald eagle + 23: vulture + 24: 
great grey owl + 25: fire salamander + 26: smooth newt + 27: newt + 28: spotted salamander + 29: axolotl + 30: American bullfrog + 31: tree frog + 32: tailed frog + 33: loggerhead sea turtle + 34: leatherback sea turtle + 35: mud turtle + 36: terrapin + 37: box turtle + 38: banded gecko + 39: green iguana + 40: Carolina anole + 41: desert grassland whiptail lizard + 42: agama + 43: frilled-necked lizard + 44: alligator lizard + 45: Gila monster + 46: European green lizard + 47: chameleon + 48: Komodo dragon + 49: Nile crocodile + 50: American alligator + 51: triceratops + 52: worm snake + 53: ring-necked snake + 54: eastern hog-nosed snake + 55: smooth green snake + 56: kingsnake + 57: garter snake + 58: water snake + 59: vine snake + 60: night snake + 61: boa constrictor + 62: African rock python + 63: Indian cobra + 64: green mamba + 65: sea snake + 66: Saharan horned viper + 67: eastern diamondback rattlesnake + 68: sidewinder + 69: trilobite + 70: harvestman + 71: scorpion + 72: yellow garden spider + 73: barn spider + 74: European garden spider + 75: southern black widow + 76: tarantula + 77: wolf spider + 78: tick + 79: centipede + 80: black grouse + 81: ptarmigan + 82: ruffed grouse + 83: prairie grouse + 84: peacock + 85: quail + 86: partridge + 87: grey parrot + 88: macaw + 89: sulphur-crested cockatoo + 90: lorikeet + 91: coucal + 92: bee eater + 93: hornbill + 94: hummingbird + 95: jacamar + 96: toucan + 97: duck + 98: red-breasted merganser + 99: goose + 100: black swan + 101: tusker + 102: echidna + 103: platypus + 104: wallaby + 105: koala + 106: wombat + 107: jellyfish + 108: sea anemone + 109: brain coral + 110: flatworm + 111: nematode + 112: conch + 113: snail + 114: slug + 115: sea slug + 116: chiton + 117: chambered nautilus + 118: Dungeness crab + 119: rock crab + 120: fiddler crab + 121: red king crab + 122: American lobster + 123: spiny lobster + 124: crayfish + 125: hermit crab + 126: isopod + 127: white stork + 128: black stork + 129: spoonbill + 130: flamingo + 131: little blue heron + 132: great egret + 133: bittern + 134: crane (bird) + 135: limpkin + 136: common gallinule + 137: American coot + 138: bustard + 139: ruddy turnstone + 140: dunlin + 141: common redshank + 142: dowitcher + 143: oystercatcher + 144: pelican + 145: king penguin + 146: albatross + 147: grey whale + 148: killer whale + 149: dugong + 150: sea lion + 151: Chihuahua + 152: Japanese Chin + 153: Maltese + 154: Pekingese + 155: Shih Tzu + 156: King Charles Spaniel + 157: Papillon + 158: toy terrier + 159: Rhodesian Ridgeback + 160: Afghan Hound + 161: Basset Hound + 162: Beagle + 163: Bloodhound + 164: Bluetick Coonhound + 165: Black and Tan Coonhound + 166: Treeing Walker Coonhound + 167: English foxhound + 168: Redbone Coonhound + 169: borzoi + 170: Irish Wolfhound + 171: Italian Greyhound + 172: Whippet + 173: Ibizan Hound + 174: Norwegian Elkhound + 175: Otterhound + 176: Saluki + 177: Scottish Deerhound + 178: Weimaraner + 179: Staffordshire Bull Terrier + 180: American Staffordshire Terrier + 181: Bedlington Terrier + 182: Border Terrier + 183: Kerry Blue Terrier + 184: Irish Terrier + 185: Norfolk Terrier + 186: Norwich Terrier + 187: Yorkshire Terrier + 188: Wire Fox Terrier + 189: Lakeland Terrier + 190: Sealyham Terrier + 191: Airedale Terrier + 192: Cairn Terrier + 193: Australian Terrier + 194: Dandie Dinmont Terrier + 195: Boston Terrier + 196: Miniature Schnauzer + 197: Giant Schnauzer + 198: Standard Schnauzer + 199: Scottish Terrier + 200: Tibetan Terrier + 201: Australian 
Silky Terrier + 202: Soft-coated Wheaten Terrier + 203: West Highland White Terrier + 204: Lhasa Apso + 205: Flat-Coated Retriever + 206: Curly-coated Retriever + 207: Golden Retriever + 208: Labrador Retriever + 209: Chesapeake Bay Retriever + 210: German Shorthaired Pointer + 211: Vizsla + 212: English Setter + 213: Irish Setter + 214: Gordon Setter + 215: Brittany + 216: Clumber Spaniel + 217: English Springer Spaniel + 218: Welsh Springer Spaniel + 219: Cocker Spaniels + 220: Sussex Spaniel + 221: Irish Water Spaniel + 222: Kuvasz + 223: Schipperke + 224: Groenendael + 225: Malinois + 226: Briard + 227: Australian Kelpie + 228: Komondor + 229: Old English Sheepdog + 230: Shetland Sheepdog + 231: collie + 232: Border Collie + 233: Bouvier des Flandres + 234: Rottweiler + 235: German Shepherd Dog + 236: Dobermann + 237: Miniature Pinscher + 238: Greater Swiss Mountain Dog + 239: Bernese Mountain Dog + 240: Appenzeller Sennenhund + 241: Entlebucher Sennenhund + 242: Boxer + 243: Bullmastiff + 244: Tibetan Mastiff + 245: French Bulldog + 246: Great Dane + 247: St. Bernard + 248: husky + 249: Alaskan Malamute + 250: Siberian Husky + 251: Dalmatian + 252: Affenpinscher + 253: Basenji + 254: pug + 255: Leonberger + 256: Newfoundland + 257: Pyrenean Mountain Dog + 258: Samoyed + 259: Pomeranian + 260: Chow Chow + 261: Keeshond + 262: Griffon Bruxellois + 263: Pembroke Welsh Corgi + 264: Cardigan Welsh Corgi + 265: Toy Poodle + 266: Miniature Poodle + 267: Standard Poodle + 268: Mexican hairless dog + 269: grey wolf + 270: Alaskan tundra wolf + 271: red wolf + 272: coyote + 273: dingo + 274: dhole + 275: African wild dog + 276: hyena + 277: red fox + 278: kit fox + 279: Arctic fox + 280: grey fox + 281: tabby cat + 282: tiger cat + 283: Persian cat + 284: Siamese cat + 285: Egyptian Mau + 286: cougar + 287: lynx + 288: leopard + 289: snow leopard + 290: jaguar + 291: lion + 292: tiger + 293: cheetah + 294: brown bear + 295: American black bear + 296: polar bear + 297: sloth bear + 298: mongoose + 299: meerkat + 300: tiger beetle + 301: ladybug + 302: ground beetle + 303: longhorn beetle + 304: leaf beetle + 305: dung beetle + 306: rhinoceros beetle + 307: weevil + 308: fly + 309: bee + 310: ant + 311: grasshopper + 312: cricket + 313: stick insect + 314: cockroach + 315: mantis + 316: cicada + 317: leafhopper + 318: lacewing + 319: dragonfly + 320: damselfly + 321: red admiral + 322: ringlet + 323: monarch butterfly + 324: small white + 325: sulphur butterfly + 326: gossamer-winged butterfly + 327: starfish + 328: sea urchin + 329: sea cucumber + 330: cottontail rabbit + 331: hare + 332: Angora rabbit + 333: hamster + 334: porcupine + 335: fox squirrel + 336: marmot + 337: beaver + 338: guinea pig + 339: common sorrel + 340: zebra + 341: pig + 342: wild boar + 343: warthog + 344: hippopotamus + 345: ox + 346: water buffalo + 347: bison + 348: ram + 349: bighorn sheep + 350: Alpine ibex + 351: hartebeest + 352: impala + 353: gazelle + 354: dromedary + 355: llama + 356: weasel + 357: mink + 358: European polecat + 359: black-footed ferret + 360: otter + 361: skunk + 362: badger + 363: armadillo + 364: three-toed sloth + 365: orangutan + 366: gorilla + 367: chimpanzee + 368: gibbon + 369: siamang + 370: guenon + 371: patas monkey + 372: baboon + 373: macaque + 374: langur + 375: black-and-white colobus + 376: proboscis monkey + 377: marmoset + 378: white-headed capuchin + 379: howler monkey + 380: titi + 381: Geoffroy's spider monkey + 382: common squirrel monkey + 383: ring-tailed lemur + 384: 
indri + 385: Asian elephant + 386: African bush elephant + 387: red panda + 388: giant panda + 389: snoek + 390: eel + 391: coho salmon + 392: rock beauty + 393: clownfish + 394: sturgeon + 395: garfish + 396: lionfish + 397: pufferfish + 398: abacus + 399: abaya + 400: academic gown + 401: accordion + 402: acoustic guitar + 403: aircraft carrier + 404: airliner + 405: airship + 406: altar + 407: ambulance + 408: amphibious vehicle + 409: analog clock + 410: apiary + 411: apron + 412: waste container + 413: assault rifle + 414: backpack + 415: bakery + 416: balance beam + 417: balloon + 418: ballpoint pen + 419: Band-Aid + 420: banjo + 421: baluster + 422: barbell + 423: barber chair + 424: barbershop + 425: barn + 426: barometer + 427: barrel + 428: wheelbarrow + 429: baseball + 430: basketball + 431: bassinet + 432: bassoon + 433: swimming cap + 434: bath towel + 435: bathtub + 436: station wagon + 437: lighthouse + 438: beaker + 439: military cap + 440: beer bottle + 441: beer glass + 442: bell-cot + 443: bib + 444: tandem bicycle + 445: bikini + 446: ring binder + 447: binoculars + 448: birdhouse + 449: boathouse + 450: bobsleigh + 451: bolo tie + 452: poke bonnet + 453: bookcase + 454: bookstore + 455: bottle cap + 456: bow + 457: bow tie + 458: brass + 459: bra + 460: breakwater + 461: breastplate + 462: broom + 463: bucket + 464: buckle + 465: bulletproof vest + 466: high-speed train + 467: butcher shop + 468: taxicab + 469: cauldron + 470: candle + 471: cannon + 472: canoe + 473: can opener + 474: cardigan + 475: car mirror + 476: carousel + 477: tool kit + 478: carton + 479: car wheel + 480: automated teller machine + 481: cassette + 482: cassette player + 483: castle + 484: catamaran + 485: CD player + 486: cello + 487: mobile phone + 488: chain + 489: chain-link fence + 490: chain mail + 491: chainsaw + 492: chest + 493: chiffonier + 494: chime + 495: china cabinet + 496: Christmas stocking + 497: church + 498: movie theater + 499: cleaver + 500: cliff dwelling + 501: cloak + 502: clogs + 503: cocktail shaker + 504: coffee mug + 505: coffeemaker + 506: coil + 507: combination lock + 508: computer keyboard + 509: confectionery store + 510: container ship + 511: convertible + 512: corkscrew + 513: cornet + 514: cowboy boot + 515: cowboy hat + 516: cradle + 517: crane (machine) + 518: crash helmet + 519: crate + 520: infant bed + 521: Crock Pot + 522: croquet ball + 523: crutch + 524: cuirass + 525: dam + 526: desk + 527: desktop computer + 528: rotary dial telephone + 529: diaper + 530: digital clock + 531: digital watch + 532: dining table + 533: dishcloth + 534: dishwasher + 535: disc brake + 536: dock + 537: dog sled + 538: dome + 539: doormat + 540: drilling rig + 541: drum + 542: drumstick + 543: dumbbell + 544: Dutch oven + 545: electric fan + 546: electric guitar + 547: electric locomotive + 548: entertainment center + 549: envelope + 550: espresso machine + 551: face powder + 552: feather boa + 553: filing cabinet + 554: fireboat + 555: fire engine + 556: fire screen sheet + 557: flagpole + 558: flute + 559: folding chair + 560: football helmet + 561: forklift + 562: fountain + 563: fountain pen + 564: four-poster bed + 565: freight car + 566: French horn + 567: frying pan + 568: fur coat + 569: garbage truck + 570: gas mask + 571: gas pump + 572: goblet + 573: go-kart + 574: golf ball + 575: golf cart + 576: gondola + 577: gong + 578: gown + 579: grand piano + 580: greenhouse + 581: grille + 582: grocery store + 583: guillotine + 584: barrette + 585: hair spray + 586: 
half-track + 587: hammer + 588: hamper + 589: hair dryer + 590: hand-held computer + 591: handkerchief + 592: hard disk drive + 593: harmonica + 594: harp + 595: harvester + 596: hatchet + 597: holster + 598: home theater + 599: honeycomb + 600: hook + 601: hoop skirt + 602: horizontal bar + 603: horse-drawn vehicle + 604: hourglass + 605: iPod + 606: clothes iron + 607: jack-o'-lantern + 608: jeans + 609: jeep + 610: T-shirt + 611: jigsaw puzzle + 612: pulled rickshaw + 613: joystick + 614: kimono + 615: knee pad + 616: knot + 617: lab coat + 618: ladle + 619: lampshade + 620: laptop computer + 621: lawn mower + 622: lens cap + 623: paper knife + 624: library + 625: lifeboat + 626: lighter + 627: limousine + 628: ocean liner + 629: lipstick + 630: slip-on shoe + 631: lotion + 632: speaker + 633: loupe + 634: sawmill + 635: magnetic compass + 636: mail bag + 637: mailbox + 638: tights + 639: tank suit + 640: manhole cover + 641: maraca + 642: marimba + 643: mask + 644: match + 645: maypole + 646: maze + 647: measuring cup + 648: medicine chest + 649: megalith + 650: microphone + 651: microwave oven + 652: military uniform + 653: milk can + 654: minibus + 655: miniskirt + 656: minivan + 657: missile + 658: mitten + 659: mixing bowl + 660: mobile home + 661: Model T + 662: modem + 663: monastery + 664: monitor + 665: moped + 666: mortar + 667: square academic cap + 668: mosque + 669: mosquito net + 670: scooter + 671: mountain bike + 672: tent + 673: computer mouse + 674: mousetrap + 675: moving van + 676: muzzle + 677: nail + 678: neck brace + 679: necklace + 680: nipple + 681: notebook computer + 682: obelisk + 683: oboe + 684: ocarina + 685: odometer + 686: oil filter + 687: organ + 688: oscilloscope + 689: overskirt + 690: bullock cart + 691: oxygen mask + 692: packet + 693: paddle + 694: paddle wheel + 695: padlock + 696: paintbrush + 697: pajamas + 698: palace + 699: pan flute + 700: paper towel + 701: parachute + 702: parallel bars + 703: park bench + 704: parking meter + 705: passenger car + 706: patio + 707: payphone + 708: pedestal + 709: pencil case + 710: pencil sharpener + 711: perfume + 712: Petri dish + 713: photocopier + 714: plectrum + 715: Pickelhaube + 716: picket fence + 717: pickup truck + 718: pier + 719: piggy bank + 720: pill bottle + 721: pillow + 722: ping-pong ball + 723: pinwheel + 724: pirate ship + 725: pitcher + 726: hand plane + 727: planetarium + 728: plastic bag + 729: plate rack + 730: plow + 731: plunger + 732: Polaroid camera + 733: pole + 734: police van + 735: poncho + 736: billiard table + 737: soda bottle + 738: pot + 739: potter's wheel + 740: power drill + 741: prayer rug + 742: printer + 743: prison + 744: projectile + 745: projector + 746: hockey puck + 747: punching bag + 748: purse + 749: quill + 750: quilt + 751: race car + 752: racket + 753: radiator + 754: radio + 755: radio telescope + 756: rain barrel + 757: recreational vehicle + 758: reel + 759: reflex camera + 760: refrigerator + 761: remote control + 762: restaurant + 763: revolver + 764: rifle + 765: rocking chair + 766: rotisserie + 767: eraser + 768: rugby ball + 769: ruler + 770: running shoe + 771: safe + 772: safety pin + 773: salt shaker + 774: sandal + 775: sarong + 776: saxophone + 777: scabbard + 778: weighing scale + 779: school bus + 780: schooner + 781: scoreboard + 782: CRT screen + 783: screw + 784: screwdriver + 785: seat belt + 786: sewing machine + 787: shield + 788: shoe store + 789: shoji + 790: shopping basket + 791: shopping cart + 792: shovel + 793: shower cap + 
794: shower curtain + 795: ski + 796: ski mask + 797: sleeping bag + 798: slide rule + 799: sliding door + 800: slot machine + 801: snorkel + 802: snowmobile + 803: snowplow + 804: soap dispenser + 805: soccer ball + 806: sock + 807: solar thermal collector + 808: sombrero + 809: soup bowl + 810: space bar + 811: space heater + 812: space shuttle + 813: spatula + 814: motorboat + 815: spider web + 816: spindle + 817: sports car + 818: spotlight + 819: stage + 820: steam locomotive + 821: through arch bridge + 822: steel drum + 823: stethoscope + 824: scarf + 825: stone wall + 826: stopwatch + 827: stove + 828: strainer + 829: tram + 830: stretcher + 831: couch + 832: stupa + 833: submarine + 834: suit + 835: sundial + 836: sunglass + 837: sunglasses + 838: sunscreen + 839: suspension bridge + 840: mop + 841: sweatshirt + 842: swimsuit + 843: swing + 844: switch + 845: syringe + 846: table lamp + 847: tank + 848: tape player + 849: teapot + 850: teddy bear + 851: television + 852: tennis ball + 853: thatched roof + 854: front curtain + 855: thimble + 856: threshing machine + 857: throne + 858: tile roof + 859: toaster + 860: tobacco shop + 861: toilet seat + 862: torch + 863: totem pole + 864: tow truck + 865: toy store + 866: tractor + 867: semi-trailer truck + 868: tray + 869: trench coat + 870: tricycle + 871: trimaran + 872: tripod + 873: triumphal arch + 874: trolleybus + 875: trombone + 876: tub + 877: turnstile + 878: typewriter keyboard + 879: umbrella + 880: unicycle + 881: upright piano + 882: vacuum cleaner + 883: vase + 884: vault + 885: velvet + 886: vending machine + 887: vestment + 888: viaduct + 889: violin + 890: volleyball + 891: waffle iron + 892: wall clock + 893: wallet + 894: wardrobe + 895: military aircraft + 896: sink + 897: washing machine + 898: water bottle + 899: water jug + 900: water tower + 901: whiskey jug + 902: whistle + 903: wig + 904: window screen + 905: window shade + 906: Windsor tie + 907: wine bottle + 908: wing + 909: wok + 910: wooden spoon + 911: wool + 912: split-rail fence + 913: shipwreck + 914: yawl + 915: yurt + 916: website + 917: comic book + 918: crossword + 919: traffic sign + 920: traffic light + 921: dust jacket + 922: menu + 923: plate + 924: guacamole + 925: consomme + 926: hot pot + 927: trifle + 928: ice cream + 929: ice pop + 930: baguette + 931: bagel + 932: pretzel + 933: cheeseburger + 934: hot dog + 935: mashed potato + 936: cabbage + 937: broccoli + 938: cauliflower + 939: zucchini + 940: spaghetti squash + 941: acorn squash + 942: butternut squash + 943: cucumber + 944: artichoke + 945: bell pepper + 946: cardoon + 947: mushroom + 948: Granny Smith + 949: strawberry + 950: orange + 951: lemon + 952: fig + 953: pineapple + 954: banana + 955: jackfruit + 956: custard apple + 957: pomegranate + 958: hay + 959: carbonara + 960: chocolate syrup + 961: dough + 962: meatloaf + 963: pizza + 964: pot pie + 965: burrito + 966: red wine + 967: espresso + 968: cup + 969: eggnog + 970: alp + 971: bubble + 972: cliff + 973: coral reef + 974: geyser + 975: lakeshore + 976: promontory + 977: shoal + 978: seashore + 979: valley + 980: volcano + 981: baseball player + 982: bridegroom + 983: scuba diver + 984: rapeseed + 985: daisy + 986: yellow lady's slipper + 987: corn + 988: acorn + 989: rose hip + 990: horse chestnut seed + 991: coral fungus + 992: agaric + 993: gyromitra + 994: stinkhorn mushroom + 995: earth star + 996: hen-of-the-woods + 997: bolete + 998: ear + 999: toilet paper + # Download script/URL (optional) download: 
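A note on the format change in the hunks above and below: every dataset YAML touched by this patch drops its explicit `nc:` count and replaces the `names:` array with an `{index: name}` mapping, with `nc` derived from the mapping instead. A minimal sketch of the backward-compatibility conversion for old-format YAMLs, mirroring the `utils/general.py` hunk later in this patch (`normalize_names` is a hypothetical helper name used only for illustration; `check_dataset()` performs this inline):

    import yaml

    def normalize_names(data):  # hypothetical helper; check_dataset() does this inline
        if isinstance(data['names'], (list, tuple)):  # old array format
            data['names'] = dict(enumerate(data['names']))  # {0: 'person', 1: 'bicycle', ...}
        data['nc'] = len(data['names'])  # class count is now derived, not declared
        return data

    with open('data/coco128.yaml') as f:
        data = normalize_names(yaml.safe_load(f))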
data/scripts/get_imagenet.sh diff --git a/data/Objects365.yaml b/data/Objects365.yaml index 4cc94753f530..05b26a1f4796 100644 --- a/data/Objects365.yaml +++ b/data/Objects365.yaml @@ -14,48 +14,372 @@ val: images/val # val images (relative to 'path') 80000 images test: # test images (optional) # Classes -nc: 365 # number of classes -names: ['Person', 'Sneakers', 'Chair', 'Other Shoes', 'Hat', 'Car', 'Lamp', 'Glasses', 'Bottle', 'Desk', 'Cup', - 'Street Lights', 'Cabinet/shelf', 'Handbag/Satchel', 'Bracelet', 'Plate', 'Picture/Frame', 'Helmet', 'Book', - 'Gloves', 'Storage box', 'Boat', 'Leather Shoes', 'Flower', 'Bench', 'Potted Plant', 'Bowl/Basin', 'Flag', - 'Pillow', 'Boots', 'Vase', 'Microphone', 'Necklace', 'Ring', 'SUV', 'Wine Glass', 'Belt', 'Monitor/TV', - 'Backpack', 'Umbrella', 'Traffic Light', 'Speaker', 'Watch', 'Tie', 'Trash bin Can', 'Slippers', 'Bicycle', - 'Stool', 'Barrel/bucket', 'Van', 'Couch', 'Sandals', 'Basket', 'Drum', 'Pen/Pencil', 'Bus', 'Wild Bird', - 'High Heels', 'Motorcycle', 'Guitar', 'Carpet', 'Cell Phone', 'Bread', 'Camera', 'Canned', 'Truck', - 'Traffic cone', 'Cymbal', 'Lifesaver', 'Towel', 'Stuffed Toy', 'Candle', 'Sailboat', 'Laptop', 'Awning', - 'Bed', 'Faucet', 'Tent', 'Horse', 'Mirror', 'Power outlet', 'Sink', 'Apple', 'Air Conditioner', 'Knife', - 'Hockey Stick', 'Paddle', 'Pickup Truck', 'Fork', 'Traffic Sign', 'Balloon', 'Tripod', 'Dog', 'Spoon', 'Clock', - 'Pot', 'Cow', 'Cake', 'Dinning Table', 'Sheep', 'Hanger', 'Blackboard/Whiteboard', 'Napkin', 'Other Fish', - 'Orange/Tangerine', 'Toiletry', 'Keyboard', 'Tomato', 'Lantern', 'Machinery Vehicle', 'Fan', - 'Green Vegetables', 'Banana', 'Baseball Glove', 'Airplane', 'Mouse', 'Train', 'Pumpkin', 'Soccer', 'Skiboard', - 'Luggage', 'Nightstand', 'Tea pot', 'Telephone', 'Trolley', 'Head Phone', 'Sports Car', 'Stop Sign', - 'Dessert', 'Scooter', 'Stroller', 'Crane', 'Remote', 'Refrigerator', 'Oven', 'Lemon', 'Duck', 'Baseball Bat', - 'Surveillance Camera', 'Cat', 'Jug', 'Broccoli', 'Piano', 'Pizza', 'Elephant', 'Skateboard', 'Surfboard', - 'Gun', 'Skating and Skiing shoes', 'Gas stove', 'Donut', 'Bow Tie', 'Carrot', 'Toilet', 'Kite', 'Strawberry', - 'Other Balls', 'Shovel', 'Pepper', 'Computer Box', 'Toilet Paper', 'Cleaning Products', 'Chopsticks', - 'Microwave', 'Pigeon', 'Baseball', 'Cutting/chopping Board', 'Coffee Table', 'Side Table', 'Scissors', - 'Marker', 'Pie', 'Ladder', 'Snowboard', 'Cookies', 'Radiator', 'Fire Hydrant', 'Basketball', 'Zebra', 'Grape', - 'Giraffe', 'Potato', 'Sausage', 'Tricycle', 'Violin', 'Egg', 'Fire Extinguisher', 'Candy', 'Fire Truck', - 'Billiards', 'Converter', 'Bathtub', 'Wheelchair', 'Golf Club', 'Briefcase', 'Cucumber', 'Cigar/Cigarette', - 'Paint Brush', 'Pear', 'Heavy Truck', 'Hamburger', 'Extractor', 'Extension Cord', 'Tong', 'Tennis Racket', - 'Folder', 'American Football', 'earphone', 'Mask', 'Kettle', 'Tennis', 'Ship', 'Swing', 'Coffee Machine', - 'Slide', 'Carriage', 'Onion', 'Green beans', 'Projector', 'Frisbee', 'Washing Machine/Drying Machine', - 'Chicken', 'Printer', 'Watermelon', 'Saxophone', 'Tissue', 'Toothbrush', 'Ice cream', 'Hot-air balloon', - 'Cello', 'French Fries', 'Scale', 'Trophy', 'Cabbage', 'Hot dog', 'Blender', 'Peach', 'Rice', 'Wallet/Purse', - 'Volleyball', 'Deer', 'Goose', 'Tape', 'Tablet', 'Cosmetics', 'Trumpet', 'Pineapple', 'Golf Ball', - 'Ambulance', 'Parking meter', 'Mango', 'Key', 'Hurdle', 'Fishing Rod', 'Medal', 'Flute', 'Brush', 'Penguin', - 'Megaphone', 'Corn', 'Lettuce', 'Garlic', 'Swan', 'Helicopter', 'Green Onion', 
'Sandwich', 'Nuts', - 'Speed Limit Sign', 'Induction Cooker', 'Broom', 'Trombone', 'Plum', 'Rickshaw', 'Goldfish', 'Kiwi fruit', - 'Router/modem', 'Poker Card', 'Toaster', 'Shrimp', 'Sushi', 'Cheese', 'Notepaper', 'Cherry', 'Pliers', 'CD', - 'Pasta', 'Hammer', 'Cue', 'Avocado', 'Hamimelon', 'Flask', 'Mushroom', 'Screwdriver', 'Soap', 'Recorder', - 'Bear', 'Eggplant', 'Board Eraser', 'Coconut', 'Tape Measure/Ruler', 'Pig', 'Showerhead', 'Globe', 'Chips', - 'Steak', 'Crosswalk Sign', 'Stapler', 'Camel', 'Formula 1', 'Pomegranate', 'Dishwasher', 'Crab', - 'Hoverboard', 'Meat ball', 'Rice Cooker', 'Tuba', 'Calculator', 'Papaya', 'Antelope', 'Parrot', 'Seal', - 'Butterfly', 'Dumbbell', 'Donkey', 'Lion', 'Urinal', 'Dolphin', 'Electric Drill', 'Hair Dryer', 'Egg tart', - 'Jellyfish', 'Treadmill', 'Lighter', 'Grapefruit', 'Game board', 'Mop', 'Radish', 'Baozi', 'Target', 'French', - 'Spring Rolls', 'Monkey', 'Rabbit', 'Pencil Case', 'Yak', 'Red Cabbage', 'Binoculars', 'Asparagus', 'Barbell', - 'Scallop', 'Noddles', 'Comb', 'Dumpling', 'Oyster', 'Table Tennis paddle', 'Cosmetics Brush/Eyeliner Pencil', - 'Chainsaw', 'Eraser', 'Lobster', 'Durian', 'Okra', 'Lipstick', 'Cosmetics Mirror', 'Curling', 'Table Tennis'] +names: + 0: Person + 1: Sneakers + 2: Chair + 3: Other Shoes + 4: Hat + 5: Car + 6: Lamp + 7: Glasses + 8: Bottle + 9: Desk + 10: Cup + 11: Street Lights + 12: Cabinet/shelf + 13: Handbag/Satchel + 14: Bracelet + 15: Plate + 16: Picture/Frame + 17: Helmet + 18: Book + 19: Gloves + 20: Storage box + 21: Boat + 22: Leather Shoes + 23: Flower + 24: Bench + 25: Potted Plant + 26: Bowl/Basin + 27: Flag + 28: Pillow + 29: Boots + 30: Vase + 31: Microphone + 32: Necklace + 33: Ring + 34: SUV + 35: Wine Glass + 36: Belt + 37: Monitor/TV + 38: Backpack + 39: Umbrella + 40: Traffic Light + 41: Speaker + 42: Watch + 43: Tie + 44: Trash bin Can + 45: Slippers + 46: Bicycle + 47: Stool + 48: Barrel/bucket + 49: Van + 50: Couch + 51: Sandals + 52: Basket + 53: Drum + 54: Pen/Pencil + 55: Bus + 56: Wild Bird + 57: High Heels + 58: Motorcycle + 59: Guitar + 60: Carpet + 61: Cell Phone + 62: Bread + 63: Camera + 64: Canned + 65: Truck + 66: Traffic cone + 67: Cymbal + 68: Lifesaver + 69: Towel + 70: Stuffed Toy + 71: Candle + 72: Sailboat + 73: Laptop + 74: Awning + 75: Bed + 76: Faucet + 77: Tent + 78: Horse + 79: Mirror + 80: Power outlet + 81: Sink + 82: Apple + 83: Air Conditioner + 84: Knife + 85: Hockey Stick + 86: Paddle + 87: Pickup Truck + 88: Fork + 89: Traffic Sign + 90: Balloon + 91: Tripod + 92: Dog + 93: Spoon + 94: Clock + 95: Pot + 96: Cow + 97: Cake + 98: Dinning Table + 99: Sheep + 100: Hanger + 101: Blackboard/Whiteboard + 102: Napkin + 103: Other Fish + 104: Orange/Tangerine + 105: Toiletry + 106: Keyboard + 107: Tomato + 108: Lantern + 109: Machinery Vehicle + 110: Fan + 111: Green Vegetables + 112: Banana + 113: Baseball Glove + 114: Airplane + 115: Mouse + 116: Train + 117: Pumpkin + 118: Soccer + 119: Skiboard + 120: Luggage + 121: Nightstand + 122: Tea pot + 123: Telephone + 124: Trolley + 125: Head Phone + 126: Sports Car + 127: Stop Sign + 128: Dessert + 129: Scooter + 130: Stroller + 131: Crane + 132: Remote + 133: Refrigerator + 134: Oven + 135: Lemon + 136: Duck + 137: Baseball Bat + 138: Surveillance Camera + 139: Cat + 140: Jug + 141: Broccoli + 142: Piano + 143: Pizza + 144: Elephant + 145: Skateboard + 146: Surfboard + 147: Gun + 148: Skating and Skiing shoes + 149: Gas stove + 150: Donut + 151: Bow Tie + 152: Carrot + 153: Toilet + 154: Kite + 155: Strawberry + 156: Other 
Balls + 157: Shovel + 158: Pepper + 159: Computer Box + 160: Toilet Paper + 161: Cleaning Products + 162: Chopsticks + 163: Microwave + 164: Pigeon + 165: Baseball + 166: Cutting/chopping Board + 167: Coffee Table + 168: Side Table + 169: Scissors + 170: Marker + 171: Pie + 172: Ladder + 173: Snowboard + 174: Cookies + 175: Radiator + 176: Fire Hydrant + 177: Basketball + 178: Zebra + 179: Grape + 180: Giraffe + 181: Potato + 182: Sausage + 183: Tricycle + 184: Violin + 185: Egg + 186: Fire Extinguisher + 187: Candy + 188: Fire Truck + 189: Billiards + 190: Converter + 191: Bathtub + 192: Wheelchair + 193: Golf Club + 194: Briefcase + 195: Cucumber + 196: Cigar/Cigarette + 197: Paint Brush + 198: Pear + 199: Heavy Truck + 200: Hamburger + 201: Extractor + 202: Extension Cord + 203: Tong + 204: Tennis Racket + 205: Folder + 206: American Football + 207: earphone + 208: Mask + 209: Kettle + 210: Tennis + 211: Ship + 212: Swing + 213: Coffee Machine + 214: Slide + 215: Carriage + 216: Onion + 217: Green beans + 218: Projector + 219: Frisbee + 220: Washing Machine/Drying Machine + 221: Chicken + 222: Printer + 223: Watermelon + 224: Saxophone + 225: Tissue + 226: Toothbrush + 227: Ice cream + 228: Hot-air balloon + 229: Cello + 230: French Fries + 231: Scale + 232: Trophy + 233: Cabbage + 234: Hot dog + 235: Blender + 236: Peach + 237: Rice + 238: Wallet/Purse + 239: Volleyball + 240: Deer + 241: Goose + 242: Tape + 243: Tablet + 244: Cosmetics + 245: Trumpet + 246: Pineapple + 247: Golf Ball + 248: Ambulance + 249: Parking meter + 250: Mango + 251: Key + 252: Hurdle + 253: Fishing Rod + 254: Medal + 255: Flute + 256: Brush + 257: Penguin + 258: Megaphone + 259: Corn + 260: Lettuce + 261: Garlic + 262: Swan + 263: Helicopter + 264: Green Onion + 265: Sandwich + 266: Nuts + 267: Speed Limit Sign + 268: Induction Cooker + 269: Broom + 270: Trombone + 271: Plum + 272: Rickshaw + 273: Goldfish + 274: Kiwi fruit + 275: Router/modem + 276: Poker Card + 277: Toaster + 278: Shrimp + 279: Sushi + 280: Cheese + 281: Notepaper + 282: Cherry + 283: Pliers + 284: CD + 285: Pasta + 286: Hammer + 287: Cue + 288: Avocado + 289: Hamimelon + 290: Flask + 291: Mushroom + 292: Screwdriver + 293: Soap + 294: Recorder + 295: Bear + 296: Eggplant + 297: Board Eraser + 298: Coconut + 299: Tape Measure/Ruler + 300: Pig + 301: Showerhead + 302: Globe + 303: Chips + 304: Steak + 305: Crosswalk Sign + 306: Stapler + 307: Camel + 308: Formula 1 + 309: Pomegranate + 310: Dishwasher + 311: Crab + 312: Hoverboard + 313: Meat ball + 314: Rice Cooker + 315: Tuba + 316: Calculator + 317: Papaya + 318: Antelope + 319: Parrot + 320: Seal + 321: Butterfly + 322: Dumbbell + 323: Donkey + 324: Lion + 325: Urinal + 326: Dolphin + 327: Electric Drill + 328: Hair Dryer + 329: Egg tart + 330: Jellyfish + 331: Treadmill + 332: Lighter + 333: Grapefruit + 334: Game board + 335: Mop + 336: Radish + 337: Baozi + 338: Target + 339: French + 340: Spring Rolls + 341: Monkey + 342: Rabbit + 343: Pencil Case + 344: Yak + 345: Red Cabbage + 346: Binoculars + 347: Asparagus + 348: Barbell + 349: Scallop + 350: Noddles + 351: Comb + 352: Dumpling + 353: Oyster + 354: Table Tennis paddle + 355: Cosmetics Brush/Eyeliner Pencil + 356: Chainsaw + 357: Eraser + 358: Lobster + 359: Durian + 360: Okra + 361: Lipstick + 362: Cosmetics Mirror + 363: Curling + 364: Table Tennis # Download script/URL (optional) --------------------------------------------------------------------------------------- diff --git a/data/SKU-110K.yaml b/data/SKU-110K.yaml index 
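With `names` stored as a mapping, downstream code swaps `enumerate(names)` for direct dict iteration, and consumers that still expect a flat list take the dict's values; both patterns appear in later hunks of this series (`classify/val.py` iterates `model.names.items()`, and the HUB stats code in `utils/dataloaders.py` uses `list(data['names'].values())`). A short sketch of the two access patterns:

    names = {0: 'person', 1: 'bicycle', 2: 'car'}  # new dict format

    for i, c in names.items():  # replaces `for i, c in enumerate(names)`
        print(f'{i:>3} {c}')

    names_list = list(names.values())  # flat list; insertion order matches index order here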
2acf34d155bd..edae7171c660 100644 --- a/data/SKU-110K.yaml +++ b/data/SKU-110K.yaml @@ -14,8 +14,8 @@ val: val.txt # val images (relative to 'path') 588 images test: test.txt # test images (optional) 2936 images # Classes -nc: 1 # number of classes -names: ['object'] # class names +names: + 0: object # Download script/URL (optional) --------------------------------------------------------------------------------------- diff --git a/data/VOC.yaml b/data/VOC.yaml index 636ddc42d46c..bbe5cf90a838 100644 --- a/data/VOC.yaml +++ b/data/VOC.yaml @@ -20,9 +20,27 @@ test: # test images (optional) - images/test2007 # Classes -nc: 20 # number of classes -names: ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', - 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'] # class names +names: + 0: aeroplane + 1: bicycle + 2: bird + 3: boat + 4: bottle + 5: bus + 6: car + 7: cat + 8: chair + 9: cow + 10: diningtable + 11: dog + 12: horse + 13: motorbike + 14: person + 15: pottedplant + 16: sheep + 17: sofa + 18: train + 19: tvmonitor # Download script/URL (optional) --------------------------------------------------------------------------------------- diff --git a/data/VisDrone.yaml b/data/VisDrone.yaml index 10337b46f104..a8bcf8e628ec 100644 --- a/data/VisDrone.yaml +++ b/data/VisDrone.yaml @@ -14,8 +14,17 @@ val: VisDrone2019-DET-val/images # val images (relative to 'path') 548 images test: VisDrone2019-DET-test-dev/images # test images (optional) 1610 images # Classes -nc: 10 # number of classes -names: ['pedestrian', 'people', 'bicycle', 'car', 'van', 'truck', 'tricycle', 'awning-tricycle', 'bus', 'motor'] +names: + 0: pedestrian + 1: people + 2: bicycle + 3: car + 4: van + 5: truck + 6: tricycle + 7: awning-tricycle + 8: bus + 9: motor # Download script/URL (optional) --------------------------------------------------------------------------------------- diff --git a/data/coco.yaml b/data/coco.yaml index 0c0c4adab05d..d64dfc7fed76 100644 --- a/data/coco.yaml +++ b/data/coco.yaml @@ -14,16 +14,87 @@ val: val2017.txt # val images (relative to 'path') 5000 images test: test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794 # Classes -nc: 80 # number of classes -names: ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', - 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', - 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', - 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', - 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', - 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', - 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', - 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', - 'hair drier', 'toothbrush'] # class names +names: + 0: person + 1: bicycle + 2: car + 3: motorcycle + 4: airplane + 5: bus + 6: train + 7: truck + 8: boat + 9: traffic light + 10: fire hydrant + 11: stop sign + 12: parking meter + 13: bench + 14: bird + 15: cat + 16: dog + 17: horse + 18: sheep + 19: cow + 20: elephant + 21: bear + 22: zebra + 23: giraffe + 24: 
backpack + 25: umbrella + 26: handbag + 27: tie + 28: suitcase + 29: frisbee + 30: skis + 31: snowboard + 32: sports ball + 33: kite + 34: baseball bat + 35: baseball glove + 36: skateboard + 37: surfboard + 38: tennis racket + 39: bottle + 40: wine glass + 41: cup + 42: fork + 43: knife + 44: spoon + 45: bowl + 46: banana + 47: apple + 48: sandwich + 49: orange + 50: broccoli + 51: carrot + 52: hot dog + 53: pizza + 54: donut + 55: cake + 56: chair + 57: couch + 58: potted plant + 59: bed + 60: dining table + 61: toilet + 62: tv + 63: laptop + 64: mouse + 65: remote + 66: keyboard + 67: cell phone + 68: microwave + 69: oven + 70: toaster + 71: sink + 72: refrigerator + 73: book + 74: clock + 75: vase + 76: scissors + 77: teddy bear + 78: hair drier + 79: toothbrush # Download script/URL (optional) diff --git a/data/coco128.yaml b/data/coco128.yaml index 2517d2079257..12556736a571 100644 --- a/data/coco128.yaml +++ b/data/coco128.yaml @@ -14,16 +14,87 @@ val: images/train2017 # val images (relative to 'path') 128 images test: # test images (optional) # Classes -nc: 80 # number of classes -names: ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', - 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', - 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', - 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', - 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', - 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', - 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', - 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', - 'hair drier', 'toothbrush'] # class names +names: + 0: person + 1: bicycle + 2: car + 3: motorcycle + 4: airplane + 5: bus + 6: train + 7: truck + 8: boat + 9: traffic light + 10: fire hydrant + 11: stop sign + 12: parking meter + 13: bench + 14: bird + 15: cat + 16: dog + 17: horse + 18: sheep + 19: cow + 20: elephant + 21: bear + 22: zebra + 23: giraffe + 24: backpack + 25: umbrella + 26: handbag + 27: tie + 28: suitcase + 29: frisbee + 30: skis + 31: snowboard + 32: sports ball + 33: kite + 34: baseball bat + 35: baseball glove + 36: skateboard + 37: surfboard + 38: tennis racket + 39: bottle + 40: wine glass + 41: cup + 42: fork + 43: knife + 44: spoon + 45: bowl + 46: banana + 47: apple + 48: sandwich + 49: orange + 50: broccoli + 51: carrot + 52: hot dog + 53: pizza + 54: donut + 55: cake + 56: chair + 57: couch + 58: potted plant + 59: bed + 60: dining table + 61: toilet + 62: tv + 63: laptop + 64: mouse + 65: remote + 66: keyboard + 67: cell phone + 68: microwave + 69: oven + 70: toaster + 71: sink + 72: refrigerator + 73: book + 74: clock + 75: vase + 76: scissors + 77: teddy bear + 78: hair drier + 79: toothbrush # Download script/URL (optional) diff --git a/data/xView.yaml b/data/xView.yaml index 3b38f1ff4439..b134ceac8164 100644 --- a/data/xView.yaml +++ b/data/xView.yaml @@ -14,16 +14,67 @@ train: images/autosplit_train.txt # train images (relative to 'path') 90% of 84 val: images/autosplit_val.txt # train images (relative to 'path') 10% of 847 train images # Classes -nc: 60 # number of classes -names: ['Fixed-wing Aircraft', 'Small Aircraft', 'Cargo 
Plane', 'Helicopter', 'Passenger Vehicle', 'Small Car', 'Bus', - 'Pickup Truck', 'Utility Truck', 'Truck', 'Cargo Truck', 'Truck w/Box', 'Truck Tractor', 'Trailer', - 'Truck w/Flatbed', 'Truck w/Liquid', 'Crane Truck', 'Railway Vehicle', 'Passenger Car', 'Cargo Car', - 'Flat Car', 'Tank car', 'Locomotive', 'Maritime Vessel', 'Motorboat', 'Sailboat', 'Tugboat', 'Barge', - 'Fishing Vessel', 'Ferry', 'Yacht', 'Container Ship', 'Oil Tanker', 'Engineering Vehicle', 'Tower crane', - 'Container Crane', 'Reach Stacker', 'Straddle Carrier', 'Mobile Crane', 'Dump Truck', 'Haul Truck', - 'Scraper/Tractor', 'Front loader/Bulldozer', 'Excavator', 'Cement Mixer', 'Ground Grader', 'Hut/Tent', 'Shed', - 'Building', 'Aircraft Hangar', 'Damaged Building', 'Facility', 'Construction Site', 'Vehicle Lot', 'Helipad', - 'Storage Tank', 'Shipping container lot', 'Shipping Container', 'Pylon', 'Tower'] # class names +names: + 0: Fixed-wing Aircraft + 1: Small Aircraft + 2: Cargo Plane + 3: Helicopter + 4: Passenger Vehicle + 5: Small Car + 6: Bus + 7: Pickup Truck + 8: Utility Truck + 9: Truck + 10: Cargo Truck + 11: Truck w/Box + 12: Truck Tractor + 13: Trailer + 14: Truck w/Flatbed + 15: Truck w/Liquid + 16: Crane Truck + 17: Railway Vehicle + 18: Passenger Car + 19: Cargo Car + 20: Flat Car + 21: Tank car + 22: Locomotive + 23: Maritime Vessel + 24: Motorboat + 25: Sailboat + 26: Tugboat + 27: Barge + 28: Fishing Vessel + 29: Ferry + 30: Yacht + 31: Container Ship + 32: Oil Tanker + 33: Engineering Vehicle + 34: Tower crane + 35: Container Crane + 36: Reach Stacker + 37: Straddle Carrier + 38: Mobile Crane + 39: Dump Truck + 40: Haul Truck + 41: Scraper/Tractor + 42: Front loader/Bulldozer + 43: Excavator + 44: Cement Mixer + 45: Ground Grader + 46: Hut/Tent + 47: Shed + 48: Building + 49: Aircraft Hangar + 50: Damaged Building + 51: Facility + 52: Construction Site + 53: Vehicle Lot + 54: Helipad + 55: Storage Tank + 56: Shipping container lot + 57: Shipping Container + 58: Pylon + 59: Tower # Download script/URL (optional) --------------------------------------------------------------------------------------- diff --git a/models/common.py b/models/common.py index 17e40e60d7d7..30202ca1abd7 100644 --- a/models/common.py +++ b/models/common.py @@ -449,7 +449,7 @@ def wrap_frozen_graph(gd, inputs, outputs): # class names if 'names' not in locals(): - names = yaml_load(data)['names'] if data else [f'class{i}' for i in range(999)] + names = yaml_load(data)['names'] if data else {i: f'class{i}' for i in range(999)} if names[0] == 'n01440764' and len(names) == 1000: # ImageNet names = yaml_load(ROOT / 'data/ImageNet.yaml')['names'] # human-readable names diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 2c04040bf25d..33e84ce4056e 100755 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -1004,7 +1004,7 @@ def __init__(self, path='coco128.yaml', autodownload=False): self.hub_dir = Path(data['path'] + '-hub') self.im_dir = self.hub_dir / 'images' self.im_dir.mkdir(parents=True, exist_ok=True) # makes /images - self.stats = {'nc': data['nc'], 'names': data['names']} # statistics dictionary + self.stats = {'nc': data['nc'], 'names': list(data['names'].values())} # statistics dictionary self.data = data @staticmethod diff --git a/utils/general.py b/utils/general.py index 1c525c45f649..76872b696d79 100755 --- a/utils/general.py +++ b/utils/general.py @@ -481,11 +481,11 @@ def check_dataset(data, autodownload=True): data = yaml.safe_load(f) # dictionary # Checks - for k in 'train', 'val', 'nc': + for k 
in 'train', 'val', 'names': assert k in data, f"data.yaml '{k}:' field missing ❌" - if 'names' not in data: - LOGGER.warning("data.yaml 'names:' field missing ⚠️, assigning default names 'class0', 'class1', etc.") - data['names'] = [f'class{i}' for i in range(data['nc'])] # default names + if isinstance(data['names'], (list, tuple)): # old array format + data['names'] = dict(enumerate(data['names'])) # convert to dict + data['nc'] = len(data['names']) # Resolve paths path = Path(extract_dir or data.get('path') or '') # optional 'path' default to '.' diff --git a/val.py b/val.py index 130496233467..ce743b506aff 100644 --- a/val.py +++ b/val.py @@ -182,7 +182,9 @@ def run( seen = 0 confusion_matrix = ConfusionMatrix(nc=nc) - names = dict(enumerate(model.names if hasattr(model, 'names') else model.module.names)) + names = model.names if hasattr(model, 'names') else model.module.names # get class names + if isinstance(names, (list, tuple)): # old format + names = dict(enumerate(names)) class_map = coco80_to_coco91_class() if is_coco else list(range(1000)) s = ('%20s' + '%11s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95') dt, p, r, f1, mp, mr, map50, map = [0.0, 0.0, 0.0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 From 64e0757edffc6b2e927e16c8e2aa26439aceb4ce Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Thu, 18 Aug 2022 02:11:43 +0530 Subject: [PATCH 1316/1976] [Classify]: Allow inference on dirs and videos (#9003) * allow image dirs * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update predict.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dataloaders.py * Update predict.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update predict.py * Update predict.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- classify/predict.py | 64 +++++++++++++++++++++++--------------------- utils/dataloaders.py | 25 ++++++++--------- 2 files changed, 46 insertions(+), 43 deletions(-) diff --git a/classify/predict.py b/classify/predict.py index 87379e42159b..7af5f60a2b9d 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -1,6 +1,6 @@ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ -Run classification inference on images +Run classification inference on file/dir/URL/glob Usage: $ python classify/predict.py --weights yolov5s-cls.pt --source data/images/bus.jpg @@ -11,7 +11,6 @@ import sys from pathlib import Path -import cv2 import torch.nn.functional as F FILE = Path(__file__).resolve() @@ -20,27 +19,31 @@ sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative -from classify.train import imshow_cls from models.common import DetectMultiBackend from utils.augmentations import classify_transforms -from utils.general import LOGGER, check_requirements, colorstr, increment_path, print_args +from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages +from utils.general import LOGGER, check_file, check_requirements, colorstr, increment_path, print_args from utils.torch_utils import select_device, smart_inference_mode, time_sync @smart_inference_mode() def run( weights=ROOT / 'yolov5s-cls.pt', # model.pt path(s) - source=ROOT / 'data/images/bus.jpg', # file/dir/URL/glob, 0 for webcam + source=ROOT / 'data/images', # file/dir/URL/glob imgsz=224, # inference size device='', # 
cuda device, i.e. 0 or 0,1,2,3 or cpu half=False, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference - show=True, project=ROOT / 'runs/predict-cls', # save to project/name name='exp', # save to project/name exist_ok=False, # existing project/name ok, do not increment ): - file = str(source) + source = str(source) + is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) + is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) + if is_url and is_file: + source = check_file(source) # download + seen, dt = 1, [0.0, 0.0, 0.0] device = select_device(device) @@ -48,37 +51,36 @@ def run( save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run save_dir.mkdir(parents=True, exist_ok=True) # make dir - # Transforms - transforms = classify_transforms(imgsz) - # Load model model = DetectMultiBackend(weights, device=device, dnn=dnn, fp16=half) model.warmup(imgsz=(1, 3, imgsz, imgsz)) # warmup - - # Image - t1 = time_sync() - im = cv2.cvtColor(cv2.imread(file), cv2.COLOR_BGR2RGB) - im = transforms(im).unsqueeze(0).to(device) - im = im.half() if model.fp16 else im.float() - t2 = time_sync() - dt[0] += t2 - t1 - - # Inference - results = model(im) - t3 = time_sync() - dt[1] += t3 - t2 - - p = F.softmax(results, dim=1) # probabilities - i = p.argsort(1, descending=True)[:, :5].squeeze() # top 5 indices - dt[2] += time_sync() - t3 - LOGGER.info(f"image 1/1 {file}: {imgsz}x{imgsz} {', '.join(f'{model.names[j]} {p[0, j]:.2f}' for j in i.tolist())}") + dataset = LoadImages(source, img_size=imgsz, transforms=classify_transforms(imgsz)) + for path, im, im0s, vid_cap, s in dataset: + # Image + t1 = time_sync() + im = im.unsqueeze(0).to(device) + im = im.half() if model.fp16 else im.float() + t2 = time_sync() + dt[0] += t2 - t1 + + # Inference + results = model(im) + t3 = time_sync() + dt[1] += t3 - t2 + + # Post-process + p = F.softmax(results, dim=1) # probabilities + i = p.argsort(1, descending=True)[:, :5].squeeze().tolist() # top 5 indices + dt[2] += time_sync() - t3 + # if save: + # imshow_cls(im, f=save_dir / Path(path).name, verbose=True) + seen += 1 + LOGGER.info(f"{s}{imgsz}x{imgsz} {', '.join(f'{model.names[j]} {p[0, j]:.2f}' for j in i)}") # Print results t = tuple(x / seen * 1E3 for x in dt) # speeds per image shape = (1, 3, imgsz, imgsz) LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms post-process per image at shape {shape}' % t) - if show: - imshow_cls(im, f=save_dir / Path(file).name, verbose=True) LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}") return p @@ -86,7 +88,7 @@ def run( def parse_opt(): parser = argparse.ArgumentParser() parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-cls.pt', help='model path(s)') - parser.add_argument('--source', type=str, default=ROOT / 'data/images/bus.jpg', help='file') + parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob') parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=224, help='train, val image size (pixels)') parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 33e84ce4056e..3f26be2cd32d 100755 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -186,7 +186,7 @@ def __iter__(self): class LoadImages: # YOLOv5 image/video dataloader, i.e. 
`python detect.py --source image.jpg/vid.mp4` - def __init__(self, path, img_size=640, stride=32, auto=True): + def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None): files = [] for p in sorted(path) if isinstance(path, (list, tuple)) else [path]: p = str(Path(p).resolve()) @@ -210,6 +210,7 @@ def __init__(self, path, img_size=640, stride=32, auto=True): self.video_flag = [False] * ni + [True] * nv self.mode = 'image' self.auto = auto + self.transforms = transforms # optional if any(videos): self.new_video(videos[0]) # new video else: @@ -229,7 +230,7 @@ def __next__(self): if self.video_flag[self.count]: # Read video self.mode = 'video' - ret_val, img0 = self.cap.read() + ret_val, im0 = self.cap.read() while not ret_val: self.count += 1 self.cap.release() @@ -237,7 +238,7 @@ def __next__(self): raise StopIteration path = self.files[self.count] self.new_video(path) - ret_val, img0 = self.cap.read() + ret_val, im0 = self.cap.read() self.frame += 1 s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: ' @@ -245,18 +246,18 @@ def __next__(self): else: # Read image self.count += 1 - img0 = cv2.imread(path) # BGR - assert img0 is not None, f'Image Not Found {path}' + im0 = cv2.imread(path) # BGR + assert im0 is not None, f'Image Not Found {path}' s = f'image {self.count}/{self.nf} {path}: ' - # Padded resize - img = letterbox(img0, self.img_size, stride=self.stride, auto=self.auto)[0] - - # Convert - img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB - img = np.ascontiguousarray(img) + if self.transforms: + im = self.transforms(cv2.cvtColor(im0, cv2.COLOR_BGR2RGB)) # classify transforms + else: + im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize + im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB + im = np.ascontiguousarray(im) # contiguous - return path, img, img0, self.cap, s + return path, im, im0, self.cap, s def new_video(self, path): self.frame = 0 From 0922bc2082d8c754bbd733d90bd1ccd2aea79ee9 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 17 Aug 2022 22:50:08 +0200 Subject: [PATCH 1317/1976] DockerHub tag update Usage example (#9005) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index 2280f209e6a1..cf2c1c5cb3cb 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -49,11 +49,8 @@ ENV OMP_NUM_THREADS=8 # Kill all image-based # sudo docker kill $(sudo docker ps -qa --filter ancestor=ultralytics/yolov5:latest) -# Bash into running container -# sudo docker exec -it 5a9b5863d93d bash - -# Bash into stopped container -# id=$(sudo docker ps -qa) && sudo docker start $id && sudo docker exec -it $id bash +# DockerHub tag update +# t=ultralytics/yolov5:latest tnew=ultralytics/yolov5:v6.2 && sudo docker pull $t && sudo docker tag $t $tnew && sudo docker push $tnew # Clean up # docker system prune -a --volumes From 6728dad76df8d62ed3c08e39c224a773d20582a0 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 17 Aug 2022 22:57:55 +0200 Subject: [PATCH 1318/1976] Add weight `decay` to argparser (#9006) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- classify/train.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/classify/train.py b/classify/train.py index b85f14236039..d55dc066d7a3 100644 --- a/classify/train.py +++ b/classify/train.py @@ -136,7 +136,7 @@ def train(opt, device): 
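# Aside: the hunks below stop hard-coding weight decay at 5e-5 and instead thread the
# new --decay argparser flag through to the optimizer, e.g.:
#   python classify/train.py --model yolov5s-cls.pt --decay 1e-4
# A hedged sketch of the conventional parameter grouping such a builder applies (the
# real smart_optimizer lives in utils/torch_utils; its exact grouping may differ):
import torch

def build_optimizer(model, lr=0.001, momentum=0.9, decay=5e-5):  # illustrative helper
    decay_p, no_decay_p = [], []
    for p in model.parameters():
        # 1-D tensors (biases, norm weights) conventionally skip weight decay
        (no_decay_p if p.ndim <= 1 else decay_p).append(p)
    return torch.optim.SGD([{'params': decay_p, 'weight_decay': decay},
                            {'params': no_decay_p, 'weight_decay': 0.0}],
                           lr=lr, momentum=momentum)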
logger.log_graph(model, imgsz) # log model # Optimizer - optimizer = smart_optimizer(model, opt.optimizer, opt.lr0, momentum=0.9, decay=5e-5) + optimizer = smart_optimizer(model, opt.optimizer, opt.lr0, momentum=0.9, decay=opt.decay) # Scheduler lrf = 0.01 # final lr (fraction of lr0) @@ -280,6 +280,7 @@ def parse_opt(known=False): parser.add_argument('--pretrained', nargs='?', const=True, default=True, help='start from i.e. --pretrained False') parser.add_argument('--optimizer', choices=['SGD', 'Adam', 'AdamW', 'RMSProp'], default='Adam', help='optimizer') parser.add_argument('--lr0', type=float, default=0.001, help='initial learning rate') + parser.add_argument('--decay', type=float, default=5e-5, help='weight decay') parser.add_argument('--label-smoothing', type=float, default=0.1, help='Label smoothing epsilon') parser.add_argument('--cutoff', type=int, default=None, help='Model layer cutoff index for Classify() head') parser.add_argument('--dropout', type=float, default=None, help='Dropout (fraction)') From e08d568d39a8b1c24ec7eb54da80cf3b22f64f07 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 18 Aug 2022 01:08:52 +0200 Subject: [PATCH 1319/1976] Add glob quotes to detect.py usage example (#9007) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- detect.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/detect.py b/detect.py index c699a749a09f..dd60b87ca33a 100644 --- a/detect.py +++ b/detect.py @@ -7,7 +7,7 @@ img.jpg # image vid.mp4 # video path/ # directory - path/*.jpg # glob + 'path/*.jpg' # glob 'https://youtu.be/Zgi9g1ksQHc' # YouTube 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream From 5c854fab5e43df82ebfd51197c2dc58e5212c5a6 Mon Sep 17 00:00:00 2001 From: glennjocher Date: Thu, 18 Aug 2022 02:44:50 +0200 Subject: [PATCH 1320/1976] requires grad after reset params --- classify/train.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/classify/train.py b/classify/train.py index d55dc066d7a3..9fb7c52b545a 100644 --- a/classify/train.py +++ b/classify/train.py @@ -114,13 +114,13 @@ def train(opt, device): LOGGER.warning("WARNING: pass YOLOv5 classifier model with '-cls' suffix, i.e. 
'--model yolov5s-cls.pt'") model = ClassificationModel(model=model, nc=nc, cutoff=opt.cutoff or 10) # convert to classification model reshape_classifier_output(model, nc) # update class count - for p in model.parameters(): - p.requires_grad = True # for training for m in model.modules(): if not pretrained and hasattr(m, 'reset_parameters'): m.reset_parameters() if isinstance(m, torch.nn.Dropout) and opt.dropout is not None: m.p = opt.dropout # set dropout + for p in model.parameters(): + p.requires_grad = True # for training model = model.to(device) names = trainloader.dataset.classes # class names model.names = names # attach class names From 529aafd737053264cf8676b29c37f5d5300460eb Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 18 Aug 2022 11:50:24 +0200 Subject: [PATCH 1321/1976] Fix TorchScript JSON string key bug (#9015) * Fix TorchScript JSON string key bug Resolves https://github.com/ultralytics/yolov5/issues/9011 Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- models/common.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/models/common.py b/models/common.py index 30202ca1abd7..4f93887c55e0 100644 --- a/models/common.py +++ b/models/common.py @@ -337,8 +337,10 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, extra_files = {'config.txt': ''} # model metadata model = torch.jit.load(w, _extra_files=extra_files) model.half() if fp16 else model.float() - if extra_files['config.txt']: - d = json.loads(extra_files['config.txt']) # extra_files dict + if extra_files['config.txt']: # load metadata dict + d = json.loads(extra_files['config.txt'], + object_hook=lambda d: {int(k) if k.isdigit() else k: v + for k, v in d.items()}) stride, names = int(d['stride']), d['names'] elif dnn: # ONNX OpenCV DNN LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...') From 20049be2e7dc6f330e3620dd82761bc3f4d02e36 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 18 Aug 2022 14:06:15 +0200 Subject: [PATCH 1322/1976] EMA FP32 assert classification bug fix (#9016) * Return EMA float on classification val * verbose val fix * EMA check --- classify/val.py | 3 ++- export.py | 2 +- models/experimental.py | 10 +++++++--- train.py | 3 +-- utils/torch_utils.py | 7 +++---- 5 files changed, 14 insertions(+), 11 deletions(-) diff --git a/classify/val.py b/classify/val.py index 9d965d9f1fdc..b76fb5147ecd 100644 --- a/classify/val.py +++ b/classify/val.py @@ -116,7 +116,7 @@ def run( if verbose: # all classes LOGGER.info(f"{'Class':>24}{'Images':>12}{'top1_acc':>12}{'top5_acc':>12}") LOGGER.info(f"{'all':>24}{targets.shape[0]:>12}{top1:>12.3g}{top5:>12.3g}") - for i, c in enumerate(model.names): + for i, c in model.names.items(): aci = acc[targets == i] top1i, top5i = aci.mean(0).tolist() LOGGER.info(f"{c:>24}{aci.shape[0]:>12}{top1i:>12.3g}{top5i:>12.3g}") @@ -127,6 +127,7 @@ def run( LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms post-process per image at shape {shape}' % t) LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}") + model.float() # for training return top1, top5, loss diff --git a/export.py b/export.py index 595039b24bce..7b398fdc4d93 100644 --- a/export.py +++ b/export.py @@ -599,7 +599,7 @@ def parse_opt(): parser.add_argument('--conf-thres', type=float, default=0.25, help='TF.js NMS: confidence 
threshold') parser.add_argument('--include', nargs='+', - default=['torchscript', 'onnx'], + default=['torchscript'], help='torchscript, onnx, openvino, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs') opt = parser.parse_args() print_args(vars(opt)) diff --git a/models/experimental.py b/models/experimental.py index cb32d01ba46a..02d35b9ebd11 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -8,7 +8,6 @@ import torch import torch.nn as nn -from models.common import Conv from utils.downloads import attempt_download @@ -79,11 +78,16 @@ def attempt_load(weights, device=None, inplace=True, fuse=True): for w in weights if isinstance(weights, list) else [weights]: ckpt = torch.load(attempt_download(w), map_location='cpu') # load ckpt = (ckpt.get('ema') or ckpt['model']).to(device).float() # FP32 model + + # Model compatibility updates if not hasattr(ckpt, 'stride'): - ckpt.stride = torch.tensor([32.]) # compatibility update for ResNet etc. + ckpt.stride = torch.tensor([32.]) + if hasattr(ckpt, 'names') and isinstance(ckpt.names, (list, tuple)): + ckpt.names = dict(enumerate(ckpt.names)) # convert to dict + model.append(ckpt.fuse().eval() if fuse and hasattr(ckpt, 'fuse') else ckpt.eval()) # model in eval mode - # Compatibility updates + # Module compatibility updates for m in model.modules(): t = type(m) if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model): diff --git a/train.py b/train.py index bbb26cdeafeb..10a3bdb56002 100644 --- a/train.py +++ b/train.py @@ -107,8 +107,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio data_dict = data_dict or check_dataset(data) # check if None train_path, val_path = data_dict['train'], data_dict['val'] nc = 1 if single_cls else int(data_dict['nc']) # number of classes - names = ['item'] if single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names - assert len(names) == nc, f'{len(names)} names found for nc={nc} dataset in {data}' # check + names = {0: 'item'} if single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names is_coco = isinstance(val_path, str) and val_path.endswith('coco/val2017.txt') # COCO dataset # Model diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 1cdbe20f8670..ed56064ce02e 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -408,8 +408,6 @@ class ModelEMA: def __init__(self, model, decay=0.9999, tau=2000, updates=0): # Create EMA self.ema = deepcopy(de_parallel(model)).eval() # FP32 EMA - # if next(model.parameters()).device.type != 'cpu': - # self.ema.half() # FP16 EMA self.updates = updates # number of EMA updates self.decay = lambda x: decay * (1 - math.exp(-x / tau)) # decay exponential ramp (to help early epochs) for p in self.ema.parameters(): @@ -423,9 +421,10 @@ def update(self, model): msd = de_parallel(model).state_dict() # model state_dict for k, v in self.ema.state_dict().items(): - if v.dtype.is_floating_point: + if v.dtype.is_floating_point: # true for FP16 and FP32 v *= d - v += (1 - d) * msd[k].detach() + v += (1 - d) * msd[k] + assert v.dtype == msd[k].dtype == torch.float32, f'EMA {v.dtype} and model {msd[k]} must be updated in FP32' def update_attr(self, model, include=(), exclude=('process_group', 'reducer')): # Update EMA attributes From c0e7a776cd55e8c01b63714f6f7fea3d53f6bf5b Mon Sep 17 00:00:00 2001 From: cher-liang <88578531+cher-liang@users.noreply.github.com> Date: Thu, 18 Aug 2022 20:18:02 +0800 Subject: [PATCH 1323/1976] Faster pre-processing for gray 
image input (#9009) * faster 1 channel to 3 channels image conversion * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- models/common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/common.py b/models/common.py index 4f93887c55e0..f914c9d60fdb 100644 --- a/models/common.py +++ b/models/common.py @@ -617,7 +617,7 @@ def forward(self, imgs, size=640, augment=False, profile=False): files.append(Path(f).with_suffix('.jpg').name) if im.shape[0] < 5: # image in CHW im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1) - im = im[..., :3] if im.ndim == 3 else np.tile(im[..., None], 3) # enforce 3ch input + im = im[..., :3] if im.ndim == 3 else cv2.cvtColor(im, cv2.COLOR_GRAY2BGR) # enforce 3ch input s = im.shape[:2] # HWC shape0.append(s) # image shape g = (size / max(s)) # gain From d40cd0d454dcc34312cb5c40f45f64b76665c40c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 18 Aug 2022 19:55:38 +0200 Subject: [PATCH 1324/1976] Improved `Profile()` inference timing (#9024) * Improved `Profile()` class * Update predict.py * Update val.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update val.py * Update AutoShape Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- classify/predict.py | 37 +++++++------- classify/val.py | 29 +++++------ detect.py | 35 ++++++------- models/common.py | 117 ++++++++++++++++++++++---------------------- utils/general.py | 18 +++++-- val.py | 31 ++++++------ 6 files changed, 133 insertions(+), 134 deletions(-) diff --git a/classify/predict.py b/classify/predict.py index 7af5f60a2b9d..0bf99140b8e3 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -22,8 +22,8 @@ from models.common import DetectMultiBackend from utils.augmentations import classify_transforms from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages -from utils.general import LOGGER, check_file, check_requirements, colorstr, increment_path, print_args -from utils.torch_utils import select_device, smart_inference_mode, time_sync +from utils.general import LOGGER, Profile, check_file, check_requirements, colorstr, increment_path, print_args +from utils.torch_utils import select_device, smart_inference_mode @smart_inference_mode() @@ -44,7 +44,7 @@ def run( if is_url and is_file: source = check_file(source) # download - seen, dt = 1, [0.0, 0.0, 0.0] + dt = Profile(), Profile(), Profile() device = select_device(device) # Directories @@ -55,30 +55,27 @@ def run( model = DetectMultiBackend(weights, device=device, dnn=dnn, fp16=half) model.warmup(imgsz=(1, 3, imgsz, imgsz)) # warmup dataset = LoadImages(source, img_size=imgsz, transforms=classify_transforms(imgsz)) - for path, im, im0s, vid_cap, s in dataset: + for seen, (path, im, im0s, vid_cap, s) in enumerate(dataset): # Image - t1 = time_sync() - im = im.unsqueeze(0).to(device) - im = im.half() if model.fp16 else im.float() - t2 = time_sync() - dt[0] += t2 - t1 + with dt[0]: + im = im.unsqueeze(0).to(device) + im = im.half() if model.fp16 else im.float() # Inference - results = model(im) - t3 = time_sync() - dt[1] += t3 - t2 + with dt[1]: + results = model(im) # Post-process - p = F.softmax(results, dim=1) # probabilities - i = p.argsort(1, descending=True)[:, :5].squeeze().tolist() # top 5 indices - dt[2] += time_sync() 
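The classify/predict.py hunk here shows the core pattern of patch 1324: manual time_sync() bookkeeping is replaced by Profile context managers that accumulate elapsed time on exit. A minimal standalone sketch of that pattern, using a simplified stand-in for the Profile class this patch adds to utils/general.py (no CUDA synchronization here):

import contextlib
import time

class Profile(contextlib.ContextDecorator):
    # Accumulating timer: every 'with' block adds its elapsed time to self.t
    def __init__(self, t=0.0):
        self.t = t  # total accumulated seconds

    def __enter__(self):
        self.start = time.time()
        return self

    def __exit__(self, *exc):
        self.dt = time.time() - self.start  # delta-time of the latest block
        self.t += self.dt  # accumulate

dt = (Profile(), Profile(), Profile())  # pre-process, inference, post-process
for _ in range(3):  # stand-in for the dataset loop
    with dt[0]:
        time.sleep(0.001)  # pre-processing would go here
    with dt[1]:
        time.sleep(0.002)  # inference would go here
    with dt[2]:
        time.sleep(0.001)  # post-processing/NMS would go here
print(tuple(x.t / 3 * 1E3 for x in dt))  # mean ms per stage, as in the patch
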
- t3 - # if save: - # imshow_cls(im, f=save_dir / Path(path).name, verbose=True) - seen += 1 - LOGGER.info(f"{s}{imgsz}x{imgsz} {', '.join(f'{model.names[j]} {p[0, j]:.2f}' for j in i)}") + with dt[2]: + p = F.softmax(results, dim=1) # probabilities + i = p.argsort(1, descending=True)[:, :5].squeeze().tolist() # top 5 indices + # if save: + # imshow_cls(im, f=save_dir / Path(path).name, verbose=True) + LOGGER.info( + f"{s}{imgsz}x{imgsz} {', '.join(f'{model.names[j]} {p[0, j]:.2f}' for j in i)}, {dt[1].dt * 1E3:.1f}ms") # Print results - t = tuple(x / seen * 1E3 for x in dt) # speeds per image + t = tuple(x.t / (seen + 1) * 1E3 for x in dt) # speeds per image shape = (1, 3, imgsz, imgsz) LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms post-process per image at shape {shape}' % t) LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}") diff --git a/classify/val.py b/classify/val.py index b76fb5147ecd..c91e2cf82c81 100644 --- a/classify/val.py +++ b/classify/val.py @@ -23,8 +23,8 @@ from models.common import DetectMultiBackend from utils.dataloaders import create_classification_dataloader -from utils.general import LOGGER, check_img_size, check_requirements, colorstr, increment_path, print_args -from utils.torch_utils import select_device, smart_inference_mode, time_sync +from utils.general import LOGGER, Profile, check_img_size, check_requirements, colorstr, increment_path, print_args +from utils.torch_utils import select_device, smart_inference_mode @smart_inference_mode() @@ -83,27 +83,24 @@ def run( workers=workers) model.eval() - pred, targets, loss, dt = [], [], 0, [0.0, 0.0, 0.0] + pred, targets, loss, dt = [], [], 0, (Profile(), Profile(), Profile()) n = len(dataloader) # number of batches action = 'validating' if dataloader.dataset.root.stem == 'val' else 'testing' desc = f"{pbar.desc[:-36]}{action:>36}" if pbar else f"{action}" bar = tqdm(dataloader, desc, n, not training, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}', position=0) with torch.cuda.amp.autocast(enabled=device.type != 'cpu'): for images, labels in bar: - t1 = time_sync() - images, labels = images.to(device, non_blocking=True), labels.to(device) - t2 = time_sync() - dt[0] += t2 - t1 + with dt[0]: + images, labels = images.to(device, non_blocking=True), labels.to(device) - y = model(images) - t3 = time_sync() - dt[1] += t3 - t2 + with dt[1]: + y = model(images) - pred.append(y.argsort(1, descending=True)[:, :5]) - targets.append(labels) - if criterion: - loss += criterion(y, labels) - dt[2] += time_sync() - t3 + with dt[2]: + pred.append(y.argsort(1, descending=True)[:, :5]) + targets.append(labels) + if criterion: + loss += criterion(y, labels) loss /= n pred, targets = torch.cat(pred), torch.cat(targets) @@ -122,7 +119,7 @@ def run( LOGGER.info(f"{c:>24}{aci.shape[0]:>12}{top1i:>12.3g}{top5i:>12.3g}") # Print results - t = tuple(x / len(dataloader.dataset.samples) * 1E3 for x in dt) # speeds per image + t = tuple(x.t / len(dataloader.dataset.samples) * 1E3 for x in dt) # speeds per image shape = (1, 3, imgsz, imgsz) LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms post-process per image at shape {shape}' % t) LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}") diff --git a/detect.py b/detect.py index dd60b87ca33a..93ae0baccd13 100644 --- a/detect.py +++ b/detect.py @@ -41,10 +41,10 @@ from models.common import DetectMultiBackend from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams -from utils.general import (LOGGER, check_file, 
check_img_size, check_imshow, check_requirements, colorstr, cv2, +from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, increment_path, non_max_suppression, print_args, scale_coords, strip_optimizer, xyxy2xywh) from utils.plots import Annotator, colors, save_one_box -from utils.torch_utils import select_device, smart_inference_mode, time_sync +from utils.torch_utils import select_device, smart_inference_mode @smart_inference_mode() @@ -107,26 +107,23 @@ def run( # Run inference model.warmup(imgsz=(1 if pt else bs, 3, *imgsz)) # warmup - seen, windows, dt = 0, [], [0.0, 0.0, 0.0] + seen, windows, dt = 0, [], (Profile(), Profile(), Profile()) for path, im, im0s, vid_cap, s in dataset: - t1 = time_sync() - im = torch.from_numpy(im).to(device) - im = im.half() if model.fp16 else im.float() # uint8 to fp16/32 - im /= 255 # 0 - 255 to 0.0 - 1.0 - if len(im.shape) == 3: - im = im[None] # expand for batch dim - t2 = time_sync() - dt[0] += t2 - t1 + with dt[0]: + im = torch.from_numpy(im).to(device) + im = im.half() if model.fp16 else im.float() # uint8 to fp16/32 + im /= 255 # 0 - 255 to 0.0 - 1.0 + if len(im.shape) == 3: + im = im[None] # expand for batch dim # Inference - visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False - pred = model(im, augment=augment, visualize=visualize) - t3 = time_sync() - dt[1] += t3 - t2 + with dt[1]: + visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False + pred = model(im, augment=augment, visualize=visualize) # NMS - pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det) - dt[2] += time_sync() - t3 + with dt[2]: + pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det) # Second-stage classifier (optional) # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s) @@ -201,10 +198,10 @@ def run( vid_writer[i].write(im0) # Print time (inference-only) - LOGGER.info(f'{s}Done. 
({t3 - t2:.3f}s)') + LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms") # Print results - t = tuple(x / seen * 1E3 for x in dt) # speeds per image + t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) if save_txt or save_img: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' diff --git a/models/common.py b/models/common.py index f914c9d60fdb..33aa2ac12465 100644 --- a/models/common.py +++ b/models/common.py @@ -21,10 +21,11 @@ from torch.cuda import amp from utils.dataloaders import exif_transpose, letterbox -from utils.general import (LOGGER, ROOT, check_requirements, check_suffix, check_version, colorstr, increment_path, - make_divisible, non_max_suppression, scale_coords, xywh2xyxy, xyxy2xywh, yaml_load) +from utils.general import (LOGGER, ROOT, Profile, check_requirements, check_suffix, check_version, colorstr, + increment_path, make_divisible, non_max_suppression, scale_coords, xywh2xyxy, xyxy2xywh, + yaml_load) from utils.plots import Annotator, colors, save_one_box -from utils.torch_utils import copy_attr, smart_inference_mode, time_sync +from utils.torch_utils import copy_attr, smart_inference_mode def autopad(k, p=None): # kernel, padding @@ -587,9 +588,9 @@ def _apply(self, fn): return self @smart_inference_mode() - def forward(self, imgs, size=640, augment=False, profile=False): + def forward(self, ims, size=640, augment=False, profile=False): # Inference from various sources. For height=640, width=1280, RGB images example inputs are: - # file: imgs = 'data/images/zidane.jpg' # str or PosixPath + # file: ims = 'data/images/zidane.jpg' # str or PosixPath # URI: = 'https://ultralytics.com/images/zidane.jpg' # OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(640,1280,3) # PIL: = Image.open('image.jpg') or ImageGrab.grab() # HWC x(640,1280,3) @@ -597,65 +598,65 @@ def forward(self, imgs, size=640, augment=False, profile=False): # torch: = torch.zeros(16,3,320,640) # BCHW (scaled to size=640, 0-1 values) # multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] 
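The comment block above enumerates the source types AutoShape.forward() accepts, singly or as a batch list. A hedged usage sketch follows; it assumes network access for the hub download and a yolov5 checkout for the local image path, and timings will vary by hardware:

import torch

model = torch.hub.load('ultralytics/yolov5', 'yolov5s')  # AutoShape-wrapped model
results = model(['https://ultralytics.com/images/zidane.jpg',  # URI
                 'data/images/bus.jpg'],  # local file
                size=640)
results.print()  # per-image detection summary
print(results.t)  # (pre-process, inference, NMS) ms per image, from the dt Profiles
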
# list of images - t = [time_sync()] - p = next(self.model.parameters()) if self.pt else torch.zeros(1, device=self.model.device) # for device, type - autocast = self.amp and (p.device.type != 'cpu') # Automatic Mixed Precision (AMP) inference - if isinstance(imgs, torch.Tensor): # torch - with amp.autocast(autocast): - return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference - - # Pre-process - n, imgs = (len(imgs), list(imgs)) if isinstance(imgs, (list, tuple)) else (1, [imgs]) # number, list of images - shape0, shape1, files = [], [], [] # image and inference shapes, filenames - for i, im in enumerate(imgs): - f = f'image{i}' # filename - if isinstance(im, (str, Path)): # filename or uri - im, f = Image.open(requests.get(im, stream=True).raw if str(im).startswith('http') else im), im - im = np.asarray(exif_transpose(im)) - elif isinstance(im, Image.Image): # PIL Image - im, f = np.asarray(exif_transpose(im)), getattr(im, 'filename', f) or f - files.append(Path(f).with_suffix('.jpg').name) - if im.shape[0] < 5: # image in CHW - im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1) - im = im[..., :3] if im.ndim == 3 else cv2.cvtColor(im, cv2.COLOR_GRAY2BGR) # enforce 3ch input - s = im.shape[:2] # HWC - shape0.append(s) # image shape - g = (size / max(s)) # gain - shape1.append([y * g for y in s]) - imgs[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update - shape1 = [make_divisible(x, self.stride) if self.pt else size for x in np.array(shape1).max(0)] # inf shape - x = [letterbox(im, shape1, auto=False)[0] for im in imgs] # pad - x = np.ascontiguousarray(np.array(x).transpose((0, 3, 1, 2))) # stack and BHWC to BCHW - x = torch.from_numpy(x).to(p.device).type_as(p) / 255 # uint8 to fp16/32 - t.append(time_sync()) + dt = (Profile(), Profile(), Profile()) + with dt[0]: + p = next(self.model.parameters()) if self.pt else torch.zeros(1, device=self.model.device) # param + autocast = self.amp and (p.device.type != 'cpu') # Automatic Mixed Precision (AMP) inference + if isinstance(ims, torch.Tensor): # torch + with amp.autocast(autocast): + return self.model(ims.to(p.device).type_as(p), augment, profile) # inference + + # Pre-process + n, ims = (len(ims), list(ims)) if isinstance(ims, (list, tuple)) else (1, [ims]) # number, list of images + shape0, shape1, files = [], [], [] # image and inference shapes, filenames + for i, im in enumerate(ims): + f = f'image{i}' # filename + if isinstance(im, (str, Path)): # filename or uri + im, f = Image.open(requests.get(im, stream=True).raw if str(im).startswith('http') else im), im + im = np.asarray(exif_transpose(im)) + elif isinstance(im, Image.Image): # PIL Image + im, f = np.asarray(exif_transpose(im)), getattr(im, 'filename', f) or f + files.append(Path(f).with_suffix('.jpg').name) + if im.shape[0] < 5: # image in CHW + im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1) + im = im[..., :3] if im.ndim == 3 else cv2.cvtColor(im, cv2.COLOR_GRAY2BGR) # enforce 3ch input + s = im.shape[:2] # HWC + shape0.append(s) # image shape + g = (size / max(s)) # gain + shape1.append([y * g for y in s]) + ims[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update + shape1 = [make_divisible(x, self.stride) if self.pt else size for x in np.array(shape1).max(0)] # inf shape + x = [letterbox(im, shape1, auto=False)[0] for im in ims] # pad + x = np.ascontiguousarray(np.array(x).transpose((0, 3, 1, 2))) # stack and BHWC to BCHW + x = torch.from_numpy(x).to(p.device).type_as(p) / 255 # 
uint8 to fp16/32 with amp.autocast(autocast): # Inference - y = self.model(x, augment, profile) # forward - t.append(time_sync()) + with dt[1]: + y = self.model(x, augment, profile) # forward # Post-process - y = non_max_suppression(y if self.dmb else y[0], - self.conf, - self.iou, - self.classes, - self.agnostic, - self.multi_label, - max_det=self.max_det) # NMS - for i in range(n): - scale_coords(shape1, y[i][:, :4], shape0[i]) + with dt[2]: + y = non_max_suppression(y if self.dmb else y[0], + self.conf, + self.iou, + self.classes, + self.agnostic, + self.multi_label, + max_det=self.max_det) # NMS + for i in range(n): + scale_coords(shape1, y[i][:, :4], shape0[i]) - t.append(time_sync()) - return Detections(imgs, y, files, t, self.names, x.shape) + return Detections(ims, y, files, dt, self.names, x.shape) class Detections: # YOLOv5 detections class for inference results - def __init__(self, imgs, pred, files, times=(0, 0, 0, 0), names=None, shape=None): + def __init__(self, ims, pred, files, times=(0, 0, 0), names=None, shape=None): super().__init__() d = pred[0].device # device - gn = [torch.tensor([*(im.shape[i] for i in [1, 0, 1, 0]), 1, 1], device=d) for im in imgs] # normalizations - self.imgs = imgs # list of images as numpy arrays + gn = [torch.tensor([*(im.shape[i] for i in [1, 0, 1, 0]), 1, 1], device=d) for im in ims] # normalizations + self.ims = ims # list of images as numpy arrays self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls) self.names = names # class names self.files = files # image filenames @@ -665,12 +666,12 @@ def __init__(self, imgs, pred, files, times=(0, 0, 0, 0), names=None, shape=None self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized self.n = len(self.pred) # number of images (batch size) - self.t = tuple((times[i + 1] - times[i]) * 1000 / self.n for i in range(3)) # timestamps (ms) + self.t = tuple(x.t / self.n * 1E3 for x in times) # timestamps (ms) self.s = shape # inference BCHW shape def display(self, pprint=False, show=False, save=False, crop=False, render=False, labels=True, save_dir=Path('')): crops = [] - for i, (im, pred) in enumerate(zip(self.imgs, self.pred)): + for i, (im, pred) in enumerate(zip(self.ims, self.pred)): s = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} ' # string if pred.shape[0]: for c in pred[:, -1].unique(): @@ -705,7 +706,7 @@ def display(self, pprint=False, show=False, save=False, crop=False, render=False if i == self.n - 1: LOGGER.info(f"Saved {self.n} image{'s' * (self.n > 1)} to {colorstr('bold', save_dir)}") if render: - self.imgs[i] = np.asarray(im) + self.ims[i] = np.asarray(im) if crop: if save: LOGGER.info(f'Saved results to {save_dir}\n') @@ -728,7 +729,7 @@ def crop(self, save=True, save_dir='runs/detect/exp'): def render(self, labels=True): self.display(render=True, labels=labels) # render results - return self.imgs + return self.ims def pandas(self): # return detections as pandas DataFrames, i.e. print(results.pandas().xyxy[0]) @@ -743,9 +744,9 @@ def pandas(self): def tolist(self): # return a list of Detections objects, i.e. 
'for result in results.tolist():' r = range(self.n) # iterable - x = [Detections([self.imgs[i]], [self.pred[i]], [self.files[i]], self.times, self.names, self.s) for i in r] + x = [Detections([self.ims[i]], [self.pred[i]], [self.files[i]], self.times, self.names, self.s) for i in r] # for d in x: - # for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']: + # for k in ['ims', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']: # setattr(d, k, getattr(d, k)[0]) # pop out of list return x diff --git a/utils/general.py b/utils/general.py index 76872b696d79..42d000918c13 100755 --- a/utils/general.py +++ b/utils/general.py @@ -141,16 +141,26 @@ def user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'): class Profile(contextlib.ContextDecorator): - # Usage: @Profile() decorator or 'with Profile():' context manager + # YOLOv5 Profile class. Usage: @Profile() decorator or 'with Profile():' context manager + def __init__(self, t=0.0): + self.t = t + self.cuda = torch.cuda.is_available() + def __enter__(self): - self.start = time.time() + self.start = self.time() def __exit__(self, type, value, traceback): - print(f'Profile results: {time.time() - self.start:.5f}s') + self.dt = self.time() - self.start # delta-time + self.t += self.dt # accumulate dt + + def time(self): + if self.cuda: + torch.cuda.synchronize() + return time.time() class Timeout(contextlib.ContextDecorator): - # Usage: @Timeout(seconds) decorator or 'with Timeout(seconds):' context manager + # YOLOv5 Timeout class. Usage: @Timeout(seconds) decorator or 'with Timeout(seconds):' context manager def __init__(self, seconds, *, timeout_msg='', suppress_timeout_errors=True): self.seconds = int(seconds) self.timeout_message = timeout_msg diff --git a/val.py b/val.py index ce743b506aff..876fc5bf50bb 100644 --- a/val.py +++ b/val.py @@ -37,7 +37,7 @@ from models.common import DetectMultiBackend from utils.callbacks import Callbacks from utils.dataloaders import create_dataloader -from utils.general import (LOGGER, check_dataset, check_img_size, check_requirements, check_yaml, +from utils.general import (LOGGER, Profile, check_dataset, check_img_size, check_requirements, check_yaml, coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, print_args, scale_coords, xywh2xyxy, xyxy2xywh) from utils.metrics import ConfusionMatrix, ap_per_class, box_iou @@ -187,26 +187,24 @@ def run( names = dict(enumerate(names)) class_map = coco80_to_coco91_class() if is_coco else list(range(1000)) s = ('%20s' + '%11s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95') - dt, p, r, f1, mp, mr, map50, map = [0.0, 0.0, 0.0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 + dt, p, r, f1, mp, mr, map50, map = (Profile(), Profile(), Profile()), 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 loss = torch.zeros(3, device=device) jdict, stats, ap, ap_class = [], [], [], [] callbacks.run('on_val_start') pbar = tqdm(dataloader, desc=s, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar for batch_i, (im, targets, paths, shapes) in enumerate(pbar): callbacks.run('on_val_batch_start') - t1 = time_sync() - if cuda: - im = im.to(device, non_blocking=True) - targets = targets.to(device) - im = im.half() if half else im.float() # uint8 to fp16/32 - im /= 255 # 0 - 255 to 0.0 - 1.0 - nb, _, height, width = im.shape # batch size, channels, height, width - t2 = time_sync() - dt[0] += t2 - t1 + with dt[0]: + if cuda: + im = im.to(device, non_blocking=True) + targets = targets.to(device) + im = im.half() if half else im.float() # uint8 to fp16/32 + im /= 
255 # 0 - 255 to 0.0 - 1.0 + nb, _, height, width = im.shape # batch size, channels, height, width # Inference - out, train_out = model(im) if training else model(im, augment=augment, val=True) # inference, loss outputs - dt[1] += time_sync() - t2 + with dt[1]: + out, train_out = model(im) if training else model(im, augment=augment, val=True) # inference, loss outputs # Loss if compute_loss: @@ -215,9 +213,8 @@ def run( # NMS targets[:, 2:] *= torch.tensor((width, height, width, height), device=device) # to pixels lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling - t3 = time_sync() - out = non_max_suppression(out, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls) - dt[2] += time_sync() - t3 + with dt[2]: + out = non_max_suppression(out, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls) # Metrics for si, pred in enumerate(out): @@ -284,7 +281,7 @@ def run( LOGGER.info(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i])) # Print speeds - t = tuple(x / seen * 1E3 for x in dt) # speeds per image + t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image if not training: shape = (batch_size, 3, imgsz, imgsz) LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t) From 61adf017f231f470afca2636f1f13e4cce13914b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 18 Aug 2022 20:12:33 +0200 Subject: [PATCH 1325/1976] `torch.empty()` for speed improvements (#9025) `torch.empty()` for speed improvement Signed-off-by: Glenn Jocher --- models/common.py | 4 ++-- models/yolo.py | 6 +++--- utils/autobatch.py | 2 +- utils/loggers/__init__.py | 2 +- utils/torch_utils.py | 2 +- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/models/common.py b/models/common.py index 33aa2ac12465..44192e622bb5 100644 --- a/models/common.py +++ b/models/common.py @@ -531,7 +531,7 @@ def warmup(self, imgsz=(1, 3, 640, 640)): # Warmup model by running inference once warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb if any(warmup_types) and self.device.type != 'cpu': - im = torch.zeros(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device) # input + im = torch.empty(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device) # input for _ in range(2 if self.jit else 1): # self.forward(im) # warmup @@ -600,7 +600,7 @@ def forward(self, ims, size=640, augment=False, profile=False): dt = (Profile(), Profile(), Profile()) with dt[0]: - p = next(self.model.parameters()) if self.pt else torch.zeros(1, device=self.model.device) # param + p = next(self.model.parameters()) if self.pt else torch.empty(1, device=self.model.device) # param autocast = self.amp and (p.device.type != 'cpu') # Automatic Mixed Precision (AMP) inference if isinstance(ims, torch.Tensor): # torch with amp.autocast(autocast): diff --git a/models/yolo.py b/models/yolo.py index df4209726e0d..32a47e9591da 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -46,8 +46,8 @@ def __init__(self, nc=80, anchors=(), ch=(), inplace=True): # detection layer self.no = nc + 5 # number of outputs per anchor self.nl = len(anchors) # number of detection layers self.na = len(anchors[0]) // 2 # number of anchors - self.grid = [torch.zeros(1)] * self.nl # init grid - self.anchor_grid = [torch.zeros(1)] * self.nl # init anchor grid + self.grid = [torch.empty(1)] * self.nl # init grid + self.anchor_grid = [torch.empty(1)] * self.nl # init anchor grid 
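# Why torch.empty() in these hunks: torch.zeros() allocates and then zero-fills,
# while torch.empty() only allocates, so it is slightly faster wherever the
# tensor is immediately overwritten (warmup inputs, placeholder grids). The
# trade-off is that uninitialized memory must never be read before being
# written. A rough micro-benchmark sketch (illustrative, not part of the patch):
import time
import torch

t0 = time.time()
for _ in range(100):
    torch.zeros(16, 3, 640, 640)  # allocate + fill with zeros
t1 = time.time()
for _ in range(100):
    torch.empty(16, 3, 640, 640)  # allocate only, contents undefined
t2 = time.time()
print(f'zeros {t1 - t0:.3f}s  empty {t2 - t1:.3f}s')  # empty is typically faster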
self.register_buffer('anchors', torch.tensor(anchors).float().view(self.nl, -1, 2)) # shape(nl,na,2) self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv self.inplace = inplace # use inplace ops (e.g. slice assignment) @@ -175,7 +175,7 @@ def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, i if isinstance(m, Detect): s = 256 # 2x min stride m.inplace = self.inplace - m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward + m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.empty(1, ch, s, s))]) # forward check_anchor_order(m) # must be in pixel-space (not grid-space) m.anchors /= m.stride.view(-1, 1, 1) self.stride = m.stride diff --git a/utils/autobatch.py b/utils/autobatch.py index c231d24c0706..07cddc99f400 100644 --- a/utils/autobatch.py +++ b/utils/autobatch.py @@ -47,7 +47,7 @@ def autobatch(model, imgsz=640, fraction=0.9, batch_size=16): # Profile batch sizes batch_sizes = [1, 2, 4, 8, 16] try: - img = [torch.zeros(b, 3, imgsz, imgsz) for b in batch_sizes] + img = [torch.empty(b, 3, imgsz, imgsz) for b in batch_sizes] results = profile(img, model, n=3, device=device) except Exception as e: LOGGER.warning(f'{prefix}{e}') diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 8ec846f8cfac..34704b625294 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -300,7 +300,7 @@ def log_tensorboard_graph(tb, model, imgsz=(640, 640)): try: p = next(model.parameters()) # for device, type imgsz = (imgsz, imgsz) if isinstance(imgsz, int) else imgsz # expand - im = torch.zeros((1, 3, *imgsz)).to(p.device).type_as(p) # input image + im = torch.empty((1, 3, *imgsz)).to(p.device).type_as(p) # input image with warnings.catch_warnings(): warnings.simplefilter('ignore') # suppress jit trace warning tb.add_graph(torch.jit.trace(de_parallel(model), im, strict=False), []) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index ed56064ce02e..4de2520b26a2 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -282,7 +282,7 @@ def model_info(model, verbose=False, imgsz=640): try: # FLOPs p = next(model.parameters()) stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32 # max stride - im = torch.zeros((1, p.shape[1], stride, stride), device=p.device) # input image in BCHW format + im = torch.empty((1, p.shape[1], stride, stride), device=p.device) # input image in BCHW format flops = thop.profile(deepcopy(model), inputs=(im,), verbose=False)[0] / 1E9 * 2 # stride GFLOPs imgsz = imgsz if isinstance(imgsz, list) else [imgsz, imgsz] # expand if int/float fs = f', {flops * imgsz[0] / stride * imgsz[1] / stride:.1f} GFLOPs' # 640x640 GFLOPs From de6e6c0110adbb41f829c1288d5cdab7105892ae Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 18 Aug 2022 20:23:14 +0200 Subject: [PATCH 1326/1976] Created using Colaboratory --- tutorial.ipynb | 136 ++++++++++++++++++++++++------------------------- 1 file changed, 68 insertions(+), 68 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 1438924e4112..97e572798427 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -17,7 +17,7 @@ "accelerator": "GPU", "widgets": { "application/vnd.jupyter.widget-state+json": { - "57c562894aed45cd9a107d0455e3e3f4": { + "6d6b90ead2db49b3bdf624b6ba9b44e9": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "model_module_version": "1.5.0", @@ -32,14 +32,14 @@ "_view_name": "HBoxView", "box_style": "", "children": 
[ - "IPY_MODEL_040d53c6cc924350bcb656cd21a7c713", - "IPY_MODEL_e029890942a74c098408ce5a9a566d51", - "IPY_MODEL_8fb991c03e434566a4297b6ab9446f89" + "IPY_MODEL_cb77443edb9e43328a56aaa4413a0df3", + "IPY_MODEL_954c8b8699e143bf92be6bfc02fc52f6", + "IPY_MODEL_a64775946e13477f83d8bba6086385b9" ], - "layout": "IPY_MODEL_a9a376923a7742d89fb335db709c7a7e" + "layout": "IPY_MODEL_1413611b7f4f4ef99e4f541f5ca35ed6" } }, - "040d53c6cc924350bcb656cd21a7c713": { + "cb77443edb9e43328a56aaa4413a0df3": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", @@ -54,13 +54,13 @@ "_view_name": "HTMLView", "description": "", "description_tooltip": null, - "layout": "IPY_MODEL_8b4276ac834c4735bf60ee9b761b9962", + "layout": "IPY_MODEL_00737f5558eb4fbd968172acb978e54a", "placeholder": "​", - "style": "IPY_MODEL_52cc8da75b724198856617247541cb1e", + "style": "IPY_MODEL_f03e5ddfd1c04bedaf68ab02c3f6f0ea", "value": "100%" } }, - "e029890942a74c098408ce5a9a566d51": { + "954c8b8699e143bf92be6bfc02fc52f6": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "model_module_version": "1.5.0", @@ -76,15 +76,15 @@ "bar_style": "success", "description": "", "description_tooltip": null, - "layout": "IPY_MODEL_b6652f46480243c4adf60e6440043d6f", + "layout": "IPY_MODEL_6926db7e0035455f99e1dd4508c4b19c", "max": 818322941, "min": 0, "orientation": "horizontal", - "style": "IPY_MODEL_e502754177ff4ea8abf82a6e9ac77a4a", + "style": "IPY_MODEL_a6a52c9f828b458e97ddf7a11ae9275f", "value": 818322941 } }, - "8fb991c03e434566a4297b6ab9446f89": { + "a64775946e13477f83d8bba6086385b9": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", @@ -99,13 +99,13 @@ "_view_name": "HTMLView", "description": "", "description_tooltip": null, - "layout": "IPY_MODEL_447398becdb04836b5ffb5915318db07", + "layout": "IPY_MODEL_c4c7dc45a1c24dc4b2c709e21271a37e", "placeholder": "​", - "style": "IPY_MODEL_2fddcb27ad4a4caa81ff51111f8d0ed6", - "value": " 780M/780M [01:17<00:00, 12.3MB/s]" + "style": "IPY_MODEL_09c43ffe2c7e4bdc9489e83f9d82ab73", + "value": " 780M/780M [01:12<00:00, 23.8MB/s]" } }, - "a9a376923a7742d89fb335db709c7a7e": { + "1413611b7f4f4ef99e4f541f5ca35ed6": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -157,7 +157,7 @@ "width": null } }, - "8b4276ac834c4735bf60ee9b761b9962": { + "00737f5558eb4fbd968172acb978e54a": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -209,7 +209,7 @@ "width": null } }, - "52cc8da75b724198856617247541cb1e": { + "f03e5ddfd1c04bedaf68ab02c3f6f0ea": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", @@ -224,7 +224,7 @@ "description_width": "" } }, - "b6652f46480243c4adf60e6440043d6f": { + "6926db7e0035455f99e1dd4508c4b19c": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -276,7 +276,7 @@ "width": null } }, - "e502754177ff4ea8abf82a6e9ac77a4a": { + "a6a52c9f828b458e97ddf7a11ae9275f": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "model_module_version": "1.5.0", @@ -292,7 +292,7 @@ "description_width": "" } }, - "447398becdb04836b5ffb5915318db07": { + "c4c7dc45a1c24dc4b2c709e21271a37e": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -344,7 +344,7 @@ "width": 
null } }, - "2fddcb27ad4a4caa81ff51111f8d0ed6": { + "09c43ffe2c7e4bdc9489e83f9d82ab73": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", @@ -404,7 +404,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "e0f693e4-413b-4cc8-ae7e-91537da370b0" + "outputId": "508de90c-846e-495d-c7d6-50681af62a98" }, "source": [ "!git clone https://github.com/ultralytics/yolov5 # clone\n", @@ -421,7 +421,7 @@ "output_type": "stream", "name": "stderr", "text": [ - "YOLOv5 🚀 v6.2-2-g7c9486e Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n" + "YOLOv5 🚀 v6.2-15-g61adf01 Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n" ] }, { @@ -461,7 +461,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "941d625b-01a1-4f1b-dfd2-d9ef1c945715" + "outputId": "93881540-331e-4890-cd38-4c2776933238" }, "source": [ "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images\n", @@ -474,16 +474,16 @@ "name": "stdout", "text": [ "\u001b[34m\u001b[1mdetect: \u001b[0mweights=['yolov5s.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/detect, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False\n", - "YOLOv5 🚀 v6.2-2-g7c9486e Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", + "YOLOv5 🚀 v6.2-15-g61adf01 Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", "\n", "Downloading https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt to yolov5s.pt...\n", - "100% 14.1M/14.1M [00:00<00:00, 50.5MB/s]\n", + "100% 14.1M/14.1M [00:00<00:00, 39.3MB/s]\n", "\n", "Fusing layers... \n", "YOLOv5s summary: 213 layers, 7225885 parameters, 0 gradients\n", - "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, Done. (0.014s)\n", - "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, Done. 
(0.020s)\n", - "Speed: 0.6ms pre-process, 17.0ms inference, 20.2ms NMS per image at shape (1, 3, 640, 640)\n", + "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 14.9ms\n", + "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, 22.0ms\n", + "Speed: 0.6ms pre-process, 18.4ms inference, 24.1ms NMS per image at shape (1, 3, 640, 640)\n", "Results saved to \u001b[1mruns/detect/exp\u001b[0m\n" ] } @@ -527,20 +527,20 @@ "base_uri": "https://localhost:8080/", "height": 49, "referenced_widgets": [ - "57c562894aed45cd9a107d0455e3e3f4", - "040d53c6cc924350bcb656cd21a7c713", - "e029890942a74c098408ce5a9a566d51", - "8fb991c03e434566a4297b6ab9446f89", - "a9a376923a7742d89fb335db709c7a7e", - "8b4276ac834c4735bf60ee9b761b9962", - "52cc8da75b724198856617247541cb1e", - "b6652f46480243c4adf60e6440043d6f", - "e502754177ff4ea8abf82a6e9ac77a4a", - "447398becdb04836b5ffb5915318db07", - "2fddcb27ad4a4caa81ff51111f8d0ed6" + "6d6b90ead2db49b3bdf624b6ba9b44e9", + "cb77443edb9e43328a56aaa4413a0df3", + "954c8b8699e143bf92be6bfc02fc52f6", + "a64775946e13477f83d8bba6086385b9", + "1413611b7f4f4ef99e4f541f5ca35ed6", + "00737f5558eb4fbd968172acb978e54a", + "f03e5ddfd1c04bedaf68ab02c3f6f0ea", + "6926db7e0035455f99e1dd4508c4b19c", + "a6a52c9f828b458e97ddf7a11ae9275f", + "c4c7dc45a1c24dc4b2c709e21271a37e", + "09c43ffe2c7e4bdc9489e83f9d82ab73" ] }, - "outputId": "d593b41a-55e7-48a5-e285-5df449edc8c0" + "outputId": "ed2ca46e-a1a9-4a16-c449-859278d8aa18" }, "source": [ "# Download COCO val\n", @@ -558,7 +558,7 @@ "application/vnd.jupyter.widget-view+json": { "version_major": 2, "version_minor": 0, - "model_id": "57c562894aed45cd9a107d0455e3e3f4" + "model_id": "6d6b90ead2db49b3bdf624b6ba9b44e9" } }, "metadata": {} @@ -572,7 +572,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "701132a6-9ca8-4e1f-c89f-5d38893a6fc4" + "outputId": "19a590ef-363e-424c-d9ce-78bbe0593cd5" }, "source": [ "# Run YOLOv5x on COCO val\n", @@ -585,35 +585,35 @@ "name": "stdout", "text": [ "\u001b[34m\u001b[1mval: \u001b[0mdata=/content/yolov5/data/coco.yaml, weights=['yolov5x.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.65, task=val, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=True, project=runs/val, name=exp, exist_ok=False, half=True, dnn=False\n", - "YOLOv5 🚀 v6.2-2-g7c9486e Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", + "YOLOv5 🚀 v6.2-15-g61adf01 Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", "\n", "Downloading https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x.pt to yolov5x.pt...\n", - "100% 166M/166M [00:11<00:00, 15.1MB/s]\n", + "100% 166M/166M [00:06<00:00, 28.1MB/s]\n", "\n", "Fusing layers... 
\n", "YOLOv5x summary: 444 layers, 86705005 parameters, 0 gradients\n", "Downloading https://ultralytics.com/assets/Arial.ttf to /root/.config/Ultralytics/Arial.ttf...\n", - "100% 755k/755k [00:00<00:00, 48.6MB/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupt: 100% 5000/5000 [00:00<00:00, 10889.87it/s]\n", + "100% 755k/755k [00:00<00:00, 47.3MB/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupt: 100% 5000/5000 [00:00<00:00, 10756.32it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: /content/datasets/coco/val2017.cache\n", - " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:05<00:00, 2.38it/s]\n", + " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:07<00:00, 2.33it/s]\n", " all 5000 36335 0.743 0.625 0.683 0.504\n", - "Speed: 0.1ms pre-process, 4.7ms inference, 1.0ms NMS per image at shape (32, 3, 640, 640)\n", + "Speed: 0.1ms pre-process, 4.6ms inference, 1.2ms NMS per image at shape (32, 3, 640, 640)\n", "\n", "Evaluating pycocotools mAP... saving runs/val/exp/yolov5x_predictions.json...\n", "loading annotations into memory...\n", - "Done (t=0.39s)\n", + "Done (t=0.41s)\n", "creating index...\n", "index created!\n", "Loading and preparing results...\n", - "DONE (t=5.53s)\n", + "DONE (t=5.64s)\n", "creating index...\n", "index created!\n", "Running per image evaluation...\n", "Evaluate annotation type *bbox*\n", - "DONE (t=73.01s).\n", + "DONE (t=76.80s).\n", "Accumulating evaluation results...\n", - "DONE (t=15.27s).\n", + "DONE (t=14.61s).\n", " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.506\n", " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.688\n", " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.549\n", @@ -745,7 +745,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "50a9318f-d438-41d5-db95-928f1842c057" + "outputId": "47759d5e-34f0-4a6a-c714-ff533391cfff" }, "source": [ "# Train YOLOv5s on COCO128 for 3 epochs\n", @@ -759,7 +759,7 @@ "text": [ "\u001b[34m\u001b[1mtrain: \u001b[0mweights=yolov5s.pt, cfg=, data=coco128.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=-1, artifact_alias=latest\n", "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 🚀 v6.2-2-g7c9486e Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", + "YOLOv5 🚀 v6.2-15-g61adf01 Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", "\n", "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, 
copy_paste=0.0\n", "\u001b[34m\u001b[1mWeights & Biases: \u001b[0mrun 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs in Weights & Biases\n", @@ -768,8 +768,8 @@ "\n", "Dataset not found ⚠️, missing paths ['/content/datasets/coco128/images/train2017']\n", "Downloading https://ultralytics.com/assets/coco128.zip to coco128.zip...\n", - "100% 6.66M/6.66M [00:00<00:00, 12.4MB/s]\n", - "Dataset download success ✅ (1.3s), saved to \u001b[1m/content/datasets\u001b[0m\n", + "100% 6.66M/6.66M [00:00<00:00, 75.3MB/s]\n", + "Dataset download success ✅ (0.7s), saved to \u001b[1m/content/datasets\u001b[0m\n", "\n", " from n params module arguments \n", " 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2] \n", @@ -803,11 +803,11 @@ "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n", "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n", "\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '/content/datasets/coco128/labels/train2017' images and labels...128 found, 0 missing, 2 empty, 0 corrupt: 100% 128/128 [00:00<00:00, 8516.89it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '/content/datasets/coco128/labels/train2017' images and labels...128 found, 0 missing, 2 empty, 0 corrupt: 100% 128/128 [00:00<00:00, 7246.20it/s]\n", "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /content/datasets/coco128/labels/train2017.cache\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 1043.44it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 986.21it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupt: 100% 128/128 [00:00 Date: Thu, 18 Aug 2022 20:26:18 +0200 Subject: [PATCH 1327/1976] Remove unused `time_sync` import (#9026) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- val.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/val.py b/val.py index 876fc5bf50bb..7b4fab4c63be 100644 --- a/val.py +++ b/val.py @@ -42,7 +42,7 @@ scale_coords, xywh2xyxy, xyxy2xywh) from utils.metrics import ConfusionMatrix, ap_per_class, box_iou from utils.plots import output_to_target, plot_images, plot_val_study -from utils.torch_utils import select_device, smart_inference_mode, time_sync +from utils.torch_utils import select_device, smart_inference_mode def save_one_txt(predn, save_conf, shape, file): From eb359c3a226f55c9b51efcfeae2e31c820e6e08a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 18 Aug 2022 21:45:11 +0200 Subject: [PATCH 1328/1976] Add PyTorch Hub classification CI checks (#9027) * Add PyTorch Hub classification CI checks Add PyTorch Hub loading of official and custom trained classification models to CI checks. 
May help resolve https://github.com/ultralytics/yolov5/issues/8790#issuecomment-1219840718 Signed-off-by: Glenn Jocher * Update hubconf.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- .github/workflows/ci-testing.yml | 5 +++++ hubconf.py | 6 +++--- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index aa797c44d487..fde6fffe92f4 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -133,3 +133,8 @@ jobs: python classify/predict.py --imgsz 32 --weights $b --source ../datasets/mnist2560/test/7/60.png # predict python classify/predict.py --imgsz 32 --weights $m --source data/images/bus.jpg # predict python export.py --weights $b --img 64 --imgsz 224 --include torchscript # export + python - < Date: Fri, 19 Aug 2022 01:30:14 +0200 Subject: [PATCH 1329/1976] Created using Colaboratory --- tutorial.ipynb | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 97e572798427..7a1edf7ef86a 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -415,7 +415,7 @@ "import utils\n", "display = utils.notebook_init() # checks" ], - "execution_count": 1, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -467,7 +467,7 @@ "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images\n", "# display.Image(filename='runs/detect/exp/zidane.jpg', width=600)" ], - "execution_count": 2, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -547,7 +547,7 @@ "torch.hub.download_url_to_file('https://ultralytics.com/assets/coco2017val.zip', 'tmp.zip')\n", "!unzip -q tmp.zip -d ../datasets && rm tmp.zip" ], - "execution_count": 3, + "execution_count": null, "outputs": [ { "output_type": "display_data", @@ -578,7 +578,7 @@ "# Run YOLOv5x on COCO val\n", "!python val.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65 --half" ], - "execution_count": 4, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -751,7 +751,7 @@ "# Train YOLOv5s on COCO128 for 3 epochs\n", "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache" ], - "execution_count": 5, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -1113,7 +1113,7 @@ "cell_type": "code", "source": [ "# Classification\n", - "for m in [*(f'yolov5{x}.pt' for x in 'nsmlx'), 'resnet50.pt', 'efficientnet_b0.pt']:\n", + "for m in [*(f'yolov5{x}-cls.pt' for x in 'nsmlx'), 'resnet50.pt', 'efficientnet_b0.pt']:\n", " for d in 'mnist', 'fashion-mnist', 'cifar10', 'cifar100', 'imagenette160', 'imagenette320', 'imagenette', 'imagewoof160', 'imagewoof320', 'imagewoof':\n", " !python classify/train.py --model {m} --data {d} --epochs 10 --project YOLOv5-cls --name {m}-{d}" ], From 840b7232dbaff6296e6e2519895c3065e937fdcf Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 19 Aug 2022 01:59:51 +0200 Subject: [PATCH 1330/1976] Attach transforms to model (#9028) * Attach transforms to model Signed-off-by: Glenn Jocher * Update val.py Signed-off-by: Glenn Jocher * Update train.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- classify/train.py | 10 +++++----- classify/val.py | 3 +-- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/classify/train.py b/classify/train.py index 9fb7c52b545a..5881e16e47db 100644 --- a/classify/train.py +++ b/classify/train.py @@ -122,16 +122,16 @@ def train(opt, device): for p in model.parameters(): 
p.requires_grad = True # for training model = model.to(device) - names = trainloader.dataset.classes # class names - model.names = names # attach class names # Info if RANK in {-1, 0}: + model.names = trainloader.dataset.classes # attach class names + model.transforms = testloader.dataset.torch_transforms # attach inference transforms model_info(model) if opt.verbose: LOGGER.info(model) images, labels = next(iter(trainloader)) - file = imshow_cls(images[:25], labels[:25], names=names, f=save_dir / 'train_images.jpg') + file = imshow_cls(images[:25], labels[:25], names=model.names, f=save_dir / 'train_images.jpg') logger.log_images(file, name='Train Examples') logger.log_graph(model, imgsz) # log model @@ -254,8 +254,8 @@ def train(opt, device): # Plot examples images, labels = (x[:25] for x in next(iter(testloader))) # first 25 images and labels - pred = torch.max(ema.ema((images.half() if cuda else images.float()).to(device)), 1)[1] - file = imshow_cls(images, labels, pred, names, verbose=False, f=save_dir / 'test_images.jpg') + pred = torch.max(ema.ema(images.to(device)), 1)[1] + file = imshow_cls(images, labels, pred, model.names, verbose=False, f=save_dir / 'test_images.jpg') # Log results meta = {"epochs": epochs, "top1_acc": best_fitness, "date": datetime.now().isoformat()} diff --git a/classify/val.py b/classify/val.py index c91e2cf82c81..2353737957d3 100644 --- a/classify/val.py +++ b/classify/val.py @@ -39,7 +39,7 @@ def run( project=ROOT / 'runs/val-cls', # save to project/name name='exp', # save to project/name exist_ok=False, # existing project/name ok, do not increment - half=True, # use FP16 half-precision inference + half=False, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference model=None, dataloader=None, @@ -124,7 +124,6 @@ def run( LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms post-process per image at shape {shape}' % t) LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}") - model.float() # for training return top1, top5, loss From 1cd3e752def0ecbcb39a95d75e3c93fad3114ab9 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 19 Aug 2022 02:01:40 +0200 Subject: [PATCH 1331/1976] Created using Colaboratory --- tutorial.ipynb | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 7a1edf7ef86a..a70887e97360 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -1112,8 +1112,8 @@ { "cell_type": "code", "source": [ - "# Classification\n", - "for m in [*(f'yolov5{x}-cls.pt' for x in 'nsmlx'), 'resnet50.pt', 'efficientnet_b0.pt']:\n", + "# Classification train\n", + "for m in [*(f'yolov5{x}-cls.pt' for x in 'nsmlx'), 'resnet50.pt', 'resnet101.pt', 'efficientnet_b0.pt', 'efficientnet_b1.pt']:\n", " for d in 'mnist', 'fashion-mnist', 'cifar10', 'cifar100', 'imagenette160', 'imagenette320', 'imagenette', 'imagewoof160', 'imagewoof320', 'imagewoof':\n", " !python classify/train.py --model {m} --data {d} --epochs 10 --project YOLOv5-cls --name {m}-{d}" ], @@ -1123,6 +1123,19 @@ "execution_count": null, "outputs": [] }, + { + "cell_type": "code", + "source": [ + "# Classification val\n", + "!bash data/scripts/get_imagenet.sh --val # download ImageNet val split (6.3G, 50000 images)\n", + "!python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224 # validate" + ], + "metadata": { + "id": "yYgOiFNHZx-1" + }, + "execution_count": null, + "outputs": [] + }, { "cell_type": "code", "metadata": { From 
781401ec70bc481b789b214003b722174e4b99e0 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 19 Aug 2022 15:06:37 +0200 Subject: [PATCH 1332/1976] Default --data `imagenette160` training (fastest) (#9033) * Default --data `imagenette160` training (fastest) Signed-off-by: Glenn Jocher * Update train.py Signed-off-by: Glenn Jocher * Update train.py Signed-off-by: Glenn Jocher * Update train.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- classify/train.py | 6 +++--- train.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/classify/train.py b/classify/train.py index 5881e16e47db..8fe90c1b19eb 100644 --- a/classify/train.py +++ b/classify/train.py @@ -6,7 +6,7 @@ Torchvision models: --model resnet50, efficientnet_b0, etc. See https://pytorch.org/vision/stable/models.html Usage - Single-GPU and Multi-GPU DDP - $ python classify/train.py --model yolov5s-cls.pt --data cifar100 --epochs 5 --img 128 + $ python classify/train.py --model yolov5s-cls.pt --data imagenette160 --epochs 5 --img 128 $ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3 """ @@ -266,8 +266,8 @@ def train(opt, device): def parse_opt(known=False): parser = argparse.ArgumentParser() parser.add_argument('--model', type=str, default='yolov5s-cls.pt', help='initial weights path') - parser.add_argument('--data', type=str, default='mnist', help='cifar10, cifar100, mnist, imagenet, etc.') - parser.add_argument('--epochs', type=int, default=10) + parser.add_argument('--data', type=str, default='imagenette160', help='cifar10, cifar100, mnist, imagenet, ...') + parser.add_argument('--epochs', type=int, default=10, help='total training epochs') parser.add_argument('--batch-size', type=int, default=64, help='total batch size for all GPUs') parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=128, help='train, val image size (pixels)') parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') diff --git a/train.py b/train.py index 10a3bdb56002..279d52de6d74 100644 --- a/train.py +++ b/train.py @@ -436,7 +436,7 @@ def parse_opt(known=False): parser.add_argument('--cfg', type=str, default='', help='model.yaml path') parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path') - parser.add_argument('--epochs', type=int, default=300) + parser.add_argument('--epochs', type=int, default=300, help='total training epochs') parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch') parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)') parser.add_argument('--rect', action='store_true', help='rectangular training') From 4a8ab3bc42d32f3e2e9c026b87dc29fba6143064 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 19 Aug 2022 15:07:04 +0200 Subject: [PATCH 1333/1976] VOC `names` dictionary fix (#9034) * VOC names dictionary fix Signed-off-by: Glenn Jocher * Update dataloaders.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- data/VOC.yaml | 5 +++-- utils/dataloaders.py | 10 ++++++---- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/data/VOC.yaml b/data/VOC.yaml index bbe5cf90a838..27d38109c53a 100644 --- a/data/VOC.yaml +++ b/data/VOC.yaml @@ -65,12 +65,13 @@ 
download: | w = int(size.find('width').text) h = int(size.find('height').text) + names = list(yaml['names'].values()) # names list for obj in root.iter('object'): cls = obj.find('name').text - if cls in yaml['names'] and not int(obj.find('difficult').text) == 1: + if cls in names and int(obj.find('difficult').text) != 1: xmlbox = obj.find('bndbox') bb = convert_box((w, h), [float(xmlbox.find(x).text) for x in ('xmin', 'xmax', 'ymin', 'ymax')]) - cls_id = yaml['names'].index(cls) # class id + cls_id = names.index(cls) # class id out_file.write(" ".join([str(a) for a in (cls_id, *bb)]) + '\n') diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 3f26be2cd32d..e73b20a58915 100755 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -35,7 +35,7 @@ from utils.torch_utils import torch_distributed_zero_first # Parameters -HELP_URL = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' +HELP_URL = 'See https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp' # include image suffixes VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes BAR_FORMAT = '{l_bar}{bar:10}{r_bar}{bar:-10b}' # tqdm bar format @@ -456,7 +456,7 @@ def __init__(self, # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS]) # pathlib assert self.im_files, f'{prefix}No images found' except Exception as e: - raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {HELP_URL}') + raise Exception(f'{prefix}Error loading data from {path}: {e}\n{HELP_URL}') # Check cache self.label_files = img2label_paths(self.im_files) # labels @@ -475,11 +475,13 @@ def __init__(self, tqdm(None, desc=prefix + d, total=n, initial=n, bar_format=BAR_FORMAT) # display cache results if cache['msgs']: LOGGER.info('\n'.join(cache['msgs'])) # display warnings - assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {HELP_URL}' + assert nf > 0 or not augment, f'{prefix}No labels found in {cache_path}, can not start training. {HELP_URL}' # Read cache [cache.pop(k) for k in ('hash', 'version', 'msgs')] # remove items labels, shapes, self.segments = zip(*cache.values()) + nl = len(np.concatenate(labels, 0)) # number of labels + assert nl > 0 or not augment, f'{prefix}All labels empty in {cache_path}, can not start training. {HELP_URL}' self.labels = list(labels) self.shapes = np.array(shapes) self.im_files = list(cache.keys()) # update @@ -572,7 +574,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): if msgs: LOGGER.info('\n'.join(msgs)) if nf == 0: - LOGGER.warning(f'{prefix}WARNING: No labels found in {path}. See {HELP_URL}') + LOGGER.warning(f'{prefix}WARNING: No labels found in {path}. 
{HELP_URL}') x['hash'] = get_hash(self.label_files + self.im_files) x['results'] = nf, nm, ne, nc, len(self.im_files) x['msgs'] = msgs # warnings From fdcb92a938ef27d1b277a156af7f7922400279e3 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 19 Aug 2022 16:54:58 +0200 Subject: [PATCH 1334/1976] Update train.py `import val as validate` (#9037) * Update train.py `import val as validate` Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- train.py | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/train.py b/train.py index 279d52de6d74..665b4f5b609e 100644 --- a/train.py +++ b/train.py @@ -36,7 +36,7 @@ sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative -import val # for end-of-epoch mAP +import val as validate # for end-of-epoch mAP from models.experimental import attempt_load from models.yolo import Model from utils.autoanchor import check_anchors @@ -347,17 +347,17 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride', 'class_weights']) final_epoch = (epoch + 1 == epochs) or stopper.possible_stop if not noval or final_epoch: # Calculate mAP - results, maps, _ = val.run(data_dict, - batch_size=batch_size // WORLD_SIZE * 2, - imgsz=imgsz, - half=amp, - model=ema.ema, - single_cls=single_cls, - dataloader=val_loader, - save_dir=save_dir, - plots=False, - callbacks=callbacks, - compute_loss=compute_loss) + results, maps, _ = validate.run(data_dict, + batch_size=batch_size // WORLD_SIZE * 2, + imgsz=imgsz, + half=amp, + model=ema.ema, + single_cls=single_cls, + dataloader=val_loader, + save_dir=save_dir, + plots=False, + callbacks=callbacks, + compute_loss=compute_loss) # Update best mAP fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95] @@ -407,12 +407,12 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio strip_optimizer(f) # strip optimizers if f is best: LOGGER.info(f'\nValidating {f}...') - results, _, _ = val.run( + results, _, _ = validate.run( data_dict, batch_size=batch_size // WORLD_SIZE * 2, imgsz=imgsz, model=attempt_load(f, device).half(), - iou_thres=0.65 if is_coco else 0.60, # best pycocotools results at 0.65 + iou_thres=0.65 if is_coco else 0.60, # best pycocotools at iou 0.65 single_cls=single_cls, dataloader=val_loader, save_dir=save_dir, From aed88848a25fe0f4d98e70e79f0ee876265b48fd Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 19 Aug 2022 18:00:38 +0200 Subject: [PATCH 1335/1976] Simplified notebook --- tutorial.ipynb | 67 ++++++++++++++------------------------------------ 1 file changed, 18 insertions(+), 49 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index a70887e97360..1c5d77813f15 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -439,7 +439,7 @@ "id": "4JnkELT0cIJg" }, "source": [ - "# 1. Inference\n", + "# 1. Detect\n", "\n", "`detect.py` runs YOLOv5 inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/detect`. Example inference sources are:\n", "\n", @@ -506,17 +506,7 @@ }, "source": [ "# 2. 
Validate\n", - "Validate a model's accuracy on [COCO](https://cocodataset.org/#home) val or test-dev datasets. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag. Note that `pycocotools` metrics may be ~1% better than the equivalent repo metrics, as is visible below, due to slight differences in mAP computation." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "eyTZYGgRjnMc" - }, - "source": [ - "## COCO val\n", - "Download [COCO val 2017](https://github.com/ultralytics/yolov5/blob/74b34872fdf41941cddcf243951cdb090fbac17b/data/coco.yaml#L14) dataset (1GB - 5000 images), and test model accuracy." + "Validate a model's accuracy on the [COCO](https://cocodataset.org/#home) dataset's `val` or `test` splits. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag." ] }, { @@ -544,8 +534,8 @@ }, "source": [ "# Download COCO val\n", - "torch.hub.download_url_to_file('https://ultralytics.com/assets/coco2017val.zip', 'tmp.zip')\n", - "!unzip -q tmp.zip -d ../datasets && rm tmp.zip" + "torch.hub.download_url_to_file('https://ultralytics.com/assets/coco2017val.zip', 'tmp.zip') # download COCO val (1GB - 5000 images)\n", + "!unzip -q tmp.zip -d ../datasets && rm tmp.zip # unzip" ], "execution_count": null, "outputs": [ @@ -575,7 +565,7 @@ "outputId": "19a590ef-363e-424c-d9ce-78bbe0593cd5" }, "source": [ - "# Run YOLOv5x on COCO val\n", + "# Validate YOLOv5x on COCO val\n", "!python val.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65 --half" ], "execution_count": null, @@ -631,40 +621,6 @@ } ] }, - { - "cell_type": "markdown", - "metadata": { - "id": "rc_KbFk0juX2" - }, - "source": [ - "## COCO test\n", - "Download [COCO test2017](https://github.com/ultralytics/yolov5/blob/74b34872fdf41941cddcf243951cdb090fbac17b/data/coco.yaml#L15) dataset (7GB - 40,000 images), to test model accuracy on test-dev set (**20,000 images, no labels**). Results are saved to a `*.json` file which should be **zipped** and submitted to the evaluation server at https://competitions.codalab.org/competitions/20794." - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "V0AJnSeCIHyJ" - }, - "source": [ - "# Download COCO test-dev2017\n", - "!bash data/scripts/get_coco.sh --test" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "29GJXAP_lPrt" - }, - "source": [ - "# Run YOLOv5x on COCO test\n", - "!python val.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65 --half --task test" - ], - "execution_count": null, - "outputs": [] - }, { "cell_type": "markdown", "metadata": { @@ -1136,6 +1092,19 @@ "execution_count": null, "outputs": [] }, + { + "cell_type": "code", + "source": [ + "# Validate on COCO test. 
Zip results.json and submit to eval server at https://competitions.codalab.org/competitions/20794\n", + "!bash data/scripts/get_coco.sh --test # download COCO test-dev2017 (7GB - 40,000 images, test 20,000)\n", + "!python val.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65 --half --task test" + ], + "metadata": { + "id": "aq4DPWGu0Bl1" + }, + "execution_count": null, + "outputs": [] + }, { "cell_type": "code", "metadata": { From ba1c6773c2691943a355ad956105a4cb3aeedbca Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 19 Aug 2022 18:41:30 +0200 Subject: [PATCH 1336/1976] Created using Colaboratory --- tutorial.ipynb | 45 ++++++++++++++------------------------------- 1 file changed, 14 insertions(+), 31 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 1c5d77813f15..91e2d7e75eab 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -654,46 +654,29 @@ "

Label images lightning fast (including with model-assisted labeling)" ] }, - { - "cell_type": "code", - "metadata": { - "id": "bOy5KI2ncnWd" - }, - "source": [ - "# Tensorboard (optional)\n", - "%load_ext tensorboard\n", - "%tensorboard --logdir runs/train" - ], - "execution_count": null, - "outputs": [] - }, { "cell_type": "code", "source": [ - "# ClearML (optional)\n", - "%pip install -q clearml\n", - "!clearml-init" + "#@title Select YOLOv5 🚀 logger\n", + "logger = 'TensorBoard' #@param ['TensorBoard', 'ClearML', 'W&B']\n", + "\n", + "if logger == 'Tensorboard':\n", + " %load_ext tensorboard\n", + " %tensorboard --logdir runs/train\n", + "elif logger == 'ClearML':\n", + " %pip install -q clearml\n", + " !clearml-init\n", + "elif logger == 'W&B':\n", + " %pip install -q wandb\n", + " import wandb\n", + " wandb.login()" ], "metadata": { - "id": "DQhI6vvaRWjR" + "id": "i3oKtE4g-aNn" }, "execution_count": null, "outputs": [] }, - { - "cell_type": "code", - "metadata": { - "id": "2fLAV42oNb7M" - }, - "source": [ - "# Weights & Biases (optional)\n", - "%pip install -q wandb\n", - "import wandb\n", - "wandb.login()" - ], - "execution_count": null, - "outputs": [] - }, { "cell_type": "code", "metadata": { From a409ec7953e1c5dd572fc73f633de38efe0c101a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 20 Aug 2022 16:29:08 +0200 Subject: [PATCH 1337/1976] AutoBatch protect from negative batch sizes (#9048) * AutoBatch protect from negative batch sizes Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * cleanup * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/autobatch.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/utils/autobatch.py b/utils/autobatch.py index 07cddc99f400..8d12e46f0f09 100644 --- a/utils/autobatch.py +++ b/utils/autobatch.py @@ -60,6 +60,9 @@ def autobatch(model, imgsz=640, fraction=0.9, batch_size=16): i = results.index(None) # first fail index if b >= batch_sizes[i]: # y intercept above failure point b = batch_sizes[max(i - 1, 0)] # select prior safe point + if b < 1: # zero or negative batch size + b = 16 + LOGGER.warning(f'{prefix}WARNING: ⚠️ CUDA anomaly detected, recommend restart environment and retry command.') fraction = np.polyval(p, b) / t # actual fraction predicted LOGGER.info(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%) ✅') From fc8758a49bd30526fb21d0683359e86be3a292a8 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 20 Aug 2022 16:45:11 +0200 Subject: [PATCH 1338/1976] Temporarily remove `macos-latest` from CI (#9049) * Temporarily remove macos-latest from CI macos-latest causing many failed CI runs that resolve after manually re-running 2 or 3 times. I don't know what the cause is. Will replace at a later date. 
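For context on the AutoBatch guard added in #9048 above: the batch size is solved from a first-order polynomial fit of measured CUDA memory usage, so a corrupted measurement can push the solution to zero or below, which the new `if b < 1` clamp intercepts. A minimal sketch of that fit-and-solve step, assuming illustrative profiling points and a hypothetical 16 GB device (not the repository's exact implementation):

    # Sketch of AutoBatch's fit-and-solve logic (illustrative, not the repo's exact code)
    import numpy as np

    def solve_batch_size(batch_sizes, mem_used_gb, total_gb, fraction=0.9, fallback=16):
        p = np.polyfit(batch_sizes, mem_used_gb, deg=1)  # memory ~= p[0] * b + p[1]
        b = int((fraction * total_gb - p[1]) / p[0])  # batch size at target memory fraction
        if b < 1:  # zero/negative solution signals a CUDA anomaly; fall back to a safe default
            b = fallback
        return b

    print(solve_batch_size([1, 2, 4], [2.0, 3.0, 5.0], total_gb=16))  # -> 13
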
Signed-off-by: Glenn Jocher * Update ci-testing.yml Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- .github/workflows/ci-testing.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index fde6fffe92f4..4ef930c61233 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -47,7 +47,7 @@ jobs: strategy: fail-fast: false matrix: - os: [ ubuntu-latest, macos-latest, windows-latest ] + os: [ ubuntu-latest, windows-latest ] # macos-latest bug https://github.com/ultralytics/yolov5/pull/9049 python-version: [ '3.10' ] model: [ yolov5n ] include: From f258cf8b37aeb3062230d43e1e9a4bf3b9874588 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 20 Aug 2022 17:17:35 +0200 Subject: [PATCH 1339/1976] Add `--save-hybrid` mAP warning (#9050) * Add `--save-hybrid` mAP warning Signed-off-by: Glenn Jocher * Update val.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- val.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/val.py b/val.py index 7b4fab4c63be..fcaca889d7e2 100644 --- a/val.py +++ b/val.py @@ -365,6 +365,8 @@ def main(opt): if opt.task in ('train', 'val', 'test'): # run normally if opt.conf_thres > 0.001: # https://github.com/ultralytics/yolov5/issues/1466 LOGGER.info(f'WARNING: confidence threshold {opt.conf_thres} > 0.001 produces invalid results ⚠️') + if opt.save_hybrid: + LOGGER.info('WARNING: --save-hybrid will return high mAP from hybrid labels, not from predictions alone ⚠️') run(**vars(opt)) else: From c725511bfc14eb86daf6edefa0d257084aa24c85 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 21 Aug 2022 01:34:03 +0200 Subject: [PATCH 1340/1976] Refactor for simplification (#9054) * Refactor for simplification * cleanup * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/downloads.py | 2 +- utils/general.py | 5 +++-- utils/metrics.py | 2 +- utils/plots.py | 8 +++----- utils/torch_utils.py | 11 +++++------ 5 files changed, 13 insertions(+), 15 deletions(-) diff --git a/utils/downloads.py b/utils/downloads.py index c4d4a85c38ae..69887a579966 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -46,7 +46,7 @@ def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''): except Exception as e: # url2 file.unlink(missing_ok=True) # remove partial downloads LOGGER.info(f'ERROR: {e}\nRe-attempting {url2 or url} to {file}...') - os.system(f"curl -L '{url2 or url}' -o '{file}' --retry 3 -C -") # curl download, retry and resume on fail + os.system(f"curl -# -L '{url2 or url}' -o '{file}' --retry 3 -C -") # curl download, retry and resume on fail finally: if not file.exists() or file.stat().st_size < min_bytes: # check file.unlink(missing_ok=True) # remove partial downloads diff --git a/utils/general.py b/utils/general.py index 42d000918c13..d9f436a36359 100755 --- a/utils/general.py +++ b/utils/general.py @@ -582,7 +582,7 @@ def url2file(url): def download(url, dir='.', unzip=True, delete=True, curl=False, threads=1, retry=3): - # Multi-threaded file download and unzip function, used in data.yaml for autodownload + # Multithreaded file download and unzip function, used in data.yaml for autodownload def download_one(url, dir): # Download 1 file success = True @@ -594,7 +594,8 @@ def download_one(url, dir): for i in range(retry + 1): if curl: s = 'sS' if threads > 1 else '' # silent - r 
= os.system(f'curl -{s}L "{url}" -o "{f}" --retry 9 -C -') # curl download with retry, continue + r = os.system( + f'curl -# -{s}L "{url}" -o "{f}" --retry 9 -C -') # curl download with retry, continue success = r == 0 else: torch.hub.download_url_to_file(url, f, progress=threads == 1) # torch download diff --git a/utils/metrics.py b/utils/metrics.py index 08880cd3f212..8fa3c7e217c7 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -141,7 +141,7 @@ def process_batch(self, detections, labels): """ if detections is None: gt_classes = labels.int() - for i, gc in enumerate(gt_classes): + for gc in gt_classes: self.matrix[self.nc, gc] += 1 # background FN return diff --git a/utils/plots.py b/utils/plots.py index 7417308c4d82..2c7a80b4c872 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -3,6 +3,7 @@ Plotting utils """ +import contextlib import math import os from copy import copy @@ -180,8 +181,7 @@ def output_to_target(output): # Convert model output to target format [batch_id, class_id, x, y, w, h, conf] targets = [] for i, o in enumerate(output): - for *box, conf, cls in o.cpu().numpy(): - targets.append([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf]) + targets.extend([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf] for *box, conf, cls in o.cpu().numpy()) return np.array(targets) @@ -357,10 +357,8 @@ def plot_labels(labels, names=(), save_dir=Path('')): matplotlib.use('svg') # faster ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel() y = ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8) - try: # color histogram bars by class + with contextlib.suppress(Exception): # color histogram bars by class [y[2].patches[i].set_color([x / 255 for x in colors(i)]) for i in range(nc)] # known issue #3195 - except Exception: - pass ax[0].set_ylabel('instances') if 0 < len(names) < 30: ax[0].set_xticks(range(len(names))) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 4de2520b26a2..88108906bfd3 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -45,11 +45,10 @@ def decorate(fn): def smartCrossEntropyLoss(label_smoothing=0.0): # Returns nn.CrossEntropyLoss with label smoothing enabled for torch>=1.10.0 if check_version(torch.__version__, '1.10.0'): - return nn.CrossEntropyLoss(label_smoothing=label_smoothing) # loss function - else: - if label_smoothing > 0: - LOGGER.warning(f'WARNING: label smoothing {label_smoothing} requires torch>=1.10.0') - return nn.CrossEntropyLoss() # loss function + return nn.CrossEntropyLoss(label_smoothing=label_smoothing) + if label_smoothing > 0: + LOGGER.warning(f'WARNING: label smoothing {label_smoothing} requires torch>=1.10.0') + return nn.CrossEntropyLoss() def smart_DDP(model): @@ -118,7 +117,7 @@ def select_device(device='', batch_size=0, newline=True): assert torch.cuda.is_available() and torch.cuda.device_count() >= len(device.replace(',', '')), \ f"Invalid CUDA '--device {device}' requested, use '--device cpu' or pass valid CUDA device(s)" - if not (cpu or mps) and torch.cuda.is_available(): # prefer GPU if available + if not cpu and not mps and torch.cuda.is_available(): # prefer GPU if available devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e. 
0,1,6,7 n = len(devices) # device count if n > 1 and batch_size > 0: # check batch_size is divisible by device_count From 93f63ee33f2dd2fe9e61268464c9a79f30aa7549 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 21 Aug 2022 02:00:35 +0200 Subject: [PATCH 1341/1976] Refactor for simplification 2 (#9055) * Refactor for simplification 2 * Update __init__.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- export.py | 3 +-- utils/loggers/__init__.py | 20 +++++++------------- 2 files changed, 8 insertions(+), 15 deletions(-) diff --git a/export.py b/export.py index 7b398fdc4d93..166b5f406a20 100644 --- a/export.py +++ b/export.py @@ -436,8 +436,7 @@ def export_tfjs(file, prefix=colorstr('TensorFlow.js:')): f'--output_node_names=Identity,Identity_1,Identity_2,Identity_3 {f_pb} {f}' subprocess.run(cmd.split()) - with open(f_json) as j: - json = j.read() + json = Path(f_json).read_text() with open(f_json, 'w') as j: # sort JSON Identity_* in ascending order subst = re.sub( r'{"outputs": {"Identity.?.?": {"name": "Identity.?.?"}, ' diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 34704b625294..b95a463717f8 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -187,18 +187,16 @@ def on_fit_epoch_end(self, vals, epoch, best_fitness, fi): def on_model_save(self, last, epoch, final_epoch, best_fitness, fi): # Callback runs on model save event - if self.wandb: - if ((epoch + 1) % self.opt.save_period == 0 and not final_epoch) and self.opt.save_period != -1: + if (epoch + 1) % self.opt.save_period == 0 and not final_epoch and self.opt.save_period != -1: + if self.wandb: self.wandb.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi) - - if self.clearml: - if ((epoch + 1) % self.opt.save_period == 0 and not final_epoch) and self.opt.save_period != -1: + if self.clearml: self.clearml.task.update_output_model(model_path=str(last), model_name='Latest Model', auto_delete_file=False) def on_train_end(self, last, best, plots, epoch, results): - # Callback runs on training end + # Callback runs on training end, i.e. 
saving best model if plots: plot_results(file=self.save_dir / 'results.csv') # save results.png files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))] @@ -220,15 +218,11 @@ def on_train_end(self, last, best, plots, epoch, results): aliases=['latest', 'best', 'stripped']) self.wandb.finish_run() - if self.clearml: - # Save the best model here - if not self.opt.evolve: - self.clearml.task.update_output_model(model_path=str(best if best.exists() else last), - name='Best Model') + if self.clearml and not self.opt.evolve: + self.clearml.task.update_output_model(model_path=str(best if best.exists() else last), name='Best Model') - def on_params_update(self, params): + def on_params_update(self, params: dict): # Update hyperparams or configs of the experiment - # params: A dict containing {param: value} pairs if self.wandb: self.wandb.wandb_run.config.update(params, allow_val_change=True) From 841f312f9384d3ab8f2ff2ae287441ecfba03740 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 21 Aug 2022 02:23:39 +0200 Subject: [PATCH 1342/1976] zero-mAP fix return `.detach()` to EMA (#9056) Resolves https://github.com/ultralytics/hub/issues/82 Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/torch_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 88108906bfd3..b934248dee43 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -422,7 +422,7 @@ def update(self, model): for k, v in self.ema.state_dict().items(): if v.dtype.is_floating_point: # true for FP16 and FP32 v *= d - v += (1 - d) * msd[k] + v += (1 - d) * msd[k].detach() assert v.dtype == msd[k].dtype == torch.float32, f'EMA {v.dtype} and model {msd[k]} must be updated in FP32' def update_attr(self, model, include=(), exclude=('process_group', 'reducer')): From 27fb6fd8fc21c20290041f38046d7a60ae8c6e3a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 21 Aug 2022 03:22:24 +0200 Subject: [PATCH 1343/1976] zero-mAP fix 3 (#9058) * zero-mAP fix 3 Signed-off-by: Glenn Jocher * Update torch_utils.py Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update torch_utils.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/torch_utils.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index b934248dee43..5fbe8bbf10f6 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -412,7 +412,6 @@ def __init__(self, model, decay=0.9999, tau=2000, updates=0): for p in self.ema.parameters(): p.requires_grad_(False) - @smart_inference_mode() def update(self, model): # Update EMA parameters self.updates += 1 @@ -423,7 +422,7 @@ def update(self, model): if v.dtype.is_floating_point: # true for FP16 and FP32 v *= d v += (1 - d) * msd[k].detach() - assert v.dtype == msd[k].dtype == torch.float32, f'EMA {v.dtype} and model {msd[k]} must be updated in FP32' + assert v.dtype == msd[k].detach().dtype == torch.float32, f'EMA {v.dtype} and model {msd[k]} must both be FP32' def update_attr(self, model, include=(), exclude=('process_group', 'reducer')): # Update EMA attributes From e0700cce776c557e7cee51103c53032b766f224a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 21 Aug 2022 03:47:37 +0200 Subject: [PATCH 1344/1976] Daemon `plot_labels()` for faster start 
(#9057) * Daemon `plot_labels()` for faster start * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update train.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- train.py | 10 +++------- utils/callbacks.py | 13 +++++++++---- utils/general.py | 2 +- utils/loggers/__init__.py | 12 +++++++----- utils/plots.py | 1 - 5 files changed, 20 insertions(+), 18 deletions(-) diff --git a/train.py b/train.py index 665b4f5b609e..0bfcaffc16db 100644 --- a/train.py +++ b/train.py @@ -52,7 +52,7 @@ from utils.loggers.wandb.wandb_utils import check_wandb_resume from utils.loss import ComputeLoss from utils.metrics import fitness -from utils.plots import plot_evolve, plot_labels +from utils.plots import plot_evolve from utils.torch_utils import (EarlyStopping, ModelEMA, de_parallel, select_device, smart_DDP, smart_optimizer, smart_resume, torch_distributed_zero_first) @@ -215,15 +215,11 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio prefix=colorstr('val: '))[0] if not resume: - if plots: - plot_labels(labels, names, save_dir) - - # Anchors if not opt.noautoanchor: - check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz) + check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz) # run AutoAnchor model.half().float() # pre-reduce anchor precision - callbacks.run('on_pretrain_routine_end') + callbacks.run('on_pretrain_routine_end', labels, names, plots) # DDP mode if cuda and RANK != -1: diff --git a/utils/callbacks.py b/utils/callbacks.py index 2b32df0bf1c1..166d8938322d 100644 --- a/utils/callbacks.py +++ b/utils/callbacks.py @@ -3,6 +3,8 @@ Callback utils """ +import threading + class Callbacks: """" @@ -55,17 +57,20 @@ def get_registered_actions(self, hook=None): """ return self._callbacks[hook] if hook else self._callbacks - def run(self, hook, *args, **kwargs): + def run(self, hook, *args, thread=False, **kwargs): """ - Loop through the registered actions and fire all callbacks + Loop through the registered actions and fire all callbacks on main thread Args: hook: The name of the hook to check, defaults to all args: Arguments to receive from YOLOv5 + thread: (boolean) Run callbacks in daemon thread kwargs: Keyword Arguments to receive from YOLOv5 """ assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}" - for logger in self._callbacks[hook]: - logger['callback'](*args, **kwargs) + if thread: + threading.Thread(target=logger['callback'], args=args, kwargs=kwargs, daemon=True).start() + else: + logger['callback'](*args, **kwargs) diff --git a/utils/general.py b/utils/general.py index d9f436a36359..3bc6fbc22d57 100755 --- a/utils/general.py +++ b/utils/general.py @@ -622,7 +622,7 @@ def download_one(url, dir): dir.mkdir(parents=True, exist_ok=True) # make directory if threads > 1: pool = ThreadPool(threads) - pool.imap(lambda x: download_one(*x), zip(url, repeat(dir))) # multi-threaded + pool.imap(lambda x: download_one(*x), zip(url, repeat(dir))) # multithreaded pool.close() pool.join() else: diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index b95a463717f8..c5cdd92772f2 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -11,10 +11,10 @@ import torch from torch.utils.tensorboard import SummaryWriter -from utils.general import colorstr, cv2 +from utils.general import colorstr, cv2, threaded from 
utils.loggers.clearml.clearml_utils import ClearmlLogger from utils.loggers.wandb.wandb_utils import WandbLogger -from utils.plots import plot_images, plot_results +from utils.plots import plot_images, plot_labels, plot_results from utils.torch_utils import de_parallel LOGGERS = ('csv', 'tb', 'wandb', 'clearml') # *.csv, TensorBoard, Weights & Biases, ClearML @@ -110,13 +110,15 @@ def on_train_start(self): # Callback runs on train start pass - def on_pretrain_routine_end(self): + def on_pretrain_routine_end(self, labels, names, plots): # Callback runs on pre-train routine end + if plots: + plot_labels(labels, names, self.save_dir) paths = self.save_dir.glob('*labels*.jpg') # training labels if self.wandb: self.wandb.log({"Labels": [wandb.Image(str(x), caption=x.name) for x in paths]}) - if self.clearml: - pass # ClearML saves these images automatically using hooks + # if self.clearml: + # pass # ClearML saves these images automatically using hooks def on_train_batch_end(self, ni, model, imgs, targets, paths, plots): # Callback runs on train batch end diff --git a/utils/plots.py b/utils/plots.py index 2c7a80b4c872..7e1de43aba1b 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -340,7 +340,6 @@ def plot_val_study(file='', dir='', x=None): # from utils.plots import *; plot_ @try_except # known issue https://github.com/ultralytics/yolov5/issues/5395 -@Timeout(30) # known issue https://github.com/ultralytics/yolov5/issues/5611 def plot_labels(labels, names=(), save_dir=Path('')): # plot dataset labels LOGGER.info(f"Plotting labels to {save_dir / 'labels.jpg'}... ") From 6077bf032aa67b8b849b755aa29c66b2eaaee59e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 21 Aug 2022 13:10:02 +0200 Subject: [PATCH 1345/1976] TensorBoard fix in tutorial.ipynb (#9064) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- tutorial.ipynb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 91e2d7e75eab..55e423d72833 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -660,7 +660,7 @@ "#@title Select YOLOv5 🚀 logger\n", "logger = 'TensorBoard' #@param ['TensorBoard', 'ClearML', 'W&B']\n", "\n", - "if logger == 'Tensorboard':\n", + "if logger == 'TensorBoard':\n", " %load_ext tensorboard\n", " %tensorboard --logdir runs/train\n", "elif logger == 'ClearML':\n", @@ -1103,4 +1103,4 @@ "outputs": [] } ] -} \ No newline at end of file +} From 794f117f4bdd02171273d49da33c1e8a22037f76 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 21 Aug 2022 13:33:13 +0200 Subject: [PATCH 1346/1976] Created using Colaboratory --- tutorial.ipynb | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 55e423d72833..040197bf8365 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -664,12 +664,10 @@ " %load_ext tensorboard\n", " %tensorboard --logdir runs/train\n", "elif logger == 'ClearML':\n", - " %pip install -q clearml\n", - " !clearml-init\n", + " %pip install -q clearml && clearml-init\n", "elif logger == 'W&B':\n", " %pip install -q wandb\n", - " import wandb\n", - " wandb.login()" + " import wandb; wandb.login()" ], "metadata": { "id": "i3oKtE4g-aNn" @@ -1103,4 +1101,4 @@ "outputs": [] } ] -} +} \ No newline at end of file From 1499526f5668f97832abf39c9e24e2acf3f98fdf Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 21 Aug 2022 14:20:12 +0200 Subject: [PATCH 1347/1976] Created using Colaboratory --- tutorial.ipynb | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git 
a/tutorial.ipynb b/tutorial.ipynb index 040197bf8365..a8975424cb39 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -534,7 +534,7 @@ }, "source": [ "# Download COCO val\n", - "torch.hub.download_url_to_file('https://ultralytics.com/assets/coco2017val.zip', 'tmp.zip') # download COCO val (1GB - 5000 images)\n", + "torch.hub.download_url_to_file('https://ultralytics.com/assets/coco2017val.zip', 'tmp.zip') # download (780M - 5000 images)\n", "!unzip -q tmp.zip -d ../datasets && rm tmp.zip # unzip" ], "execution_count": null, @@ -657,7 +657,7 @@ { "cell_type": "code", "source": [ - "#@title Select YOLOv5 🚀 logger\n", + "#@title Select YOLOv5 🚀 logger {run: 'auto'}\n", "logger = 'TensorBoard' #@param ['TensorBoard', 'ClearML', 'W&B']\n", "\n", "if logger == 'TensorBoard':\n", @@ -1077,7 +1077,7 @@ "cell_type": "code", "source": [ "# Validate on COCO test. Zip results.json and submit to eval server at https://competitions.codalab.org/competitions/20794\n", - "!bash data/scripts/get_coco.sh --test # download COCO test-dev2017 (7GB - 40,000 images, test 20,000)\n", + "!bash data/scripts/get_coco.sh --test # download COCO test-dev2017 (7G - 40,000 images, test 20,000)\n", "!python val.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65 --half --task test" ], "metadata": { From e6b4bf0bc26c06d54dd92eacef89decdc580a0f5 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 21 Aug 2022 14:21:48 +0200 Subject: [PATCH 1348/1976] Created using Colaboratory --- tutorial.ipynb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index a8975424cb39..8753a2310b1d 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -1064,7 +1064,7 @@ "cell_type": "code", "source": [ "# Classification val\n", - "!bash data/scripts/get_imagenet.sh --val # download ImageNet val split (6.3G, 50000 images)\n", + "!bash data/scripts/get_imagenet.sh --val # download ImageNet val split (6.3G - 50000 images)\n", "!python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224 # validate" ], "metadata": { @@ -1077,7 +1077,7 @@ "cell_type": "code", "source": [ "# Validate on COCO test. 
Zip results.json and submit to eval server at https://competitions.codalab.org/competitions/20794\n", - "!bash data/scripts/get_coco.sh --test # download COCO test-dev2017 (7G - 40,000 images, test 20,000)\n", + "!bash data/scripts/get_coco.sh --test # download COCO test-dev2017 (7G - 40000 images, test 20000)\n", "!python val.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65 --half --task test" ], "metadata": { From 87e8deadd563982672a1c5104a68d1a67f0cf765 Mon Sep 17 00:00:00 2001 From: 0zppd <111682241+0zppd@users.noreply.github.com> Date: Sun, 21 Aug 2022 18:40:28 +0500 Subject: [PATCH 1349/1976] zero-mAP fix remove `torch.empty()` forward pass in `.train()` mode (#9068) * Fix Zero Map Issue Signed-off-by: 0zppd <111682241+0zppd@users.noreply.github.com> * Update __init__.py Signed-off-by: Glenn Jocher Signed-off-by: 0zppd <111682241+0zppd@users.noreply.github.com> Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- utils/loggers/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index c5cdd92772f2..b9869df26a43 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -296,7 +296,7 @@ def log_tensorboard_graph(tb, model, imgsz=(640, 640)): try: p = next(model.parameters()) # for device, type imgsz = (imgsz, imgsz) if isinstance(imgsz, int) else imgsz # expand - im = torch.empty((1, 3, *imgsz)).to(p.device).type_as(p) # input image + im = torch.zeros((1, 3, *imgsz)).to(p.device).type_as(p) # input image (WARNING: must be zeros, not empty) with warnings.catch_warnings(): warnings.simplefilter('ignore') # suppress jit trace warning tb.add_graph(torch.jit.trace(de_parallel(model), im, strict=False), []) From 0b8639a40a9c73a9ee1556405fabfd2d46087299 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 21 Aug 2022 15:50:02 +0200 Subject: [PATCH 1350/1976] Rename 'labels' to 'instances' (#9066) * Rename labels to instances * Rename labels to instances * align val --- train.py | 4 ++-- val.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/train.py b/train.py index 0bfcaffc16db..ac38d04dba90 100644 --- a/train.py +++ b/train.py @@ -271,7 +271,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio if RANK != -1: train_loader.sampler.set_epoch(epoch) pbar = enumerate(train_loader) - LOGGER.info(('\n' + '%10s' * 7) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'labels', 'img_size')) + LOGGER.info(('\n' + '%11s' * 7) % ('Epoch', 'GPU_mem', 'box_loss', 'obj_loss', 'cls_loss', 'Instances', 'Size')) if RANK in {-1, 0}: pbar = tqdm(pbar, total=nb, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar optimizer.zero_grad() @@ -326,7 +326,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio if RANK in {-1, 0}: mloss = (mloss * i + loss_items) / (i + 1) # update mean losses mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G' # (GB) - pbar.set_description(('%10s' * 2 + '%10.4g' * 5) % + pbar.set_description(('%11s' * 2 + '%11.4g' * 5) % (f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1])) callbacks.run('on_train_batch_end', ni, model, imgs, targets, paths, plots) if callbacks.stop_training: diff --git a/val.py b/val.py index fcaca889d7e2..f9557bba651d 100644 --- a/val.py +++ b/val.py @@ -186,7 +186,7 @@ def run( if isinstance(names, (list, tuple)): # old format names = dict(enumerate(names)) class_map = coco80_to_coco91_class() if is_coco else 
list(range(1000)) - s = ('%20s' + '%11s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95') + s = ('%22s' + '%11s' * 6) % ('Class', 'Images', 'Instances', 'P', 'R', 'mAP@.5', 'mAP@.5:.95') dt, p, r, f1, mp, mr, map50, map = (Profile(), Profile(), Profile()), 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 loss = torch.zeros(3, device=device) jdict, stats, ap, ap_class = [], [], [], [] @@ -270,7 +270,7 @@ def run( nt = np.bincount(stats[3].astype(int), minlength=nc) # number of targets per class # Print results - pf = '%20s' + '%11i' * 2 + '%11.3g' * 4 # print format + pf = '%22s' + '%11i' * 2 + '%11.3g' * 4 # print format LOGGER.info(pf % ('all', seen, nt.sum(), mp, mr, map50, map)) if nt.sum() == 0: LOGGER.warning(f'WARNING: no labels found in {task} set, can not compute metrics without labels ⚠️') From 8665d557c1caa66c190c1ec26b377eeae385d1d6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 21 Aug 2022 16:51:50 +0200 Subject: [PATCH 1351/1976] Threaded TensorBoard graph logging (#9070) * Log TensorBoard graph on pretrain_routine_end * fix --- train.py | 6 +++--- utils/loggers/__init__.py | 34 ++++++++++++++++++---------------- 2 files changed, 21 insertions(+), 19 deletions(-) diff --git a/train.py b/train.py index ac38d04dba90..e4c9b6ae6749 100644 --- a/train.py +++ b/train.py @@ -219,7 +219,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz) # run AutoAnchor model.half().float() # pre-reduce anchor precision - callbacks.run('on_pretrain_routine_end', labels, names, plots) + callbacks.run('on_pretrain_routine_end', labels, names) # DDP mode if cuda and RANK != -1: @@ -328,7 +328,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G' # (GB) pbar.set_description(('%11s' * 2 + '%11.4g' * 5) % (f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1])) - callbacks.run('on_train_batch_end', ni, model, imgs, targets, paths, plots) + callbacks.run('on_train_batch_end', model, ni, imgs, targets, paths) if callbacks.stop_training: return # end batch ------------------------------------------------------------------------------------------------ @@ -420,7 +420,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio if is_coco: callbacks.run('on_fit_epoch_end', list(mloss) + list(results) + lr, epoch, best_fitness, fi) - callbacks.run('on_train_end', last, best, plots, epoch, results) + callbacks.run('on_train_end', last, best, epoch, results) torch.cuda.empty_cache() return results diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index b9869df26a43..98a123eee74d 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -49,6 +49,7 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, self.weights = weights self.opt = opt self.hyp = hyp + self.plots = not opt.noplots # plot results self.logger = logger # for printing results to console self.include = include self.keys = [ @@ -110,26 +111,26 @@ def on_train_start(self): # Callback runs on train start pass - def on_pretrain_routine_end(self, labels, names, plots): + def on_pretrain_routine_end(self, labels, names): # Callback runs on pre-train routine end - if plots: + if self.plots: plot_labels(labels, names, self.save_dir) - paths = self.save_dir.glob('*labels*.jpg') # training labels - if self.wandb: - 
self.wandb.log({"Labels": [wandb.Image(str(x), caption=x.name) for x in paths]}) - # if self.clearml: - # pass # ClearML saves these images automatically using hooks + paths = self.save_dir.glob('*labels*.jpg') # training labels + if self.wandb: + self.wandb.log({"Labels": [wandb.Image(str(x), caption=x.name) for x in paths]}) + # if self.clearml: + # pass # ClearML saves these images automatically using hooks - def on_train_batch_end(self, ni, model, imgs, targets, paths, plots): + def on_train_batch_end(self, model, ni, imgs, targets, paths): # Callback runs on train batch end # ni: number integrated batches (since train start) - if plots: - if ni == 0 and not self.opt.sync_bn and self.tb: - log_tensorboard_graph(self.tb, model, imgsz=list(imgs.shape[2:4])) + if self.plots: if ni < 3: f = self.save_dir / f'train_batch{ni}.jpg' # filename plot_images(imgs, targets, paths, f) - if (self.wandb or self.clearml) and ni == 10: + if ni == 0 and self.tb and not self.opt.sync_bn: + log_tensorboard_graph(self.tb, model, imgsz=(self.opt.imgsz, self.opt.imgsz)) + if ni == 10 and (self.wandb or self.clearml): files = sorted(self.save_dir.glob('train*.jpg')) if self.wandb: self.wandb.log({'Mosaics': [wandb.Image(str(f), caption=f.name) for f in files if f.exists()]}) @@ -197,9 +198,9 @@ def on_model_save(self, last, epoch, final_epoch, best_fitness, fi): model_name='Latest Model', auto_delete_file=False) - def on_train_end(self, last, best, plots, epoch, results): + def on_train_end(self, last, best, epoch, results): # Callback runs on training end, i.e. saving best model - if plots: + if self.plots: plot_results(file=self.save_dir / 'results.csv') # save results.png files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))] files = [(self.save_dir / f) for f in files if (self.save_dir / f).exists()] # filter @@ -291,6 +292,7 @@ def log_model(self, model_path, epoch=0, metadata={}): wandb.log_artifact(art) +@threaded def log_tensorboard_graph(tb, model, imgsz=(640, 640)): # Log model graph to TensorBoard try: @@ -300,5 +302,5 @@ def log_tensorboard_graph(tb, model, imgsz=(640, 640)): with warnings.catch_warnings(): warnings.simplefilter('ignore') # suppress jit trace warning tb.add_graph(torch.jit.trace(de_parallel(model), im, strict=False), []) - except Exception: - print('WARNING: TensorBoard graph visualization failure') + except Exception as e: + print(f'WARNING: TensorBoard graph visualization failure {e}') From 5373a28c1bcede65e513b7be0ab5a0d43125c90c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 21 Aug 2022 17:01:05 +0200 Subject: [PATCH 1352/1976] Created using Colaboratory --- tutorial.ipynb | 451 ++++++++++++++++++++++++++----------------------- 1 file changed, 243 insertions(+), 208 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 8753a2310b1d..5b7b1f287d7e 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -17,110 +17,121 @@ "accelerator": "GPU", "widgets": { "application/vnd.jupyter.widget-state+json": { - "6d6b90ead2db49b3bdf624b6ba9b44e9": { + "da0946bcefd9414fa282977f7f609e36": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", - "model_module_version": "1.5.0", + "model_module_version": "2.0.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", + "_model_module_version": "2.0.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", + "_view_module_version": 
"2.0.0", "_view_name": "HBoxView", "box_style": "", "children": [ - "IPY_MODEL_cb77443edb9e43328a56aaa4413a0df3", - "IPY_MODEL_954c8b8699e143bf92be6bfc02fc52f6", - "IPY_MODEL_a64775946e13477f83d8bba6086385b9" + "IPY_MODEL_7838c0af44244ccc906c413cea0989d7", + "IPY_MODEL_309ea78b3e814198b4080beb878d5329", + "IPY_MODEL_b2d1d998e5db4ca1a36280902e1647c7" ], - "layout": "IPY_MODEL_1413611b7f4f4ef99e4f541f5ca35ed6" + "layout": "IPY_MODEL_e7d7f56c77884717ba122f1d603c0852", + "tabbable": null, + "tooltip": null } }, - "cb77443edb9e43328a56aaa4413a0df3": { + "7838c0af44244ccc906c413cea0989d7": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", - "model_module_version": "1.5.0", + "model_module_version": "2.0.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", + "_model_module_version": "2.0.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", + "_view_module_version": "2.0.0", "_view_name": "HTMLView", "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_00737f5558eb4fbd968172acb978e54a", + "description_allow_html": false, + "layout": "IPY_MODEL_abf60d6b8ea847f9bb358ae2b045458b", "placeholder": "​", - "style": "IPY_MODEL_f03e5ddfd1c04bedaf68ab02c3f6f0ea", + "style": "IPY_MODEL_379196a2761b4a29aca8ef088dc60c10", + "tabbable": null, + "tooltip": null, "value": "100%" } }, - "954c8b8699e143bf92be6bfc02fc52f6": { + "309ea78b3e814198b4080beb878d5329": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", - "model_module_version": "1.5.0", + "model_module_version": "2.0.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", + "_model_module_version": "2.0.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", + "_view_module_version": "2.0.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_6926db7e0035455f99e1dd4508c4b19c", + "description_allow_html": false, + "layout": "IPY_MODEL_52b546a356e54174a95049b30cb52c81", "max": 818322941, "min": 0, "orientation": "horizontal", - "style": "IPY_MODEL_a6a52c9f828b458e97ddf7a11ae9275f", + "style": "IPY_MODEL_0889e134327e4aa0a8719d03a0d6941b", + "tabbable": null, + "tooltip": null, "value": 818322941 } }, - "a64775946e13477f83d8bba6086385b9": { + "b2d1d998e5db4ca1a36280902e1647c7": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", - "model_module_version": "1.5.0", + "model_module_version": "2.0.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", + "_model_module_version": "2.0.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", + "_view_module_version": "2.0.0", "_view_name": "HTMLView", "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_c4c7dc45a1c24dc4b2c709e21271a37e", + "description_allow_html": false, + "layout": "IPY_MODEL_30f22a3e42d24f10ad9851f40a6703f3", "placeholder": "​", - "style": "IPY_MODEL_09c43ffe2c7e4bdc9489e83f9d82ab73", - "value": " 780M/780M [01:12<00:00, 23.8MB/s]" + "style": "IPY_MODEL_648b3512bb7d4ccca5d75af36c133e92", + "tabbable": null, + "tooltip": null, + "value": " 780M/780M [01:31<00:00, 12.3MB/s]" } }, - 
"1413611b7f4f4ef99e4f541f5ca35ed6": { + "e7d7f56c77884717ba122f1d603c0852": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", - "model_module_version": "1.2.0", + "model_module_version": "2.0.0", "state": { "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", + "_model_module_version": "2.0.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", + "_view_module_version": "2.0.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, - "border": null, + "border_bottom": null, + "border_left": null, + "border_right": null, + "border_top": null, "bottom": null, "display": null, "flex": null, @@ -148,8 +159,6 @@ "object_position": null, "order": null, "overflow": null, - "overflow_x": null, - "overflow_y": null, "padding": null, "right": null, "top": null, @@ -157,22 +166,25 @@ "width": null } }, - "00737f5558eb4fbd968172acb978e54a": { + "abf60d6b8ea847f9bb358ae2b045458b": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", - "model_module_version": "1.2.0", + "model_module_version": "2.0.0", "state": { "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", + "_model_module_version": "2.0.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", + "_view_module_version": "2.0.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, - "border": null, + "border_bottom": null, + "border_left": null, + "border_right": null, + "border_top": null, "bottom": null, "display": null, "flex": null, @@ -200,8 +212,6 @@ "object_position": null, "order": null, "overflow": null, - "overflow_x": null, - "overflow_y": null, "padding": null, "right": null, "top": null, @@ -209,37 +219,43 @@ "width": null } }, - "f03e5ddfd1c04bedaf68ab02c3f6f0ea": { + "379196a2761b4a29aca8ef088dc60c10": { "model_module": "@jupyter-widgets/controls", - "model_name": "DescriptionStyleModel", - "model_module_version": "1.5.0", + "model_name": "HTMLStyleModel", + "model_module_version": "2.0.0", "state": { "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", + "_model_module_version": "2.0.0", + "_model_name": "HTMLStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", + "_view_module_version": "2.0.0", "_view_name": "StyleView", - "description_width": "" + "background": null, + "description_width": "", + "font_size": null, + "text_color": null } }, - "6926db7e0035455f99e1dd4508c4b19c": { + "52b546a356e54174a95049b30cb52c81": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", - "model_module_version": "1.2.0", + "model_module_version": "2.0.0", "state": { "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", + "_model_module_version": "2.0.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", + "_view_module_version": "2.0.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, - "border": null, + "border_bottom": null, + "border_left": null, + "border_right": null, + "border_top": null, "bottom": null, "display": null, "flex": null, @@ -267,8 +283,6 @@ "object_position": null, "order": null, "overflow": null, - "overflow_x": null, - 
"overflow_y": null, "padding": null, "right": null, "top": null, @@ -276,38 +290,41 @@ "width": null } }, - "a6a52c9f828b458e97ddf7a11ae9275f": { + "0889e134327e4aa0a8719d03a0d6941b": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", - "model_module_version": "1.5.0", + "model_module_version": "2.0.0", "state": { "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", + "_model_module_version": "2.0.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", + "_view_module_version": "2.0.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, - "c4c7dc45a1c24dc4b2c709e21271a37e": { + "30f22a3e42d24f10ad9851f40a6703f3": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", - "model_module_version": "1.2.0", + "model_module_version": "2.0.0", "state": { "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", + "_model_module_version": "2.0.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", + "_view_module_version": "2.0.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, - "border": null, + "border_bottom": null, + "border_left": null, + "border_right": null, + "border_top": null, "bottom": null, "display": null, "flex": null, @@ -335,8 +352,6 @@ "object_position": null, "order": null, "overflow": null, - "overflow_x": null, - "overflow_y": null, "padding": null, "right": null, "top": null, @@ -344,19 +359,22 @@ "width": null } }, - "09c43ffe2c7e4bdc9489e83f9d82ab73": { + "648b3512bb7d4ccca5d75af36c133e92": { "model_module": "@jupyter-widgets/controls", - "model_name": "DescriptionStyleModel", - "model_module_version": "1.5.0", + "model_name": "HTMLStyleModel", + "model_module_version": "2.0.0", "state": { "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", + "_model_module_version": "2.0.0", + "_model_name": "HTMLStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", + "_view_module_version": "2.0.0", "_view_name": "StyleView", - "description_width": "" + "background": null, + "description_width": "", + "font_size": null, + "text_color": null } } } @@ -404,7 +422,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "508de90c-846e-495d-c7d6-50681af62a98" + "outputId": "4200fd6f-c6f5-4505-a4f9-a918f3ed1f86" }, "source": [ "!git clone https://github.com/ultralytics/yolov5 # clone\n", @@ -415,13 +433,13 @@ "import utils\n", "display = utils.notebook_init() # checks" ], - "execution_count": null, + "execution_count": 1, "outputs": [ { "output_type": "stream", "name": "stderr", "text": [ - "YOLOv5 🚀 v6.2-15-g61adf01 Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n" + "YOLOv5 🚀 v6.2-41-g8665d55 Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n" ] }, { @@ -461,29 +479,29 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "93881540-331e-4890-cd38-4c2776933238" + "outputId": "1af15107-bcd1-4e8f-b5bd-0ee1a737e051" }, "source": [ "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images\n", "# display.Image(filename='runs/detect/exp/zidane.jpg', width=600)" ], - "execution_count": null, + "execution_count": 2, "outputs": [ { "output_type": "stream", 
"name": "stdout", "text": [ "\u001b[34m\u001b[1mdetect: \u001b[0mweights=['yolov5s.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/detect, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False\n", - "YOLOv5 🚀 v6.2-15-g61adf01 Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", + "YOLOv5 🚀 v6.2-41-g8665d55 Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", "\n", "Downloading https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt to yolov5s.pt...\n", - "100% 14.1M/14.1M [00:00<00:00, 39.3MB/s]\n", + "100% 14.1M/14.1M [00:00<00:00, 41.7MB/s]\n", "\n", "Fusing layers... \n", "YOLOv5s summary: 213 layers, 7225885 parameters, 0 gradients\n", - "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 14.9ms\n", - "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, 22.0ms\n", - "Speed: 0.6ms pre-process, 18.4ms inference, 24.1ms NMS per image at shape (1, 3, 640, 640)\n", + "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 14.5ms\n", + "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, 18.9ms\n", + "Speed: 0.5ms pre-process, 16.7ms inference, 21.4ms NMS per image at shape (1, 3, 640, 640)\n", "Results saved to \u001b[1mruns/detect/exp\u001b[0m\n" ] } @@ -515,29 +533,29 @@ "id": "WQPtK1QYVaD_", "colab": { "base_uri": "https://localhost:8080/", - "height": 49, + "height": 17, "referenced_widgets": [ - "6d6b90ead2db49b3bdf624b6ba9b44e9", - "cb77443edb9e43328a56aaa4413a0df3", - "954c8b8699e143bf92be6bfc02fc52f6", - "a64775946e13477f83d8bba6086385b9", - "1413611b7f4f4ef99e4f541f5ca35ed6", - "00737f5558eb4fbd968172acb978e54a", - "f03e5ddfd1c04bedaf68ab02c3f6f0ea", - "6926db7e0035455f99e1dd4508c4b19c", - "a6a52c9f828b458e97ddf7a11ae9275f", - "c4c7dc45a1c24dc4b2c709e21271a37e", - "09c43ffe2c7e4bdc9489e83f9d82ab73" + "da0946bcefd9414fa282977f7f609e36", + "7838c0af44244ccc906c413cea0989d7", + "309ea78b3e814198b4080beb878d5329", + "b2d1d998e5db4ca1a36280902e1647c7", + "e7d7f56c77884717ba122f1d603c0852", + "abf60d6b8ea847f9bb358ae2b045458b", + "379196a2761b4a29aca8ef088dc60c10", + "52b546a356e54174a95049b30cb52c81", + "0889e134327e4aa0a8719d03a0d6941b", + "30f22a3e42d24f10ad9851f40a6703f3", + "648b3512bb7d4ccca5d75af36c133e92" ] }, - "outputId": "ed2ca46e-a1a9-4a16-c449-859278d8aa18" + "outputId": "5f129105-eca5-4f33-fb1d-981255f814ad" }, "source": [ "# Download COCO val\n", "torch.hub.download_url_to_file('https://ultralytics.com/assets/coco2017val.zip', 'tmp.zip') # download (780M - 5000 images)\n", "!unzip -q tmp.zip -d ../datasets && rm tmp.zip # unzip" ], - "execution_count": null, + "execution_count": 3, "outputs": [ { "output_type": "display_data", @@ -548,7 +566,24 @@ "application/vnd.jupyter.widget-view+json": { "version_major": 2, "version_minor": 0, - "model_id": "6d6b90ead2db49b3bdf624b6ba9b44e9" + "model_id": "da0946bcefd9414fa282977f7f609e36" + }, + "application/json": { + "n": 0, + "total": 818322941, + "elapsed": 0.020366430282592773, + "ncols": null, + "nrows": null, + "prefix": "", + "ascii": false, + "unit": "B", + "unit_scale": true, + "rate": null, + "bar_format": null, + "postfix": null, + "unit_divisor": 1024, + "initial": 0, + 
"colour": null } }, "metadata": {} @@ -562,48 +597,48 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "19a590ef-363e-424c-d9ce-78bbe0593cd5" + "outputId": "40d5d000-abee-46a0-c07d-1066e1662e01" }, "source": [ "# Validate YOLOv5x on COCO val\n", "!python val.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65 --half" ], - "execution_count": null, + "execution_count": 4, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "\u001b[34m\u001b[1mval: \u001b[0mdata=/content/yolov5/data/coco.yaml, weights=['yolov5x.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.65, task=val, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=True, project=runs/val, name=exp, exist_ok=False, half=True, dnn=False\n", - "YOLOv5 🚀 v6.2-15-g61adf01 Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", + "YOLOv5 🚀 v6.2-41-g8665d55 Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", "\n", "Downloading https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x.pt to yolov5x.pt...\n", - "100% 166M/166M [00:06<00:00, 28.1MB/s]\n", + "100% 166M/166M [00:10<00:00, 16.6MB/s]\n", "\n", "Fusing layers... \n", "YOLOv5x summary: 444 layers, 86705005 parameters, 0 gradients\n", "Downloading https://ultralytics.com/assets/Arial.ttf to /root/.config/Ultralytics/Arial.ttf...\n", - "100% 755k/755k [00:00<00:00, 47.3MB/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupt: 100% 5000/5000 [00:00<00:00, 10756.32it/s]\n", + "100% 755k/755k [00:00<00:00, 1.39MB/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupt: 100% 5000/5000 [00:00<00:00, 10506.48it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: /content/datasets/coco/val2017.cache\n", - " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:07<00:00, 2.33it/s]\n", - " all 5000 36335 0.743 0.625 0.683 0.504\n", - "Speed: 0.1ms pre-process, 4.6ms inference, 1.2ms NMS per image at shape (32, 3, 640, 640)\n", + " Class Images Instances P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:06<00:00, 2.36it/s]\n", + " all 5000 36335 0.743 0.625 0.683 0.504\n", + "Speed: 0.1ms pre-process, 4.6ms inference, 1.1ms NMS per image at shape (32, 3, 640, 640)\n", "\n", "Evaluating pycocotools mAP... 
saving runs/val/exp/yolov5x_predictions.json...\n", "loading annotations into memory...\n", - "Done (t=0.41s)\n", + "Done (t=0.38s)\n", "creating index...\n", "index created!\n", "Loading and preparing results...\n", - "DONE (t=5.64s)\n", + "DONE (t=5.49s)\n", "creating index...\n", "index created!\n", "Running per image evaluation...\n", "Evaluate annotation type *bbox*\n", - "DONE (t=76.80s).\n", + "DONE (t=72.10s).\n", "Accumulating evaluation results...\n", - "DONE (t=14.61s).\n", + "DONE (t=13.94s).\n", " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.506\n", " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.688\n", " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.549\n", @@ -682,13 +717,13 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "47759d5e-34f0-4a6a-c714-ff533391cfff" + "outputId": "f0ce0354-7f50-4546-f3f9-672b4b522d59" }, "source": [ "# Train YOLOv5s on COCO128 for 3 epochs\n", "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache" ], - "execution_count": null, + "execution_count": 5, "outputs": [ { "output_type": "stream", @@ -696,7 +731,7 @@ "text": [ "\u001b[34m\u001b[1mtrain: \u001b[0mweights=yolov5s.pt, cfg=, data=coco128.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=-1, artifact_alias=latest\n", "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 🚀 v6.2-15-g61adf01 Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", + "YOLOv5 🚀 v6.2-41-g8665d55 Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", "\n", "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n", "\u001b[34m\u001b[1mWeights & Biases: \u001b[0mrun 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs in Weights & Biases\n", @@ -705,8 +740,8 @@ "\n", "Dataset not found ⚠️, missing paths ['/content/datasets/coco128/images/train2017']\n", "Downloading https://ultralytics.com/assets/coco128.zip to coco128.zip...\n", - "100% 6.66M/6.66M [00:00<00:00, 75.3MB/s]\n", - "Dataset download success ✅ (0.7s), saved to \u001b[1m/content/datasets\u001b[0m\n", + "100% 6.66M/6.66M [00:00<00:00, 76.7MB/s]\n", + "Dataset download success ✅ (0.5s), saved to \u001b[1m/content/datasets\u001b[0m\n", "\n", " from n params module arguments \n", " 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2] \n", @@ -740,33 +775,33 @@ "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n", "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n", "\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, 
blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '/content/datasets/coco128/labels/train2017' images and labels...128 found, 0 missing, 2 empty, 0 corrupt: 100% 128/128 [00:00<00:00, 7246.20it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '/content/datasets/coco128/labels/train2017' images and labels...128 found, 0 missing, 2 empty, 0 corrupt: 100% 128/128 [00:00<00:00, 7984.87it/s]\n", "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /content/datasets/coco128/labels/train2017.cache\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 986.21it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 1018.19it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupt: 100% 128/128 [00:00 Date: Sun, 21 Aug 2022 17:07:56 +0200 Subject: [PATCH 1353/1976] De-thread TensorBoard graph logging (#9071) * De-thread TensorBoard graph logging Issues with Classification models Signed-off-by: Glenn Jocher * Update __init__.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/loggers/__init__.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 98a123eee74d..006125edbcd9 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -11,7 +11,7 @@ import torch from torch.utils.tensorboard import SummaryWriter -from utils.general import colorstr, cv2, threaded +from utils.general import colorstr, cv2 from utils.loggers.clearml.clearml_utils import ClearmlLogger from utils.loggers.wandb.wandb_utils import WandbLogger from utils.plots import plot_images, plot_labels, plot_results @@ -292,7 +292,6 @@ def log_model(self, model_path, epoch=0, metadata={}): wandb.log_artifact(art) -@threaded def log_tensorboard_graph(tb, model, imgsz=(640, 640)): # Log model graph to TensorBoard try: From 262187e95d304f80abf08abd850b7b5076f2a7a9 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 21 Aug 2022 23:26:07 +0200 Subject: [PATCH 1354/1976] Two dimensional `size=(h,w)` AutoShape support (#9072) * Two dimensional `size=(h,w)` AutoShape support May resolve https://github.com/ultralytics/yolov5/issues/9039 Signed-off-by: Glenn Jocher * Update hubconf.py Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- hubconf.py | 10 +++++++--- models/common.py | 8 +++++--- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/hubconf.py b/hubconf.py index 293f177dcbc1..0a7f917bd7d1 100644 --- a/hubconf.py +++ b/hubconf.py @@ -30,7 +30,7 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo from models.common import AutoShape, DetectMultiBackend from models.experimental import attempt_load - from models.yolo import DetectionModel + from models.yolo import ClassificationModel, DetectionModel from utils.downloads import attempt_download from utils.general import LOGGER, check_requirements, intersect_dicts, logging from utils.torch_utils import select_device @@ -45,8 +45,12 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo if 
pretrained and channels == 3 and classes == 80: try: model = DetectMultiBackend(path, device=device, fuse=autoshape) # detection model - if autoshape and isinstance(model.model, DetectionModel): - model = AutoShape(model) # for file/URI/PIL/cv2/np inputs and NMS + if autoshape: + if model.pt and isinstance(model.model, ClassificationModel): + LOGGER.warning('WARNING: YOLOv5 v6.2 ClassificationModel is not yet AutoShape compatible. ' + 'You must pass torch tensors in BCHW to this model, i.e. shape(1,3,224,224).') + else: + model = AutoShape(model) # for file/URI/PIL/cv2/np inputs and NMS except Exception: model = attempt_load(path, device=device, fuse=False) # arbitrary model else: diff --git a/models/common.py b/models/common.py index 44192e622bb5..d308244c4a44 100644 --- a/models/common.py +++ b/models/common.py @@ -589,7 +589,7 @@ def _apply(self, fn): @smart_inference_mode() def forward(self, ims, size=640, augment=False, profile=False): - # Inference from various sources. For height=640, width=1280, RGB images example inputs are: + # Inference from various sources. For size(height=640, width=1280), RGB images example inputs are: # file: ims = 'data/images/zidane.jpg' # str or PosixPath # URI: = 'https://ultralytics.com/images/zidane.jpg' # OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(640,1280,3) @@ -600,6 +600,8 @@ def forward(self, ims, size=640, augment=False, profile=False): dt = (Profile(), Profile(), Profile()) with dt[0]: + if isinstance(size, int): # expand + size = (size, size) p = next(self.model.parameters()) if self.pt else torch.empty(1, device=self.model.device) # param autocast = self.amp and (p.device.type != 'cpu') # Automatic Mixed Precision (AMP) inference if isinstance(ims, torch.Tensor): # torch @@ -622,10 +624,10 @@ def forward(self, ims, size=640, augment=False, profile=False): im = im[..., :3] if im.ndim == 3 else cv2.cvtColor(im, cv2.COLOR_GRAY2BGR) # enforce 3ch input s = im.shape[:2] # HWC shape0.append(s) # image shape - g = (size / max(s)) # gain + g = max(size) / max(s) # gain shape1.append([y * g for y in s]) ims[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update - shape1 = [make_divisible(x, self.stride) if self.pt else size for x in np.array(shape1).max(0)] # inf shape + shape1 = [make_divisible(x, self.stride) for x in np.array(shape1).max(0)] if self.pt else size # inf shape x = [letterbox(im, shape1, auto=False)[0] for im in ims] # pad x = np.ascontiguousarray(np.array(x).transpose((0, 3, 1, 2))) # stack and BHWC to BCHW x = torch.from_numpy(x).to(p.device).type_as(p) / 255 # uint8 to fp16/32 From 0abae780b356aa29332f7d50552e0ed88e38ee3a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 22 Aug 2022 00:04:30 +0200 Subject: [PATCH 1355/1976] Remove unused Timeout import (#9073) * Remove unused Timeout import Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/plots.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/plots.py b/utils/plots.py index 7e1de43aba1b..d35e2bdd168a 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -19,8 +19,8 @@ import torch from PIL import Image, ImageDraw, ImageFont -from utils.general import (CONFIG_DIR, FONT, LOGGER, Timeout, check_font, check_requirements, clip_coords, - increment_path, is_ascii, threaded, try_except, xywh2xyxy, xyxy2xywh) +from 
utils.general import (CONFIG_DIR, FONT, LOGGER, check_font, check_requirements, clip_coords, increment_path, + is_ascii, threaded, try_except, xywh2xyxy, xyxy2xywh) from utils.metrics import fitness # Settings From 06831aa9e905e0fa703958f6b3f3db443cf477f3 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 22 Aug 2022 01:06:29 +0200 Subject: [PATCH 1356/1976] Improved Usage example docstrings (#9075) * Updated Usage examples * Update detect.py Signed-off-by: Glenn Jocher * Update predict.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- classify/predict.py | 23 +++++++++++++++++++---- classify/train.py | 11 +++++++---- classify/val.py | 16 ++++++++++++++-- detect.py | 36 ++++++++++++++++++------------------ export.py | 22 +++++++++++----------- hubconf.py | 4 ++-- models/tf.py | 2 +- models/yolo.py | 2 +- train.py | 17 ++++++++++------- val.py | 24 ++++++++++++------------ 10 files changed, 95 insertions(+), 62 deletions(-) diff --git a/classify/predict.py b/classify/predict.py index 0bf99140b8e3..135470fd36ed 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -1,9 +1,24 @@ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ -Run classification inference on file/dir/URL/glob - -Usage: - $ python classify/predict.py --weights yolov5s-cls.pt --source data/images/bus.jpg +Run YOLOv5 classification inference on images, videos, directories, and globs. + +Usage - sources: + $ python classify/predict.py --weights yolov5s.pt --source img.jpg # image + vid.mp4 # video + path/ # directory + 'path/*.jpg' # glob + +Usage - formats: + $ python classify/predict.py --weights yolov5s-cls.pt # PyTorch + yolov5s-cls.torchscript # TorchScript + yolov5s-cls.onnx # ONNX Runtime or OpenCV DNN with --dnn + yolov5s-cls.xml # OpenVINO + yolov5s-cls.engine # TensorRT + yolov5s-cls.mlmodel # CoreML (macOS-only) + yolov5s-cls_saved_model # TensorFlow SavedModel + yolov5s-cls.pb # TensorFlow GraphDef + yolov5s-cls.tflite # TensorFlow Lite + yolov5s-cls_edgetpu.tflite # TensorFlow Edge TPU """ import argparse diff --git a/classify/train.py b/classify/train.py index 8fe90c1b19eb..223367260bad 100644 --- a/classify/train.py +++ b/classify/train.py @@ -1,13 +1,16 @@ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ Train a YOLOv5 classifier model on a classification dataset -Datasets: --data mnist, fashion-mnist, cifar10, cifar100, imagenette, imagewoof, imagenet, or 'path/to/custom/dataset' -YOLOv5-cls models: --model yolov5n-cls.pt, yolov5s-cls.pt, yolov5m-cls.pt, yolov5l-cls.pt, yolov5x-cls.pt -Torchvision models: --model resnet50, efficientnet_b0, etc. See https://pytorch.org/vision/stable/models.html -Usage - Single-GPU and Multi-GPU DDP +Usage - Single-GPU training: $ python classify/train.py --model yolov5s-cls.pt --data imagenette160 --epochs 5 --img 128 + +Usage - Multi-GPU DDP training: $ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3 + +Datasets: --data mnist, fashion-mnist, cifar10, cifar100, imagenette, imagewoof, imagenet, or 'path/to/data' +YOLOv5-cls models: --model yolov5n-cls.pt, yolov5s-cls.pt, yolov5m-cls.pt, yolov5l-cls.pt, yolov5x-cls.pt +Torchvision models: --model resnet50, efficientnet_b0, etc. 
See https://pytorch.org/vision/stable/models.html """ import argparse diff --git a/classify/val.py b/classify/val.py index 2353737957d3..bf808bc21a84 100644 --- a/classify/val.py +++ b/classify/val.py @@ -1,10 +1,22 @@ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ -Validate a classification model on a dataset +Validate a trained YOLOv5 classification model on a classification dataset Usage: $ bash data/scripts/get_imagenet.sh --val # download ImageNet val split (6.3G, 50000 images) - $ python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224 # validate + $ python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224 # validate ImageNet + +Usage - formats: + $ python classify/val.py --weights yolov5s-cls.pt # PyTorch + yolov5s-cls.torchscript # TorchScript + yolov5s-cls.onnx # ONNX Runtime or OpenCV DNN with --dnn + yolov5s-cls.xml # OpenVINO + yolov5s-cls.engine # TensorRT + yolov5s-cls.mlmodel # CoreML (macOS-only) + yolov5s-cls_saved_model # TensorFlow SavedModel + yolov5s-cls.pb # TensorFlow GraphDef + yolov5s-cls.tflite # TensorFlow Lite + yolov5s-cls_edgetpu.tflite # TensorFlow Edge TPU """ import argparse diff --git a/detect.py b/detect.py index 93ae0baccd13..541ad90e051d 100644 --- a/detect.py +++ b/detect.py @@ -1,27 +1,27 @@ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ -Run inference on images, videos, directories, streams, etc. +Run YOLOv5 detection inference on images, videos, directories, streams, etc. Usage - sources: - $ python path/to/detect.py --weights yolov5s.pt --source 0 # webcam - img.jpg # image - vid.mp4 # video - path/ # directory - 'path/*.jpg' # glob - 'https://youtu.be/Zgi9g1ksQHc' # YouTube - 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream + $ python detect.py --weights yolov5s.pt --source 0 # webcam + img.jpg # image + vid.mp4 # video + path/ # directory + 'path/*.jpg' # glob + 'https://youtu.be/Zgi9g1ksQHc' # YouTube + 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream Usage - formats: - $ python path/to/detect.py --weights yolov5s.pt # PyTorch - yolov5s.torchscript # TorchScript - yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn - yolov5s.xml # OpenVINO - yolov5s.engine # TensorRT - yolov5s.mlmodel # CoreML (macOS-only) - yolov5s_saved_model # TensorFlow SavedModel - yolov5s.pb # TensorFlow GraphDef - yolov5s.tflite # TensorFlow Lite - yolov5s_edgetpu.tflite # TensorFlow Edge TPU + $ python detect.py --weights yolov5s.pt # PyTorch + yolov5s.torchscript # TorchScript + yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn + yolov5s.xml # OpenVINO + yolov5s.engine # TensorRT + yolov5s.mlmodel # CoreML (macOS-only) + yolov5s_saved_model # TensorFlow SavedModel + yolov5s.pb # TensorFlow GraphDef + yolov5s.tflite # TensorFlow Lite + yolov5s_edgetpu.tflite # TensorFlow Edge TPU """ import argparse diff --git a/export.py b/export.py index 166b5f406a20..7a746156b96d 100644 --- a/export.py +++ b/export.py @@ -21,19 +21,19 @@ $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow # GPU Usage: - $ python path/to/export.py --weights yolov5s.pt --include torchscript onnx openvino engine coreml tflite ... + $ python export.py --weights yolov5s.pt --include torchscript onnx openvino engine coreml tflite ... 
Inference: - $ python path/to/detect.py --weights yolov5s.pt # PyTorch - yolov5s.torchscript # TorchScript - yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn - yolov5s.xml # OpenVINO - yolov5s.engine # TensorRT - yolov5s.mlmodel # CoreML (macOS-only) - yolov5s_saved_model # TensorFlow SavedModel - yolov5s.pb # TensorFlow GraphDef - yolov5s.tflite # TensorFlow Lite - yolov5s_edgetpu.tflite # TensorFlow Edge TPU + $ python detect.py --weights yolov5s.pt # PyTorch + yolov5s.torchscript # TorchScript + yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn + yolov5s.xml # OpenVINO + yolov5s.engine # TensorRT + yolov5s.mlmodel # CoreML (macOS-only) + yolov5s_saved_model # TensorFlow SavedModel + yolov5s.pb # TensorFlow GraphDef + yolov5s.tflite # TensorFlow Lite + yolov5s_edgetpu.tflite # TensorFlow Edge TPU TensorFlow.js: $ cd .. && git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example diff --git a/hubconf.py b/hubconf.py index 0a7f917bd7d1..33fc87930582 100644 --- a/hubconf.py +++ b/hubconf.py @@ -1,11 +1,11 @@ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ -PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5/ +PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5 Usage: import torch model = torch.hub.load('ultralytics/yolov5', 'yolov5s') - model = torch.hub.load('ultralytics/yolov5:master', 'custom', 'path/to/yolov5s.onnx') # file from branch + model = torch.hub.load('ultralytics/yolov5:master', 'custom', 'path/to/yolov5s.onnx') # custom model from branch """ import torch diff --git a/models/tf.py b/models/tf.py index b0d98cc2a3a9..ecb0d4d79c78 100644 --- a/models/tf.py +++ b/models/tf.py @@ -7,7 +7,7 @@ $ python models/tf.py --weights yolov5s.pt Export: - $ python path/to/export.py --weights yolov5s.pt --include saved_model pb tflite tfjs + $ python export.py --weights yolov5s.pt --include saved_model pb tflite tfjs """ import argparse diff --git a/models/yolo.py b/models/yolo.py index 32a47e9591da..e154b72685b4 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -3,7 +3,7 @@ YOLO-specific modules Usage: - $ python path/to/models/yolo.py --cfg yolov5s.yaml + $ python models/yolo.py --cfg yolov5s.yaml """ import argparse diff --git a/train.py b/train.py index e4c9b6ae6749..0cd4a7f065a6 100644 --- a/train.py +++ b/train.py @@ -1,15 +1,18 @@ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ Train a YOLOv5 model on a custom dataset. - Models and datasets download automatically from the latest YOLOv5 release. 
-Models: https://github.com/ultralytics/yolov5/tree/master/models -Datasets: https://github.com/ultralytics/yolov5/tree/master/data -Tutorial: https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data -Usage: - $ python path/to/train.py --data coco128.yaml --weights yolov5s.pt --img 640 # from pretrained (RECOMMENDED) - $ python path/to/train.py --data coco128.yaml --weights '' --cfg yolov5s.yaml --img 640 # from scratch +Usage - Single-GPU training: + $ python train.py --data coco128.yaml --weights yolov5s.pt --img 640 # from pretrained (recommended) + $ python train.py --data coco128.yaml --weights '' --cfg yolov5s.yaml --img 640 # from scratch + +Usage - Multi-GPU DDP training: + $ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 train.py --data coco128.yaml --weights yolov5s.pt --img 640 --device 0,1,2,3 + +Models: https://github.com/ultralytics/yolov5/tree/master/models +Datasets: https://github.com/ultralytics/yolov5/tree/master/data +Tutorial: https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data """ import argparse diff --git a/val.py b/val.py index f9557bba651d..58b9c9e1bec0 100644 --- a/val.py +++ b/val.py @@ -1,21 +1,21 @@ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ -Validate a trained YOLOv5 model accuracy on a custom dataset +Validate a trained YOLOv5 detection model on a detection dataset Usage: - $ python path/to/val.py --weights yolov5s.pt --data coco128.yaml --img 640 + $ python val.py --weights yolov5s.pt --data coco128.yaml --img 640 Usage - formats: - $ python path/to/val.py --weights yolov5s.pt # PyTorch - yolov5s.torchscript # TorchScript - yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn - yolov5s.xml # OpenVINO - yolov5s.engine # TensorRT - yolov5s.mlmodel # CoreML (macOS-only) - yolov5s_saved_model # TensorFlow SavedModel - yolov5s.pb # TensorFlow GraphDef - yolov5s.tflite # TensorFlow Lite - yolov5s_edgetpu.tflite # TensorFlow Edge TPU + $ python val.py --weights yolov5s.pt # PyTorch + yolov5s.torchscript # TorchScript + yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn + yolov5s.xml # OpenVINO + yolov5s.engine # TensorRT + yolov5s.mlmodel # CoreML (macOS-only) + yolov5s_saved_model # TensorFlow SavedModel + yolov5s.pb # TensorFlow GraphDef + yolov5s.tflite # TensorFlow Lite + yolov5s_edgetpu.tflite # TensorFlow Edge TPU """ import argparse From eab35f66f9104992a448fbd726c6c2dfdfdf240f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 22 Aug 2022 22:18:01 +0200 Subject: [PATCH 1357/1976] Install `torch` latest stable (#9092) Install torch 1.12.1 stable GPU assignment issues in 1.13 nightly that comes with image Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index cf2c1c5cb3cb..4b9367cc27db 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -15,7 +15,7 @@ RUN apt update && apt install --no-install-recommends -y zip htop screen libgl1- # Install pip packages COPY requirements.txt . 
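# Editorial note: the uninstall below now also removes the torch/torchvision nightlies
# preinstalled in the base image (the source of the 1.13-nightly GPU assignment issue
# cited in the commit message above) before requirements.txt reinstalls stable cu113 wheels.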
RUN python -m pip install --upgrade pip wheel -RUN pip uninstall -y Pillow torchtext # torch torchvision +RUN pip uninstall -y Pillow torchtext torch torchvision RUN pip install --no-cache -r requirements.txt albumentations wandb gsutil notebook Pillow>=9.1.0 \ 'opencv-python<4.6.0.66' \ --extra-index-url https://download.pytorch.org/whl/cu113 From d0fa0042bd7775b2dd191d66548f5d8b677bb756 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 23 Aug 2022 13:06:33 +0200 Subject: [PATCH 1358/1976] New `@try_export` decorator (#9096) * New export decorator * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * New export decorator * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Cleanup * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * rename fcn to func * rename to @try_export Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- export.py | 633 +++++++++++++++++++++++------------------------ utils/general.py | 15 +- 2 files changed, 317 insertions(+), 331 deletions(-) diff --git a/export.py b/export.py index 7a746156b96d..1bb7ded8ab85 100644 --- a/export.py +++ b/export.py @@ -67,8 +67,8 @@ from models.experimental import attempt_load from models.yolo import Detect from utils.dataloaders import LoadImages -from utils.general import (LOGGER, check_dataset, check_img_size, check_requirements, check_version, check_yaml, - colorstr, file_size, print_args, url2file) +from utils.general import (LOGGER, Profile, check_dataset, check_img_size, check_requirements, check_version, + check_yaml, colorstr, file_size, get_default_args, print_args, url2file) from utils.torch_utils import select_device, smart_inference_mode @@ -89,200 +89,199 @@ def export_formats(): return pd.DataFrame(x, columns=['Format', 'Argument', 'Suffix', 'CPU', 'GPU']) +def try_export(inner_func): + # YOLOv5 export decorator, i.e. @try_export + inner_args = get_default_args(inner_func) + + def outer_func(*args, **kwargs): + prefix = inner_args['prefix'] + try: + with Profile() as dt: + f, model = inner_func(*args, **kwargs) + LOGGER.info(f'{prefix} export success ✅ {dt.t:.1f}s, saved as {f} ({file_size(f):.1f} MB)') + return f, model + except Exception as e: + LOGGER.info(f'{prefix} export failure ❌ {dt.t:.1f}s: {e}') + return None, None + + return outer_func + + +@try_export def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:')): # YOLOv5 TorchScript model export - try: - LOGGER.info(f'\n{prefix} starting export with torch {torch.__version__}...') - f = file.with_suffix('.torchscript') - - ts = torch.jit.trace(model, im, strict=False) - d = {"shape": im.shape, "stride": int(max(model.stride)), "names": model.names} - extra_files = {'config.txt': json.dumps(d)} # torch._C.ExtraFilesMap() - if optimize: # https://pytorch.org/tutorials/recipes/mobile_interpreter.html - optimize_for_mobile(ts)._save_for_lite_interpreter(str(f), _extra_files=extra_files) - else: - ts.save(str(f), _extra_files=extra_files) + LOGGER.info(f'\n{prefix} starting export with torch {torch.__version__}...') + f = file.with_suffix('.torchscript') - LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - return f - except Exception as e: - LOGGER.info(f'{prefix} export failure: {e}') + ts = torch.jit.trace(model, im, strict=False) + d = {"shape": im.shape, "stride": int(max(model.stride)), "names": model.names} +
extra_files = {'config.txt': json.dumps(d)} # torch._C.ExtraFilesMap() + if optimize: # https://pytorch.org/tutorials/recipes/mobile_interpreter.html + optimize_for_mobile(ts)._save_for_lite_interpreter(str(f), _extra_files=extra_files) + else: + ts.save(str(f), _extra_files=extra_files) + return f, None +@try_export def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorstr('ONNX:')): # YOLOv5 ONNX export - try: - check_requirements(('onnx',)) - import onnx - - LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...') - f = file.with_suffix('.onnx') - - torch.onnx.export( - model.cpu() if dynamic else model, # --dynamic only compatible with cpu - im.cpu() if dynamic else im, - f, - verbose=False, - opset_version=opset, - training=torch.onnx.TrainingMode.TRAINING if train else torch.onnx.TrainingMode.EVAL, - do_constant_folding=not train, - input_names=['images'], - output_names=['output'], - dynamic_axes={ - 'images': { - 0: 'batch', - 2: 'height', - 3: 'width'}, # shape(1,3,640,640) - 'output': { - 0: 'batch', - 1: 'anchors'} # shape(1,25200,85) - } if dynamic else None) - - # Checks - model_onnx = onnx.load(f) # load onnx model - onnx.checker.check_model(model_onnx) # check onnx model - - # Metadata - d = {'stride': int(max(model.stride)), 'names': model.names} - for k, v in d.items(): - meta = model_onnx.metadata_props.add() - meta.key, meta.value = k, str(v) - onnx.save(model_onnx, f) - - # Simplify - if simplify: - try: - cuda = torch.cuda.is_available() - check_requirements(('onnxruntime-gpu' if cuda else 'onnxruntime', 'onnx-simplifier>=0.4.1')) - import onnxsim - - LOGGER.info(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...') - model_onnx, check = onnxsim.simplify(model_onnx) - assert check, 'assert check failed' - onnx.save(model_onnx, f) - except Exception as e: - LOGGER.info(f'{prefix} simplifier failure: {e}') - LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - return f - except Exception as e: - LOGGER.info(f'{prefix} export failure: {e}') + check_requirements(('onnx',)) + import onnx + + LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...') + f = file.with_suffix('.onnx') + + torch.onnx.export( + model.cpu() if dynamic else model, # --dynamic only compatible with cpu + im.cpu() if dynamic else im, + f, + verbose=False, + opset_version=opset, + training=torch.onnx.TrainingMode.TRAINING if train else torch.onnx.TrainingMode.EVAL, + do_constant_folding=not train, + input_names=['images'], + output_names=['output'], + dynamic_axes={ + 'images': { + 0: 'batch', + 2: 'height', + 3: 'width'}, # shape(1,3,640,640) + 'output': { + 0: 'batch', + 1: 'anchors'} # shape(1,25200,85) + } if dynamic else None) + + # Checks + model_onnx = onnx.load(f) # load onnx model + onnx.checker.check_model(model_onnx) # check onnx model + + # Metadata + d = {'stride': int(max(model.stride)), 'names': model.names} + for k, v in d.items(): + meta = model_onnx.metadata_props.add() + meta.key, meta.value = k, str(v) + onnx.save(model_onnx, f) + + # Simplify + if simplify: + try: + cuda = torch.cuda.is_available() + check_requirements(('onnxruntime-gpu' if cuda else 'onnxruntime', 'onnx-simplifier>=0.4.1')) + import onnxsim + + LOGGER.info(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...') + model_onnx, check = onnxsim.simplify(model_onnx) + assert check, 'assert check failed' + onnx.save(model_onnx, f) + except Exception as e: + LOGGER.info(f'{prefix} simplifier failure: {e}') 
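+            # Editorial note: a simplifier failure is non-fatal; the unsimplified model
+            # saved above is still returned as the (f, model_onnx) tuple that @try_export unpacks.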
+ return f, model_onnx +@try_export def export_openvino(model, file, half, prefix=colorstr('OpenVINO:')): # YOLOv5 OpenVINO export - try: - check_requirements(('openvino-dev',)) # requires openvino-dev: https://pypi.org/project/openvino-dev/ - import openvino.inference_engine as ie - - LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...') - f = str(file).replace('.pt', f'_openvino_model{os.sep}') + check_requirements(('openvino-dev',)) # requires openvino-dev: https://pypi.org/project/openvino-dev/ + import openvino.inference_engine as ie - cmd = f"mo --input_model {file.with_suffix('.onnx')} --output_dir {f} --data_type {'FP16' if half else 'FP32'}" - subprocess.check_output(cmd.split()) # export - with open(Path(f) / file.with_suffix('.yaml').name, 'w') as g: - yaml.dump({'stride': int(max(model.stride)), 'names': model.names}, g) # add metadata.yaml + LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...') + f = str(file).replace('.pt', f'_openvino_model{os.sep}') - LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - return f - except Exception as e: - LOGGER.info(f'\n{prefix} export failure: {e}') + cmd = f"mo --input_model {file.with_suffix('.onnx')} --output_dir {f} --data_type {'FP16' if half else 'FP32'}" + subprocess.check_output(cmd.split()) # export + with open(Path(f) / file.with_suffix('.yaml').name, 'w') as g: + yaml.dump({'stride': int(max(model.stride)), 'names': model.names}, g) # add metadata.yaml + return f, None +@try_export def export_coreml(model, im, file, int8, half, prefix=colorstr('CoreML:')): # YOLOv5 CoreML export - try: - check_requirements(('coremltools',)) - import coremltools as ct - - LOGGER.info(f'\n{prefix} starting export with coremltools {ct.__version__}...') - f = file.with_suffix('.mlmodel') - - ts = torch.jit.trace(model, im, strict=False) # TorchScript model - ct_model = ct.convert(ts, inputs=[ct.ImageType('image', shape=im.shape, scale=1 / 255, bias=[0, 0, 0])]) - bits, mode = (8, 'kmeans_lut') if int8 else (16, 'linear') if half else (32, None) - if bits < 32: - if platform.system() == 'Darwin': # quantization only supported on macOS - with warnings.catch_warnings(): - warnings.filterwarnings("ignore", category=DeprecationWarning) # suppress numpy==1.20 float warning - ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode) - else: - print(f'{prefix} quantization only supported on macOS, skipping...') - ct_model.save(f) - - LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - return ct_model, f - except Exception as e: - LOGGER.info(f'\n{prefix} export failure: {e}') - return None, None - - -def export_engine(model, im, file, train, half, dynamic, simplify, workspace=4, verbose=False): + check_requirements(('coremltools',)) + import coremltools as ct + + LOGGER.info(f'\n{prefix} starting export with coremltools {ct.__version__}...') + f = file.with_suffix('.mlmodel') + + ts = torch.jit.trace(model, im, strict=False) # TorchScript model + ct_model = ct.convert(ts, inputs=[ct.ImageType('image', shape=im.shape, scale=1 / 255, bias=[0, 0, 0])]) + bits, mode = (8, 'kmeans_lut') if int8 else (16, 'linear') if half else (32, None) + if bits < 32: + if platform.system() == 'Darwin': # quantization only supported on macOS + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", category=DeprecationWarning) # suppress numpy==1.20 float warning + ct_model = 
ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode) + else: + print(f'{prefix} quantization only supported on macOS, skipping...') + ct_model.save(f) + return f, ct_model + + +@try_export +def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose=False, prefix=colorstr('TensorRT:')): # YOLOv5 TensorRT export https://developer.nvidia.com/tensorrt - prefix = colorstr('TensorRT:') + assert im.device.type != 'cpu', 'export running on CPU but must be on GPU, i.e. `python export.py --device 0`' try: - assert im.device.type != 'cpu', 'export running on CPU but must be on GPU, i.e. `python export.py --device 0`' - try: - import tensorrt as trt - except Exception: - if platform.system() == 'Linux': - check_requirements(('nvidia-tensorrt',), cmds=('-U --index-url https://pypi.ngc.nvidia.com',)) - import tensorrt as trt - - if trt.__version__[0] == '7': # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012 - grid = model.model[-1].anchor_grid - model.model[-1].anchor_grid = [a[..., :1, :1, :] for a in grid] - export_onnx(model, im, file, 12, train, dynamic, simplify) # opset 12 - model.model[-1].anchor_grid = grid - else: # TensorRT >= 8 - check_version(trt.__version__, '8.0.0', hard=True) # require tensorrt>=8.0.0 - export_onnx(model, im, file, 13, train, dynamic, simplify) # opset 13 - onnx = file.with_suffix('.onnx') - - LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...') - assert onnx.exists(), f'failed to export ONNX file: {onnx}' - f = file.with_suffix('.engine') # TensorRT engine file - logger = trt.Logger(trt.Logger.INFO) - if verbose: - logger.min_severity = trt.Logger.Severity.VERBOSE - - builder = trt.Builder(logger) - config = builder.create_builder_config() - config.max_workspace_size = workspace * 1 << 30 - # config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace << 30) # fix TRT 8.4 deprecation notice - - flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)) - network = builder.create_network(flag) - parser = trt.OnnxParser(network, logger) - if not parser.parse_from_file(str(onnx)): - raise RuntimeError(f'failed to load ONNX file: {onnx}') - - inputs = [network.get_input(i) for i in range(network.num_inputs)] - outputs = [network.get_output(i) for i in range(network.num_outputs)] - LOGGER.info(f'{prefix} Network Description:') + import tensorrt as trt + except Exception: + if platform.system() == 'Linux': + check_requirements(('nvidia-tensorrt',), cmds=('-U --index-url https://pypi.ngc.nvidia.com',)) + import tensorrt as trt + + if trt.__version__[0] == '7': # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012 + grid = model.model[-1].anchor_grid + model.model[-1].anchor_grid = [a[..., :1, :1, :] for a in grid] + export_onnx(model, im, file, 12, False, dynamic, simplify) # opset 12 + model.model[-1].anchor_grid = grid + else: # TensorRT >= 8 + check_version(trt.__version__, '8.0.0', hard=True) # require tensorrt>=8.0.0 + export_onnx(model, im, file, 13, False, dynamic, simplify) # opset 13 + onnx = file.with_suffix('.onnx') + + LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...') + assert onnx.exists(), f'failed to export ONNX file: {onnx}' + f = file.with_suffix('.engine') # TensorRT engine file + logger = trt.Logger(trt.Logger.INFO) + if verbose: + logger.min_severity = trt.Logger.Severity.VERBOSE + + builder = trt.Builder(logger) + config = builder.create_builder_config() + config.max_workspace_size = workspace * 1 
<< 30 + # config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace << 30) # fix TRT 8.4 deprecation notice + + flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)) + network = builder.create_network(flag) + parser = trt.OnnxParser(network, logger) + if not parser.parse_from_file(str(onnx)): + raise RuntimeError(f'failed to load ONNX file: {onnx}') + + inputs = [network.get_input(i) for i in range(network.num_inputs)] + outputs = [network.get_output(i) for i in range(network.num_outputs)] + LOGGER.info(f'{prefix} Network Description:') + for inp in inputs: + LOGGER.info(f'{prefix}\tinput "{inp.name}" with shape {inp.shape} and dtype {inp.dtype}') + for out in outputs: + LOGGER.info(f'{prefix}\toutput "{out.name}" with shape {out.shape} and dtype {out.dtype}') + + if dynamic: + if im.shape[0] <= 1: + LOGGER.warning(f"{prefix}WARNING: --dynamic model requires maximum --batch-size argument") + profile = builder.create_optimization_profile() for inp in inputs: - LOGGER.info(f'{prefix}\tinput "{inp.name}" with shape {inp.shape} and dtype {inp.dtype}') - for out in outputs: - LOGGER.info(f'{prefix}\toutput "{out.name}" with shape {out.shape} and dtype {out.dtype}') - - if dynamic: - if im.shape[0] <= 1: - LOGGER.warning(f"{prefix}WARNING: --dynamic model requires maximum --batch-size argument") - profile = builder.create_optimization_profile() - for inp in inputs: - profile.set_shape(inp.name, (1, *im.shape[1:]), (max(1, im.shape[0] // 2), *im.shape[1:]), im.shape) - config.add_optimization_profile(profile) - - LOGGER.info(f'{prefix} building FP{16 if builder.platform_has_fast_fp16 and half else 32} engine in {f}') - if builder.platform_has_fast_fp16 and half: - config.set_flag(trt.BuilderFlag.FP16) - with builder.build_engine(network, config) as engine, open(f, 'wb') as t: - t.write(engine.serialize()) - LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - return f - except Exception as e: - LOGGER.info(f'\n{prefix} export failure: {e}') + profile.set_shape(inp.name, (1, *im.shape[1:]), (max(1, im.shape[0] // 2), *im.shape[1:]), im.shape) + config.add_optimization_profile(profile) + + LOGGER.info(f'{prefix} building FP{16 if builder.platform_has_fast_fp16 and half else 32} engine in {f}') + if builder.platform_has_fast_fp16 and half: + config.set_flag(trt.BuilderFlag.FP16) + with builder.build_engine(network, config) as engine, open(f, 'wb') as t: + t.write(engine.serialize()) + return f, None +@try_export def export_saved_model(model, im, file, @@ -296,162 +295,142 @@ def export_saved_model(model, keras=False, prefix=colorstr('TensorFlow SavedModel:')): # YOLOv5 TensorFlow SavedModel export - try: - import tensorflow as tf - from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 - - from models.tf import TFDetect, TFModel - - LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') - f = str(file).replace('.pt', '_saved_model') - batch_size, ch, *imgsz = list(im.shape) # BCHW - - tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz) - im = tf.zeros((batch_size, *imgsz, ch)) # BHWC order for TensorFlow - _ = tf_model.predict(im, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres) - inputs = tf.keras.Input(shape=(*imgsz, ch), batch_size=None if dynamic else batch_size) - outputs = tf_model.predict(inputs, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres) - keras_model = tf.keras.Model(inputs=inputs, outputs=outputs) - 
keras_model.trainable = False - keras_model.summary() - if keras: - keras_model.save(f, save_format='tf') - else: - spec = tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype) - m = tf.function(lambda x: keras_model(x)) # full model - m = m.get_concrete_function(spec) - frozen_func = convert_variables_to_constants_v2(m) - tfm = tf.Module() - tfm.__call__ = tf.function(lambda x: frozen_func(x)[:4] if tf_nms else frozen_func(x)[0], [spec]) - tfm.__call__(im) - tf.saved_model.save(tfm, - f, - options=tf.saved_model.SaveOptions(experimental_custom_gradients=False) - if check_version(tf.__version__, '2.6') else tf.saved_model.SaveOptions()) - LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - return keras_model, f - except Exception as e: - LOGGER.info(f'\n{prefix} export failure: {e}') - return None, None + import tensorflow as tf + from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 + + from models.tf import TFModel + + LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') + f = str(file).replace('.pt', '_saved_model') + batch_size, ch, *imgsz = list(im.shape) # BCHW + + tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz) + im = tf.zeros((batch_size, *imgsz, ch)) # BHWC order for TensorFlow + _ = tf_model.predict(im, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres) + inputs = tf.keras.Input(shape=(*imgsz, ch), batch_size=None if dynamic else batch_size) + outputs = tf_model.predict(inputs, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres) + keras_model = tf.keras.Model(inputs=inputs, outputs=outputs) + keras_model.trainable = False + keras_model.summary() + if keras: + keras_model.save(f, save_format='tf') + else: + spec = tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype) + m = tf.function(lambda x: keras_model(x)) # full model + m = m.get_concrete_function(spec) + frozen_func = convert_variables_to_constants_v2(m) + tfm = tf.Module() + tfm.__call__ = tf.function(lambda x: frozen_func(x)[:4] if tf_nms else frozen_func(x)[0], [spec]) + tfm.__call__(im) + tf.saved_model.save(tfm, + f, + options=tf.saved_model.SaveOptions(experimental_custom_gradients=False) if check_version( + tf.__version__, '2.6') else tf.saved_model.SaveOptions()) + return f, keras_model +@try_export def export_pb(keras_model, file, prefix=colorstr('TensorFlow GraphDef:')): # YOLOv5 TensorFlow GraphDef *.pb export https://github.com/leimao/Frozen_Graph_TensorFlow - try: - import tensorflow as tf - from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 + import tensorflow as tf + from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 - LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') - f = file.with_suffix('.pb') + LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') + f = file.with_suffix('.pb') - m = tf.function(lambda x: keras_model(x)) # full model - m = m.get_concrete_function(tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype)) - frozen_func = convert_variables_to_constants_v2(m) - frozen_func.graph.as_graph_def() - tf.io.write_graph(graph_or_graph_def=frozen_func.graph, logdir=str(f.parent), name=f.name, as_text=False) - - LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - return f - except Exception as e: - LOGGER.info(f'\n{prefix} 
export failure: {e}') + m = tf.function(lambda x: keras_model(x)) # full model + m = m.get_concrete_function(tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype)) + frozen_func = convert_variables_to_constants_v2(m) + frozen_func.graph.as_graph_def() + tf.io.write_graph(graph_or_graph_def=frozen_func.graph, logdir=str(f.parent), name=f.name, as_text=False) + return f, None +@try_export def export_tflite(keras_model, im, file, int8, data, nms, agnostic_nms, prefix=colorstr('TensorFlow Lite:')): # YOLOv5 TensorFlow Lite export - try: - import tensorflow as tf - - LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') - batch_size, ch, *imgsz = list(im.shape) # BCHW - f = str(file).replace('.pt', '-fp16.tflite') - - converter = tf.lite.TFLiteConverter.from_keras_model(keras_model) - converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS] - converter.target_spec.supported_types = [tf.float16] - converter.optimizations = [tf.lite.Optimize.DEFAULT] - if int8: - from models.tf import representative_dataset_gen - dataset = LoadImages(check_dataset(check_yaml(data))['train'], img_size=imgsz, auto=False) - converter.representative_dataset = lambda: representative_dataset_gen(dataset, ncalib=100) - converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] - converter.target_spec.supported_types = [] - converter.inference_input_type = tf.uint8 # or tf.int8 - converter.inference_output_type = tf.uint8 # or tf.int8 - converter.experimental_new_quantizer = True - f = str(file).replace('.pt', '-int8.tflite') - if nms or agnostic_nms: - converter.target_spec.supported_ops.append(tf.lite.OpsSet.SELECT_TF_OPS) - - tflite_model = converter.convert() - open(f, "wb").write(tflite_model) - LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - return f - except Exception as e: - LOGGER.info(f'\n{prefix} export failure: {e}') - - + import tensorflow as tf + + LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') + batch_size, ch, *imgsz = list(im.shape) # BCHW + f = str(file).replace('.pt', '-fp16.tflite') + + converter = tf.lite.TFLiteConverter.from_keras_model(keras_model) + converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS] + converter.target_spec.supported_types = [tf.float16] + converter.optimizations = [tf.lite.Optimize.DEFAULT] + if int8: + from models.tf import representative_dataset_gen + dataset = LoadImages(check_dataset(check_yaml(data))['train'], img_size=imgsz, auto=False) + converter.representative_dataset = lambda: representative_dataset_gen(dataset, ncalib=100) + converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] + converter.target_spec.supported_types = [] + converter.inference_input_type = tf.uint8 # or tf.int8 + converter.inference_output_type = tf.uint8 # or tf.int8 + converter.experimental_new_quantizer = True + f = str(file).replace('.pt', '-int8.tflite') + if nms or agnostic_nms: + converter.target_spec.supported_ops.append(tf.lite.OpsSet.SELECT_TF_OPS) + + tflite_model = converter.convert() + open(f, "wb").write(tflite_model) + return f, None + + +@try_export def export_edgetpu(file, prefix=colorstr('Edge TPU:')): # YOLOv5 Edge TPU export https://coral.ai/docs/edgetpu/models-intro/ - try: - cmd = 'edgetpu_compiler --version' - help_url = 'https://coral.ai/docs/edgetpu/compiler/' - assert platform.system() == 'Linux', f'export only supported on Linux. 
See {help_url}' - if subprocess.run(f'{cmd} >/dev/null', shell=True).returncode != 0: - LOGGER.info(f'\n{prefix} export requires Edge TPU compiler. Attempting install from {help_url}') - sudo = subprocess.run('sudo --version >/dev/null', shell=True).returncode == 0 # sudo installed on system - for c in ( - 'curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -', - 'echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list', - 'sudo apt-get update', 'sudo apt-get install edgetpu-compiler'): - subprocess.run(c if sudo else c.replace('sudo ', ''), shell=True, check=True) - ver = subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1] - - LOGGER.info(f'\n{prefix} starting export with Edge TPU compiler {ver}...') - f = str(file).replace('.pt', '-int8_edgetpu.tflite') # Edge TPU model - f_tfl = str(file).replace('.pt', '-int8.tflite') # TFLite model - - cmd = f"edgetpu_compiler -s -d -k 10 --out_dir {file.parent} {f_tfl}" - subprocess.run(cmd.split(), check=True) - - LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - return f - except Exception as e: - LOGGER.info(f'\n{prefix} export failure: {e}') - - + cmd = 'edgetpu_compiler --version' + help_url = 'https://coral.ai/docs/edgetpu/compiler/' + assert platform.system() == 'Linux', f'export only supported on Linux. See {help_url}' + if subprocess.run(f'{cmd} >/dev/null', shell=True).returncode != 0: + LOGGER.info(f'\n{prefix} export requires Edge TPU compiler. Attempting install from {help_url}') + sudo = subprocess.run('sudo --version >/dev/null', shell=True).returncode == 0 # sudo installed on system + for c in ( + 'curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -', + 'echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list', + 'sudo apt-get update', 'sudo apt-get install edgetpu-compiler'): + subprocess.run(c if sudo else c.replace('sudo ', ''), shell=True, check=True) + ver = subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1] + + LOGGER.info(f'\n{prefix} starting export with Edge TPU compiler {ver}...') + f = str(file).replace('.pt', '-int8_edgetpu.tflite') # Edge TPU model + f_tfl = str(file).replace('.pt', '-int8.tflite') # TFLite model + + cmd = f"edgetpu_compiler -s -d -k 10 --out_dir {file.parent} {f_tfl}" + subprocess.run(cmd.split(), check=True) + return f, None + + +@try_export def export_tfjs(file, prefix=colorstr('TensorFlow.js:')): # YOLOv5 TensorFlow.js export - try: - check_requirements(('tensorflowjs',)) - import re - - import tensorflowjs as tfjs - - LOGGER.info(f'\n{prefix} starting export with tensorflowjs {tfjs.__version__}...') - f = str(file).replace('.pt', '_web_model') # js dir - f_pb = file.with_suffix('.pb') # *.pb path - f_json = f'{f}/model.json' # *.json path - - cmd = f'tensorflowjs_converter --input_format=tf_frozen_model ' \ - f'--output_node_names=Identity,Identity_1,Identity_2,Identity_3 {f_pb} {f}' - subprocess.run(cmd.split()) - - json = Path(f_json).read_text() - with open(f_json, 'w') as j: # sort JSON Identity_* in ascending order - subst = re.sub( - r'{"outputs": {"Identity.?.?": {"name": "Identity.?.?"}, ' - r'"Identity.?.?": {"name": "Identity.?.?"}, ' - r'"Identity.?.?": {"name": "Identity.?.?"}, ' - r'"Identity.?.?": {"name": "Identity.?.?"}}}', r'{"outputs": {"Identity": {"name": "Identity"}, ' - 
r'"Identity_1": {"name": "Identity_1"}, ' - r'"Identity_2": {"name": "Identity_2"}, ' - r'"Identity_3": {"name": "Identity_3"}}}', json) - j.write(subst) - - LOGGER.info(f'{prefix} export success, saved as {f} ({file_size(f):.1f} MB)') - return f - except Exception as e: - LOGGER.info(f'\n{prefix} export failure: {e}') + check_requirements(('tensorflowjs',)) + import re + + import tensorflowjs as tfjs + + LOGGER.info(f'\n{prefix} starting export with tensorflowjs {tfjs.__version__}...') + f = str(file).replace('.pt', '_web_model') # js dir + f_pb = file.with_suffix('.pb') # *.pb path + f_json = f'{f}/model.json' # *.json path + + cmd = f'tensorflowjs_converter --input_format=tf_frozen_model ' \ + f'--output_node_names=Identity,Identity_1,Identity_2,Identity_3 {f_pb} {f}' + subprocess.run(cmd.split()) + + json = Path(f_json).read_text() + with open(f_json, 'w') as j: # sort JSON Identity_* in ascending order + subst = re.sub( + r'{"outputs": {"Identity.?.?": {"name": "Identity.?.?"}, ' + r'"Identity.?.?": {"name": "Identity.?.?"}, ' + r'"Identity.?.?": {"name": "Identity.?.?"}, ' + r'"Identity.?.?": {"name": "Identity.?.?"}}}', r'{"outputs": {"Identity": {"name": "Identity"}, ' + r'"Identity_1": {"name": "Identity_1"}, ' + r'"Identity_2": {"name": "Identity_2"}, ' + r'"Identity_3": {"name": "Identity_3"}}}', json) + j.write(subst) + return f, None @smart_inference_mode() @@ -524,22 +503,22 @@ def run( f = [''] * 10 # exported filenames warnings.filterwarnings(action='ignore', category=torch.jit.TracerWarning) # suppress TracerWarning if jit: - f[0] = export_torchscript(model, im, file, optimize) + f[0], _ = export_torchscript(model, im, file, optimize) if engine: # TensorRT required before ONNX - f[1] = export_engine(model, im, file, train, half, dynamic, simplify, workspace, verbose) + f[1], _ = export_engine(model, im, file, half, dynamic, simplify, workspace, verbose) if onnx or xml: # OpenVINO requires ONNX - f[2] = export_onnx(model, im, file, opset, train, dynamic, simplify) + f[2], _ = export_onnx(model, im, file, opset, train, dynamic, simplify) if xml: # OpenVINO - f[3] = export_openvino(model, file, half) + f[3], _ = export_openvino(model, file, half) if coreml: - _, f[4] = export_coreml(model, im, file, int8, half) + f[4], _ = export_coreml(model, im, file, int8, half) # TensorFlow Exports if any((saved_model, pb, tflite, edgetpu, tfjs)): if int8 or edgetpu: # TFLite --int8 bug https://github.com/ultralytics/yolov5/issues/5707 check_requirements(('flatbuffers==1.12',)) # required before `import tensorflow` assert not tflite or not tfjs, 'TFLite and TF.js models must be exported separately, please pass only one type.' 
- model, f[5] = export_saved_model(model.cpu(), + f[5], model = export_saved_model(model.cpu(), im, file, dynamic, @@ -551,19 +530,19 @@ def run( conf_thres=conf_thres, keras=keras) if pb or tfjs: # pb prerequisite to tfjs - f[6] = export_pb(model, file) + f[6], _ = export_pb(model, file) if tflite or edgetpu: - f[7] = export_tflite(model, im, file, int8=int8 or edgetpu, data=data, nms=nms, agnostic_nms=agnostic_nms) + f[7], _ = export_tflite(model, im, file, int8 or edgetpu, data=data, nms=nms, agnostic_nms=agnostic_nms) if edgetpu: - f[8] = export_edgetpu(file) + f[8], _ = export_edgetpu(file) if tfjs: - f[9] = export_tfjs(file) + f[9], _ = export_tfjs(file) # Finish f = [str(x) for x in f if x] # filter out '' and None if any(f): h = '--half' if half else '' # --half FP16 inference arg - LOGGER.info(f'\nExport complete ({time.time() - t:.2f}s)' + LOGGER.info(f'\nExport complete ({time.time() - t:.1f}s)' f"\nResults saved to {colorstr('bold', file.parent.resolve())}" f"\nDetect: python detect.py --weights {f[-1]} {h}" f"\nValidate: python val.py --weights {f[-1]} {h}" diff --git a/utils/general.py b/utils/general.py index 3bc6fbc22d57..d8c90f10ac8f 100755 --- a/utils/general.py +++ b/utils/general.py @@ -148,6 +148,7 @@ def __init__(self, t=0.0): def __enter__(self): self.start = self.time() + return self def __exit__(self, type, value, traceback): self.dt = self.time() - self.start # delta-time @@ -220,10 +221,10 @@ def methods(instance): return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith("__")] -def print_args(args: Optional[dict] = None, show_file=True, show_fcn=False): +def print_args(args: Optional[dict] = None, show_file=True, show_func=False): # Print function arguments (optional args dict) x = inspect.currentframe().f_back # previous frame - file, _, fcn, _, _ = inspect.getframeinfo(x) + file, _, func, _, _ = inspect.getframeinfo(x) if args is None: # get args automatically args, _, _, frm = inspect.getargvalues(x) args = {k: v for k, v in frm.items() if k in args} @@ -231,7 +232,7 @@ def print_args(args: Optional[dict] = None, show_file=True, show_fcn=False): file = Path(file).resolve().relative_to(ROOT).with_suffix('') except ValueError: file = Path(file).stem - s = (f'{file}: ' if show_file else '') + (f'{fcn}: ' if show_fcn else '') + s = (f'{file}: ' if show_file else '') + (f'{func}: ' if show_func else '') LOGGER.info(colorstr(s) + ', '.join(f'{k}={v}' for k, v in args.items())) @@ -255,7 +256,13 @@ def init_seeds(seed=0, deterministic=False): def intersect_dicts(da, db, exclude=()): # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values - return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape} + return {k: v for k, v in da.items() if k in db and all(x not in k for x in exclude) and v.shape == db[k].shape} + + +def get_default_args(func): + # Get func() default arguments + signature = inspect.signature(func) + return {k: v.default for k, v in signature.parameters.items() if v.default is not inspect.Parameter.empty} def get_latest_run(search_dir='.'): From 48e56d3c9bede445d49e8f2af458d70955032e91 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 23 Aug 2022 14:37:46 +0200 Subject: [PATCH 1359/1976] Add optional `transforms` argument to LoadStreams() (#9105) * Add optional `transforms` argument to LoadStreams() Prepare for streaming classification support Signed-off-by: Glenn Jocher * Cleanup Signed-off-by: Glenn Jocher * fix * batch size 
> 1 fix Signed-off-by: Glenn Jocher --- utils/dataloaders.py | 54 ++++++++++++++++++++------------------------ 1 file changed, 25 insertions(+), 29 deletions(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index e73b20a58915..675c2898e7d7 100755 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -251,7 +251,7 @@ def __next__(self): s = f'image {self.count}/{self.nf} {path}: ' if self.transforms: - im = self.transforms(cv2.cvtColor(im0, cv2.COLOR_BGR2RGB)) # classify transforms + im = self.transforms(cv2.cvtColor(im0, cv2.COLOR_BGR2RGB)) # transforms else: im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB @@ -289,22 +289,20 @@ def __next__(self): raise StopIteration # Read frame - ret_val, img0 = self.cap.read() - img0 = cv2.flip(img0, 1) # flip left-right + ret_val, im0 = self.cap.read() + im0 = cv2.flip(im0, 1) # flip left-right # Print assert ret_val, f'Camera Error {self.pipe}' img_path = 'webcam.jpg' s = f'webcam {self.count}: ' - # Padded resize - img = letterbox(img0, self.img_size, stride=self.stride)[0] + # Process + im = letterbox(im0, self.img_size, stride=self.stride)[0] # resize + im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB + im = np.ascontiguousarray(im) # contiguous - # Convert - img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB - img = np.ascontiguousarray(img) - - return img_path, img, img0, None, s + return img_path, im, im0, None, s def __len__(self): return 0 @@ -312,7 +310,7 @@ def __len__(self): class LoadStreams: # YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams` - def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True): + def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True, transforms=None): self.mode = 'stream' self.img_size = img_size self.stride = stride @@ -326,7 +324,6 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True): n = len(sources) self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n self.sources = [clean_str(x) for x in sources] # clean source names for later - self.auto = auto for i, s in enumerate(sources): # index, source # Start thread to read frames from video stream st = f'{i + 1}/{n}: {s}... ' @@ -353,8 +350,10 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True): LOGGER.info('') # newline # check for common shapes - s = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0].shape for x in self.imgs]) + s = np.stack([letterbox(x, img_size, stride=stride, auto=auto)[0].shape for x in self.imgs]) self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal + self.auto = auto and self.rect + self.transforms = transforms # optional if not self.rect: LOGGER.warning('WARNING: Stream shapes differ. 
For optimal performance supply similarly-shaped streams.') @@ -385,18 +384,15 @@ def __next__(self): cv2.destroyAllWindows() raise StopIteration - # Letterbox - img0 = self.imgs.copy() - img = [letterbox(x, self.img_size, stride=self.stride, auto=self.rect and self.auto)[0] for x in img0] - - # Stack - img = np.stack(img, 0) - - # Convert - img = img[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW - img = np.ascontiguousarray(img) + im0 = self.imgs.copy() + if self.transforms: + im = np.stack([self.transforms(cv2.cvtColor(x, cv2.COLOR_BGR2RGB)) for x in im0]) # transforms + else: + im = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0] for x in im0]) # resize + im = im[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW + im = np.ascontiguousarray(im) # contiguous - return self.sources, img, img0, None, '' + return self.sources, im, im0, None, '' def __len__(self): return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years @@ -836,7 +832,7 @@ def collate_fn(batch): @staticmethod def collate_fn4(batch): - img, label, path, shapes = zip(*batch) # transposed + im, label, path, shapes = zip(*batch) # transposed n = len(shapes) // 4 im4, label4, path4, shapes4 = [], [], path[:n], shapes[:n] @@ -846,13 +842,13 @@ def collate_fn4(batch): for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW i *= 4 if random.random() < 0.5: - im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2.0, mode='bilinear', - align_corners=False)[0].type(img[i].type()) + im1 = F.interpolate(im[i].unsqueeze(0).float(), scale_factor=2.0, mode='bilinear', + align_corners=False)[0].type(im[i].type()) lb = label[i] else: - im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2) + im1 = torch.cat((torch.cat((im[i], im[i + 1]), 1), torch.cat((im[i + 2], im[i + 3]), 1)), 2) lb = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s - im4.append(im) + im4.append(im1) label4.append(lb) for i, lb in enumerate(label4): From 51c9f9229731021f55a9ceb9f9504abfc979a54b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 23 Aug 2022 17:54:51 +0200 Subject: [PATCH 1360/1976] Streaming Classification support (#9106) * Streaming Classification support * Streaming Classification support * Streaming Classification support --- classify/predict.py | 168 +++++++++++++++++++++++++++++++---------- detect.py | 2 +- utils/augmentations.py | 1 + 3 files changed, 131 insertions(+), 40 deletions(-) diff --git a/classify/predict.py b/classify/predict.py index 135470fd36ed..b430c0645f21 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -1,12 +1,15 @@ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ -Run YOLOv5 classification inference on images, videos, directories, and globs. +Run YOLOv5 classification inference on images, videos, directories, globs, YouTube, webcam, streams, etc. 
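
[Aside, not part of the patch — the classify/predict.py docstring continues below. A minimal sketch of how the new optional `transforms` hook on LoadStreams is meant to be driven; the RTSP URL is the illustrative one from the docstring above, and a later patch below (9112) tightens the numpy-to-torch conversion of the returned batch:

    from utils.augmentations import classify_transforms
    from utils.dataloaders import LoadStreams

    # with transforms set, __next__() applies the transform per frame and returns
    # the stacked batch as `im`, keeping the raw BGR frames in `im0` for annotation
    dataset = LoadStreams('rtsp://example.com/media.mp4', img_size=224, transforms=classify_transforms(224))
    for sources, im, im0, _, _ in dataset:
        break  # one batch is enough for the sketch
]
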
Usage - sources: - $ python classify/predict.py --weights yolov5s.pt --source img.jpg # image - vid.mp4 # video - path/ # directory - 'path/*.jpg' # glob + $ python classify/predict.py --weights yolov5s-cls.pt --source 0 # webcam + img.jpg # image + vid.mp4 # video + path/ # directory + 'path/*.jpg' # glob + 'https://youtu.be/Zgi9g1ksQHc' # YouTube + 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream Usage - formats: $ python classify/predict.py --weights yolov5s-cls.pt # PyTorch @@ -23,9 +26,11 @@ import argparse import os +import platform import sys from pathlib import Path +import torch.backends.cudnn as cudnn import torch.nn.functional as F FILE = Path(__file__).resolve() @@ -36,45 +41,70 @@ from models.common import DetectMultiBackend from utils.augmentations import classify_transforms -from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages -from utils.general import LOGGER, Profile, check_file, check_requirements, colorstr, increment_path, print_args +from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams +from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, + increment_path, print_args, strip_optimizer) +from utils.plots import Annotator from utils.torch_utils import select_device, smart_inference_mode @smart_inference_mode() def run( weights=ROOT / 'yolov5s-cls.pt', # model.pt path(s) - source=ROOT / 'data/images', # file/dir/URL/glob - imgsz=224, # inference size + source=ROOT / 'data/images', # file/dir/URL/glob, 0 for webcam + data=ROOT / 'data/coco128.yaml', # dataset.yaml path + imgsz=(224, 224), # inference size (height, width) device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu + view_img=False, # show results + save_txt=False, # save results to *.txt + nosave=False, # do not save images/videos + augment=False, # augmented inference + visualize=False, # visualize features + update=False, # update all models + project=ROOT / 'runs/predict-cls', # save results to project/name + name='exp', # save results to project/name + exist_ok=False, # existing project/name ok, do not increment half=False, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference - project=ROOT / 'runs/predict-cls', # save to project/name - name='exp', # save to project/name - exist_ok=False, # existing project/name ok, do not increment ): source = str(source) + save_img = not nosave and not source.endswith('.txt') # save inference images is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) + webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file) if is_url and is_file: source = check_file(source) # download - dt = Profile(), Profile(), Profile() - device = select_device(device) - # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run - save_dir.mkdir(parents=True, exist_ok=True) # make dir + (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model - model = DetectMultiBackend(weights, device=device, dnn=dnn, fp16=half) - model.warmup(imgsz=(1, 3, imgsz, imgsz)) # warmup - dataset = LoadImages(source, img_size=imgsz, transforms=classify_transforms(imgsz)) - for seen, (path, im, im0s, vid_cap, s) in enumerate(dataset): - # Image + device = select_device(device) + model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) + stride, names, pt = 
model.stride, model.names, model.pt + imgsz = check_img_size(imgsz, s=stride) # check image size + + # Dataloader + if webcam: + view_img = check_imshow() + cudnn.benchmark = True # set True to speed up constant image size inference + dataset = LoadStreams(source, img_size=imgsz, transforms=classify_transforms(imgsz[0])) + bs = len(dataset) # batch_size + else: + dataset = LoadImages(source, img_size=imgsz, transforms=classify_transforms(imgsz[0])) + bs = 1 # batch_size + vid_path, vid_writer = [None] * bs, [None] * bs + + # Run inference + model.warmup(imgsz=(1 if pt else bs, 3, *imgsz)) # warmup + seen, windows, dt = 0, [], (Profile(), Profile(), Profile()) + for path, im, im0s, vid_cap, s in dataset: with dt[0]: - im = im.unsqueeze(0).to(device) - im = im.half() if model.fp16 else im.float() + im = im.to(device) + im = im.half() if model.fp16 else im.float() # uint8 to fp16/32 + if len(im.shape) == 3: + im = im[None] # expand for batch dim # Inference with dt[1]: @@ -82,33 +112,93 @@ def run( # Post-process with dt[2]: - p = F.softmax(results, dim=1) # probabilities - i = p.argsort(1, descending=True)[:, :5].squeeze().tolist() # top 5 indices - # if save: - # imshow_cls(im, f=save_dir / Path(path).name, verbose=True) - LOGGER.info( - f"{s}{imgsz}x{imgsz} {', '.join(f'{model.names[j]} {p[0, j]:.2f}' for j in i)}, {dt[1].dt * 1E3:.1f}ms") + pred = F.softmax(results, dim=1) # probabilities + + # Process predictions + for i, prob in enumerate(pred): # per image + seen += 1 + if webcam: # batch_size >= 1 + p, im0 = path[i], im0s[i].copy() + s += f'{i}: ' + else: + p, im0 = path, im0s.copy() + + p = Path(p) # to Path + save_path = str(save_dir / p.name) # im.jpg + s += '%gx%g ' % im.shape[2:] # print string + annotator = Annotator(im0, example=str(names), pil=True) + + # Print results + top5i = prob.argsort(0, descending=True)[:5].tolist() # top 5 indices + s += f"{', '.join(f'{names[j]} {prob[j]:.2f}' for j in top5i)}, " + + # Write results + if save_img or view_img: # Add bbox to image + text = '\n'.join(f'{prob[j]:.2f} {names[j]}' for j in top5i) + annotator.text((64, 64), text, txt_color=(255, 255, 255)) + + # Stream results + im0 = annotator.result() + if view_img: + if platform.system() == 'Linux' and p not in windows: + windows.append(p) + cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux) + cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0]) + cv2.imshow(str(p), im0) + cv2.waitKey(1) # 1 millisecond + + # Save results (image with detections) + if save_img: + if dataset.mode == 'image': + cv2.imwrite(save_path, im0) + else: # 'video' or 'stream' + if vid_path[i] != save_path: # new video + vid_path[i] = save_path + if isinstance(vid_writer[i], cv2.VideoWriter): + vid_writer[i].release() # release previous video writer + if vid_cap: # video + fps = vid_cap.get(cv2.CAP_PROP_FPS) + w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + else: # stream + fps, w, h = 30, im0.shape[1], im0.shape[0] + save_path = str(Path(save_path).with_suffix('.mp4')) # force *.mp4 suffix on results videos + vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) + vid_writer[i].write(im0) + + # Print time (inference-only) + LOGGER.info(f"{s}{dt[1].dt * 1E3:.1f}ms") # Print results - t = tuple(x.t / (seen + 1) * 1E3 for x in dt) # speeds per image - shape = (1, 3, imgsz, imgsz) - LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms post-process per image at shape {shape}' % 
t)
-    LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}")
-    return p
+    t = tuple(x.t / seen * 1E3 for x in dt)  # speeds per image
+    LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms post-process per image at shape {(1, 3, *imgsz)}' % t)
+    if save_txt or save_img:
+        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
+        LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
+    if update:
+        strip_optimizer(weights[0])  # update model (to fix SourceChangeWarning)


 def parse_opt():
     parser = argparse.ArgumentParser()
     parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-cls.pt', help='model path(s)')
-    parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob')
-    parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=224, help='train, val image size (pixels)')
+    parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob, 0 for webcam')
+    parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path')
+    parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[224], help='inference size h,w')
     parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
+    parser.add_argument('--view-img', action='store_true', help='show results')
+    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
+    parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
+    parser.add_argument('--augment', action='store_true', help='augmented inference')
+    parser.add_argument('--visualize', action='store_true', help='visualize features')
+    parser.add_argument('--update', action='store_true', help='update all models')
+    parser.add_argument('--project', default=ROOT / 'runs/predict-cls', help='save results to project/name')
+    parser.add_argument('--name', default='exp', help='save results to project/name')
+    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
     parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
     parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
-    parser.add_argument('--project', default=ROOT / 'runs/predict-cls', help='save to project/name')
-    parser.add_argument('--name', default='exp', help='save to project/name')
-    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
     opt = parser.parse_args()
+    opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1  # expand
     print_args(vars(opt))
     return opt

diff --git a/detect.py b/detect.py
index 541ad90e051d..60a821b59a03 100644
--- a/detect.py
+++ b/detect.py
@@ -1,6 +1,6 @@
 # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
 """
-Run YOLOv5 detection inference on images, videos, directories, streams, etc.
+Run YOLOv5 detection inference on images, videos, directories, globs, YouTube, webcam, streams, etc.
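
[Aside, not part of the patch — the detect.py docstring continues below. A standalone sketch of the one-value `--imgsz` expansion idiom used in parse_opt() above; the values are illustrative:

    imgsz = [224]
    imgsz *= 2 if len(imgsz) == 1 else 1  # a single value is doubled to h,w
    assert imgsz == [224, 224]

    imgsz = [640, 480]
    imgsz *= 2 if len(imgsz) == 1 else 1  # an explicit h,w pair passes through
    assert imgsz == [640, 480]
]
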
Usage - sources: $ python detect.py --weights yolov5s.pt --source 0 # webcam diff --git a/utils/augmentations.py b/utils/augmentations.py index a55fefa68a76..c8499b3fc8ae 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -344,4 +344,5 @@ def classify_albumentations(augment=True, def classify_transforms(size=224): # Transforms to apply if albumentations not installed + assert isinstance(size, int), f'ERROR: classify_transforms size {size} must be integer, not (list, tuple)' return T.Compose([T.ToTensor(), T.Resize(size), T.CenterCrop(size), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)]) From e6f54c5b32340278474e922d456fa3eb7f74599d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 23 Aug 2022 23:54:05 +0200 Subject: [PATCH 1361/1976] Fix numpy to torch cls streaming bug (#9112) * Fix numpy to torch cls streaming bug Resolves https://github.com/ultralytics/yolov5/issues/9111 Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- classify/predict.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/classify/predict.py b/classify/predict.py index b430c0645f21..b33b5bcc9933 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -30,6 +30,7 @@ import sys from pathlib import Path +import torch import torch.backends.cudnn as cudnn import torch.nn.functional as F @@ -101,7 +102,7 @@ def run( seen, windows, dt = 0, [], (Profile(), Profile(), Profile()) for path, im, im0s, vid_cap, s in dataset: with dt[0]: - im = im.to(device) + im = torch.Tensor(im).to(device) im = im.half() if model.fp16 else im.float() # uint8 to fp16/32 if len(im.shape) == 3: im = im[None] # expand for batch dim From f8816f58b7f4bf018ec0fdf546430295e5719205 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Wed, 24 Aug 2022 15:45:37 +0530 Subject: [PATCH 1362/1976] Infer Loggers project name (#9117) * smart project name inference * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update __init__.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- utils/loggers/__init__.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 006125edbcd9..59d4b566836a 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -252,7 +252,7 @@ def __init__(self, opt, console_logger, include=('tb', 'wandb')): self.tb = SummaryWriter(str(self.save_dir)) if wandb and 'wandb' in self.include: - self.wandb = wandb.init(project="YOLOv5-Classifier" if opt.project == "runs/train" else opt.project, + self.wandb = wandb.init(project=web_project_name(str(opt.project)), name=None if opt.name == "exp" else opt.name, config=opt) else: @@ -303,3 +303,11 @@ def log_tensorboard_graph(tb, model, imgsz=(640, 640)): tb.add_graph(torch.jit.trace(de_parallel(model), im, strict=False), []) except Exception as e: print(f'WARNING: TensorBoard graph visualization failure {e}') + + +def web_project_name(project): + # Convert local project name to web project name + if not project.startswith('runs/train'): + return project + suffix = '-Classify' if project.endswith('-cls') else '-Segment' if project.endswith('-seg') else '' + return f'YOLOv5{suffix}' From f0e5a608f50ac647827bede88fded7908c7edeab Mon Sep 17 
00:00:00 2001
From: Glenn Jocher
Date: Wed, 24 Aug 2022 12:31:50 +0200
Subject: [PATCH 1363/1976] Add CSV logging to GenericLogger (#9128)

Enable CSV logging for Classify training.

Signed-off-by: Glenn Jocher
Signed-off-by: Glenn Jocher
---
 utils/loggers/__init__.py | 21 +++++++++++++++++----
 1 file changed, 17 insertions(+), 4 deletions(-)

diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py
index 59d4b566836a..880039b1914c 100644
--- a/utils/loggers/__init__.py
+++ b/utils/loggers/__init__.py
@@ -242,9 +242,10 @@ class GenericLogger:

     def __init__(self, opt, console_logger, include=('tb', 'wandb')):
         # init default loggers
-        self.save_dir = opt.save_dir
+        self.save_dir = Path(opt.save_dir)
         self.include = include
         self.console_logger = console_logger
+        self.csv = self.save_dir / 'results.csv'  # CSV logger
         if 'tb' in self.include:
             prefix = colorstr('TensorBoard: ')
             self.console_logger.info(
@@ -258,14 +259,21 @@ def __init__(self, opt, console_logger, include=('tb', 'wandb')):
         else:
             self.wandb = None

-    def log_metrics(self, metrics_dict, epoch):
+    def log_metrics(self, metrics, epoch):
         # Log metrics dictionary to all loggers
+        if self.csv:
+            keys, vals = list(metrics.keys()), list(metrics.values())
+            n = len(metrics) + 1  # number of cols
+            s = '' if self.csv.exists() else (('%23s,' * n % tuple(['epoch'] + keys)).rstrip(',') + '\n')  # header
+            with open(self.csv, 'a') as f:
+                f.write(s + ('%23.5g,' * n % tuple([epoch] + vals)).rstrip(',') + '\n')
+
         if self.tb:
-            for k, v in metrics_dict.items():
+            for k, v in metrics.items():
                 self.tb.add_scalar(k, v, epoch)

         if self.wandb:
-            self.wandb.log(metrics_dict, step=epoch)
+            self.wandb.log(metrics, step=epoch)

     def log_images(self, files, name='Images', epoch=0):
         # Log images to all loggers
@@ -291,6 +299,11 @@ def log_model(self, model_path, epoch=0, metadata={}):
             art.add_file(str(model_path))
             wandb.log_artifact(art)

+    def update_params(self, params):
+        # Update the parameters logged
+        if self.wandb:
+            wandb.run.config.update(params, allow_val_change=True)
+

 def log_tensorboard_graph(tb, model, imgsz=(640, 640)):
     # Log model graph to TensorBoard

From d07ddc69960ed71111457cbe41ab25ded1ab3155 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Thu, 25 Aug 2022 14:34:26 +0200
Subject: [PATCH 1364/1976] New TryExcept decorator (#9154)

* New TryExcept decorator

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 utils/__init__.py | 27 ++++++++++++++++++
 utils/general.py  | 27 ++----------------
 utils/metrics.py  | 73 ++++++++++++++++++++++++-----------------------
 utils/plots.py    |  5 ++--
 4 files changed, 71 insertions(+), 61 deletions(-)

diff --git a/utils/__init__.py b/utils/__init__.py
index a63c473a4340..7466a486caf4 100644
--- a/utils/__init__.py
+++ b/utils/__init__.py
@@ -3,6 +3,33 @@
 utils/initialization
 """

+import contextlib
+import threading
+
+
+class TryExcept(contextlib.ContextDecorator):
+    # YOLOv5 TryExcept class. Usage: @TryExcept() decorator or 'with TryExcept():' context manager
+    def __init__(self, msg='default message here'):
+        self.msg = msg
+
+    def __enter__(self):
+        pass
+
+    def __exit__(self, exc_type, value, traceback):
+        if value:
+            print(f'{self.msg}: {value}')
+        return True
+
+
+def threaded(func):
+    # Multi-threads a target function and returns thread. Usage: @threaded decorator
+    def wrapper(*args, **kwargs):
+        thread = threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True)
+        thread.start()
+        return thread
+
+    return wrapper
+
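
[Aside, not part of the patch: as a context decorator, TryExcept prints and suppresses the exception whether applied as a decorator or used as a context manager, which is what lets the plotting utilities below fail softly instead of aborting a run. A minimal sketch:

    from utils import TryExcept

    @TryExcept('WARNING: demo failure')
    def flaky():
        raise ValueError('boom')

    flaky()  # prints 'WARNING: demo failure: boom' and returns None

    with TryExcept('WARNING: block failure'):
        1 / 0  # suppressed and reported the same way
]
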
 def notebook_init(verbose=True):
     # Check system software and hardware

diff --git a/utils/general.py b/utils/general.py
index d8c90f10ac8f..91b13f84a6c4 100755
--- a/utils/general.py
+++ b/utils/general.py
@@ -15,7 +15,6 @@
 import shutil
 import signal
 import sys
-import threading
 import time
 import urllib
 from datetime import datetime
@@ -34,6 +33,7 @@
 import torchvision
 import yaml

+from utils import TryExcept
 from utils.downloads import gsutil_getsize
 from utils.metrics import box_iou, fitness

@@ -195,27 +195,6 @@ def __exit__(self, exc_type, exc_val, exc_tb):
         os.chdir(self.cwd)


-def try_except(func):
-    # try-except function. Usage: @try_except decorator
-    def handler(*args, **kwargs):
-        try:
-            func(*args, **kwargs)
-        except Exception as e:
-            print(e)
-
-    return handler
-
-
-def threaded(func):
-    # Multi-threads a target function and returns thread. Usage: @threaded decorator
-    def wrapper(*args, **kwargs):
-        thread = threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True)
-        thread.start()
-        return thread
-
-    return wrapper
-
-
 def methods(instance):
     # Get class/instance methods
     return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith("__")]

@@ -319,7 +298,7 @@ def git_describe(path=ROOT):  # path must be a directory
         return ''


-@try_except
+@TryExcept()
 @WorkingDirectory(ROOT)
 def check_git_status(repo='ultralytics/yolov5'):
     # YOLOv5 status check, recommend 'git pull' if code is out of date
@@ -364,7 +343,7 @@ def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=Fals
     return result


-@try_except
+@TryExcept()
 def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True, cmds=()):
     # Check installed dependencies meet YOLOv5 requirements (pass *.txt file or list of packages)
     prefix = colorstr('red', 'bold', 'requirements:')

diff --git a/utils/metrics.py b/utils/metrics.py
index 8fa3c7e217c7..de1bf05b326b 100644
--- a/utils/metrics.py
+++ b/utils/metrics.py
@@ -11,6 +11,8 @@
 import numpy as np
 import torch

+from utils import TryExcept, threaded
+

 def fitness(x):
     # Model fitness as a weighted combination of metrics
@@ -184,36 +186,35 @@ def tp_fp(self):
         # fn = self.matrix.sum(0) - tp  # false negatives (missed detections)
         return tp[:-1], fp[:-1]  # remove background class

+    @TryExcept('WARNING: ConfusionMatrix plot failure')
     def plot(self, normalize=True, save_dir='', names=()):
-        try:
-            import seaborn as sn
-
-            array = self.matrix / ((self.matrix.sum(0).reshape(1, -1) + 1E-9) if normalize else 1)  # normalize columns
-            array[array < 0.005] = np.nan  # don't annotate (would appear as 0.00)
-
-            fig = plt.figure(figsize=(12, 9), tight_layout=True)
-            nc, nn = self.nc, len(names)  # number of classes, names
-            sn.set(font_scale=1.0 if nc < 50 else 0.8)  # for label size
-            labels = (0 < nn < 99) and (nn == nc)  # apply names to ticklabels
-            with warnings.catch_warnings():
-                warnings.simplefilter('ignore')  # suppress empty matrix RuntimeWarning: All-NaN slice encountered
-                sn.heatmap(array,
-                           annot=nc < 30,
-                           annot_kws={
-                               "size": 8},
-                           cmap='Blues',
-                           fmt='.2f',
-                           square=True,
-                           vmin=0.0,
-                           xticklabels=names + ['background FP'] if labels else "auto",
-                           yticklabels=names + ['background FN'] if labels else "auto").set_facecolor((1, 1, 1))
-            fig.axes[0].set_xlabel('True')
-            fig.axes[0].set_ylabel('Predicted')
-            plt.title('Confusion Matrix')
-            fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250)
-            plt.close()
-        except Exception as e:
-            print(f'WARNING: ConfusionMatrix plot failure: {e}')
+        import seaborn as sn
+
+        array = self.matrix / ((self.matrix.sum(0).reshape(1, -1) + 1E-9) if normalize else 1)  # normalize columns
+        array[array < 0.005] = np.nan  # don't annotate (would appear as 0.00)
+
+        fig, ax = plt.subplots(1, 1, figsize=(12, 9), tight_layout=True)
+        nc, nn = self.nc, len(names)  # number of classes, names
+        sn.set(font_scale=1.0 if nc < 50 else 0.8)  # for label size
+        labels = (0 < nn < 99) and (nn == nc)  # apply names to ticklabels
+        with warnings.catch_warnings():
+            warnings.simplefilter('ignore')  # suppress empty matrix RuntimeWarning: All-NaN slice encountered
+            sn.heatmap(array,
+                       ax=ax,
+                       annot=nc < 30,
+                       annot_kws={
+                           "size": 8},
+                       cmap='Blues',
+                       fmt='.2f',
+                       square=True,
+                       vmin=0.0,
+                       xticklabels=names + ['background FP'] if labels else "auto",
+                       yticklabels=names + ['background FN'] if labels else "auto").set_facecolor((1, 1, 1))
+        ax.set_xlabel('True')
+        ax.set_ylabel('Predicted')
+        ax.set_title('Confusion Matrix')
+        fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250)
+        plt.close(fig)

     def print(self):
         for i in range(self.nc + 1):
@@ -320,6 +321,7 @@ def wh_iou(wh1, wh2, eps=1e-7):

 # Plots ----------------------------------------------------------------------------------------------------------------


+@threaded
 def plot_pr_curve(px, py, ap, save_dir=Path('pr_curve.png'), names=()):
     # Precision-recall curve
     fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)
@@ -336,12 +338,13 @@ def plot_pr_curve(px, py, ap, save_dir=Path('pr_curve.png'), names=()):
     ax.set_ylabel('Precision')
     ax.set_xlim(0, 1)
     ax.set_ylim(0, 1)
-    plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
-    plt.title('Precision-Recall Curve')
+    ax.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
+    ax.set_title('Precision-Recall Curve')
     fig.savefig(save_dir, dpi=250)
-    plt.close()
+    plt.close(fig)


+@threaded
 def plot_mc_curve(px, py, save_dir=Path('mc_curve.png'), names=(), xlabel='Confidence', ylabel='Metric'):
     # Metric-confidence curve
     fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)
@@ -358,7 +361,7 @@ def plot_mc_curve(px, py, save_dir=Path('mc_curve.png'), names=(), xlabel='Confi
     ax.set_ylabel(ylabel)
     ax.set_xlim(0, 1)
     ax.set_ylim(0, 1)
-    plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
-    plt.title(f'{ylabel}-Confidence Curve')
+    ax.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
+    ax.set_title(f'{ylabel}-Confidence Curve')
     fig.savefig(save_dir, dpi=250)
-    plt.close()
+    plt.close(fig)

diff --git a/utils/plots.py b/utils/plots.py
index d35e2bdd168a..2aa163268336 100644
--- a/utils/plots.py
+++ b/utils/plots.py
@@ -19,8 +19,9 @@
 import torch
 from PIL import Image, ImageDraw, ImageFont

+from utils import TryExcept, threaded
 from utils.general import (CONFIG_DIR, FONT, LOGGER, check_font, check_requirements, clip_coords, increment_path,
-                           is_ascii, threaded, try_except, xywh2xyxy, xyxy2xywh)
+                           is_ascii, xywh2xyxy, xyxy2xywh)
 from utils.metrics import fitness

 # Settings
@@ -339,7 +340,7 @@ def plot_val_study(file='', dir='', x=None):  # from utils.plots import *; plot_
     plt.savefig(f, dpi=300)


-@try_except  # known issue https://github.com/ultralytics/yolov5/issues/5395
+@TryExcept()  # known issue https://github.com/ultralytics/yolov5/issues/5395
 def plot_labels(labels, names=(), save_dir=Path('')):
     # plot dataset labels
     LOGGER.info(f"Plotting labels to {save_dir /
'labels.jpg'}... ") From 729dc169baeab2eb55b79ef0c29e3174306c8a0e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 25 Aug 2022 15:04:27 +0200 Subject: [PATCH 1365/1976] Fixed Classify offsets (#9155) --- classify/predict.py | 2 +- utils/plots.py | 10 ++++++---- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/classify/predict.py b/classify/predict.py index b33b5bcc9933..937704d0f080 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -136,7 +136,7 @@ def run( # Write results if save_img or view_img: # Add bbox to image text = '\n'.join(f'{prob[j]:.2f} {names[j]}' for j in top5i) - annotator.text((64, 64), text, txt_color=(255, 255, 255)) + annotator.text((32, 32), text, txt_color=(255, 255, 255)) # Stream results im0 = annotator.result() diff --git a/utils/plots.py b/utils/plots.py index 2aa163268336..0f322b6b5844 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -117,10 +117,12 @@ def rectangle(self, xy, fill=None, outline=None, width=1): # Add rectangle to image (PIL-only) self.draw.rectangle(xy, fill, outline, width) - def text(self, xy, text, txt_color=(255, 255, 255)): + def text(self, xy, text, txt_color=(255, 255, 255), anchor='top'): # Add text to image (PIL-only) - w, h = self.font.getsize(text) # text width, height - self.draw.text((xy[0], xy[1] - h + 1), text, fill=txt_color, font=self.font) + if anchor == 'bottom': # start y from font bottom + w, h = self.font.getsize(text) # text width, height + xy[1] += 1 - h + self.draw.text(xy, text, fill=txt_color, font=self.font) def result(self): # Return annotated image as array @@ -222,7 +224,7 @@ def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders if paths: - annotator.text((x + 5, y + 5 + h), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames + annotator.text((x + 5, y + 5), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames if len(targets) > 0: ti = targets[targets[:, 0] == i] # image targets boxes = xywh2xyxy(ti[:, 2:6]).T From 30e674b14d6bb4e13ceea84a5ef67d08e6dd2f7d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 25 Aug 2022 15:06:20 +0200 Subject: [PATCH 1366/1976] New YOLOv5 v6.2 splash images (#9142) * New YOLOv5 v6.2 splash images @AyushExel @AlanDimmer Signed-off-by: Glenn Jocher * Created using Colaboratory * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README_cn.md Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- .github/README_cn.md | 98 +++++++++++++++++++++--------------------- README.md | 100 +++++++++++++++++++++---------------------- tutorial.ipynb | 42 +++++++++--------- 3 files changed, 119 insertions(+), 121 deletions(-) diff --git a/.github/README_cn.md b/.github/README_cn.md index 46aafd86ec9b..bb62714f003f 100644 --- a/.github/README_cn.md +++ b/.github/README_cn.md @@ -1,55 +1,55 @@
[README header diff: the HTML markup of this hunk (centered <div>/<a> wrappers and <img> splash, logo and badge tags) did not survive text extraction, so only the recoverable text is kept below. Per the commit subject, the old logo header is replaced by the new YOLOv5 v6.2 splash banner; the badge row and description paragraph carry over with re-indented markup.]

 .github/README_cn.md, surviving header text (unchanged between the - and + sides):
 [English](../README.md) | 简体中文
 badge labels: CI CPU testing, YOLOv5 Citation, Docker Pulls, Open In Colab, Open In Kaggle, Join Forum
 YOLOv5🚀是一个在COCO数据集上预训练的物体检测架构和模型系列，它代表了Ultralytics对未来视觉AI方法的公开研究，其中包含了在数千小时的研究和开发中所获得的经验和最佳实践。 (this Chinese description mirrors the English paragraph below)
 ## 文件

diff --git a/README.md b/README.md
index 89e4f1199cde..1d6b4e153d82 100644
--- a/README.md
+++ b/README.md
@@ -1,56 +1,56 @@
[Same header rework in English; markup likewise lost in extraction. Surviving header text:]
 English | [简体中文](.github/README_cn.md)
 badge labels: CI CPU testing, YOLOv5 Citation, Docker Pulls, Open In Colab, Open In Kaggle, Join Forum
 YOLOv5 🚀 is a family of object detection architectures and models pretrained on the COCO dataset, and represents Ultralytics open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development.
 ## Documentation
diff --git a/tutorial.ipynb b/tutorial.ipynb index 5b7b1f287d7e..3af5517c9623 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -7,8 +7,7 @@ "provenance": [], "collapsed_sections": [], "machine_shape": "hm", - "toc_visible": true, - "include_colab_link": true + "toc_visible": true }, "kernelspec": { "name": "python3", @@ -381,27 +380,26 @@ } }, "cells": [ - { - "cell_type": "markdown", - "metadata": { - "id": "view-in-github", - "colab_type": "text" - }, - "source": [ - "\"Open" - ] - }, { "cell_type": "markdown", "metadata": { "id": "t6MPjfT5NrKQ" }, "source": [ - "\n", - "\n", + "
\n",
[markdown-cell HTML lost in extraction: the new cell header carries the v6.2 splash banner plus "Open In Colab" and "Open In Kaggle" badge links]
-        "This is the **official YOLOv5 🚀 notebook** by **Ultralytics**, and is freely available for redistribution under the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/).  \n",
-        "For more information please visit https://github.com/ultralytics/yolov5 and https://ultralytics.com. Thank you!"
+        "This YOLOv5 🚀 notebook by Ultralytics presents simple train, validate and predict examples to help start your AI adventure. See GitHub for community support or contact us for professional support.\n",
+        "
" ] }, { @@ -412,7 +410,7 @@ "source": [ "# Setup\n", "\n", - "Clone repo, install dependencies and check PyTorch and GPU." + "Clone GitHub [repository](https://github.com/ultralytics/yolov5), install [dependencies](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) and check PyTorch and GPU." ] }, { @@ -433,7 +431,7 @@ "import utils\n", "display = utils.notebook_init() # checks" ], - "execution_count": 1, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -485,7 +483,7 @@ "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images\n", "# display.Image(filename='runs/detect/exp/zidane.jpg', width=600)" ], - "execution_count": 2, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -555,7 +553,7 @@ "torch.hub.download_url_to_file('https://ultralytics.com/assets/coco2017val.zip', 'tmp.zip') # download (780M - 5000 images)\n", "!unzip -q tmp.zip -d ../datasets && rm tmp.zip # unzip" ], - "execution_count": 3, + "execution_count": null, "outputs": [ { "output_type": "display_data", @@ -603,7 +601,7 @@ "# Validate YOLOv5x on COCO val\n", "!python val.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65 --half" ], - "execution_count": 4, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -723,7 +721,7 @@ "# Train YOLOv5s on COCO128 for 3 epochs\n", "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache" ], - "execution_count": 5, + "execution_count": null, "outputs": [ { "output_type": "stream", From f2b8f3fe3a3ae2b601706e5bea9f25265eb2fcd9 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 25 Aug 2022 22:17:28 +0200 Subject: [PATCH 1367/1976] Created using Colaboratory --- tutorial.ipynb | 474 +++++++++++++++++++++++-------------------------- 1 file changed, 218 insertions(+), 256 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 3af5517c9623..12840063b1f1 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -16,121 +16,110 @@ "accelerator": "GPU", "widgets": { "application/vnd.jupyter.widget-state+json": { - "da0946bcefd9414fa282977f7f609e36": { + "9b8caa3522fc4cbab31e13b5dfc7808d": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", - "model_module_version": "2.0.0", + "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", + "_model_module_version": "1.5.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "2.0.0", + "_view_module_version": "1.5.0", "_view_name": "HBoxView", "box_style": "", "children": [ - "IPY_MODEL_7838c0af44244ccc906c413cea0989d7", - "IPY_MODEL_309ea78b3e814198b4080beb878d5329", - "IPY_MODEL_b2d1d998e5db4ca1a36280902e1647c7" + "IPY_MODEL_574140e4c4bc48c9a171541a02cd0211", + "IPY_MODEL_35e03ce5090346c9ae602891470fc555", + "IPY_MODEL_c942c208e72d46568b476bb0f2d75496" ], - "layout": "IPY_MODEL_e7d7f56c77884717ba122f1d603c0852", - "tabbable": null, - "tooltip": null + "layout": "IPY_MODEL_65881db1db8a4e9c930fab9172d45143" } }, - "7838c0af44244ccc906c413cea0989d7": { + "574140e4c4bc48c9a171541a02cd0211": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", - "model_module_version": "2.0.0", + "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", + "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": 
null, "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "2.0.0", + "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", - "description_allow_html": false, - "layout": "IPY_MODEL_abf60d6b8ea847f9bb358ae2b045458b", + "description_tooltip": null, + "layout": "IPY_MODEL_60b913d755b34d638478e30705a2dde1", "placeholder": "​", - "style": "IPY_MODEL_379196a2761b4a29aca8ef088dc60c10", - "tabbable": null, - "tooltip": null, + "style": "IPY_MODEL_0856bea36ec148b68522ff9c9eb258d8", "value": "100%" } }, - "309ea78b3e814198b4080beb878d5329": { + "35e03ce5090346c9ae602891470fc555": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", - "model_module_version": "2.0.0", + "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", + "_model_module_version": "1.5.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "2.0.0", + "_view_module_version": "1.5.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", - "description_allow_html": false, - "layout": "IPY_MODEL_52b546a356e54174a95049b30cb52c81", + "description_tooltip": null, + "layout": "IPY_MODEL_76879f6f2aa54637a7a07faeea2bd684", "max": 818322941, "min": 0, "orientation": "horizontal", - "style": "IPY_MODEL_0889e134327e4aa0a8719d03a0d6941b", - "tabbable": null, - "tooltip": null, + "style": "IPY_MODEL_0ace3934ec6f4d36a1b3a9e086390926", "value": 818322941 } }, - "b2d1d998e5db4ca1a36280902e1647c7": { + "c942c208e72d46568b476bb0f2d75496": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", - "model_module_version": "2.0.0", + "model_module_version": "1.5.0", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", + "_model_module_version": "1.5.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "2.0.0", + "_view_module_version": "1.5.0", "_view_name": "HTMLView", "description": "", - "description_allow_html": false, - "layout": "IPY_MODEL_30f22a3e42d24f10ad9851f40a6703f3", + "description_tooltip": null, + "layout": "IPY_MODEL_d6b7a2243e0c4beca714d99dceec23d6", "placeholder": "​", - "style": "IPY_MODEL_648b3512bb7d4ccca5d75af36c133e92", - "tabbable": null, - "tooltip": null, - "value": " 780M/780M [01:31<00:00, 12.3MB/s]" + "style": "IPY_MODEL_5966ba6e6f114d8c9d8d1d6b1bd4f4c7", + "value": " 780M/780M [02:19<00:00, 6.24MB/s]" } }, - "e7d7f56c77884717ba122f1d603c0852": { + "65881db1db8a4e9c930fab9172d45143": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", - "model_module_version": "2.0.0", + "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", - "_model_module_version": "2.0.0", + "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", + "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, - "border_bottom": null, - "border_left": null, - "border_right": null, - "border_top": null, + "border": null, "bottom": null, "display": null, "flex": null, @@ -158,6 +147,8 @@ "object_position": null, "order": null, "overflow": null, + "overflow_x": null, + "overflow_y": null, "padding": null, "right": null, "top": null, @@ 
-165,25 +156,22 @@ "width": null } }, - "abf60d6b8ea847f9bb358ae2b045458b": { + "60b913d755b34d638478e30705a2dde1": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", - "model_module_version": "2.0.0", + "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", - "_model_module_version": "2.0.0", + "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", + "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, - "border_bottom": null, - "border_left": null, - "border_right": null, - "border_top": null, + "border": null, "bottom": null, "display": null, "flex": null, @@ -211,6 +199,8 @@ "object_position": null, "order": null, "overflow": null, + "overflow_x": null, + "overflow_y": null, "padding": null, "right": null, "top": null, @@ -218,43 +208,37 @@ "width": null } }, - "379196a2761b4a29aca8ef088dc60c10": { + "0856bea36ec148b68522ff9c9eb258d8": { "model_module": "@jupyter-widgets/controls", - "model_name": "HTMLStyleModel", - "model_module_version": "2.0.0", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLStyleModel", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", + "_view_module_version": "1.2.0", "_view_name": "StyleView", - "background": null, - "description_width": "", - "font_size": null, - "text_color": null + "description_width": "" } }, - "52b546a356e54174a95049b30cb52c81": { + "76879f6f2aa54637a7a07faeea2bd684": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", - "model_module_version": "2.0.0", + "model_module_version": "1.2.0", "state": { "_model_module": "@jupyter-widgets/base", - "_model_module_version": "2.0.0", + "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", + "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, - "border_bottom": null, - "border_left": null, - "border_right": null, - "border_top": null, + "border": null, "bottom": null, "display": null, "flex": null, @@ -282,6 +266,8 @@ "object_position": null, "order": null, "overflow": null, + "overflow_x": null, + "overflow_y": null, "padding": null, "right": null, "top": null, @@ -289,41 +275,38 @@ "width": null } }, - "0889e134327e4aa0a8719d03a0d6941b": { + "0ace3934ec6f4d36a1b3a9e086390926": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", - "model_module_version": "2.0.0", + "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", + "_model_module_version": "1.5.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", + "_view_module_version": "1.2.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, - "30f22a3e42d24f10ad9851f40a6703f3": { + "d6b7a2243e0c4beca714d99dceec23d6": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", - "model_module_version": "2.0.0", + "model_module_version": "1.2.0", 
"state": { "_model_module": "@jupyter-widgets/base", - "_model_module_version": "2.0.0", + "_model_module_version": "1.2.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", + "_view_module_version": "1.2.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, - "border_bottom": null, - "border_left": null, - "border_right": null, - "border_top": null, + "border": null, "bottom": null, "display": null, "flex": null, @@ -351,6 +334,8 @@ "object_position": null, "order": null, "overflow": null, + "overflow_x": null, + "overflow_y": null, "padding": null, "right": null, "top": null, @@ -358,22 +343,19 @@ "width": null } }, - "648b3512bb7d4ccca5d75af36c133e92": { + "5966ba6e6f114d8c9d8d1d6b1bd4f4c7": { "model_module": "@jupyter-widgets/controls", - "model_name": "HTMLStyleModel", - "model_module_version": "2.0.0", + "model_name": "DescriptionStyleModel", + "model_module_version": "1.5.0", "state": { "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "2.0.0", - "_model_name": "HTMLStyleModel", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", - "_view_module_version": "2.0.0", + "_view_module_version": "1.2.0", "_view_name": "StyleView", - "background": null, - "description_width": "", - "font_size": null, - "text_color": null + "description_width": "" } } } @@ -420,7 +402,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "4200fd6f-c6f5-4505-a4f9-a918f3ed1f86" + "outputId": "0f9ee467-cea4-48e8-9050-7a76ae1b6141" }, "source": [ "!git clone https://github.com/ultralytics/yolov5 # clone\n", @@ -431,13 +413,13 @@ "import utils\n", "display = utils.notebook_init() # checks" ], - "execution_count": null, + "execution_count": 1, "outputs": [ { "output_type": "stream", "name": "stderr", "text": [ - "YOLOv5 🚀 v6.2-41-g8665d55 Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n" + "YOLOv5 🚀 v6.2-56-g30e674b Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n" ] }, { @@ -477,29 +459,29 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "1af15107-bcd1-4e8f-b5bd-0ee1a737e051" + "outputId": "60647b99-e8d4-402c-f444-331bf6746da4" }, "source": [ "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images\n", "# display.Image(filename='runs/detect/exp/zidane.jpg', width=600)" ], - "execution_count": null, + "execution_count": 2, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "\u001b[34m\u001b[1mdetect: \u001b[0mweights=['yolov5s.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/detect, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False\n", - "YOLOv5 🚀 v6.2-41-g8665d55 Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", + "YOLOv5 🚀 v6.2-56-g30e674b Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", "\n", "Downloading https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt to yolov5s.pt...\n", - "100% 14.1M/14.1M [00:00<00:00, 41.7MB/s]\n", + "100% 14.1M/14.1M [00:00<00:00, 27.8MB/s]\n", "\n", "Fusing 
layers... \n", "YOLOv5s summary: 213 layers, 7225885 parameters, 0 gradients\n", - "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 14.5ms\n", - "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, 18.9ms\n", - "Speed: 0.5ms pre-process, 16.7ms inference, 21.4ms NMS per image at shape (1, 3, 640, 640)\n", + "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 14.8ms\n", + "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, 20.1ms\n", + "Speed: 0.6ms pre-process, 17.4ms inference, 21.6ms NMS per image at shape (1, 3, 640, 640)\n", "Results saved to \u001b[1mruns/detect/exp\u001b[0m\n" ] } @@ -531,29 +513,29 @@ "id": "WQPtK1QYVaD_", "colab": { "base_uri": "https://localhost:8080/", - "height": 17, + "height": 49, "referenced_widgets": [ - "da0946bcefd9414fa282977f7f609e36", - "7838c0af44244ccc906c413cea0989d7", - "309ea78b3e814198b4080beb878d5329", - "b2d1d998e5db4ca1a36280902e1647c7", - "e7d7f56c77884717ba122f1d603c0852", - "abf60d6b8ea847f9bb358ae2b045458b", - "379196a2761b4a29aca8ef088dc60c10", - "52b546a356e54174a95049b30cb52c81", - "0889e134327e4aa0a8719d03a0d6941b", - "30f22a3e42d24f10ad9851f40a6703f3", - "648b3512bb7d4ccca5d75af36c133e92" + "9b8caa3522fc4cbab31e13b5dfc7808d", + "574140e4c4bc48c9a171541a02cd0211", + "35e03ce5090346c9ae602891470fc555", + "c942c208e72d46568b476bb0f2d75496", + "65881db1db8a4e9c930fab9172d45143", + "60b913d755b34d638478e30705a2dde1", + "0856bea36ec148b68522ff9c9eb258d8", + "76879f6f2aa54637a7a07faeea2bd684", + "0ace3934ec6f4d36a1b3a9e086390926", + "d6b7a2243e0c4beca714d99dceec23d6", + "5966ba6e6f114d8c9d8d1d6b1bd4f4c7" ] }, - "outputId": "5f129105-eca5-4f33-fb1d-981255f814ad" + "outputId": "102dabed-bc31-42fe-9133-d9ce28a2c01e" }, "source": [ "# Download COCO val\n", "torch.hub.download_url_to_file('https://ultralytics.com/assets/coco2017val.zip', 'tmp.zip') # download (780M - 5000 images)\n", "!unzip -q tmp.zip -d ../datasets && rm tmp.zip # unzip" ], - "execution_count": null, + "execution_count": 3, "outputs": [ { "output_type": "display_data", @@ -564,24 +546,7 @@ "application/vnd.jupyter.widget-view+json": { "version_major": 2, "version_minor": 0, - "model_id": "da0946bcefd9414fa282977f7f609e36" - }, - "application/json": { - "n": 0, - "total": 818322941, - "elapsed": 0.020366430282592773, - "ncols": null, - "nrows": null, - "prefix": "", - "ascii": false, - "unit": "B", - "unit_scale": true, - "rate": null, - "bar_format": null, - "postfix": null, - "unit_divisor": 1024, - "initial": 0, - "colour": null + "model_id": "9b8caa3522fc4cbab31e13b5dfc7808d" } }, "metadata": {} @@ -595,60 +560,57 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "40d5d000-abee-46a0-c07d-1066e1662e01" + "outputId": "daf60b1b-b098-4657-c863-584f4c9cf078" }, "source": [ - "# Validate YOLOv5x on COCO val\n", - "!python val.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65 --half" + "# Validate YOLOv5s on COCO val\n", + "!python val.py --weights yolov5s.pt --data coco.yaml --img 640 --half" ], - "execution_count": null, + "execution_count": 4, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ - "\u001b[34m\u001b[1mval: \u001b[0mdata=/content/yolov5/data/coco.yaml, weights=['yolov5x.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.65, task=val, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=True, project=runs/val, name=exp, exist_ok=False, 
half=True, dnn=False\n", - "YOLOv5 🚀 v6.2-41-g8665d55 Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", - "\n", - "Downloading https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x.pt to yolov5x.pt...\n", - "100% 166M/166M [00:10<00:00, 16.6MB/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mdata=/content/yolov5/data/coco.yaml, weights=['yolov5s.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, task=val, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=True, project=runs/val, name=exp, exist_ok=False, half=True, dnn=False\n", + "YOLOv5 🚀 v6.2-56-g30e674b Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", "\n", "Fusing layers... \n", - "YOLOv5x summary: 444 layers, 86705005 parameters, 0 gradients\n", + "YOLOv5s summary: 213 layers, 7225885 parameters, 0 gradients\n", "Downloading https://ultralytics.com/assets/Arial.ttf to /root/.config/Ultralytics/Arial.ttf...\n", - "100% 755k/755k [00:00<00:00, 1.39MB/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupt: 100% 5000/5000 [00:00<00:00, 10506.48it/s]\n", + "100% 755k/755k [00:00<00:00, 52.7MB/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupt: 100% 5000/5000 [00:00<00:00, 10509.20it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: /content/datasets/coco/val2017.cache\n", - " Class Images Instances P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:06<00:00, 2.36it/s]\n", - " all 5000 36335 0.743 0.625 0.683 0.504\n", - "Speed: 0.1ms pre-process, 4.6ms inference, 1.1ms NMS per image at shape (32, 3, 640, 640)\n", + " Class Images Instances P R mAP@.5 mAP@.5:.95: 100% 157/157 [00:50<00:00, 3.10it/s]\n", + " all 5000 36335 0.67 0.521 0.566 0.371\n", + "Speed: 0.1ms pre-process, 1.0ms inference, 1.5ms NMS per image at shape (32, 3, 640, 640)\n", "\n", - "Evaluating pycocotools mAP... saving runs/val/exp/yolov5x_predictions.json...\n", + "Evaluating pycocotools mAP... 
saving runs/val/exp/yolov5s_predictions.json...\n", "loading annotations into memory...\n", - "Done (t=0.38s)\n", + "Done (t=0.81s)\n", "creating index...\n", "index created!\n", "Loading and preparing results...\n", - "DONE (t=5.49s)\n", + "DONE (t=5.62s)\n", "creating index...\n", "index created!\n", "Running per image evaluation...\n", "Evaluate annotation type *bbox*\n", - "DONE (t=72.10s).\n", + "DONE (t=77.03s).\n", "Accumulating evaluation results...\n", - "DONE (t=13.94s).\n", - " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.506\n", - " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.688\n", - " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.549\n", - " Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.340\n", - " Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.558\n", - " Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.651\n", - " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.382\n", - " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.631\n", - " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.684\n", - " Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.528\n", - " Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.737\n", - " Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.833\n", + "DONE (t=14.63s).\n", + " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.374\n", + " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.572\n", + " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.402\n", + " Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.211\n", + " Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.423\n", + " Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.489\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.311\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.516\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.566\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.378\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.625\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.724\n", "Results saved to \u001b[1mruns/val/exp\u001b[0m\n" ] } @@ -715,13 +677,13 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "f0ce0354-7f50-4546-f3f9-672b4b522d59" + "outputId": "baa6d4be-3379-4aab-844a-d5a5396c0e49" }, "source": [ "# Train YOLOv5s on COCO128 for 3 epochs\n", "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache" ], - "execution_count": null, + "execution_count": 5, "outputs": [ { "output_type": "stream", @@ -729,7 +691,7 @@ "text": [ "\u001b[34m\u001b[1mtrain: \u001b[0mweights=yolov5s.pt, cfg=, data=coco128.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, 
local_rank=-1, entity=None, upload_dataset=False, bbox_interval=-1, artifact_alias=latest\n", "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 🚀 v6.2-41-g8665d55 Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", + "YOLOv5 🚀 v6.2-56-g30e674b Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", "\n", "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n", "\u001b[34m\u001b[1mWeights & Biases: \u001b[0mrun 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs in Weights & Biases\n", @@ -738,8 +700,8 @@ "\n", "Dataset not found ⚠️, missing paths ['/content/datasets/coco128/images/train2017']\n", "Downloading https://ultralytics.com/assets/coco128.zip to coco128.zip...\n", - "100% 6.66M/6.66M [00:00<00:00, 76.7MB/s]\n", - "Dataset download success ✅ (0.5s), saved to \u001b[1m/content/datasets\u001b[0m\n", + "100% 6.66M/6.66M [00:00<00:00, 41.1MB/s]\n", + "Dataset download success ✅ (0.8s), saved to \u001b[1m/content/datasets\u001b[0m\n", "\n", " from n params module arguments \n", " 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2] \n", @@ -773,11 +735,11 @@ "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n", "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n", "\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '/content/datasets/coco128/labels/train2017' images and labels...128 found, 0 missing, 2 empty, 0 corrupt: 100% 128/128 [00:00<00:00, 7984.87it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '/content/datasets/coco128/labels/train2017' images and labels...128 found, 0 missing, 2 empty, 0 corrupt: 100% 128/128 [00:00<00:00, 9659.25it/s]\n", "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /content/datasets/coco128/labels/train2017.cache\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 1018.19it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 951.31it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco128/labels/train2017.cache' images and labels... 
128 found, 0 missing, 2 empty, 0 corrupt: 100% 128/128 [00:00 Date: Fri, 26 Aug 2022 14:34:28 +0200 Subject: [PATCH 1368/1976] Rename onnx_dynamic -> dynamic (#9168) --- export.py | 2 +- models/yolo.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/export.py b/export.py index 1bb7ded8ab85..0f26e63e9adc 100644 --- a/export.py +++ b/export.py @@ -489,7 +489,7 @@ def run( for k, m in model.named_modules(): if isinstance(m, Detect): m.inplace = inplace - m.onnx_dynamic = dynamic + m.dynamic = dynamic m.export = True for _ in range(2): diff --git a/models/yolo.py b/models/yolo.py index e154b72685b4..7a7308312a14 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -37,7 +37,7 @@ class Detect(nn.Module): stride = None # strides computed during build - onnx_dynamic = False # ONNX export parameter + dynamic = False # force grid reconstruction export = False # export mode def __init__(self, nc=80, anchors=(), ch=(), inplace=True): # detection layer @@ -60,7 +60,7 @@ def forward(self, x): x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() if not self.training: # inference - if self.onnx_dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]: + if self.dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]: self.grid[i], self.anchor_grid[i] = self._make_grid(nx, ny, i) y = x[i].sigmoid() From 5d3d051c9b6bb25c45d254ceabab669c758ed72b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 26 Aug 2022 15:29:31 +0200 Subject: [PATCH 1369/1976] Inline `_make_grid()` meshgrid (#9170) * Inline _make_grid() meshgrid Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- models/yolo.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/models/yolo.py b/models/yolo.py index 7a7308312a14..fa05fcf9a8d9 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -81,10 +81,7 @@ def _make_grid(self, nx=20, ny=20, i=0, torch_1_10=check_version(torch.__version t = self.anchors[i].dtype shape = 1, self.na, ny, nx, 2 # grid shape y, x = torch.arange(ny, device=d, dtype=t), torch.arange(nx, device=d, dtype=t) - if torch_1_10: # torch>=1.10.0 meshgrid workaround for torch>=0.7 compatibility - yv, xv = torch.meshgrid(y, x, indexing='ij') - else: - yv, xv = torch.meshgrid(y, x) + yv, xv = torch.meshgrid(y, x, indexing='ij') if torch_1_10 else torch.meshgrid(y, x) # torch>=0.7 compatibility grid = torch.stack((xv, yv), 2).expand(shape) - 0.5 # add grid offset, i.e. 
y = 2.0 * x - 0.5
         anchor_grid = (self.anchors[i] * self.stride[i]).view((1, self.na, 1, 1, 2)).expand(shape)
         return grid, anchor_grid

From cff9717d730710ad0f5e858ca54cb19731e6a6b5 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Fri, 26 Aug 2022 20:06:26 +0200
Subject: [PATCH 1370/1976] Comment EMA assert (#9173)

Signed-off-by: Glenn Jocher
Signed-off-by: Glenn Jocher
---
 utils/torch_utils.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/utils/torch_utils.py b/utils/torch_utils.py
index 5fbe8bbf10f6..abf0bbc19a98 100644
--- a/utils/torch_utils.py
+++ b/utils/torch_utils.py
@@ -422,7 +422,7 @@ def update(self, model):
             if v.dtype.is_floating_point:  # true for FP16 and FP32
                 v *= d
                 v += (1 - d) * msd[k].detach()
-        assert v.dtype == msd[k].detach().dtype == torch.float32, f'EMA {v.dtype} and model {msd[k]} must both be FP32'
+        # assert v.dtype == msd[k].dtype == torch.float32, f'{k}: EMA {v.dtype} and model {msd[k].dtype} must be FP32'

     def update_attr(self, model, include=(), exclude=('process_group', 'reducer')):
         # Update EMA attributes

From ffbce3858ae3d0d1d0978a5927daa2d4f94e55b6 Mon Sep 17 00:00:00 2001
From: HighMans <42877729+HighMans@users.noreply.github.com>
Date: Fri, 26 Aug 2022 19:39:11 -0400
Subject: [PATCH 1371/1976] Fix confidence threshold for ClearML debug images (#9174)

* Fix confidence threshold

The confidence is converted to a percentage on line 144, but it is then compared to a default conf_threshold given as a decimal value rather than as a percentage.

Signed-off-by: HighMans <42877729+HighMans@users.noreply.github.com>

* Revert "Fix confidence threshold"

This reverts commit f84a09967f83d70626ca8dfe7625dce60fb0102e.

* Fix confidence comparison

Fix: compare the raw decimal confidence, rather than the percentage, against the decimal conf_threshold.

Signed-off-by: HighMans <42877729+HighMans@users.noreply.github.com>
---
 utils/loggers/clearml/clearml_utils.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/utils/loggers/clearml/clearml_utils.py b/utils/loggers/clearml/clearml_utils.py
index 52320c090ddd..1e136907367d 100644
--- a/utils/loggers/clearml/clearml_utils.py
+++ b/utils/loggers/clearml/clearml_utils.py
@@ -141,10 +141,10 @@ def log_image_with_boxes(self, image_path, boxes, class_names, image, conf_thres
                 color = colors(i)
                 class_name = class_names[int(class_nr)]
-                confidence = round(float(conf) * 100, 2)
-                label = f"{class_name}: {confidence}%"
+                confidence_percentage = round(float(conf) * 100, 2)
+                label = f"{class_name}: {confidence_percentage}%"

-                if confidence > conf_threshold:
+                if conf > conf_threshold:
                     annotator.rectangle(box.cpu().numpy(), outline=color)
                     annotator.box_label(box.cpu().numpy(), label=label, color=color)

From f58fe6b6c12f1b0d25d95ab07a6656b87ac31b25 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Sun, 28 Aug 2022 21:36:05 +0200
Subject: [PATCH 1372/1976] Update Dockerfile-cpu (#9184)

Signed-off-by: Glenn Jocher
Signed-off-by: Glenn Jocher
---
 utils/docker/Dockerfile-cpu | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/utils/docker/Dockerfile-cpu b/utils/docker/Dockerfile-cpu
index d61dfeffe22c..5dc75d83c20f 100644
--- a/utils/docker/Dockerfile-cpu
+++ b/utils/docker/Dockerfile-cpu
@@ -18,7 +18,8 @@ RUN apt install --no-install-recommends -y python3-pip git zip curl htop libgl1-
COPY requirements.txt .
RUN python3 -m pip install --upgrade pip wheel RUN pip install --no-cache -r requirements.txt albumentations gsutil notebook \ - coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu tensorflowjs \ + coremltools onnx onnx-simplifier onnxruntime tensorflow-cpu tensorflowjs \ + # openvino-dev \ --extra-index-url https://download.pytorch.org/whl/cpu # Create working directory From 985e000d813c739fe6e4c05b8df6f80f40ca3c7a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 28 Aug 2022 21:48:58 +0200 Subject: [PATCH 1373/1976] Update Dockerfile-cpu to libpython3-dev (#9185) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile-cpu | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/docker/Dockerfile-cpu b/utils/docker/Dockerfile-cpu index 5dc75d83c20f..d6fac645dba1 100644 --- a/utils/docker/Dockerfile-cpu +++ b/utils/docker/Dockerfile-cpu @@ -11,7 +11,7 @@ ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Aria # Install linux packages RUN apt update RUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt install -y tzdata -RUN apt install --no-install-recommends -y python3-pip git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3.8-dev +RUN apt install --no-install-recommends -y python3-pip git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev # RUN alias python=python3 # Install pip packages From 53711bacea004389a603697e02c5aa8f7cd4b78e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 28 Aug 2022 22:14:21 +0200 Subject: [PATCH 1374/1976] Update Dockerfile-arm64 to libpython3-dev (#9187) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile-arm64 | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/utils/docker/Dockerfile-arm64 b/utils/docker/Dockerfile-arm64 index fe92c8d56146..6e8ff77545c5 100644 --- a/utils/docker/Dockerfile-arm64 +++ b/utils/docker/Dockerfile-arm64 @@ -11,8 +11,7 @@ ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Aria # Install linux packages RUN apt update RUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt install -y tzdata -RUN apt install --no-install-recommends -y python3-pip git zip curl htop gcc \ - libgl1-mesa-glx libglib2.0-0 libpython3.8-dev +RUN apt install --no-install-recommends -y python3-pip git zip curl htop gcc libgl1-mesa-glx libglib2.0-0 libpython3-dev # RUN alias python=python3 # Install pip packages From 13530402f8b960544aed45db4f71d7056a3ffdfc Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 29 Aug 2022 01:51:41 +0200 Subject: [PATCH 1375/1976] Fix AutoAnchor MPS bug (#9188) Resolves https://github.com/ultralytics/yolov5/issues/8862 Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/autoanchor.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/utils/autoanchor.py b/utils/autoanchor.py index f2222203e24c..ac17c6cafc90 100644 --- a/utils/autoanchor.py +++ b/utils/autoanchor.py @@ -10,6 +10,7 @@ import yaml from tqdm import tqdm +from utils import TryExcept from utils.general import LOGGER, colorstr PREFIX = colorstr('AutoAnchor: ') @@ -25,6 +26,7 @@ def check_anchor_order(m): m.anchors[:] = m.anchors.flip(0) +@TryExcept(f'{PREFIX}ERROR:') def check_anchors(dataset, model, thr=4.0, imgsz=640): # Check anchor fit to data, recompute if necessary m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect() @@ -49,10 +51,7 @@ def metric(k): # compute metric else: LOGGER.info(f'{s}Anchors are a poor fit to 
dataset ⚠️, attempting to improve...') na = m.anchors.numel() // 2 # number of anchors - try: - anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False) - except Exception as e: - LOGGER.info(f'{PREFIX}ERROR: {e}') + anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False) new_bpr = metric(anchors)[0] if new_bpr > bpr: # replace anchors anchors = torch.tensor(anchors, device=m.anchors.device).type_as(m.anchors) @@ -124,7 +123,7 @@ def print_results(k, verbose=True): i = (wh0 < 3.0).any(1).sum() if i: LOGGER.info(f'{PREFIX}WARNING: Extremely small objects found: {i} of {len(wh0)} labels are < 3 pixels in size') - wh = wh0[(wh0 >= 2.0).any(1)] # filter > 2 pixels + wh = wh0[(wh0 >= 2.0).any(1)].astype(np.float32) # filter > 2 pixels # wh = wh * (npr.rand(wh.shape[0], 1) * 0.9 + 0.1) # multiply by random scale 0-1 # Kmeans init @@ -167,4 +166,4 @@ def print_results(k, verbose=True): if verbose: print_results(k, verbose) - return print_results(k) + return print_results(k).astype(np.float32) From e57275a2d8713ec6b6fe88fd341d24c6c6e2d29d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 29 Aug 2022 02:00:42 +0200 Subject: [PATCH 1376/1976] Skip AMP check on MPS (#9189) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/general.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/general.py b/utils/general.py index 91b13f84a6c4..842f28c60886 100755 --- a/utils/general.py +++ b/utils/general.py @@ -535,8 +535,8 @@ def amp_allclose(model, im): prefix = colorstr('AMP: ') device = next(model.parameters()).device # get model device - if device.type == 'cpu': - return False # AMP disabled on CPU + if device.type in ('cpu', 'mps'): + return False # AMP only used on CUDA devices f = ROOT / 'data' / 'images' / 'bus.jpg' # image to check im = f if f.exists() else 'https://ultralytics.com/images/bus.jpg' if check_online() else np.ones((640, 640, 3)) try: From cf5d9cbc33ed6849801311765d0c90cdce8ddfd9 Mon Sep 17 00:00:00 2001 From: HighMans <42877729+HighMans@users.noreply.github.com> Date: Mon, 29 Aug 2022 08:58:55 -0400 Subject: [PATCH 1377/1976] ClearML's set_report_period's time is defined in minutes not seconds. (#9186) * ClearML's set_report_period's time is defined in minutes not seconds. https://clear.ml/docs/latest/docs/references/sdk/hpo_optimization_hyperparameteroptimizer/#set_report_period set_report_period function takes in time in terms of minutes, not seconds. 
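For reference, a minimal sketch of a seconds-to-minutes wrapper for this API (the set_report_seconds helper is hypothetical and not part of the patch; it assumes ClearML's documented HyperParameterOptimizer class):

    from clearml.automation import HyperParameterOptimizer

    def set_report_seconds(optimizer: HyperParameterOptimizer, seconds: float) -> None:
        # set_report_period() expects minutes, so convert the seconds value
        optimizer.set_report_period(seconds / 60)

Under this convention, the 10-second interval used in hpo.py below is passed as 10 / 60 minutes.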
Signed-off-by: HighMans <42877729+HighMans@users.noreply.github.com> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: HighMans <42877729+HighMans@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- utils/loggers/clearml/hpo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/loggers/clearml/hpo.py b/utils/loggers/clearml/hpo.py index 96c2c544c84c..ee518b0fbfc8 100644 --- a/utils/loggers/clearml/hpo.py +++ b/utils/loggers/clearml/hpo.py @@ -69,7 +69,7 @@ ) # report every 10 seconds, this is way too often, but we are testing here -optimizer.set_report_period(10) +optimizer.set_report_period(10 / 60) # You can also use the line below instead to run all the optimizer tasks locally, without using queues or agent # an_optimizer.start_locally(job_complete_callback=job_complete_callback) # set the time limit for the optimization process (2 hours) From f65081c4360887ead430e44ee3eb23566a8145ef Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 29 Aug 2022 15:00:55 +0200 Subject: [PATCH 1378/1976] Add `check_git_status(..., branch='master')` argument (#9199) Add check_git_status(branch='master') argument Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/general.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/utils/general.py b/utils/general.py index 842f28c60886..ac9b0cefd7c8 100755 --- a/utils/general.py +++ b/utils/general.py @@ -300,7 +300,7 @@ def git_describe(path=ROOT): # path must be a directory @TryExcept() @WorkingDirectory(ROOT) -def check_git_status(repo='ultralytics/yolov5'): +def check_git_status(repo='ultralytics/yolov5', branch='master'): # YOLOv5 status check, recommend 'git pull' if code is out of date url = f'https://github.com/{repo}' msg = f', for updates see {url}' @@ -316,10 +316,10 @@ def check_git_status(repo='ultralytics/yolov5'): remote = 'ultralytics' check_output(f'git remote add {remote} {url}', shell=True) check_output(f'git fetch {remote}', shell=True, timeout=5) # git fetch - branch = check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out - n = int(check_output(f'git rev-list {branch}..{remote}/master --count', shell=True)) # commits behind + local_branch = check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out + n = int(check_output(f'git rev-list {local_branch}..{remote}/{branch} --count', shell=True)) # commits behind if n > 0: - pull = 'git pull' if remote == 'origin' else f'git pull {remote} master' + pull = 'git pull' if remote == 'origin' else f'git pull {remote} {branch}' s += f"⚠️ YOLOv5 is out of date by {n} commit{'s' * (n > 1)}. Use `{pull}` or `git clone {url}` to update." 
    else:
        s += f'up to date with {url} ✅'

From fef1913d288a170a19df33493e241b593de99e41 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Mon, 29 Aug 2022 15:12:15 +0200
Subject: [PATCH 1379/1976] `check_font()` on notebook init (#9200)

Signed-off-by: Glenn Jocher
Signed-off-by: Glenn Jocher
---
 utils/__init__.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/utils/__init__.py b/utils/__init__.py
index 7466a486caf4..8bdffd47b3b2 100644
--- a/utils/__init__.py
+++ b/utils/__init__.py
@@ -38,10 +38,12 @@ def notebook_init(verbose=True):
     import os
     import shutil

-    from utils.general import check_requirements, emojis, is_colab
+    from utils.general import check_font, check_requirements, emojis, is_colab
     from utils.torch_utils import select_device  # imports

     check_requirements(('psutil', 'IPython'))
+    check_font()
+
     import psutil
     from IPython import display  # to display images and clear console output

From bd5fd78411115f8f9ebed4c95a26f0d3da316ac5 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Mon, 29 Aug 2022 17:13:55 +0200
Subject: [PATCH 1380/1976] Comment `protobuf` in requirements.txt (#9207)

The low pinned package version was causing conflicts among other dependencies; commenting it out causes no ill effects in CI, so this should be fine.

Signed-off-by: Glenn Jocher
Signed-off-by: Glenn Jocher
---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 10620566ca66..44fe1ce697b7 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -12,7 +12,7 @@ scipy>=1.4.1
 torch>=1.7.0
 torchvision>=0.8.1
 tqdm>=4.64.0
-protobuf<=3.20.1  # https://github.com/ultralytics/yolov5/issues/8012
+# protobuf<=3.20.1  # https://github.com/ultralytics/yolov5/issues/8012

 # Logging -------------------------------------
 tensorboard>=2.4.1

From da22e01a6818199d9222a13e58aa145b0477c342 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Mon, 29 Aug 2022 18:10:19 +0200
Subject: [PATCH 1381/1976] `check_font()` fstring update (#9208)

Signed-off-by: Glenn Jocher
Signed-off-by: Glenn Jocher
---
 utils/general.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/utils/general.py b/utils/general.py
index ac9b0cefd7c8..3e42e887283c 100755
--- a/utils/general.py
+++ b/utils/general.py
@@ -456,7 +456,7 @@ def check_font(font=FONT, progress=False):
     font = Path(font)
     file = CONFIG_DIR / font.name
     if not font.exists() and not file.exists():
-        url = "https://ultralytics.com/assets/" + font.name
+        url = f'https://ultralytics.com/assets/{font.name}'
         LOGGER.info(f'Downloading {url} to {file}...')
         torch.hub.download_url_to_file(url, str(file), progress=progress)

From 3c64d891043643cede117c8e54e30e35aecf2e56 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Mon, 29 Aug 2022 20:06:00 +0200
Subject: [PATCH 1382/1976] AutoBatch protect from extreme batch sizes (#9209)

If the estimate is < 1 or > 1024, set the output to the default batch size of 16.
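A standalone sketch of the new guard, for illustration only (the clamp_batch helper is hypothetical; in autobatch.py the estimate b comes from a polynomial fit to measured CUDA memory usage):

    def clamp_batch(b: int, default: int = 16) -> int:
        # fall back to the default batch size when the estimate is outside the safe range
        return default if b < 1 or b > 1024 else b

    assert clamp_batch(-3) == 16 and clamp_batch(4096) == 16 and clamp_batch(32) == 32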
May partially address https://github.com/ultralytics/yolov5/issues/9156 Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/autobatch.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/autobatch.py b/utils/autobatch.py index 8d12e46f0f09..01152055196d 100644 --- a/utils/autobatch.py +++ b/utils/autobatch.py @@ -60,8 +60,8 @@ def autobatch(model, imgsz=640, fraction=0.9, batch_size=16): i = results.index(None) # first fail index if b >= batch_sizes[i]: # y intercept above failure point b = batch_sizes[max(i - 1, 0)] # select prior safe point - if b < 1: # zero or negative batch size - b = 16 + if b < 1 or b > 1024: # b outside of safe range + b = batch_size LOGGER.warning(f'{prefix}WARNING: ⚠️ CUDA anomaly detected, recommend restart environment and retry command.') fraction = np.polyval(p, b) / t # actual fraction predicted From 91a81d48fa4e34dbdbaf0e45a1f841c11216aab5 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 29 Aug 2022 20:41:54 +0200 Subject: [PATCH 1383/1976] Default AutoBatch 0.8 fraction (#9212) --- hubconf.py | 2 +- utils/autobatch.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hubconf.py b/hubconf.py index 33fc87930582..bffe2d588b4f 100644 --- a/hubconf.py +++ b/hubconf.py @@ -47,7 +47,7 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo model = DetectMultiBackend(path, device=device, fuse=autoshape) # detection model if autoshape: if model.pt and isinstance(model.model, ClassificationModel): - LOGGER.warning('WARNING: YOLOv5 v6.2 ClassificationModel is not yet AutoShape compatible. ' + LOGGER.warning('WARNING: ⚠️ YOLOv5 v6.2 ClassificationModel is not yet AutoShape compatible. ' 'You must pass torch tensors in BCHW to this model, i.e. shape(1,3,224,224).') else: model = AutoShape(model) # for file/URI/PIL/cv2/np inputs and NMS diff --git a/utils/autobatch.py b/utils/autobatch.py index 01152055196d..641b055b9fe3 100644 --- a/utils/autobatch.py +++ b/utils/autobatch.py @@ -18,7 +18,7 @@ def check_train_batch_size(model, imgsz=640, amp=True): return autobatch(deepcopy(model).train(), imgsz) # compute optimal batch size -def autobatch(model, imgsz=640, fraction=0.9, batch_size=16): +def autobatch(model, imgsz=640, fraction=0.8, batch_size=16): # Automatically estimate best batch size to use `fraction` of available CUDA memory # Usage: # import torch From f37ac8d611c0972851831fdf534cdb2b7f126cff Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 30 Aug 2022 11:36:38 +0200 Subject: [PATCH 1384/1976] Delete rebase.yml (#9202) * Delete rebase.yml No longer required with new built-in GitHub PR merge master feature Signed-off-by: Glenn Jocher * Update CONTRIBUTING.md Signed-off-by: Glenn Jocher * Update greetings.yml Signed-off-by: Glenn Jocher * Update CONTRIBUTING.md Signed-off-by: Glenn Jocher * cleanup Signed-off-by: Glenn Jocher --- .github/workflows/greetings.yml | 14 ++++---------- .github/workflows/rebase.yml | 21 --------------------- CONTRIBUTING.md | 23 +++++++++-------------- 3 files changed, 13 insertions(+), 45 deletions(-) delete mode 100644 .github/workflows/rebase.yml diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index d5dad7a25559..91bf190eb727 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -17,16 +17,10 @@ jobs: repo-token: ${{ secrets.GITHUB_TOKEN }} pr-message: | 👋 Hello @${{ github.actor }}, thank you for submitting a YOLOv5 🚀 PR! 
To allow your work to be integrated as seamlessly as possible, we advise you to: - - ✅ Verify your PR is **up-to-date with upstream/master.** If your PR is behind upstream/master an automatic [GitHub Actions](https://github.com/ultralytics/yolov5/blob/master/.github/workflows/rebase.yml) merge may be attempted by writing /rebase in a new comment, or by running the following code, replacing 'feature' with the name of your local branch: - ```bash - git remote add upstream https://github.com/ultralytics/yolov5.git - git fetch upstream - # git checkout feature # <--- replace 'feature' with local branch name - git merge upstream/master - git push -u origin -f - ``` - - ✅ Verify all Continuous Integration (CI) **checks are passing**. - - ✅ Reduce changes to the absolute **minimum** required for your bug fix or feature addition. _"It is not daily increase but daily decrease, hack away the unessential. The closer to the source, the less wastage there is."_ -Bruce Lee + + - ✅ Verify your PR is **up-to-date** with `ultralytics/yolov5` `master` branch. If your PR is behind you can update your code by clicking the 'Update branch' button or by running `git pull` and `git merge master` locally. + - ✅ Verify all YOLOv5 Continuous Integration (CI) **checks are passing**. + - ✅ Reduce changes to the absolute **minimum** required for your bug fix or feature addition. _"It is not daily increase but daily decrease, hack away the unessential. The closer to the source, the less wastage there is."_ — Bruce Lee issue-message: | 👋 Hello @${{ github.actor }}, thank you for your interest in YOLOv5 🚀! Please visit our ⭐️ [Tutorials](https://github.com/ultralytics/yolov5/wiki#tutorials) to get started, where you can find quickstart guides for simple tasks like [Custom Data Training](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data) all the way to advanced concepts like [Hyperparameter Evolution](https://github.com/ultralytics/yolov5/issues/607). diff --git a/.github/workflows/rebase.yml b/.github/workflows/rebase.yml deleted file mode 100644 index a4dc9e5092fd..000000000000 --- a/.github/workflows/rebase.yml +++ /dev/null @@ -1,21 +0,0 @@ -# https://github.com/marketplace/actions/automatic-rebase - -name: Automatic Rebase -on: - issue_comment: - types: [created] -jobs: - rebase: - name: Rebase - if: github.event.issue.pull_request != '' && contains(github.event.comment.body, '/rebase') - runs-on: ubuntu-latest - steps: - - name: Checkout the latest code - uses: actions/checkout@v3 - with: - token: ${{ secrets.ACTIONS_TOKEN }} - fetch-depth: 0 # otherwise, you will fail to push refs to dest repo - - name: Automatic Rebase - uses: cirrus-actions/rebase@1.7 - env: - GITHUB_TOKEN: ${{ secrets.ACTIONS_TOKEN }} diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 13b9b73b50cc..7498f8995d40 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -45,20 +45,15 @@ changes** button. 
All done, your PR is now submitted to YOLOv5 for review and ap To allow your work to be integrated as seamlessly as possible, we advise you to: -- ✅ Verify your PR is **up-to-date with upstream/master.** If your PR is behind upstream/master an - automatic [GitHub Actions](https://github.com/ultralytics/yolov5/blob/master/.github/workflows/rebase.yml) merge may - be attempted by writing /rebase in a new comment, or by running the following code, replacing 'feature' with the name - of your local branch: - -```bash -git remote add upstream https://github.com/ultralytics/yolov5.git -git fetch upstream -# git checkout feature # <--- replace 'feature' with local branch name -git merge upstream/master -git push -u origin -f -``` - -- ✅ Verify all Continuous Integration (CI) **checks are passing**. +- ✅ Verify your PR is **up-to-date** with `ultralytics/yolov5` `master` branch. If your PR is behind you can update + your code by clicking the 'Update branch' button or by running `git pull` and `git merge master` locally. + +

+[image placeholder: Screenshot 2022-08-29 at 22 47 15]

+ +- ✅ Verify all YOLOv5 Continuous Integration (CI) **checks are passing**. + +

+[image placeholder: Screenshot 2022-08-29 at 22 47 03]

+ - ✅ Reduce changes to the absolute **minimum** required for your bug fix or feature addition. _"It is not daily increase but daily decrease, hack away the unessential. The closer to the source, the less wastage there is."_ — Bruce Lee From 5fb267f3e5dc86675d508e1b08d20fc0e2e84003 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 30 Aug 2022 13:40:51 +0200 Subject: [PATCH 1385/1976] Duplicate segment verification fix (#9225) Solution by @Laughing-q to resolve duplicate segment verification bug in https://github.com/ultralytics/yolov5/pull/9052#issuecomment-1231426638 Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/dataloaders.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 675c2898e7d7..f027307ccb94 100755 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -963,7 +963,7 @@ def verify_image_label(args): if len(i) < nl: # duplicate row check lb = lb[i] # remove duplicates if segments: - segments = segments[i] + segments = [segments[x] for x in i] msg = f'{prefix}WARNING: {im_file}: {nl - len(i)} duplicate labels removed' else: ne = 1 # label empty From 6e7a7ae7edee8f66d7ce5617f9f75724bb7d6992 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 30 Aug 2022 15:17:58 +0200 Subject: [PATCH 1386/1976] New `LetterBox(size)` `CenterCrop(size)`, `ToTensor()` transforms (#9213) * New LetterBox transform YOLOv5 LetterBox class for image preprocessing, i.e. T.Compose([T.ToTensor(), LetterBox(size)]) Signed-off-by: Glenn Jocher * Update augmentations.py Signed-off-by: Glenn Jocher * Update augmentations.py Signed-off-by: Glenn Jocher * Update augmentations.py Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * cleanup * cleanup * cleanup * cleanup * cleanup Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/augmentations.py | 50 +++++++++++++++++++++++++++++++++++++++++- utils/dataloaders.py | 22 +++++++++---------- 2 files changed, 60 insertions(+), 12 deletions(-) diff --git a/utils/augmentations.py b/utils/augmentations.py index c8499b3fc8ae..a5587351f75b 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -8,6 +8,7 @@ import cv2 import numpy as np +import torch import torchvision.transforms as T import torchvision.transforms.functional as TF @@ -345,4 +346,51 @@ def classify_albumentations(augment=True, def classify_transforms(size=224): # Transforms to apply if albumentations not installed assert isinstance(size, int), f'ERROR: classify_transforms size {size} must be integer, not (list, tuple)' - return T.Compose([T.ToTensor(), T.Resize(size), T.CenterCrop(size), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)]) + # T.Compose([T.ToTensor(), T.Resize(size), T.CenterCrop(size), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)]) + return T.Compose([CenterCrop(size), ToTensor(), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)]) + + +class LetterBox: + # YOLOv5 LetterBox class for image preprocessing, i.e. 
T.Compose([LetterBox(size), ToTensor()])
+    def __init__(self, size=(640, 640), auto=False, stride=32):
+        super().__init__()
+        self.h, self.w = (size, size) if isinstance(size, int) else size
+        self.auto = auto  # pass max size integer, automatically solve for short side using stride
+        self.stride = stride  # used with auto
+
+    def __call__(self, im):  # im = np.array HWC
+        imh, imw = im.shape[:2]
+        r = min(self.h / imh, self.w / imw)  # ratio of new/old
+        h, w = round(imh * r), round(imw * r)  # resized image
+        hs, ws = (math.ceil(x / self.stride) * self.stride for x in (h, w)) if self.auto else (self.h, self.w)
+        top, left = round((hs - h) / 2 - 0.1), round((ws - w) / 2 - 0.1)
+        im_out = np.full((self.h, self.w, 3), 114, dtype=im.dtype)
+        im_out[top:top + h, left:left + w] = cv2.resize(im, (w, h), interpolation=cv2.INTER_LINEAR)
+        return im_out
+
+
+class CenterCrop:
+    # YOLOv5 CenterCrop class for image preprocessing, i.e. T.Compose([CenterCrop(size), ToTensor()])
+    def __init__(self, size=640):
+        super().__init__()
+        self.h, self.w = (size, size) if isinstance(size, int) else size
+
+    def __call__(self, im):  # im = np.array HWC
+        imh, imw = im.shape[:2]
+        m = min(imh, imw)  # min dimension
+        top, left = (imh - m) // 2, (imw - m) // 2
+        return cv2.resize(im[top:top + m, left:left + m], (self.w, self.h), interpolation=cv2.INTER_LINEAR)
+
+
+class ToTensor:
+    # YOLOv5 ToTensor class for image preprocessing, i.e. T.Compose([LetterBox(size), ToTensor()])
+    def __init__(self, half=False):
+        super().__init__()
+        self.half = half
+
+    def __call__(self, im):  # im = np.array HWC in BGR order
+        im = np.ascontiguousarray(im.transpose((2, 0, 1))[::-1])  # HWC to CHW -> BGR to RGB -> contiguous
+        im = torch.from_numpy(im)  # to torch
+        im = im.half() if self.half else im.float()  # uint8 to fp16/32
+        im /= 255.0  # 0-255 to 0.0-1.0
+        return im
diff --git a/utils/dataloaders.py b/utils/dataloaders.py
index f027307ccb94..d4ab592bbea7 100755
--- a/utils/dataloaders.py
+++ b/utils/dataloaders.py
@@ -251,7 +251,7 @@ def __next__(self):
         s = f'image {self.count}/{self.nf} {path}: '

         if self.transforms:
-            im = self.transforms(cv2.cvtColor(im0, cv2.COLOR_BGR2RGB))  # transforms
+            im = self.transforms(im0)  # transforms
         else:
             im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0]  # padded resize
             im = im.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
@@ -386,7 +386,7 @@ def __next__(self):
         im0 = self.imgs.copy()
         if self.transforms:
-            im = np.stack([self.transforms(cv2.cvtColor(x, cv2.COLOR_BGR2RGB)) for x in im0])  # transforms
+            im = np.stack([self.transforms(x) for x in im0])  # transforms
         else:
             im = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0] for x in im0])  # resize
             im = im[..., ::-1].transpose((0, 3, 1, 2))  # BGR to RGB, BHWC to BCHW
@@ -1113,18 +1113,18 @@ def __init__(self, root, augment, imgsz, cache=False):

     def __getitem__(self, i):
         f, j, fn, im = self.samples[i]  # filename, index, filename.with_suffix('.npy'), image
+        if self.cache_ram and im is None:
+            im = self.samples[i][3] = cv2.imread(f)
+        elif self.cache_disk:
+            if not fn.exists():  # load npy
+                np.save(fn.as_posix(), cv2.imread(f))
+            im = np.load(fn)
+        else:  # read image
+            im = cv2.imread(f)  # BGR
         if self.album_transforms:
-            if self.cache_ram and im is None:
-                im = self.samples[i][3] = cv2.imread(f)
-            elif self.cache_disk:
-                if not fn.exists():  # load npy
-                    np.save(fn.as_posix(), cv2.imread(f))
-                im = np.load(fn)
-            else:  # read image
-                im = cv2.imread(f)  # BGR
             sample = 
self.album_transforms(image=cv2.cvtColor(im, cv2.COLOR_BGR2RGB))["image"] else: - sample = self.torch_transforms(self.loader(f)) + sample = self.torch_transforms(im) return sample, j From 4a37381ee8f9b650dde21fe352a94ff932c5b08d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 30 Aug 2022 16:18:01 +0200 Subject: [PATCH 1387/1976] Add ClassificationModel TF export assert (#9226) * Add ClassificationModel TF export assert Export to TF not yet supported, warning alerts users. Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- export.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/export.py b/export.py index 0f26e63e9adc..4d0144af9efb 100644 --- a/export.py +++ b/export.py @@ -65,7 +65,7 @@ ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative from models.experimental import attempt_load -from models.yolo import Detect +from models.yolo import ClassificationModel, Detect from utils.dataloaders import LoadImages from utils.general import (LOGGER, Profile, check_dataset, check_img_size, check_requirements, check_version, check_yaml, colorstr, file_size, get_default_args, print_args, url2file) @@ -518,6 +518,7 @@ def run( if int8 or edgetpu: # TFLite --int8 bug https://github.com/ultralytics/yolov5/issues/5707 check_requirements(('flatbuffers==1.12',)) # required before `import tensorflow` assert not tflite or not tfjs, 'TFLite and TF.js models must be exported separately, please pass only one type.' + assert not isinstance(model, ClassificationModel), 'ClassificationModel export to TF formats not yet supported.' f[5], model = export_saved_model(model.cpu(), im, file, From 5f1000a499dc8de5f9083412796324ebe091ba10 Mon Sep 17 00:00:00 2001 From: Yannick Merkli Date: Tue, 30 Aug 2022 21:57:36 +0200 Subject: [PATCH 1388/1976] Remove usage of `pathlib.Path.unlink(missing_ok=...)` (#9227) remove usage of pathlib.Path.unlink(missing_ok=...) 
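Path.unlink(missing_ok=...) requires Python 3.8+, while YOLOv5 still supports Python 3.7, hence the explicit exists() checks in the diff below. The repeated pattern could equally be wrapped in a small helper; a minimal sketch (the unlink_safe name is hypothetical, not part of the patch):

    from pathlib import Path

    def unlink_safe(path: Path) -> None:
        # same effect as path.unlink(missing_ok=True) on Python 3.8+
        if path.exists():
            path.unlink()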
Co-authored-by: Yannick Merkli --- utils/dataloaders.py | 4 +++- utils/downloads.py | 18 ++++++++++++------ 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index d4ab592bbea7..c61068ea316f 100755 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -917,7 +917,9 @@ def autosplit(path=DATASETS_DIR / 'coco128/images', weights=(0.9, 0.1, 0.0), ann indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files - [(path.parent / x).unlink(missing_ok=True) for x in txt] # remove existing + for x in txt: + if (path.parent / x).exists(): + (path.parent / x).unlink() # remove existing print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only) for i, img in tqdm(zip(indices, files), total=n): diff --git a/utils/downloads.py b/utils/downloads.py index 69887a579966..b56fc28c3bde 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -44,12 +44,14 @@ def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''): torch.hub.download_url_to_file(url, str(file), progress=LOGGER.level <= logging.INFO) assert file.exists() and file.stat().st_size > min_bytes, assert_msg # check except Exception as e: # url2 - file.unlink(missing_ok=True) # remove partial downloads + if file.exists(): + file.unlink() # remove partial downloads LOGGER.info(f'ERROR: {e}\nRe-attempting {url2 or url} to {file}...') os.system(f"curl -# -L '{url2 or url}' -o '{file}' --retry 3 -C -") # curl download, retry and resume on fail finally: if not file.exists() or file.stat().st_size < min_bytes: # check - file.unlink(missing_ok=True) # remove partial downloads + if file.exists(): + file.unlink() # remove partial downloads LOGGER.info(f"ERROR: {assert_msg}\n{error_msg}") LOGGER.info('') @@ -112,8 +114,10 @@ def gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', file='tmp.zip'): file = Path(file) cookie = Path('cookie') # gdrive cookie print(f'Downloading https://drive.google.com/uc?export=download&id={id} as {file}... 
', end='') - file.unlink(missing_ok=True) # remove existing file - cookie.unlink(missing_ok=True) # remove existing cookie + if file.exists(): + file.unlink() # remove existing file + if cookie.exists(): + cookie.unlink() # remove existing cookie # Attempt file download out = "NUL" if platform.system() == "Windows" else "/dev/null" @@ -123,11 +127,13 @@ def gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', file='tmp.zip'): else: # small file s = f'curl -s -L -o {file} "drive.google.com/uc?export=download&id={id}"' r = os.system(s) # execute, capture return - cookie.unlink(missing_ok=True) # remove existing cookie + if cookie.exists(): + cookie.unlink() # remove existing cookie # Error check if r != 0: - file.unlink(missing_ok=True) # remove partial + if file.exists(): + file.unlink() # remove partial print('Download error ') # raise Exception('Download error') return r From 79e181a83badd31c5013fffa0b80b55ff090c761 Mon Sep 17 00:00:00 2001 From: spacewalk01 Date: Thu, 1 Sep 2022 00:31:13 +0900 Subject: [PATCH 1389/1976] Add support for *`.pfm` images (#9230) add support for pfm image --- utils/dataloaders.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index c61068ea316f..84215925284e 100755 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -36,7 +36,7 @@ # Parameters HELP_URL = 'See https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' -IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp' # include image suffixes +IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp', 'pfm' # include image suffixes VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes BAR_FORMAT = '{l_bar}{bar:10}{r_bar}{bar:-10b}' # tqdm bar format LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html From 55b009616b4701f73311d1272cc87057d84a93e6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 31 Aug 2022 18:53:46 +0200 Subject: [PATCH 1390/1976] Python check warning emoji (#9238) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index 3e42e887283c..bc978ea221f3 100755 --- a/utils/general.py +++ b/utils/general.py @@ -335,7 +335,7 @@ def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=Fals # Check version vs. 
required version current, minimum = (pkg.parse_version(x) for x in (current, minimum)) result = (current == minimum) if pinned else (current >= minimum) # bool - s = f'{name}{minimum} required by YOLOv5, but {name}{current} is currently installed' # string + s = f'WARNING: ⚠️ {name}{minimum} is required by YOLOv5, but {name}{current} is currently installed' # string if hard: assert result, s # assert min requirements met if verbose and not result: From 223c59dbe07357a0bf760ea49cef6e1d7b66df91 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 1 Sep 2022 12:13:53 +0200 Subject: [PATCH 1391/1976] Add `url_getsize()` function (#9247) * Add `url_getsize()` function Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update downloads.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/downloads.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/utils/downloads.py b/utils/downloads.py index b56fc28c3bde..dd2698f995a4 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -33,6 +33,12 @@ def gsutil_getsize(url=''): return eval(s.split(' ')[0]) if len(s) else 0 # bytes +def url_getsize(url='https://ultralytics.com/images/bus.jpg'): + # Return downloadable file size in bytes + response = requests.head(url, allow_redirects=True) + return int(response.headers.get('content-length', -1)) + + def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''): # Attempts to download file from url or url2, checks and removes incomplete downloads < min_bytes from utils.general import LOGGER From c91d1db7161f4cffe70535378b81faf3ff4549b4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 1 Sep 2022 14:30:21 +0200 Subject: [PATCH 1392/1976] Update dataloaders.py (#9250) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/dataloaders.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 84215925284e..a4e6c0cfef18 100755 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -40,6 +40,7 @@ VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes BAR_FORMAT = '{l_bar}{bar:10}{r_bar}{bar:-10b}' # tqdm bar format LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html +PIN_MEMORY = str(os.getenv('PIN_MEMORY', True)).lower() == 'true' # global pin_memory for dataloaders # Get orientation exif tag for orientation in ExifTags.TAGS.keys(): @@ -83,7 +84,7 @@ def exif_transpose(image): 5: Image.TRANSPOSE, 6: Image.ROTATE_270, 7: Image.TRANSVERSE, - 8: Image.ROTATE_90,}.get(orientation) + 8: Image.ROTATE_90}.get(orientation) if method is not None: image = image.transpose(method) del exif[0x0112] @@ -144,7 +145,7 @@ def create_dataloader(path, shuffle=shuffle and sampler is None, num_workers=nw, sampler=sampler, - pin_memory=True, + pin_memory=PIN_MEMORY, collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn, worker_init_fn=seed_worker, generator=generator), dataset @@ -1152,6 +1153,6 @@ def create_classification_dataloader(path, shuffle=shuffle and sampler is None, num_workers=nw, sampler=sampler, - pin_memory=True, + pin_memory=PIN_MEMORY, worker_init_fn=seed_worker, generator=generator) # or DataLoader(persistent_workers=True) From 2d082a07bd28952159bf534c8728865ba577a449 Mon Sep 17 00:00:00 2001 
From: Ayush Chaurasia Date: Thu, 1 Sep 2022 22:47:36 +0530 Subject: [PATCH 1393/1976] Refactor Loggers : Move code outside train.py (#9241) * update * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update * update * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- train.py | 11 +++++------ utils/loggers/__init__.py | 11 +++++++++++ 2 files changed, 16 insertions(+), 6 deletions(-) diff --git a/train.py b/train.py index 0cd4a7f065a6..29293aa612cf 100644 --- a/train.py +++ b/train.py @@ -91,17 +91,16 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio data_dict = None if RANK in {-1, 0}: loggers = Loggers(save_dir, weights, opt, hyp, LOGGER) # loggers instance - if loggers.clearml: - data_dict = loggers.clearml.data_dict # None if no ClearML dataset or filled in by ClearML - if loggers.wandb: - data_dict = loggers.wandb.data_dict - if resume: - weights, epochs, hyp, batch_size = opt.weights, opt.epochs, opt.hyp, opt.batch_size # Register actions for k in methods(loggers): callbacks.register_action(k, callback=getattr(loggers, k)) + # Process custom dataset artifact link + data_dict = loggers.remote_dataset + if resume: # If resuming runs from remote artifact + weights, epochs, hyp, batch_size = opt.weights, opt.epochs, opt.hyp, opt.batch_size + # Config plots = not evolve and not opt.noplots # create plots cuda = device.type != 'cpu' diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 880039b1914c..1aa8427f9127 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -107,6 +107,17 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, else: self.clearml = None + @property + def remote_dataset(self): + # Get data_dict if custom dataset artifact link is provided + data_dict = None + if self.clearml: + data_dict = self.clearml.data_dict + if self.wandb: + data_dict = self.wandb.data_dict + + return data_dict + def on_train_start(self): # Callback runs on train start pass From ea98199041088a378b4f13316ba96afc637dfb83 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 1 Sep 2022 19:36:27 +0200 Subject: [PATCH 1394/1976] Update general.py (#9252) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index bc978ea221f3..ba6d9e165901 100755 --- a/utils/general.py +++ b/utils/general.py @@ -337,7 +337,7 @@ def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=Fals result = (current == minimum) if pinned else (current >= minimum) # bool s = f'WARNING: ⚠️ {name}{minimum} is required by YOLOv5, but {name}{current} is currently installed' # string if hard: - assert result, s # assert min requirements met + assert result, emojis(s) # assert min requirements met if verbose and not result: LOGGER.warning(s) return result From 9da6d0f9f5bc37fa386b7b82d2a963f94650949a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 1 Sep 2022 22:30:26 +0200 Subject: [PATCH 1395/1976] Add LoadImages._cv2_rotate() (#9249) Optional manual rotation code per iPhone rotation issue in https://github.com/ultralytics/yolov5/issues/8493 Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/dataloaders.py | 20 +++++++++++++++++--- 1 file changed, 
17 insertions(+), 3 deletions(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index a4e6c0cfef18..5f86f83786db 100755 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -213,7 +213,7 @@ def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None): self.auto = auto self.transforms = transforms # optional if any(videos): - self.new_video(videos[0]) # new video + self._new_video(videos[0]) # new video else: self.cap = None assert self.nf > 0, f'No images or videos found in {p}. ' \ @@ -238,10 +238,11 @@ def __next__(self): if self.count == self.nf: # last video raise StopIteration path = self.files[self.count] - self.new_video(path) + self._new_video(path) ret_val, im0 = self.cap.read() self.frame += 1 + # im0 = self._cv2_rotate(im0) # for use if cv2 auto rotation is False s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: ' else: @@ -260,10 +261,23 @@ def __next__(self): return path, im, im0, self.cap, s - def new_video(self, path): + def _new_video(self, path): + # Create a new video capture object self.frame = 0 self.cap = cv2.VideoCapture(path) self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT)) + self.orientation = int(self.cap.get(cv2.CAP_PROP_ORIENTATION_META)) # rotation degrees + # self.cap.set(cv2.CAP_PROP_ORIENTATION_AUTO, 0) # disable https://github.com/ultralytics/yolov5/issues/8493 + + def _cv2_rotate(self, im): + # Rotate a cv2 video manually + if self.orientation == 0: + return cv2.rotate(im, cv2.ROTATE_90_CLOCKWISE) + elif self.orientation == 180: + return cv2.rotate(im, cv2.ROTATE_90_COUNTERCLOCKWISE) + elif self.orientation == 90: + return cv2.rotate(im, cv2.ROTATE_180) + return im def __len__(self): return self.nf # number of files From ffdb58b0e07d964eb2d148a6814d22a4a26d47cc Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 2 Sep 2022 14:12:10 +0200 Subject: [PATCH 1396/1976] Move `cudnn.benchmarks(True)` to LoadStreams (#9258) * Move cudnn.benchmarks(True) to LoadStreams * Update dataloaders.py Signed-off-by: Glenn Jocher * Move cudnn.benchmarks(True) to LoadStreams Signed-off-by: Glenn Jocher --- classify/predict.py | 2 -- detect.py | 2 -- utils/dataloaders.py | 54 ++++---------------------------------------- 3 files changed, 4 insertions(+), 54 deletions(-) diff --git a/classify/predict.py b/classify/predict.py index 937704d0f080..76115c75029f 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -31,7 +31,6 @@ from pathlib import Path import torch -import torch.backends.cudnn as cudnn import torch.nn.functional as F FILE = Path(__file__).resolve() @@ -89,7 +88,6 @@ def run( # Dataloader if webcam: view_img = check_imshow() - cudnn.benchmark = True # set True to speed up constant image size inference dataset = LoadStreams(source, img_size=imgsz, transforms=classify_transforms(imgsz[0])) bs = len(dataset) # batch_size else: diff --git a/detect.py b/detect.py index 60a821b59a03..cf75d0f11c92 100644 --- a/detect.py +++ b/detect.py @@ -31,7 +31,6 @@ from pathlib import Path import torch -import torch.backends.cudnn as cudnn FILE = Path(__file__).resolve() ROOT = FILE.parents[0] # YOLOv5 root directory @@ -97,7 +96,6 @@ def run( # Dataloader if webcam: view_img = check_imshow() - cudnn.benchmark = True # set True to speed up constant image size inference dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt) bs = len(dataset) # batch_size else: diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 5f86f83786db..38ae3399ce26 100755 --- a/utils/dataloaders.py +++ 
b/utils/dataloaders.py @@ -283,62 +283,17 @@ def __len__(self): return self.nf # number of files -class LoadWebcam: # for inference - # YOLOv5 local webcam dataloader, i.e. `python detect.py --source 0` - def __init__(self, pipe='0', img_size=640, stride=32): - self.img_size = img_size - self.stride = stride - self.pipe = eval(pipe) if pipe.isnumeric() else pipe - self.cap = cv2.VideoCapture(self.pipe) # video capture object - self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size - - def __iter__(self): - self.count = -1 - return self - - def __next__(self): - self.count += 1 - if cv2.waitKey(1) == ord('q'): # q to quit - self.cap.release() - cv2.destroyAllWindows() - raise StopIteration - - # Read frame - ret_val, im0 = self.cap.read() - im0 = cv2.flip(im0, 1) # flip left-right - - # Print - assert ret_val, f'Camera Error {self.pipe}' - img_path = 'webcam.jpg' - s = f'webcam {self.count}: ' - - # Process - im = letterbox(im0, self.img_size, stride=self.stride)[0] # resize - im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB - im = np.ascontiguousarray(im) # contiguous - - return img_path, im, im0, None, s - - def __len__(self): - return 0 - - class LoadStreams: # YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams` def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True, transforms=None): + torch.backends.cudnn.benchmark = True # faster for fixed-size inference self.mode = 'stream' self.img_size = img_size self.stride = stride - - if os.path.isfile(sources): - with open(sources) as f: - sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())] - else: - sources = [sources] - + sources = Path(sources).read_text().rsplit() if Path(sources).is_file() else [sources] n = len(sources) - self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n self.sources = [clean_str(x) for x in sources] # clean source names for later + self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n for i, s in enumerate(sources): # index, source # Start thread to read frames from video stream st = f'{i + 1}/{n}: {s}... 
' @@ -377,8 +332,7 @@ def update(self, i, cap, stream): n, f, read = 0, self.frames[i], 1 # frame number, frame array, inference every 'read' frame while cap.isOpened() and n < f: n += 1 - # _, self.imgs[index] = cap.read() - cap.grab() + cap.grab() # .read() = .grab() followed by .retrieve() if n % read == 0: success, im = cap.retrieve() if success: From 5d4787baabea694369ad95c7d762139eb9f04e56 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 2 Sep 2022 16:05:23 +0200 Subject: [PATCH 1397/1976] `cudnn.benchmark = True` on Seed 0 (#9259) * `cudnn.benchmark = True` on Seed 0 Signed-off-by: Glenn Jocher * Update general.py Signed-off-by: Glenn Jocher * Update general.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/general.py | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/utils/general.py b/utils/general.py index ba6d9e165901..25a1a1456009 100755 --- a/utils/general.py +++ b/utils/general.py @@ -217,20 +217,17 @@ def print_args(args: Optional[dict] = None, show_file=True, show_func=False): def init_seeds(seed=0, deterministic=False): # Initialize random number generator (RNG) seeds https://pytorch.org/docs/stable/notes/randomness.html - # cudnn seed 0 settings are slower and more reproducible, else faster and less reproducible - import torch.backends.cudnn as cudnn - - if deterministic and check_version(torch.__version__, '1.12.0'): # https://github.com/ultralytics/yolov5/pull/8213 - torch.use_deterministic_algorithms(True) - os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8' - os.environ['PYTHONHASHSEED'] = str(seed) - random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) - cudnn.benchmark, cudnn.deterministic = (False, True) if seed == 0 else (True, False) torch.cuda.manual_seed(seed) torch.cuda.manual_seed_all(seed) # for Multi-GPU, exception safe + torch.backends.cudnn.benchmark = True # for faster training + if deterministic and check_version(torch.__version__, '1.12.0'): # https://github.com/ultralytics/yolov5/pull/8213 + torch.use_deterministic_algorithms(True) + torch.backends.cudnn.deterministic = True + os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8' + os.environ['PYTHONHASHSEED'] = str(seed) def intersect_dicts(da, db, exclude=()): From 15e82d296720d4be344bf42a34d60ffd57b3eb28 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 2 Sep 2022 16:24:30 +0200 Subject: [PATCH 1398/1976] Update `TryExcept(msg='...')`` (#9261) --- utils/__init__.py | 4 ++-- utils/autoanchor.py | 2 +- utils/metrics.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/utils/__init__.py b/utils/__init__.py index 8bdffd47b3b2..46225c2208ce 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -9,7 +9,7 @@ class TryExcept(contextlib.ContextDecorator): # YOLOv5 TryExcept class. 
Usage: @TryExcept() decorator or 'with TryExcept():' context manager - def __init__(self, msg='default message here'): + def __init__(self, msg=''): self.msg = msg def __enter__(self): @@ -17,7 +17,7 @@ def __enter__(self): def __exit__(self, exc_type, value, traceback): if value: - print(f'{self.msg}: {value}') + print(f'{self.msg}{value}') return True diff --git a/utils/autoanchor.py b/utils/autoanchor.py index ac17c6cafc90..0b49ab3319c0 100644 --- a/utils/autoanchor.py +++ b/utils/autoanchor.py @@ -26,7 +26,7 @@ def check_anchor_order(m): m.anchors[:] = m.anchors.flip(0) -@TryExcept(f'{PREFIX}ERROR:') +@TryExcept(f'{PREFIX}ERROR: ') def check_anchors(dataset, model, thr=4.0, imgsz=640): # Check anchor fit to data, recompute if necessary m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect() diff --git a/utils/metrics.py b/utils/metrics.py index de1bf05b326b..ee7d33982cfc 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -186,7 +186,7 @@ def tp_fp(self): # fn = self.matrix.sum(0) - tp # false negatives (missed detections) return tp[:-1], fp[:-1] # remove background class - @TryExcept('WARNING: ConfusionMatrix plot failure') + @TryExcept('WARNING: ConfusionMatrix plot failure: ') def plot(self, normalize=True, save_dir='', names=()): import seaborn as sn From 5cb9fe612a215e0b7f6d99bf39e91cc52ab13c53 Mon Sep 17 00:00:00 2001 From: Victor Sonck Date: Sat, 3 Sep 2022 20:49:25 +0200 Subject: [PATCH 1399/1976] Make sure best.pt model file is preserved ClearML (#9265) * Make sure best.pt model file is preserved ClearML * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/loggers/__init__.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 1aa8427f9127..3aee35844f52 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -233,7 +233,9 @@ def on_train_end(self, last, best, epoch, results): self.wandb.finish_run() if self.clearml and not self.opt.evolve: - self.clearml.task.update_output_model(model_path=str(best if best.exists() else last), name='Best Model') + self.clearml.task.update_output_model(model_path=str(best if best.exists() else last), + name='Best Model', + auto_delete_file=False) def on_params_update(self, params: dict): # Update hyperparams or configs of the experiment From 63ecce60eab055bd5fec3223ee2b8d8a3d099349 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 4 Sep 2022 01:33:38 +0200 Subject: [PATCH 1400/1976] DetectMultiBackend improvements (#9269) * Update common.py Signed-off-by: Glenn Jocher * Update common.py Signed-off-by: Glenn Jocher * Update common.py Signed-off-by: Glenn Jocher * Update common.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- models/common.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/models/common.py b/models/common.py index d308244c4a44..2e5d5a198e33 100644 --- a/models/common.py +++ b/models/common.py @@ -354,6 +354,7 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, import onnxruntime providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider'] session = onnxruntime.InferenceSession(w, providers=providers) + output_names = [x.name for x in session.get_outputs()] meta = session.get_modelmeta().custom_metadata_map # metadata if 'stride' in 
meta: stride, names = int(meta['stride']), eval(meta['names']) @@ -372,9 +373,7 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, batch_size = batch_dim.get_length() executable_network = ie.compile_model(network, device_name="CPU") # device_name="MYRIAD" for Intel NCS2 output_layer = next(iter(executable_network.outputs)) - meta = Path(w).with_suffix('.yaml') - if meta.exists(): - stride, names = self._load_metadata(meta) # load metadata + stride, names = self._load_metadata(Path(w).with_suffix('.yaml')) # load metadata elif engine: # TensorRT LOGGER.info(f'Loading {w} for TensorRT inference...') import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download @@ -476,7 +475,7 @@ def forward(self, im, augment=False, visualize=False, val=False): y = self.net.forward() elif self.onnx: # ONNX Runtime im = im.cpu().numpy() # torch to numpy - y = self.session.run([self.session.get_outputs()[0].name], {self.session.get_inputs()[0].name: im})[0] + y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im})[0] elif self.xml: # OpenVINO im = im.cpu().numpy() # FP32 y = self.executable_network([im])[self.output_layer] @@ -524,7 +523,7 @@ def forward(self, im, augment=False, visualize=False, val=False): y[..., :4] *= [w, h, w, h] # xywh normalized to pixels if isinstance(y, np.ndarray): - y = torch.tensor(y, device=self.device) + y = torch.from_numpy(y).to(self.device) return (y, []) if val else y def warmup(self, imgsz=(1, 3, 640, 640)): @@ -548,10 +547,12 @@ def _model_type(p='path/to/model.pt'): return pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs @staticmethod - def _load_metadata(f='path/to/meta.yaml'): + def _load_metadata(f=Path('path/to/meta.yaml')): # Load metadata from meta.yaml if it exists - d = yaml_load(f) - return d['stride'], d['names'] # assign stride, names + if f.exists(): + d = yaml_load(f) + return d['stride'], d['names'] # assign stride, names + return None, None class AutoShape(nn.Module): From 96c3c7f71d6af51819c270e2752603665680ced7 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 4 Sep 2022 14:01:43 +0200 Subject: [PATCH 1401/1976] Update DetectMultiBackend for tuple outputs (#9274) Update --- models/common.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/models/common.py b/models/common.py index 2e5d5a198e33..5c82b18f102c 100644 --- a/models/common.py +++ b/models/common.py @@ -465,17 +465,15 @@ def forward(self, im, augment=False, visualize=False, val=False): if self.pt: # PyTorch y = self.model(im, augment=augment, visualize=visualize) if augment or visualize else self.model(im) - if isinstance(y, tuple): - y = y[0] elif self.jit: # TorchScript - y = self.model(im)[0] + y = self.model(im) elif self.dnn: # ONNX OpenCV DNN im = im.cpu().numpy() # torch to numpy self.net.setInput(im) y = self.net.forward() elif self.onnx: # ONNX Runtime im = im.cpu().numpy() # torch to numpy - y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im})[0] + y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im}) elif self.xml: # OpenVINO im = im.cpu().numpy() # FP32 y = self.executable_network([im])[self.output_layer] @@ -522,6 +520,8 @@ def forward(self, im, augment=False, visualize=False, val=False): y = (y.astype(np.float32) - zero_point) * scale # re-scale y[..., :4] *= [w, h, w, h] # xywh normalized to pixels + if isinstance(y, (list, tuple)): + y = y[0] if isinstance(y, np.ndarray): y = 
torch.from_numpy(y).to(self.device) return (y, []) if val else y From 7aa263c5f2f526472435babf86ddd33eed1dbd78 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 4 Sep 2022 15:39:57 +0200 Subject: [PATCH 1402/1976] Update DetectMultiBackend for tuple outputs 2 (#9275) * Update DetectMultiBackend for tuple outputs 2 Signed-off-by: Glenn Jocher * Update * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update * Update * Update Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- models/common.py | 12 +++++++----- utils/general.py | 3 +++ val.py | 4 ++-- 3 files changed, 12 insertions(+), 7 deletions(-) diff --git a/models/common.py b/models/common.py index 5c82b18f102c..7ac3a4a29672 100644 --- a/models/common.py +++ b/models/common.py @@ -457,7 +457,7 @@ def wrap_frozen_graph(gd, inputs, outputs): self.__dict__.update(locals()) # assign all variables to self - def forward(self, im, augment=False, visualize=False, val=False): + def forward(self, im, augment=False, visualize=False): # YOLOv5 MultiBackend inference b, ch, h, w = im.shape # batch, channel, height, width if self.fp16 and im.dtype != torch.float16: @@ -521,10 +521,12 @@ def forward(self, im, augment=False, visualize=False, val=False): y[..., :4] *= [w, h, w, h] # xywh normalized to pixels if isinstance(y, (list, tuple)): - y = y[0] - if isinstance(y, np.ndarray): - y = torch.from_numpy(y).to(self.device) - return (y, []) if val else y + return self.from_numpy(y[0]) if len(y) == 1 else [self.from_numpy(x) for x in y] + else: + return self.from_numpy(y) + + def from_numpy(self, x): + return torch.from_numpy(x).to(self.device) if isinstance(x, np.ndarray) else x def warmup(self, imgsz=(1, 3, 640, 640)): # Warmup model by running inference once diff --git a/utils/general.py b/utils/general.py index 25a1a1456009..cae63fd9dd21 100755 --- a/utils/general.py +++ b/utils/general.py @@ -813,6 +813,9 @@ def non_max_suppression(prediction, list of detections, on (n,6) tensor per image [xyxy, conf, cls] """ + if isinstance(prediction, (list, tuple)): # YOLOv5 model in validation model, output = (inference_out, loss_out) + prediction = prediction[0] # select only inference output + bs = prediction.shape[0] # batch size nc = prediction.shape[2] - 5 # number of classes xc = prediction[..., 4] > conf_thres # candidates diff --git a/val.py b/val.py index 58b9c9e1bec0..5427ee7b3619 100644 --- a/val.py +++ b/val.py @@ -204,11 +204,11 @@ def run( # Inference with dt[1]: - out, train_out = model(im) if training else model(im, augment=augment, val=True) # inference, loss outputs + out, train_out = model(im) if compute_loss else (model(im, augment=augment), None) # Loss if compute_loss: - loss += compute_loss([x.float() for x in train_out], targets)[1] # box, obj, cls + loss += compute_loss(train_out, targets)[1] # box, obj, cls # NMS targets[:, 2:] *= torch.tensor((width, height, width, height), device=device) # to pixels From e45d335bbc4a891a2a9f49311f4448e252d3d88f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 4 Sep 2022 16:35:16 +0200 Subject: [PATCH 1403/1976] Update benchmarks CI with `--hard-fail` min metric floor (#9276) * Update benchmarks CI with min metric floor Signed-off-by: Glenn Jocher * Update ci-testing.yml Signed-off-by: Glenn Jocher * Update benchmarks.py Signed-off-by: Glenn Jocher * Update benchmarks.py Signed-off-by: Glenn Jocher * Update benchmarks.py Signed-off-by: Glenn Jocher * Update 
benchmarks.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- .github/workflows/ci-testing.yml | 2 +- utils/benchmarks.py | 8 ++++++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 4ef930c61233..6fb277676959 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -39,7 +39,7 @@ jobs: pip list - name: Run benchmarks run: | - python utils/benchmarks.py --weights ${{ matrix.model }}.pt --img 320 --hard-fail + python utils/benchmarks.py --weights ${{ matrix.model }}.pt --img 320 --hard-fail 0.29 Tests: timeout-minutes: 60 diff --git a/utils/benchmarks.py b/utils/benchmarks.py index d412653c866f..d5f4c1d61fbe 100644 --- a/utils/benchmarks.py +++ b/utils/benchmarks.py @@ -92,10 +92,14 @@ def run( LOGGER.info('\n') parse_opt() notebook_init() # print system info - c = ['Format', 'Size (MB)', 'mAP@0.5:0.95', 'Inference time (ms)'] if map else ['Format', 'Export', '', ''] + c = ['Format', 'Size (MB)', 'mAP50-95', 'Inference time (ms)'] if map else ['Format', 'Export', '', ''] py = pd.DataFrame(y, columns=c) LOGGER.info(f'\nBenchmarks complete ({time.time() - t:.2f}s)') LOGGER.info(str(py if map else py.iloc[:, :2])) + if hard_fail and isinstance(hard_fail, str): + metrics = py['mAP50-95'].array # values to compare to floor + floor = eval(hard_fail) # minimum metric floor to pass, i.e. = 0.29 mAP for YOLOv5n + assert all(x > floor for x in metrics if pd.notna(x)), f'HARD FAIL: mAP50-95 < floor {floor}' return py @@ -141,7 +145,7 @@ def parse_opt(): parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') parser.add_argument('--test', action='store_true', help='test exports only') parser.add_argument('--pt-only', action='store_true', help='test PyTorch only') - parser.add_argument('--hard-fail', action='store_true', help='throw error on benchmark failure') + parser.add_argument('--hard-fail', nargs='?', const=True, default=False, help='Exception on error or < min metric') opt = parser.parse_args() opt.data = check_yaml(opt.data) # check YAML print_args(vars(opt)) From 1aea74cddbc78e7f79dac07090cb157dfc24dbcc Mon Sep 17 00:00:00 2001 From: VELC Date: Sun, 4 Sep 2022 17:15:53 +0200 Subject: [PATCH 1404/1976] Add new `--vid-stride` inference parameter for videos (#9256) * fps feature/skip frame added * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * predict.py updates * Update dataloaders.py Signed-off-by: Glenn Jocher * Update dataloaders.py Signed-off-by: Glenn Jocher * remove unused attribute Signed-off-by: Glenn Jocher * Cleanup Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update predict.py Signed-off-by: Glenn Jocher * Update detect.py Signed-off-by: Glenn Jocher * Update dataloaders.py Signed-off-by: Glenn Jocher * Rename skip_frame to vid_stride * cleanup * cleanup2 Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- classify/predict.py | 6 ++++-- detect.py | 6 ++++-- utils/dataloaders.py | 15 +++++++++------ 3 files changed, 17 insertions(+), 10 deletions(-) diff --git a/classify/predict.py b/classify/predict.py index 76115c75029f..701b5b1ac92d 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -66,6 +66,7 @@ def run( exist_ok=False, # existing project/name ok, do not increment 
half=False, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference + vid_stride=1, # video frame-rate stride ): source = str(source) save_img = not nosave and not source.endswith('.txt') # save inference images @@ -88,10 +89,10 @@ def run( # Dataloader if webcam: view_img = check_imshow() - dataset = LoadStreams(source, img_size=imgsz, transforms=classify_transforms(imgsz[0])) + dataset = LoadStreams(source, img_size=imgsz, transforms=classify_transforms(imgsz[0]), vid_stride=vid_stride) bs = len(dataset) # batch_size else: - dataset = LoadImages(source, img_size=imgsz, transforms=classify_transforms(imgsz[0])) + dataset = LoadImages(source, img_size=imgsz, transforms=classify_transforms(imgsz[0]), vid_stride=vid_stride) bs = 1 # batch_size vid_path, vid_writer = [None] * bs, [None] * bs @@ -196,6 +197,7 @@ def parse_opt(): parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') + parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride') opt = parser.parse_args() opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand print_args(vars(opt)) diff --git a/detect.py b/detect.py index cf75d0f11c92..69a1bf13aac6 100644 --- a/detect.py +++ b/detect.py @@ -74,6 +74,7 @@ def run( hide_conf=False, # hide confidences half=False, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference + vid_stride=1, # video frame-rate stride ): source = str(source) save_img = not nosave and not source.endswith('.txt') # save inference images @@ -96,10 +97,10 @@ def run( # Dataloader if webcam: view_img = check_imshow() - dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt) + dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) bs = len(dataset) # batch_size else: - dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt) + dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) bs = 1 # batch_size vid_path, vid_writer = [None] * bs, [None] * bs @@ -236,6 +237,7 @@ def parse_opt(): parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences') parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') + parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride') opt = parser.parse_args() opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand print_args(vars(opt)) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 38ae3399ce26..c1ad1f1a4b83 100755 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -187,7 +187,7 @@ def __iter__(self): class LoadImages: # YOLOv5 image/video dataloader, i.e. 
`python detect.py --source image.jpg/vid.mp4` - def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None): + def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None, vid_stride=1): files = [] for p in sorted(path) if isinstance(path, (list, tuple)) else [path]: p = str(Path(p).resolve()) @@ -212,6 +212,7 @@ def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None): self.mode = 'image' self.auto = auto self.transforms = transforms # optional + self.vid_stride = vid_stride # video frame-rate stride if any(videos): self._new_video(videos[0]) # new video else: @@ -232,6 +233,7 @@ def __next__(self): # Read video self.mode = 'video' ret_val, im0 = self.cap.read() + self.cap.set(cv2.CAP_PROP_POS_FRAMES, self.vid_stride * (self.frame + 1)) # read at vid_stride while not ret_val: self.count += 1 self.cap.release() @@ -242,7 +244,7 @@ def __next__(self): ret_val, im0 = self.cap.read() self.frame += 1 - # im0 = self._cv2_rotate(im0) # for use if cv2 auto rotation is False + # im0 = self._cv2_rotate(im0) # for use if cv2 autorotation is False s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: ' else: @@ -265,7 +267,7 @@ def _new_video(self, path): # Create a new video capture object self.frame = 0 self.cap = cv2.VideoCapture(path) - self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT)) + self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT) / self.vid_stride) self.orientation = int(self.cap.get(cv2.CAP_PROP_ORIENTATION_META)) # rotation degrees # self.cap.set(cv2.CAP_PROP_ORIENTATION_AUTO, 0) # disable https://github.com/ultralytics/yolov5/issues/8493 @@ -285,11 +287,12 @@ def __len__(self): class LoadStreams: # YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams` - def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True, transforms=None): + def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True, transforms=None, vid_stride=1): torch.backends.cudnn.benchmark = True # faster for fixed-size inference self.mode = 'stream' self.img_size = img_size self.stride = stride + self.vid_stride = vid_stride # video frame-rate stride sources = Path(sources).read_text().rsplit() if Path(sources).is_file() else [sources] n = len(sources) self.sources = [clean_str(x) for x in sources] # clean source names for later @@ -329,11 +332,11 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True, tr def update(self, i, cap, stream): # Read stream `i` frames in daemon thread - n, f, read = 0, self.frames[i], 1 # frame number, frame array, inference every 'read' frame + n, f = 0, self.frames[i] # frame number, frame array while cap.isOpened() and n < f: n += 1 cap.grab() # .read() = .grab() followed by .retrieve() - if n % read == 0: + if n % self.vid_stride == 0: success, im = cap.retrieve() if success: self.imgs[i] = im From 32794c130bc0de9cbd1fe34819b7032138bbd81d Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 6 Sep 2022 19:00:26 +0300 Subject: [PATCH 1405/1976] [pre-commit.ci] pre-commit suggestions (#9295) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/executablebooks/mdformat: 0.7.14 → 0.7.16](https://github.com/executablebooks/mdformat/compare/0.7.14...0.7.16) - [github.com/asottile/yesqa: v1.3.0 → v1.4.0](https://github.com/asottile/yesqa/compare/v1.3.0...v1.4.0) - 
[github.com/PyCQA/flake8: 5.0.2 → 5.0.4](https://github.com/PyCQA/flake8/compare/5.0.2...5.0.4) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 43aca019feb1..ba8005535397 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -43,7 +43,7 @@ repos: name: YAPF formatting - repo: https://github.com/executablebooks/mdformat - rev: 0.7.14 + rev: 0.7.16 hooks: - id: mdformat name: MD formatting @@ -53,12 +53,12 @@ repos: exclude: "README.md|README_cn.md" - repo: https://github.com/asottile/yesqa - rev: v1.3.0 + rev: v1.4.0 hooks: - id: yesqa - repo: https://github.com/PyCQA/flake8 - rev: 5.0.2 + rev: 5.0.4 hooks: - id: flake8 name: PEP8 From 5a134e06530a8c24fdb9774c2c4ab0b513b08260 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 7 Sep 2022 10:11:30 +0300 Subject: [PATCH 1406/1976] Replace deprecated `np.int` with `int` (#9307) Per ``` /content/yolov5/utils/dataloaders.py:458: DeprecationWarning: `np.int` is a deprecated alias for the builtin `int`. To silence this warning, use `int` by itself. Doing this will not modify any behavior and is safe. When replacing `np.int`, you may wish to use e.g. `np.int64` or `np.int32` to specify the precision. If you wish to review your current use, check the release note link for additional information. Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations ``` Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/dataloaders.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index c1ad1f1a4b83..d8ef11fd94b4 100755 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -455,7 +455,7 @@ def __init__(self, self.im_files = list(cache.keys()) # update self.label_files = img2label_paths(cache.keys()) # update n = len(shapes) # number of images - bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index + bi = np.floor(np.arange(n) / batch_size).astype(int) # batch index nb = bi[-1] + 1 # number of batches self.batch = bi # batch index of image self.n = n @@ -497,7 +497,7 @@ def __init__(self, elif mini > 1: shapes[i] = [1, 1 / mini] - self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride + self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride # Cache images into RAM/disk for faster training (WARNING: large datasets may exceed system resources) self.ims = [None] * n @@ -867,7 +867,7 @@ def extract_boxes(path=DATASETS_DIR / 'coco128'): # from utils.dataloaders impo b = x[1:] * [w, h, w, h] # box # b[2:] = b[2:].max() # rectangle to square b[2:] = b[2:] * 1.2 + 3 # pad - b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int) + b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int) b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image b[[1, 3]] = np.clip(b[[1, 3]], 0, h) From 903b239f1338e7ad8b12dd8e4a3c53f4f362e07f Mon Sep 17 00:00:00 2001 From: Dhruv Nair Date: Wed, 7 Sep 2022 11:28:46 -0400 Subject: [PATCH 1407/1976] Comet Logging and Visualization Integration (#9232) * add comet to logger interface * add comet logger * add support for updated parameters * clean up offline logger creation * update callback args for comet logger * add comet optimizer * add optimizer config * add comet README * update 
tutorial notebook with Comet section * add option to log class level metrics * add support for class level metrics and confusion matrix * handle errors when adding files to artifacts * fix typo * clean resume workflow * updates for HPO * update comet README * fix typo in comet README * update code snippets in comet README * update comet links in tutorial * updated links * change optimizer batch size param and update comet README image * update comet section in tutorial * use prexisting cmd line flags to configure logger * update artifact upload/download flow * remove come remove comet logger specific cmd line args * move downloading weights into comet logger code * remove extra argparse * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * change checkpoint logging flow to follow offline logger * update resume flow * add comet logger to remote dataset property * update cmd line args in hpo * set types for integer/float env variables * update README * fix typo in README * default to always logging model predictions * Update tutorial.ipynb * Update train.py * Add Comet to Integrations table * Update README.md * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: Ayush Chaurasia Co-authored-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- README.md | 59 +-- train.py | 17 +- tutorial.ipynb | 39 +- utils/loggers/__init__.py | 77 +++- utils/loggers/comet/README.md | 256 +++++++++++ utils/loggers/comet/__init__.py | 496 ++++++++++++++++++++++ utils/loggers/comet/comet_utils.py | 150 +++++++ utils/loggers/comet/hpo.py | 118 +++++ utils/loggers/comet/optimizer_config.json | 209 +++++++++ val.py | 4 +- 10 files changed, 1376 insertions(+), 49 deletions(-) create mode 100644 utils/loggers/comet/README.md create mode 100644 utils/loggers/comet/__init__.py create mode 100644 utils/loggers/comet/comet_utils.py create mode 100644 utils/loggers/comet/hpo.py create mode 100644 utils/loggers/comet/optimizer_config.json diff --git a/README.md b/README.md index 1d6b4e153d82..7763d174f92b 100644 --- a/README.md +++ b/README.md @@ -160,46 +160,31 @@ python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 12
-## Environments
-
-Get started in seconds with our verified environments. Click each icon below for details.
-
-[environment icon links (HTML stripped in extraction)]
-
## Integrations
+
+[integration logo links updated to add Comet (HTML stripped in extraction)]
+
-|Deci ⭐ NEW|ClearML ⭐ NEW|Roboflow|Weights & Biases
-|:-:|:-:|:-:|:-:|
-|Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) |Automatically track and visualize all your YOLOv5 training runs in the cloud with [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme)
+|Comet ⭐ NEW|Deci ⭐ NEW|ClearML ⭐ NEW|Roboflow|Weights & Biases
+|:-:|:-:|:-:|:-:|:-:|
+|Visualize model metrics and predictions and upload models and datasets in realtime with [Comet](https://www.comet.com/site/?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration)|Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) |Automatically track and visualize all your YOLOv5 training runs in the cloud with [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme)

## Why YOLOv5

@@ -323,6 +308,28 @@ python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --inclu

+## Environments
+
+Get started in seconds with our verified environments. Click each icon below for details.
+
+[environment icon links re-added here (HTML stripped in extraction)]
+
##
We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible. Please see our [Contributing Guide](CONTRIBUTING.md) to get started, and fill out the [YOLOv5 Survey](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experiences. Thank you to all our contributors! diff --git a/train.py b/train.py index 29293aa612cf..e16c17c499f0 100644 --- a/train.py +++ b/train.py @@ -52,6 +52,7 @@ init_seeds, intersect_dicts, labels_to_class_weights, labels_to_image_weights, methods, one_cycle, print_args, print_mutation, strip_optimizer, yaml_save) from utils.loggers import Loggers +from utils.loggers.comet.comet_utils import check_comet_resume from utils.loggers.wandb.wandb_utils import check_wandb_resume from utils.loss import ComputeLoss from utils.metrics import fitness @@ -330,7 +331,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G' # (GB) pbar.set_description(('%11s' * 2 + '%11.4g' * 5) % (f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1])) - callbacks.run('on_train_batch_end', model, ni, imgs, targets, paths) + callbacks.run('on_train_batch_end', model, ni, imgs, targets, paths, list(mloss)) if callbacks.stop_training: return # end batch ------------------------------------------------------------------------------------------------ @@ -465,11 +466,11 @@ def parse_opt(known=False): parser.add_argument('--seed', type=int, default=0, help='Global training seed') parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify') - # Weights & Biases arguments - parser.add_argument('--entity', default=None, help='W&B: Entity') - parser.add_argument('--upload_dataset', nargs='?', const=True, default=False, help='W&B: Upload data, "val" option') - parser.add_argument('--bbox_interval', type=int, default=-1, help='W&B: Set bounding-box image logging interval') - parser.add_argument('--artifact_alias', type=str, default='latest', help='W&B: Version of dataset artifact to use') + # Logger arguments + parser.add_argument('--entity', default=None, help='Entity') + parser.add_argument('--upload_dataset', nargs='?', const=True, default=False, help='Upload data, "val" option') + parser.add_argument('--bbox_interval', type=int, default=-1, help='Set bounding-box image logging interval') + parser.add_argument('--artifact_alias', type=str, default='latest', help='Version of dataset artifact to use') return parser.parse_known_args()[0] if known else parser.parse_args() @@ -481,8 +482,8 @@ def main(opt, callbacks=Callbacks()): check_git_status() check_requirements() - # Resume - if opt.resume and not (check_wandb_resume(opt) or opt.evolve): # resume from specified or most recent last.pt + # Resume (from specified or most recent last.pt) + if opt.resume and not check_wandb_resume(opt) and not check_comet_resume(opt) or opt.evolve: last = Path(check_file(opt.resume) if isinstance(opt.resume, str) else get_latest_run()) opt_yaml = last.parent.parent / 'opt.yaml' # train options yaml opt_data = opt.data # original dataset diff --git a/tutorial.ipynb b/tutorial.ipynb index 12840063b1f1..957437b2be6d 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -413,7 +413,7 @@ "import utils\n", "display = utils.notebook_init() # checks" ], - "execution_count": 1, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -465,7 +465,7 @@ 
"!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images\n", "# display.Image(filename='runs/detect/exp/zidane.jpg', width=600)" ], - "execution_count": 2, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -535,7 +535,7 @@ "torch.hub.download_url_to_file('https://ultralytics.com/assets/coco2017val.zip', 'tmp.zip') # download (780M - 5000 images)\n", "!unzip -q tmp.zip -d ../datasets && rm tmp.zip # unzip" ], - "execution_count": 3, + "execution_count": null, "outputs": [ { "output_type": "display_data", @@ -566,7 +566,7 @@ "# Validate YOLOv5s on COCO val\n", "!python val.py --weights yolov5s.pt --data coco.yaml --img 640 --half" ], - "execution_count": 4, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -653,11 +653,14 @@ "cell_type": "code", "source": [ "#@title Select YOLOv5 🚀 logger {run: 'auto'}\n", - "logger = 'TensorBoard' #@param ['TensorBoard', 'ClearML', 'W&B']\n", + "logger = 'TensorBoard' #@param ['TensorBoard', 'Comet', 'ClearML', 'W&B']\n", "\n", "if logger == 'TensorBoard':\n", " %load_ext tensorboard\n", " %tensorboard --logdir runs/train\n", + "elif logger == 'Comet':\n", + " %pip install -q comet_ml\n", + " import comet_ml; comet_ml.init()\n", "elif logger == 'ClearML':\n", " %pip install -q clearml && clearml-init\n", "elif logger == 'W&B':\n", @@ -683,7 +686,7 @@ "# Train YOLOv5s on COCO128 for 3 epochs\n", "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache" ], - "execution_count": 5, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -857,6 +860,28 @@ "# 4. Visualize" ] }, + { + "cell_type": "markdown", + "source": [ + "## Comet Logging and Visualization 🌟 NEW\n", + "[Comet](https://www.comet.com/site/?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://www.comet.com/docs/v2/guides/comet-dashboard/code-panels/about-panels/?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", + "\n", + "Getting started is easy:\n", + "```shell\n", + "pip install comet_ml # 1. install\n", + "export COMET_API_KEY= # 2. paste API key\n", + "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n", + "```\n", + "\n", + "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://www.comet.com/docs/v2/?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration). 
Get started by trying out the Comet Colab Notebook:\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", + "\n", + "\"yolo-ui\"" + ], + "metadata": { + "id": "nWOsI5wJR1o3" + } + }, { "cell_type": "markdown", "source": [ @@ -1096,4 +1121,4 @@ "outputs": [] } ] -} \ No newline at end of file +} diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 3aee35844f52..f29debb76907 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -17,7 +17,7 @@ from utils.plots import plot_images, plot_labels, plot_results from utils.torch_utils import de_parallel -LOGGERS = ('csv', 'tb', 'wandb', 'clearml') # *.csv, TensorBoard, Weights & Biases, ClearML +LOGGERS = ('csv', 'tb', 'wandb', 'clearml', 'comet') # *.csv, TensorBoard, Weights & Biases, ClearML RANK = int(os.getenv('RANK', -1)) try: @@ -41,6 +41,18 @@ except (ImportError, AssertionError): clearml = None +try: + if RANK not in [0, -1]: + comet_ml = None + else: + import comet_ml + + assert hasattr(comet_ml, '__version__') # verify package import not local dir + from utils.loggers.comet import CometLogger + +except (ModuleNotFoundError, ImportError, AssertionError): + comet_ml = None + class Loggers(): # YOLOv5 Loggers class @@ -80,7 +92,10 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, prefix = colorstr('ClearML: ') s = f"{prefix}run 'pip install clearml' to automatically track, visualize and remotely train YOLOv5 🚀 in ClearML" self.logger.info(s) - + if not comet_ml: + prefix = colorstr('Comet: ') + s = f"{prefix}run 'pip install comet_ml' to automatically track and visualize YOLOv5 🚀 runs in Comet" + self.logger.info(s) # TensorBoard s = self.save_dir if 'tb' in self.include and not self.opt.evolve: @@ -107,6 +122,18 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, else: self.clearml = None + # Comet + if comet_ml and 'comet' in self.include: + if isinstance(self.opt.resume, str) and self.opt.resume.startswith("comet://"): + run_id = self.opt.resume.split("/")[-1] + self.comet_logger = CometLogger(self.opt, self.hyp, run_id=run_id) + + else: + self.comet_logger = CometLogger(self.opt, self.hyp) + + else: + self.comet_logger = None + @property def remote_dataset(self): # Get data_dict if custom dataset artifact link is provided @@ -115,12 +142,18 @@ def remote_dataset(self): data_dict = self.clearml.data_dict if self.wandb: data_dict = self.wandb.data_dict + if self.comet_logger: + data_dict = self.comet_logger.data_dict return data_dict def on_train_start(self): - # Callback runs on train start - pass + if self.comet_logger: + self.comet_logger.on_train_start() + + def on_pretrain_routine_start(self): + if self.comet_logger: + self.comet_logger.on_pretrain_routine_start() def on_pretrain_routine_end(self, labels, names): # Callback runs on pre-train routine end @@ -131,8 +164,11 @@ def on_pretrain_routine_end(self, labels, names): self.wandb.log({"Labels": [wandb.Image(str(x), caption=x.name) for x in paths]}) # if self.clearml: # pass # ClearML saves these images automatically using hooks + if self.comet_logger: + self.comet_logger.on_pretrain_routine_end(paths) - def on_train_batch_end(self, model, ni, imgs, targets, paths): + def on_train_batch_end(self, model, ni, imgs, targets, paths, vals): + log_dict = dict(zip(self.keys[0:3], vals)) # Callback runs on train batch end # ni: number integrated batches (since train 
start) if self.plots: @@ -148,11 +184,21 @@ def on_train_batch_end(self, model, ni, imgs, targets, paths): if self.clearml: self.clearml.log_debug_samples(files, title='Mosaics') + if self.comet_logger: + self.comet_logger.on_train_batch_end(log_dict, step=ni) + def on_train_epoch_end(self, epoch): # Callback runs on train epoch end if self.wandb: self.wandb.current_epoch = epoch + 1 + if self.comet_logger: + self.comet_logger.on_train_epoch_end(epoch) + + def on_val_start(self): + if self.comet_logger: + self.comet_logger.on_val_start() + def on_val_image_end(self, pred, predn, path, names, im): # Callback runs on val image end if self.wandb: @@ -160,7 +206,11 @@ def on_val_image_end(self, pred, predn, path, names, im): if self.clearml: self.clearml.log_image_with_boxes(path, pred, names, im) - def on_val_end(self): + def on_val_batch_end(self, batch_i, im, targets, paths, shapes, out): + if self.comet_logger: + self.comet_logger.on_val_batch_end(batch_i, im, targets, paths, shapes, out) + + def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix): # Callback runs on val end if self.wandb or self.clearml: files = sorted(self.save_dir.glob('val*.jpg')) @@ -169,6 +219,9 @@ def on_val_end(self): if self.clearml: self.clearml.log_debug_samples(files, title='Validation') + if self.comet_logger: + self.comet_logger.on_val_end(nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix) + def on_fit_epoch_end(self, vals, epoch, best_fitness, fi): # Callback runs at the end of each fit (train+val) epoch x = dict(zip(self.keys, vals)) @@ -199,6 +252,9 @@ def on_fit_epoch_end(self, vals, epoch, best_fitness, fi): self.clearml.current_epoch_logged_images = set() # reset epoch image limit self.clearml.current_epoch += 1 + if self.comet_logger: + self.comet_logger.on_fit_epoch_end(x, epoch=epoch) + def on_model_save(self, last, epoch, final_epoch, best_fitness, fi): # Callback runs on model save event if (epoch + 1) % self.opt.save_period == 0 and not final_epoch and self.opt.save_period != -1: @@ -209,6 +265,9 @@ def on_model_save(self, last, epoch, final_epoch, best_fitness, fi): model_name='Latest Model', auto_delete_file=False) + if self.comet_logger: + self.comet_logger.on_model_save(last, epoch, final_epoch, best_fitness, fi) + def on_train_end(self, last, best, epoch, results): # Callback runs on training end, i.e. saving best model if self.plots: @@ -237,10 +296,16 @@ def on_train_end(self, last, best, epoch, results): name='Best Model', auto_delete_file=False) + if self.comet_logger: + final_results = dict(zip(self.keys[3:10], results)) + self.comet_logger.on_train_end(files, self.save_dir, last, best, epoch, final_results) + def on_params_update(self, params: dict): # Update hyperparams or configs of the experiment if self.wandb: self.wandb.wandb_run.config.update(params, allow_val_change=True) + if self.comet_logger: + self.comet_logger.on_params_update(params) class GenericLogger: diff --git a/utils/loggers/comet/README.md b/utils/loggers/comet/README.md new file mode 100644 index 000000000000..7b0b8e0e2f09 --- /dev/null +++ b/utils/loggers/comet/README.md @@ -0,0 +1,256 @@ + + +# YOLOv5 with Comet + +This guide will cover how to use YOLOv5 with [Comet](https://www.comet.com/site/?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration) + +# About Comet + +Comet builds tools that help data scientists, engineers, and team leaders accelerate and optimize machine learning and deep learning models. 
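For orientation, what this integration automates is standard `comet_ml` experiment logging. A minimal standalone sketch of that workflow (illustrative values only; the actual logger this patch adds in `utils/loggers/comet/__init__.py` wraps these same SDK calls):

```python
import comet_ml

# Reads COMET_API_KEY / COMET_PROJECT_NAME from the environment or .comet.config
experiment = comet_ml.Experiment(project_name="yolov5")

# Log hyperparameters once, then metrics as training progresses
experiment.log_parameters({"lr0": 0.01, "batch_size": 16})  # illustrative values
for epoch in range(3):
    experiment.log_metrics({"train/box_loss": 0.05 - 0.01 * epoch}, epoch=epoch)

experiment.end()  # flush and close the experiment
```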
+
+Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://www.comet.com/examples/comet-example-yolov5?shareable=YcwMiJaZSXfcEXpGOHDD12vA1&ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration)!
+Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes!
+
+# Getting Started
+
+## Install Comet
+
+```shell
+pip install comet_ml
+```
+
+## Configure Comet Credentials
+
+There are two ways to configure Comet with YOLOv5.
+
+You can either set your credentials through environment variables
+
+**Environment Variables**
+
+```shell
+export COMET_API_KEY=<Your API Key>
+export COMET_PROJECT_NAME=<Your Comet Project Name> # This will default to 'yolov5'
+```
+
+Or create a `.comet.config` file in your working directory and set your credentials there.
+
+**Comet Configuration File**
+
+```
+[comet]
+api_key=<Your API Key>
+project_name=<Your Comet Project Name> # This will default to 'yolov5'
+```
+
+## Run the Training Script
+
+```shell
+# Train YOLOv5s on COCO128 for 5 epochs
+python train.py --img 640 --batch 16 --epochs 5 --data coco128.yaml --weights yolov5s.pt
+```
+
+That's it! Comet will automatically log your hyperparameters, command line arguments, training and validation metrics. You can visualize and analyze your runs in the Comet UI.
+
+[image: yolo-ui]
+
+# Try out an Example!
+Check out an example of a [completed run here](https://www.comet.com/examples/comet-example-yolov5/a0e29e0e9b984e4a822db2a62d0cb357?experiment-tab=chart&showOutliers=true&smoothing=0&transformY=smoothing&xAxis=step&ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration)
+
+Or better yet, try it out yourself in this Colab Notebook
+
+[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)
+
+# Log automatically
+
+By default, Comet will log the following items:
+
+## Metrics
+- Box Loss, Object Loss, Classification Loss for the training and validation data
+- mAP_0.5, mAP_0.5:0.95 metrics for the validation data.
+- Precision and Recall for the validation data
+
+## Parameters
+
+- Model Hyperparameters
+- All parameters passed through the command line options
+
+## Visualizations
+
+- Confusion Matrix of the model predictions on the validation data
+- Plots for the PR and F1 curves across all classes
+- Correlogram of the Class Labels
+
+# Configure Comet Logging
+
+Comet can be configured to log additional data either through command line flags passed to the training script
+or through environment variables.
+
+```shell
+export COMET_MODE=online # Set whether to run Comet in 'online' or 'offline' mode. Defaults to online
+export COMET_MODEL_NAME=<your model name> # Set the name for the saved model. Defaults to yolov5
+export COMET_LOG_CONFUSION_MATRIX=false # Set to disable logging a Comet Confusion Matrix. Defaults to true
+export COMET_MAX_IMAGE_UPLOADS=<number of allowed images to upload> # Controls how many total image predictions to log to Comet. Defaults to 100.
+export COMET_LOG_PER_CLASS_METRICS=true # Set to log evaluation metrics for each detected class at the end of training. Defaults to false
+export COMET_DEFAULT_CHECKPOINT_FILENAME=<your checkpoint filename> # Set this if you would like to resume training from a different checkpoint. Defaults to 'last.pt'
+export COMET_LOG_BATCH_LEVEL_METRICS=true # Set this if you would like to log training metrics at the batch level. Defaults to false.
+export COMET_LOG_PREDICTIONS=true # Set this to false to disable logging model predictions
+```
+
+## Logging Checkpoints with Comet
+
+Logging Models to Comet is disabled by default. To enable it, pass the `save-period` argument to the training script. This will save the
+logged checkpoints to Comet based on the interval value provided by `save-period`.
+
+```shell
+python train.py \
+--img 640 \
+--batch 16 \
+--epochs 5 \
+--data coco128.yaml \
+--weights yolov5s.pt \
+--save-period 1
+```
+
+## Logging Model Predictions
+
+By default, model predictions (images, ground truth labels and bounding boxes) will be logged to Comet.
+
+You can control the frequency of logged predictions and the associated images by passing the `bbox_interval` command line argument. Predictions can be visualized using Comet's Object Detection Custom Panel. This frequency corresponds to every Nth batch of data per epoch. In the example below, we are logging every 2nd batch of data for each epoch.
+
+**Note:** The YOLOv5 validation dataloader will default to a batch size of 32, so you will have to set the logging frequency accordingly.
+
+Here is an [example project using the Panel](https://www.comet.com/examples/comet-example-yolov5?shareable=YcwMiJaZSXfcEXpGOHDD12vA1&ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration)
+
+```shell
+python train.py \
+--img 640 \
+--batch 16 \
+--epochs 5 \
+--data coco128.yaml \
+--weights yolov5s.pt \
+--bbox_interval 2
+```
+
+### Controlling the number of Prediction Images logged to Comet
+
+When logging predictions from YOLOv5, Comet will log the images associated with each set of predictions. By default a maximum of 100 validation images are logged. You can increase or decrease this number using the `COMET_MAX_IMAGE_UPLOADS` environment variable.
+
+```shell
+env COMET_MAX_IMAGE_UPLOADS=200 python train.py \
+--img 640 \
+--batch 16 \
+--epochs 5 \
+--data coco128.yaml \
+--weights yolov5s.pt \
+--bbox_interval 1
+```
+
+### Logging Class Level Metrics
+
+Use the `COMET_LOG_PER_CLASS_METRICS` environment variable to log mAP, precision, recall, f1 for each class.
+
+```shell
+env COMET_LOG_PER_CLASS_METRICS=true python train.py \
+--img 640 \
+--batch 16 \
+--epochs 5 \
+--data coco128.yaml \
+--weights yolov5s.pt
+```
+
+## Uploading a Dataset to Comet Artifacts
+
+If you would like to store your data using [Comet Artifacts](https://www.comet.com/docs/v2/guides/data-management/using-artifacts/#learn-more?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration), you can do so using the `upload_dataset` flag.
+
+The dataset must be organized in the way described in the [YOLOv5 documentation](https://docs.ultralytics.com/tutorials/train-custom-datasets/#3-organize-directories). The dataset config `yaml` file must follow the same format as that of the `coco128.yaml` file.
+
+```shell
+python train.py \
+--img 640 \
+--batch 16 \
+--epochs 5 \
+--data coco128.yaml \
+--weights yolov5s.pt \
+--upload_dataset
+```
+
+You can find the uploaded dataset in the Artifacts tab in your Comet Workspace.
+[image: artifact-1]
+
+You can preview the data directly in the Comet UI.
+[image: artifact-2]
+
+Artifacts are versioned and also support adding metadata about the dataset. Comet will automatically log the metadata from your dataset `yaml` file.
+[image: artifact-3]
+
+### Using a saved Artifact
+
+If you would like to use a dataset from Comet Artifacts, set the `path` variable in your dataset `yaml` file to point to the following Artifact resource URL.
+
+```
+# contents of artifact.yaml file
+path: "comet://<workspace name>/<artifact name>:<artifact version or alias>"
+```
+Then pass this file to your training script in the following way:
+
+```shell
+python train.py \
+--img 640 \
+--batch 16 \
+--epochs 5 \
+--data artifact.yaml \
+--weights yolov5s.pt
+```
+
+Artifacts also allow you to track the lineage of data as it flows through your Experimentation workflow. Here you can see a graph that shows you all the experiments that have used your uploaded dataset.
+[image: artifact-4]
+
+## Resuming a Training Run
+
+If your training run is interrupted for any reason, e.g. disrupted internet connection, you can resume the run using the `resume` flag and the Comet Run Path.
+
+The Run Path has the following format `comet://<your workspace name>/<your project name>/<experiment id>`.
+
+This will restore the run to its state before the interruption, which includes restoring the model from a checkpoint, restoring all hyperparameters and training arguments and downloading Comet dataset Artifacts if they were used in the original run. The resumed run will continue logging to the existing Experiment in the Comet UI.
+
+```shell
+python train.py \
+--resume "comet://<your run path>"
+```
+
+## Hyperparameter Search with the Comet Optimizer
+
+YOLOv5 is also integrated with Comet's Optimizer, making it simple to visualize hyperparameter sweeps in the Comet UI.
+
+### Configuring an Optimizer Sweep
+
+To configure the Comet Optimizer, you will have to create a JSON file with the information about the sweep. An example file has been provided in `utils/loggers/comet/optimizer_config.json`.
+
+```shell
+python utils/loggers/comet/hpo.py \
+ --comet_optimizer_config "utils/loggers/comet/optimizer_config.json"
+```
+
+The `hpo.py` script accepts the same arguments as `train.py`. If you wish to pass additional arguments to your sweep simply add them after the script.
+
+```shell
+python utils/loggers/comet/hpo.py \
+ --comet_optimizer_config "utils/loggers/comet/optimizer_config.json" \
+ --save-period 1 \
+ --bbox_interval 1
+```
+
+### Running a Sweep in Parallel
+
+```shell
+comet optimizer -j utils/loggers/comet/hpo.py \
+ utils/loggers/comet/optimizer_config.json
+```
+
+### Visualizing Results
+
+Comet provides a number of ways to visualize the results of your sweep.
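Each point in those visualizations corresponds to one trial of Comet's standard `Optimizer` loop, which is what `hpo.py` drives under the hood. A rough sketch of that pattern (parameter names and values are illustrative, not the actual `hpo.py` source):

```python
import comet_ml

# Sweep definition in the same spirit as utils/loggers/comet/optimizer_config.json
config = {
    "algorithm": "bayes",
    "spec": {"objective": "maximize", "metric": "metrics/mAP_0.5", "maxCombo": 10},
    "parameters": {"lr0": {"type": "float", "min": 1e-5, "max": 1e-1}},
}

opt = comet_ml.Optimizer(config)
for experiment in opt.get_experiments():  # one Experiment per suggested trial
    lr0 = experiment.get_parameter("lr0")
    # ...launch a YOLOv5 training run with this lr0; the objective metric
    # reported to Comet guides the next suggestion...
```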
Take a look at a [project with a completed sweep here](https://www.comet.com/examples/comet-example-yolov5/view/PrlArHGuuhDTKC1UuBmTtOSXD/panels?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration) + +hyperparameter-yolo \ No newline at end of file diff --git a/utils/loggers/comet/__init__.py b/utils/loggers/comet/__init__.py new file mode 100644 index 000000000000..b168687dd7b2 --- /dev/null +++ b/utils/loggers/comet/__init__.py @@ -0,0 +1,496 @@ +import glob +import json +import logging +import os +import sys +from pathlib import Path + +logger = logging.getLogger(__name__) + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[3] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH + +try: + import comet_ml + + # Project Configuration + config = comet_ml.config.get_config() + COMET_PROJECT_NAME = config.get_string(os.getenv("COMET_PROJECT_NAME"), "comet.project_name", default="yolov5") +except (ModuleNotFoundError, ImportError): + comet_ml = None + COMET_PROJECT_NAME = None + +import torch +import torchvision.transforms as T +import yaml + +from utils.dataloaders import img2label_paths +from utils.general import check_dataset, scale_coords, xywh2xyxy +from utils.metrics import box_iou + +COMET_PREFIX = "comet://" + +COMET_MODE = os.getenv("COMET_MODE", "online") + +# Model Saving Settings +COMET_MODEL_NAME = os.getenv("COMET_MODEL_NAME", "yolov5") + +# Dataset Artifact Settings +COMET_UPLOAD_DATASET = os.getenv("COMET_UPLOAD_DATASET", "false").lower() == "true" + +# Evaluation Settings +COMET_LOG_CONFUSION_MATRIX = os.getenv("COMET_LOG_CONFUSION_MATRIX", "true").lower() == "true" +COMET_LOG_PREDICTIONS = os.getenv("COMET_LOG_PREDICTIONS", "true").lower() == "true" +COMET_MAX_IMAGE_UPLOADS = int(os.getenv("COMET_MAX_IMAGE_UPLOADS", 100)) + +# Confusion Matrix Settings +CONF_THRES = float(os.getenv("CONF_THRES", 0.001)) +IOU_THRES = float(os.getenv("IOU_THRES", 0.6)) + +# Batch Logging Settings +COMET_LOG_BATCH_METRICS = os.getenv("COMET_LOG_BATCH_METRICS", "false").lower() == "true" +COMET_BATCH_LOGGING_INTERVAL = os.getenv("COMET_BATCH_LOGGING_INTERVAL", 1) +COMET_PREDICTION_LOGGING_INTERVAL = os.getenv("COMET_PREDICTION_LOGGING_INTERVAL", 1) +COMET_LOG_PER_CLASS_METRICS = os.getenv("COMET_LOG_PER_CLASS_METRICS", "false").lower() == "true" + +RANK = int(os.getenv("RANK", -1)) + +to_pil = T.ToPILImage() + + +class CometLogger: + """Log metrics, parameters, source code, models and much more + with Comet + """ + + def __init__(self, opt, hyp, run_id=None, job_type="Training", **experiment_kwargs) -> None: + self.job_type = job_type + self.opt = opt + self.hyp = hyp + + # Comet Flags + self.comet_mode = COMET_MODE + + self.save_model = opt.save_period > -1 + self.model_name = COMET_MODEL_NAME + + # Batch Logging Settings + self.log_batch_metrics = COMET_LOG_BATCH_METRICS + self.comet_log_batch_interval = COMET_BATCH_LOGGING_INTERVAL + + # Dataset Artifact Settings + self.upload_dataset = self.opt.upload_dataset if self.opt.upload_dataset else COMET_UPLOAD_DATASET + self.resume = self.opt.resume + + # Default parameters to pass to Experiment objects + self.default_experiment_kwargs = { + "log_code": False, + "log_env_gpu": True, + "log_env_cpu": True, + "project_name": COMET_PROJECT_NAME,} + self.default_experiment_kwargs.update(experiment_kwargs) + self.experiment = self._get_experiment(self.comet_mode, run_id) + + self.data_dict = self.check_dataset(self.opt.data) + self.class_names = 
self.data_dict["names"] + self.num_classes = self.data_dict["nc"] + + self.logged_images_count = 0 + self.max_images = COMET_MAX_IMAGE_UPLOADS + + if run_id is None: + self.experiment.log_other("Created from", "YOLOv5") + if not isinstance(self.experiment, comet_ml.OfflineExperiment): + workspace, project_name, experiment_id = self.experiment.url.split("/")[-3:] + self.experiment.log_other( + "Run Path", + f"{workspace}/{project_name}/{experiment_id}", + ) + self.log_parameters(vars(opt)) + self.log_parameters(self.opt.hyp) + self.log_asset_data( + self.opt.hyp, + name="hyperparameters.json", + metadata={"type": "hyp-config-file"}, + ) + self.log_asset( + f"{self.opt.save_dir}/opt.yaml", + metadata={"type": "opt-config-file"}, + ) + + self.comet_log_confusion_matrix = COMET_LOG_CONFUSION_MATRIX + + if hasattr(self.opt, "conf_thres"): + self.conf_thres = self.opt.conf_thres + else: + self.conf_thres = CONF_THRES + if hasattr(self.opt, "iou_thres"): + self.iou_thres = self.opt.iou_thres + else: + self.iou_thres = IOU_THRES + + self.comet_log_predictions = COMET_LOG_PREDICTIONS + if self.opt.bbox_interval == -1: + self.comet_log_prediction_interval = self.opt.epochs // 10 if self.opt.epochs < 10 else 1 + else: + self.comet_log_prediction_interval = self.opt.bbox_interval + + if self.comet_log_predictions: + self.metadata_dict = {} + + self.comet_log_per_class_metrics = COMET_LOG_PER_CLASS_METRICS + + self.experiment.log_others({ + "comet_mode": COMET_MODE, + "comet_max_image_uploads": COMET_MAX_IMAGE_UPLOADS, + "comet_log_per_class_metrics": COMET_LOG_PER_CLASS_METRICS, + "comet_log_batch_metrics": COMET_LOG_BATCH_METRICS, + "comet_log_confusion_matrix": COMET_LOG_CONFUSION_MATRIX, + "comet_model_name": COMET_MODEL_NAME,}) + + # Check if running the Experiment with the Comet Optimizer + if hasattr(self.opt, "comet_optimizer_id"): + self.experiment.log_other("optimizer_id", self.opt.comet_optimizer_id) + self.experiment.log_other("optimizer_objective", self.opt.comet_optimizer_objective) + self.experiment.log_other("optimizer_metric", self.opt.comet_optimizer_metric) + self.experiment.log_other("optimizer_parameters", json.dumps(self.hyp)) + + def _get_experiment(self, mode, experiment_id=None): + if mode == "offline": + if experiment_id is not None: + return comet_ml.ExistingOfflineExperiment( + previous_experiment=experiment_id, + **self.default_experiment_kwargs, + ) + + return comet_ml.OfflineExperiment(**self.default_experiment_kwargs,) + + else: + try: + if experiment_id is not None: + return comet_ml.ExistingExperiment( + previous_experiment=experiment_id, + **self.default_experiment_kwargs, + ) + + return comet_ml.Experiment(**self.default_experiment_kwargs) + + except ValueError: + logger.warning("COMET WARNING: " + "Comet credentials have not been set. " + "Comet will default to offline logging. 
" + "Please set your credentials to enable online logging.") + return self._get_experiment("offline", experiment_id) + + return + + def log_metrics(self, log_dict, **kwargs): + self.experiment.log_metrics(log_dict, **kwargs) + + def log_parameters(self, log_dict, **kwargs): + self.experiment.log_parameters(log_dict, **kwargs) + + def log_asset(self, asset_path, **kwargs): + self.experiment.log_asset(asset_path, **kwargs) + + def log_asset_data(self, asset, **kwargs): + self.experiment.log_asset_data(asset, **kwargs) + + def log_image(self, img, **kwargs): + self.experiment.log_image(img, **kwargs) + + def log_model(self, path, opt, epoch, fitness_score, best_model=False): + if not self.save_model: + return + + model_metadata = { + "fitness_score": fitness_score[-1], + "epochs_trained": epoch + 1, + "save_period": opt.save_period, + "total_epochs": opt.epochs,} + + model_files = glob.glob(f"{path}/*.pt") + for model_path in model_files: + name = Path(model_path).name + + self.experiment.log_model( + self.model_name, + file_or_folder=model_path, + file_name=name, + metadata=model_metadata, + overwrite=True, + ) + + def check_dataset(self, data_file): + with open(data_file) as f: + data_config = yaml.safe_load(f) + + if data_config['path'].startswith(COMET_PREFIX): + path = data_config['path'].replace(COMET_PREFIX, "") + data_dict = self.download_dataset_artifact(path) + + return data_dict + + self.log_asset(self.opt.data, metadata={"type": "data-config-file"}) + + return check_dataset(data_file) + + def log_predictions(self, image, labelsn, path, shape, predn): + if self.logged_images_count >= self.max_images: + return + detections = predn[predn[:, 4] > self.conf_thres] + iou = box_iou(labelsn[:, 1:], detections[:, :4]) + mask, _ = torch.where(iou > self.iou_thres) + if len(mask) == 0: + return + + filtered_detections = detections[mask] + filtered_labels = labelsn[mask] + + processed_image = (image * 255).to(torch.uint8) + + image_id = path.split("/")[-1].split(".")[0] + image_name = f"{image_id}_curr_epoch_{self.experiment.curr_epoch}" + self.log_image(to_pil(processed_image), name=image_name) + + metadata = [] + for cls, *xyxy in filtered_labels.tolist(): + metadata.append({ + "label": f"{self.class_names[int(cls)]}-gt", + "score": 100, + "box": { + "x": xyxy[0], + "y": xyxy[1], + "x2": xyxy[2], + "y2": xyxy[3]},}) + for *xyxy, conf, cls in filtered_detections.tolist(): + metadata.append({ + "label": f"{self.class_names[int(cls)]}", + "score": conf * 100, + "box": { + "x": xyxy[0], + "y": xyxy[1], + "x2": xyxy[2], + "y2": xyxy[3]},}) + + self.metadata_dict[image_name] = metadata + self.logged_images_count += 1 + + return + + def preprocess_prediction(self, image, labels, shape, pred): + nl, _ = labels.shape[0], pred.shape[0] + + # Predictions + if self.opt.single_cls: + pred[:, 5] = 0 + + predn = pred.clone() + scale_coords(image.shape[1:], predn[:, :4], shape[0], shape[1]) + + labelsn = None + if nl: + tbox = xywh2xyxy(labels[:, 1:5]) # target boxes + scale_coords(image.shape[1:], tbox, shape[0], shape[1]) # native-space labels + labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels + scale_coords(image.shape[1:], predn[:, :4], shape[0], shape[1]) # native-space pred + + return predn, labelsn + + def add_assets_to_artifact(self, artifact, path, asset_path, split): + img_paths = sorted(glob.glob(f"{asset_path}/*")) + label_paths = img2label_paths(img_paths) + + for image_file, label_file in zip(img_paths, label_paths): + image_logical_path, label_logical_path = map(lambda x: 
os.path.relpath(x, path), [image_file, label_file]) + + try: + artifact.add(image_file, logical_path=image_logical_path, metadata={"split": split}) + artifact.add(label_file, logical_path=label_logical_path, metadata={"split": split}) + except ValueError as e: + logger.error('COMET ERROR: Error adding file to Artifact. Skipping file.') + logger.error(f"COMET ERROR: {e}") + continue + + return artifact + + def upload_dataset_artifact(self): + dataset_name = self.data_dict.get("dataset_name", "yolov5-dataset") + path = str((ROOT / Path(self.data_dict["path"])).resolve()) + + metadata = self.data_dict.copy() + for key in ["train", "val", "test"]: + split_path = metadata.get(key) + if split_path is not None: + metadata[key] = split_path.replace(path, "") + + artifact = comet_ml.Artifact(name=dataset_name, artifact_type="dataset", metadata=metadata) + for key in metadata.keys(): + if key in ["train", "val", "test"]: + if isinstance(self.upload_dataset, str) and (key != self.upload_dataset): + continue + + asset_path = self.data_dict.get(key) + if asset_path is not None: + artifact = self.add_assets_to_artifact(artifact, path, asset_path, key) + + self.experiment.log_artifact(artifact) + + return + + def download_dataset_artifact(self, artifact_path): + logged_artifact = self.experiment.get_artifact(artifact_path) + artifact_save_dir = str(Path(self.opt.save_dir) / logged_artifact.name) + logged_artifact.download(artifact_save_dir) + + metadata = logged_artifact.metadata + data_dict = metadata.copy() + data_dict["path"] = artifact_save_dir + data_dict["names"] = {int(k): v for k, v in metadata.get("names").items()} + + data_dict = self.update_data_paths(data_dict) + return data_dict + + def update_data_paths(self, data_dict): + path = data_dict.get("path", "") + + for split in ["train", "val", "test"]: + if data_dict.get(split): + split_path = data_dict.get(split) + data_dict[split] = (f"{path}/{split_path}" if isinstance(split, str) else [ + f"{path}/{x}" for x in split_path]) + + return data_dict + + def on_pretrain_routine_end(self, paths): + if self.opt.resume: + return + + for path in paths: + self.log_asset(str(path)) + + if self.upload_dataset: + if not self.resume: + self.upload_dataset_artifact() + + return + + def on_train_start(self): + self.log_parameters(self.hyp) + + def on_train_epoch_start(self): + return + + def on_train_epoch_end(self, epoch): + self.experiment.curr_epoch = epoch + + return + + def on_train_batch_start(self): + return + + def on_train_batch_end(self, log_dict, step): + self.experiment.curr_step = step + if self.log_batch_metrics and (step % self.comet_log_batch_interval == 0): + self.log_metrics(log_dict, step=step) + + return + + def on_train_end(self, files, save_dir, last, best, epoch, results): + if self.comet_log_predictions: + curr_epoch = self.experiment.curr_epoch + self.experiment.log_asset_data(self.metadata_dict, "image-metadata.json", epoch=curr_epoch) + + for f in files: + self.log_asset(f, metadata={"epoch": epoch}) + self.log_asset(f"{save_dir}/results.csv", metadata={"epoch": epoch}) + + if not self.opt.evolve: + model_path = str(best if best.exists() else last) + name = Path(model_path).name + if self.save_model: + self.experiment.log_model( + self.model_name, + file_or_folder=model_path, + file_name=name, + overwrite=True, + ) + + # Check if running Experiment with Comet Optimizer + if hasattr(self.opt, 'comet_optimizer_id'): + metric = results.get(self.opt.comet_optimizer_metric) + self.experiment.log_other('optimizer_metric_value', metric) + 
+ self.finish_run() + + def on_val_start(self): + return + + def on_val_batch_start(self): + return + + def on_val_batch_end(self, batch_i, images, targets, paths, shapes, outputs): + if not (self.comet_log_predictions and ((batch_i + 1) % self.comet_log_prediction_interval == 0)): + return + + for si, pred in enumerate(outputs): + if len(pred) == 0: + continue + + image = images[si] + labels = targets[targets[:, 0] == si, 1:] + shape = shapes[si] + path = paths[si] + predn, labelsn = self.preprocess_prediction(image, labels, shape, pred) + if labelsn is not None: + self.log_predictions(image, labelsn, path, shape, predn) + + return + + def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix): + if self.comet_log_per_class_metrics: + if self.num_classes > 1: + for i, c in enumerate(ap_class): + class_name = self.class_names[c] + self.experiment.log_metrics( + { + 'mAP@.5': ap50[i], + 'mAP@.5:.95': ap[i], + 'precision': p[i], + 'recall': r[i], + 'f1': f1[i], + 'true_positives': tp[i], + 'false_positives': fp[i], + 'support': nt[c]}, + prefix=class_name) + + if self.comet_log_confusion_matrix: + epoch = self.experiment.curr_epoch + class_names = list(self.class_names.values()) + class_names.append("background") + num_classes = len(class_names) + + self.experiment.log_confusion_matrix( + matrix=confusion_matrix.matrix, + max_categories=num_classes, + labels=class_names, + epoch=epoch, + column_label='Actual Category', + row_label='Predicted Category', + file_name=f"confusion-matrix-epoch-{epoch}.json", + ) + + def on_fit_epoch_end(self, result, epoch): + self.log_metrics(result, epoch=epoch) + + def on_model_save(self, last, epoch, final_epoch, best_fitness, fi): + if ((epoch + 1) % self.opt.save_period == 0 and not final_epoch) and self.opt.save_period != -1: + self.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi) + + def on_params_update(self, params): + self.log_parameters(params) + + def finish_run(self): + self.experiment.end() diff --git a/utils/loggers/comet/comet_utils.py b/utils/loggers/comet/comet_utils.py new file mode 100644 index 000000000000..3cbd45156b57 --- /dev/null +++ b/utils/loggers/comet/comet_utils.py @@ -0,0 +1,150 @@ +import logging +import os +from urllib.parse import urlparse + +try: + import comet_ml +except (ModuleNotFoundError, ImportError): + comet_ml = None + +import yaml + +logger = logging.getLogger(__name__) + +COMET_PREFIX = "comet://" +COMET_MODEL_NAME = os.getenv("COMET_MODEL_NAME", "yolov5") +COMET_DEFAULT_CHECKPOINT_FILENAME = os.getenv("COMET_DEFAULT_CHECKPOINT_FILENAME", "last.pt") + + +def download_model_checkpoint(opt, experiment): + model_dir = f"{opt.project}/{experiment.name}" + os.makedirs(model_dir, exist_ok=True) + + model_name = COMET_MODEL_NAME + model_asset_list = experiment.get_model_asset_list(model_name) + + if len(model_asset_list) == 0: + logger.error(f"COMET ERROR: No checkpoints found for model name : {model_name}") + return + + model_asset_list = sorted( + model_asset_list, + key=lambda x: x["step"], + reverse=True, + ) + logged_checkpoint_map = {asset["fileName"]: asset["assetId"] for asset in model_asset_list} + + resource_url = urlparse(opt.weights) + checkpoint_filename = resource_url.query + + if checkpoint_filename: + asset_id = logged_checkpoint_map.get(checkpoint_filename) + else: + asset_id = logged_checkpoint_map.get(COMET_DEFAULT_CHECKPOINT_FILENAME) + checkpoint_filename = COMET_DEFAULT_CHECKPOINT_FILENAME + + if asset_id is None: + logger.error(f"COMET ERROR: Checkpoint 
{checkpoint_filename} not found in the given Experiment") + return + + try: + logger.info(f"COMET INFO: Downloading checkpoint {checkpoint_filename}") + asset_filename = checkpoint_filename + + model_binary = experiment.get_asset(asset_id, return_type="binary", stream=False) + model_download_path = f"{model_dir}/{asset_filename}" + with open(model_download_path, "wb") as f: + f.write(model_binary) + + opt.weights = model_download_path + + except Exception as e: + logger.warning("COMET WARNING: Unable to download checkpoint from Comet") + logger.exception(e) + + +def set_opt_parameters(opt, experiment): + """Update the opts Namespace with parameters + from Comet's ExistingExperiment when resuming a run + + Args: + opt (argparse.Namespace): Namespace of command line options + experiment (comet_ml.APIExperiment): Comet API Experiment object + """ + asset_list = experiment.get_asset_list() + resume_string = opt.resume + + for asset in asset_list: + if asset["fileName"] == "opt.yaml": + asset_id = asset["assetId"] + asset_binary = experiment.get_asset(asset_id, return_type="binary", stream=False) + opt_dict = yaml.safe_load(asset_binary) + for key, value in opt_dict.items(): + setattr(opt, key, value) + opt.resume = resume_string + + # Save hyperparameters to YAML file + # Necessary to pass checks in training script + save_dir = f"{opt.project}/{experiment.name}" + os.makedirs(save_dir, exist_ok=True) + + hyp_yaml_path = f"{save_dir}/hyp.yaml" + with open(hyp_yaml_path, "w") as f: + yaml.dump(opt.hyp, f) + opt.hyp = hyp_yaml_path + + +def check_comet_weights(opt): + """Downloads model weights from Comet and updates the + weights path to point to saved weights location + + Args: + opt (argparse.Namespace): Command Line arguments passed + to YOLOv5 training script + + Returns: + None/bool: Return True if weights are successfully downloaded + else return None + """ + if comet_ml is None: + return + + if isinstance(opt.weights, str): + if opt.weights.startswith(COMET_PREFIX): + api = comet_ml.API() + resource = urlparse(opt.weights) + experiment_path = f"{resource.netloc}{resource.path}" + experiment = api.get(experiment_path) + download_model_checkpoint(opt, experiment) + return True + + return None + + +def check_comet_resume(opt): + """Restores run parameters to its original state based on the model checkpoint + and logged Experiment parameters. 
+ + Args: + opt (argparse.Namespace): Command Line arguments passed + to YOLOv5 training script + + Returns: + None/bool: Return True if the run is restored successfully + else return None + """ + if comet_ml is None: + return + + if isinstance(opt.resume, str): + if opt.resume.startswith(COMET_PREFIX): + api = comet_ml.API() + resource = urlparse(opt.resume) + experiment_path = f"{resource.netloc}{resource.path}" + experiment = api.get(experiment_path) + set_opt_parameters(opt, experiment) + download_model_checkpoint(opt, experiment) + + return True + + return None diff --git a/utils/loggers/comet/hpo.py b/utils/loggers/comet/hpo.py new file mode 100644 index 000000000000..eab4df9978cf --- /dev/null +++ b/utils/loggers/comet/hpo.py @@ -0,0 +1,118 @@ +import argparse +import json +import logging +import os +import sys +from pathlib import Path + +import comet_ml + +logger = logging.getLogger(__name__) + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[3] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH + +from train import parse_opt, train +from utils.callbacks import Callbacks +from utils.general import increment_path +from utils.torch_utils import select_device + +# Project Configuration +config = comet_ml.config.get_config() +COMET_PROJECT_NAME = config.get_string(os.getenv("COMET_PROJECT_NAME"), "comet.project_name", default="yolov5") + + +def get_args(known=False): + parser = argparse.ArgumentParser() + parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='initial weights path') + parser.add_argument('--cfg', type=str, default='', help='model.yaml path') + parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') + parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path') + parser.add_argument('--epochs', type=int, default=300, help='total training epochs') + parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch') + parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)') + parser.add_argument('--rect', action='store_true', help='rectangular training') + parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training') + parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') + parser.add_argument('--noval', action='store_true', help='only validate final epoch') + parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor') + parser.add_argument('--noplots', action='store_true', help='save no plot files') + parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations') + parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') + parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"') + parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training') + parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') + parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%') + parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class') + parser.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW'], default='SGD', help='optimizer') + parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode') + parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') + parser.add_argument('--project', default=ROOT / 'runs/train', help='save to project/name') + parser.add_argument('--name', default='exp', help='save to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--quad', action='store_true', help='quad dataloader') + parser.add_argument('--cos-lr', action='store_true', help='cosine LR scheduler') + parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon') + parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)') + parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2') + parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)') + parser.add_argument('--seed', type=int, default=0, help='Global training seed') + parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify') + + # Weights & Biases arguments + parser.add_argument('--entity', default=None, help='W&B: Entity') + parser.add_argument('--upload_dataset', nargs='?', const=True, default=False, help='W&B: Upload data, "val" option') + parser.add_argument('--bbox_interval', type=int, default=-1, help='W&B: Set bounding-box image logging interval') + parser.add_argument('--artifact_alias', type=str, default='latest', help='W&B: Version of dataset artifact to use') + + # Comet Arguments + parser.add_argument("--comet_optimizer_config", type=str, help="Comet: Path to a Comet Optimizer Config File.") + parser.add_argument("--comet_optimizer_id", type=str, help="Comet: ID of the Comet Optimizer sweep.") + parser.add_argument("--comet_optimizer_objective", type=str, help="Comet: Set to 'minimize' or 'maximize'.") + parser.add_argument("--comet_optimizer_metric", type=str, help="Comet: Metric to Optimize.") + parser.add_argument("--comet_optimizer_workers", + type=int, + default=1, + help="Comet: Number of Parallel Workers to use with the Comet Optimizer.") + + return parser.parse_known_args()[0] if known else parser.parse_args() + + +def run(parameters, opt): + hyp_dict = {k: v for k, v in parameters.items() if k not in ["epochs", "batch_size"]} + + opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve)) + opt.batch_size = parameters.get("batch_size") + opt.epochs = parameters.get("epochs") + + device = select_device(opt.device, batch_size=opt.batch_size) + train(hyp_dict, opt, device, callbacks=Callbacks()) + + +if __name__ == "__main__": + opt = get_args(known=True) + + opt.weights = str(opt.weights) + opt.cfg = str(opt.cfg) + opt.data = str(opt.data) + opt.project = str(opt.project) + + optimizer_id = os.getenv("COMET_OPTIMIZER_ID") + if optimizer_id is None: + with open(opt.comet_optimizer_config) as f: + optimizer_config = json.load(f) + optimizer = 
comet_ml.Optimizer(optimizer_config) + else: + optimizer = comet_ml.Optimizer(optimizer_id) + + opt.comet_optimizer_id = optimizer.id + status = optimizer.status() + + opt.comet_optimizer_objective = status["spec"]["objective"] + opt.comet_optimizer_metric = status["spec"]["metric"] + + logger.info("COMET INFO: Starting Hyperparameter Sweep") + for parameter in optimizer.get_parameters(): + run(parameter["parameters"], opt) diff --git a/utils/loggers/comet/optimizer_config.json b/utils/loggers/comet/optimizer_config.json new file mode 100644 index 000000000000..83ddddab6f20 --- /dev/null +++ b/utils/loggers/comet/optimizer_config.json @@ -0,0 +1,209 @@ +{ + "algorithm": "random", + "parameters": { + "anchor_t": { + "type": "discrete", + "values": [ + 2, + 8 + ] + }, + "batch_size": { + "type": "discrete", + "values": [ + 16, + 32, + 64 + ] + }, + "box": { + "type": "discrete", + "values": [ + 0.02, + 0.2 + ] + }, + "cls": { + "type": "discrete", + "values": [ + 0.2 + ] + }, + "cls_pw": { + "type": "discrete", + "values": [ + 0.5 + ] + }, + "copy_paste": { + "type": "discrete", + "values": [ + 1 + ] + }, + "degrees": { + "type": "discrete", + "values": [ + 0, + 45 + ] + }, + "epochs": { + "type": "discrete", + "values": [ + 5 + ] + }, + "fl_gamma": { + "type": "discrete", + "values": [ + 0 + ] + }, + "fliplr": { + "type": "discrete", + "values": [ + 0 + ] + }, + "flipud": { + "type": "discrete", + "values": [ + 0 + ] + }, + "hsv_h": { + "type": "discrete", + "values": [ + 0 + ] + }, + "hsv_s": { + "type": "discrete", + "values": [ + 0 + ] + }, + "hsv_v": { + "type": "discrete", + "values": [ + 0 + ] + }, + "iou_t": { + "type": "discrete", + "values": [ + 0.7 + ] + }, + "lr0": { + "type": "discrete", + "values": [ + 1e-05, + 0.1 + ] + }, + "lrf": { + "type": "discrete", + "values": [ + 0.01, + 1 + ] + }, + "mixup": { + "type": "discrete", + "values": [ + 1 + ] + }, + "momentum": { + "type": "discrete", + "values": [ + 0.6 + ] + }, + "mosaic": { + "type": "discrete", + "values": [ + 0 + ] + }, + "obj": { + "type": "discrete", + "values": [ + 0.2 + ] + }, + "obj_pw": { + "type": "discrete", + "values": [ + 0.5 + ] + }, + "optimizer": { + "type": "categorical", + "values": [ + "SGD", + "Adam", + "AdamW" + ] + }, + "perspective": { + "type": "discrete", + "values": [ + 0 + ] + }, + "scale": { + "type": "discrete", + "values": [ + 0 + ] + }, + "shear": { + "type": "discrete", + "values": [ + 0 + ] + }, + "translate": { + "type": "discrete", + "values": [ + 0 + ] + }, + "warmup_bias_lr": { + "type": "discrete", + "values": [ + 0, + 0.2 + ] + }, + "warmup_epochs": { + "type": "discrete", + "values": [ + 5 + ] + }, + "warmup_momentum": { + "type": "discrete", + "values": [ + 0, + 0.95 + ] + }, + "weight_decay": { + "type": "discrete", + "values": [ + 0, + 0.001 + ] + } + }, + "spec": { + "maxCombo": 0, + "metric": "metrics/mAP_0.5", + "objective": "maximize" + }, + "trials": 1 +} diff --git a/val.py b/val.py index 5427ee7b3619..665d92f9286d 100644 --- a/val.py +++ b/val.py @@ -259,7 +259,7 @@ def run( plot_images(im, targets, paths, save_dir / f'val_batch{batch_i}_labels.jpg', names) # labels plot_images(im, output_to_target(out), paths, save_dir / f'val_batch{batch_i}_pred.jpg', names) # pred - callbacks.run('on_val_batch_end') + callbacks.run('on_val_batch_end', batch_i, im, targets, paths, shapes, out) # Compute metrics stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)] # to numpy @@ -289,7 +289,7 @@ def run( # Plots if plots: confusion_matrix.plot(save_dir=save_dir, 
names=list(names.values())) - callbacks.run('on_val_end') + callbacks.run('on_val_end', nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix) # Save JSON if save_json and len(jdict): From 5f075eedf221852aab85b4d2b5d98289e13077b4 Mon Sep 17 00:00:00 2001 From: Dhruv Nair Date: Thu, 8 Sep 2022 11:17:14 -0400 Subject: [PATCH 1408/1976] Comet changes (#9328) * add link to comet tutorial from main README * fix prediction interval bug --- README.md | 1 + utils/loggers/comet/__init__.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 7763d174f92b..da8bf1dad862 100644 --- a/README.md +++ b/README.md @@ -157,6 +157,7 @@ python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 12 - [Roboflow for Datasets, Labeling, and Active Learning](https://github.com/ultralytics/yolov5/issues/4975)  🌟 NEW - [ClearML Logging](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) 🌟 NEW - [Deci Platform](https://github.com/ultralytics/yolov5/wiki/Deci-Platform) 🌟 NEW +- [Comet Logging](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet) 🌟 NEW
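Context for the "fix prediction interval bug" item above: the original ternary in utils/loggers/comet/__init__.py evaluates to self.opt.epochs // 10 == 0 whenever epochs < 10, so the `(batch_i + 1) % self.comet_log_prediction_interval` check in on_val_batch_end divides by zero on short runs. A minimal standalone sketch (illustration only, not repository code; epochs = 5 is an assumed example value):

epochs = 5
interval_old = epochs // 10 if epochs < 10 else 1  # -> 0; any `step % interval_old` raises ZeroDivisionError
interval_new = 1 if epochs < 10 else epochs // 10  # -> 1; short runs log predictions instead of crashing

The one-line diff below swaps the ternary branches accordingly.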
diff --git a/utils/loggers/comet/__init__.py b/utils/loggers/comet/__init__.py index b168687dd7b2..4ee86dd70d6e 100644 --- a/utils/loggers/comet/__init__.py +++ b/utils/loggers/comet/__init__.py @@ -133,7 +133,7 @@ def __init__(self, opt, hyp, run_id=None, job_type="Training", **experiment_kwar self.comet_log_predictions = COMET_LOG_PREDICTIONS if self.opt.bbox_interval == -1: - self.comet_log_prediction_interval = self.opt.epochs // 10 if self.opt.epochs < 10 else 1 + self.comet_log_prediction_interval = 1 if self.opt.epochs < 10 else self.opt.epochs // 10 else: self.comet_log_prediction_interval = self.opt.bbox_interval From 3cd66b1c3863a8524c6cc564029c29ac783f7310 Mon Sep 17 00:00:00 2001 From: robinned <78896580+robinned@users.noreply.github.com> Date: Thu, 8 Sep 2022 12:00:54 -0700 Subject: [PATCH 1409/1976] Train.py line 486 typo fix (#9330) fixed issue Signed-off-by: robinned <78896580+robinned@users.noreply.github.com> Signed-off-by: robinned <78896580+robinned@users.noreply.github.com> Co-authored-by: Ayush Chaurasia --- train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/train.py b/train.py index e16c17c499f0..4eff6e5d645a 100644 --- a/train.py +++ b/train.py @@ -483,7 +483,7 @@ def main(opt, callbacks=Callbacks()): check_requirements() # Resume (from specified or most recent last.pt) - if opt.resume and not check_wandb_resume(opt) and not check_comet_resume(opt) or opt.evolve: + if opt.resume and not check_wandb_resume(opt) and not check_comet_resume(opt) and not opt.evolve: last = Path(check_file(opt.resume) if isinstance(opt.resume, str) else get_latest_run()) opt_yaml = last.parent.parent / 'opt.yaml' # train options yaml opt_data = opt.data # original dataset From 8aa196ce08007aa1033b0e42931c247e1e491321 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E9=BB=84=E4=B8=8D=E7=9B=88?= <33193090+YellowAndGreen@users.noreply.github.com> Date: Sat, 10 Sep 2022 05:16:07 +0800 Subject: [PATCH 1410/1976] Add dilated conv support (#9347) * added dilate conv support * added dilate conv support * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update common.py * Update common.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- models/common.py | 16 +++++++++------- utils/torch_utils.py | 1 + 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/models/common.py b/models/common.py index 7ac3a4a29672..c30c8ee94777 100644 --- a/models/common.py +++ b/models/common.py @@ -28,18 +28,20 @@ from utils.torch_utils import copy_attr, smart_inference_mode -def autopad(k, p=None): # kernel, padding - # Pad to 'same' +def autopad(k, p=None, d=1): # kernel, padding, dilation + # Pad to 'same' shape outputs + if d > 1: + k = d * (k - 1) + 1 if isinstance(k, int) else [d * (x - 1) + 1 for x in k] # actual kernel-size if p is None: p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad return p class Conv(nn.Module): - # Standard convolution - def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups + # Standard convolution with args(ch_in, ch_out, kernel, stride, padding, groups, dilation, activation) + def __init__(self, c1, c2, k=1, s=1, p=None, g=1, d=1, act=True): super().__init__() - self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False) + self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p, d), groups=g, dilation=d, bias=False) self.bn = nn.BatchNorm2d(c2) self.act 
= nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity()) @@ -51,13 +53,13 @@ def forward_fuse(self, x): class DWConv(Conv): - # Depth-wise convolution class + # Depth-wise convolution def __init__(self, c1, c2, k=1, s=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups super().__init__(c1, c2, k, s, g=math.gcd(c1, c2), act=act) class DWConvTranspose2d(nn.ConvTranspose2d): - # Depth-wise transpose convolution class + # Depth-wise transpose convolution def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0): # ch_in, ch_out, kernel, stride, padding, padding_out super().__init__(c1, c2, k, s, p1, p2, groups=math.gcd(c1, c2)) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index abf0bbc19a98..8a3366ca3e27 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -251,6 +251,7 @@ def fuse_conv_and_bn(conv, bn): kernel_size=conv.kernel_size, stride=conv.stride, padding=conv.padding, + dilation=conv.dilation, groups=conv.groups, bias=True).requires_grad_(False).to(conv.weight.device) From 24bf9cceb406a7e380bdb9e100417318615a78a1 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 10 Sep 2022 11:11:56 +0300 Subject: [PATCH 1411/1976] Update `check_requirements()` single install (#9353) * Update `check_requirements()` single install Faster install and better conflict resolution with single installation Signed-off-by: Glenn Jocher * Update * Update * Update Signed-off-by: Glenn Jocher --- export.py | 12 ++++++------ models/common.py | 4 ++-- utils/general.py | 48 +++++++++++++++++++++++------------------------- val.py | 4 ++-- 4 files changed, 33 insertions(+), 35 deletions(-) diff --git a/export.py b/export.py index 4d0144af9efb..8fed4d3e3661 100644 --- a/export.py +++ b/export.py @@ -126,7 +126,7 @@ def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:' @try_export def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorstr('ONNX:')): # YOLOv5 ONNX export - check_requirements(('onnx',)) + check_requirements('onnx') import onnx LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...') @@ -182,7 +182,7 @@ def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorst @try_export def export_openvino(model, file, half, prefix=colorstr('OpenVINO:')): # YOLOv5 OpenVINO export - check_requirements(('openvino-dev',)) # requires openvino-dev: https://pypi.org/project/openvino-dev/ + check_requirements('openvino-dev') # requires openvino-dev: https://pypi.org/project/openvino-dev/ import openvino.inference_engine as ie LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...') @@ -198,7 +198,7 @@ def export_openvino(model, file, half, prefix=colorstr('OpenVINO:')): @try_export def export_coreml(model, im, file, int8, half, prefix=colorstr('CoreML:')): # YOLOv5 CoreML export - check_requirements(('coremltools',)) + check_requirements('coremltools') import coremltools as ct LOGGER.info(f'\n{prefix} starting export with coremltools {ct.__version__}...') @@ -226,7 +226,7 @@ def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose import tensorrt as trt except Exception: if platform.system() == 'Linux': - check_requirements(('nvidia-tensorrt',), cmds=('-U --index-url https://pypi.ngc.nvidia.com',)) + check_requirements('nvidia-tensorrt', cmds=['-U --index-url https://pypi.ngc.nvidia.com']) import tensorrt as trt if trt.__version__[0] == '7': # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012 @@ -405,7 +405,7 @@ def 
export_edgetpu(file, prefix=colorstr('Edge TPU:')): @try_export def export_tfjs(file, prefix=colorstr('TensorFlow.js:')): # YOLOv5 TensorFlow.js export - check_requirements(('tensorflowjs',)) + check_requirements('tensorflowjs') import re import tensorflowjs as tfjs @@ -516,7 +516,7 @@ def run( # TensorFlow Exports if any((saved_model, pb, tflite, edgetpu, tfjs)): if int8 or edgetpu: # TFLite --int8 bug https://github.com/ultralytics/yolov5/issues/5707 - check_requirements(('flatbuffers==1.12',)) # required before `import tensorflow` + check_requirements('flatbuffers==1.12') # required before `import tensorflow` assert not tflite or not tfjs, 'TFLite and TF.js models must be exported separately, please pass only one type.' assert not isinstance(model, ClassificationModel), 'ClassificationModel export to TF formats not yet supported.' f[5], model = export_saved_model(model.cpu(), diff --git a/models/common.py b/models/common.py index c30c8ee94777..0e01b60e81e5 100644 --- a/models/common.py +++ b/models/common.py @@ -347,7 +347,7 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, stride, names = int(d['stride']), d['names'] elif dnn: # ONNX OpenCV DNN LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...') - check_requirements(('opencv-python>=4.5.4',)) + check_requirements('opencv-python>=4.5.4') net = cv2.dnn.readNetFromONNX(w) elif onnx: # ONNX Runtime LOGGER.info(f'Loading {w} for ONNX Runtime inference...') @@ -362,7 +362,7 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, stride, names = int(meta['stride']), eval(meta['names']) elif xml: # OpenVINO LOGGER.info(f'Loading {w} for OpenVINO inference...') - check_requirements(('openvino',)) # requires openvino-dev: https://pypi.org/project/openvino-dev/ + check_requirements('openvino') # requires openvino-dev: https://pypi.org/project/openvino-dev/ from openvino.runtime import Core, Layout, get_batch ie = Core() if not Path(w).is_file(): # if not *.xml diff --git a/utils/general.py b/utils/general.py index cae63fd9dd21..629df32ebc54 100755 --- a/utils/general.py +++ b/utils/general.py @@ -342,39 +342,37 @@ def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=Fals @TryExcept() def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True, cmds=()): - # Check installed dependencies meet YOLOv5 requirements (pass *.txt file or list of packages) + # Check installed dependencies meet YOLOv5 requirements (pass *.txt file or list of packages or single package str) prefix = colorstr('red', 'bold', 'requirements:') check_python() # check python version - if isinstance(requirements, (str, Path)): # requirements.txt file - file = Path(requirements) + if isinstance(requirements, Path): # requirements.txt file + file = requirements assert file.exists(), f"{prefix} {file.resolve()} not found, check failed." 
with file.open() as f: requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(f) if x.name not in exclude] - else: # list or tuple of packages - requirements = [x for x in requirements if x not in exclude] + elif isinstance(requirements, str): + requirements = [requirements] - n = 0 # number of packages updates - for i, r in enumerate(requirements): + s = '' + n = 0 + for r in requirements: try: pkg.require(r) - except Exception: # DistributionNotFound or VersionConflict if requirements not met - s = f"{prefix} {r} not found and is required by YOLOv5" - if install and AUTOINSTALL: # check environment variable - LOGGER.info(f"{s}, attempting auto-update...") - try: - assert check_online(), f"'pip install {r}' skipped (offline)" - LOGGER.info(check_output(f'pip install "{r}" {cmds[i] if cmds else ""}', shell=True).decode()) - n += 1 - except Exception as e: - LOGGER.warning(f'{prefix} {e}') - else: - LOGGER.info(f'{s}. Please install and rerun your command.') - - if n: # if packages updated - source = file.resolve() if 'file' in locals() else requirements - s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \ - f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n" - LOGGER.info(s) + except (pkg.VersionConflict, pkg.DistributionNotFound): # exception if requirements not met + s += f'"{r}" ' + n += 1 + + if s and install and AUTOINSTALL: # check environment variable + LOGGER.info(f"{prefix} YOLOv5 requirements {s}not found, attempting AutoUpdate...") + try: + assert check_online(), "AutoUpdate skipped (offline)" + LOGGER.info(check_output(f'pip install {s} {" ".join(cmds) if cmds else ""}', shell=True).decode()) + source = file.resolve() if 'file' in locals() else requirements + s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \ + f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n" + LOGGER.info(s) + except Exception as e: + LOGGER.warning(f'{prefix} {e}') def check_img_size(imgsz, s=32, floor=0): diff --git a/val.py b/val.py index 665d92f9286d..fed5e21577e5 100644 --- a/val.py +++ b/val.py @@ -301,7 +301,7 @@ def run( json.dump(jdict, f) try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb - check_requirements(['pycocotools']) + check_requirements('pycocotools') from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval @@ -360,7 +360,7 @@ def parse_opt(): def main(opt): - check_requirements(requirements=ROOT / 'requirements.txt', exclude=('tensorboard', 'thop')) + check_requirements(exclude=('tensorboard', 'thop')) if opt.task in ('train', 'val', 'test'): # run normally if opt.conf_thres > 0.001: # https://github.com/ultralytics/yolov5/issues/1466 From e9ddc5b5274be1d795a28542159d7c9293efccea Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 10 Sep 2022 12:00:16 +0300 Subject: [PATCH 1412/1976] Update `check_requirements(args, cmds='')` (#9355) * Update `check_requirements(args, cmds='')` * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- export.py | 2 +- utils/general.py | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/export.py b/export.py index 8fed4d3e3661..cdf5dcddd07a 100644 --- a/export.py +++ b/export.py @@ -226,7 +226,7 @@ def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose import 
tensorrt as trt except Exception: if platform.system() == 'Linux': - check_requirements('nvidia-tensorrt', cmds=['-U --index-url https://pypi.ngc.nvidia.com']) + check_requirements('nvidia-tensorrt', cmds='-U --index-url https://pypi.ngc.nvidia.com') import tensorrt as trt if trt.__version__[0] == '7': # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012 diff --git a/utils/general.py b/utils/general.py index 629df32ebc54..187d2c6b2d4a 100755 --- a/utils/general.py +++ b/utils/general.py @@ -341,13 +341,13 @@ def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=Fals @TryExcept() -def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True, cmds=()): +def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True, cmds=''): # Check installed dependencies meet YOLOv5 requirements (pass *.txt file or list of packages or single package str) prefix = colorstr('red', 'bold', 'requirements:') check_python() # check python version if isinstance(requirements, Path): # requirements.txt file - file = requirements - assert file.exists(), f"{prefix} {file.resolve()} not found, check failed." + file = requirements.resolve() + assert file.exists(), f"{prefix} {file} not found, check failed." with file.open() as f: requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(f) if x.name not in exclude] elif isinstance(requirements, str): @@ -366,8 +366,8 @@ def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), insta LOGGER.info(f"{prefix} YOLOv5 requirements {s}not found, attempting AutoUpdate...") try: assert check_online(), "AutoUpdate skipped (offline)" - LOGGER.info(check_output(f'pip install {s} {" ".join(cmds) if cmds else ""}', shell=True).decode()) - source = file.resolve() if 'file' in locals() else requirements + LOGGER.info(check_output(f'pip install {s} {cmds}', shell=True).decode()) + source = file if 'file' in locals() else requirements s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \ f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n" LOGGER.info(s) From 57ef676af2358d70bd5902059531655789135510 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 10 Sep 2022 12:14:31 +0300 Subject: [PATCH 1413/1976] Update `check_requirements()` multiple string (#9356) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index 187d2c6b2d4a..33232efac9fd 100755 --- a/utils/general.py +++ b/utils/general.py @@ -363,7 +363,7 @@ def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), insta n += 1 if s and install and AUTOINSTALL: # check environment variable - LOGGER.info(f"{prefix} YOLOv5 requirements {s}not found, attempting AutoUpdate...") + LOGGER.info(f"{prefix} YOLOv5 requirement{'s' * (n > 1)} {s}not found, attempting AutoUpdate...") try: assert check_online(), "AutoUpdate skipped (offline)" LOGGER.info(check_output(f'pip install {s} {cmds}', shell=True).decode()) From e3e5122f82b0d1f24c11a90b2377fbb5a1673274 Mon Sep 17 00:00:00 2001 From: Katteria <39751846+kisaragychihaya@users.noreply.github.com> Date: Sat, 10 Sep 2022 17:20:46 +0800 Subject: [PATCH 1414/1976] Add PaddlePaddle export and inference (#9240) * Add PaddlePaddle Model Export Test on YOLOv5 Docker environment with paddlepaddle-gpu v2.2 Signed-off-by: Katteria 
<39751846+kisaragychihaya@users.noreply.github.com> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Cleanup Paddle Export Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update common.py Signed-off-by: Glenn Jocher * Update export.py Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update export.py Signed-off-by: Glenn Jocher * Update export.py Signed-off-by: Glenn Jocher * Update export.py Signed-off-by: Glenn Jocher * Use PyTorch2Paddle Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Paddle no longer requires ONNX Signed-off-by: Glenn Jocher * Update export.py Signed-off-by: Glenn Jocher * Update export.py Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update benchmarks.py Signed-off-by: Glenn Jocher * Add inference code of PaddlePaddle Signed-off-by: Katteria <39751846+kisaragychihaya@users.noreply.github.com> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update common.py Signed-off-by: Katteria <39751846+kisaragychihaya@users.noreply.github.com> * Update common.py Signed-off-by: Glenn Jocher * Add paddlepaddle-gpu install if cuda Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update common.py Signed-off-by: Glenn Jocher * Update common.py Signed-off-by: Glenn Jocher * Update common.py Signed-off-by: Glenn Jocher Signed-off-by: Katteria <39751846+kisaragychihaya@users.noreply.github.com> Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- export.py | 72 +++++++++++++++++----------- models/common.py | 114 ++++++++++++++++++++++++++------------------ utils/benchmarks.py | 2 +- 3 files changed, 112 insertions(+), 76 deletions(-) diff --git a/export.py b/export.py index cdf5dcddd07a..262b11a1a268 100644 --- a/export.py +++ b/export.py @@ -15,6 +15,7 @@ TensorFlow Lite | `tflite` | yolov5s.tflite TensorFlow Edge TPU | `edgetpu` | yolov5s_edgetpu.tflite TensorFlow.js | `tfjs` | yolov5s_web_model/ +PaddlePaddle | `paddle` | yolov5s_paddle_model/ Requirements: $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu # CPU @@ -54,7 +55,6 @@ import pandas as pd import torch -import yaml from torch.utils.mobile_optimizer import optimize_for_mobile FILE = Path(__file__).resolve() @@ -68,7 +68,7 @@ from models.yolo import ClassificationModel, Detect from utils.dataloaders import LoadImages from utils.general import (LOGGER, Profile, check_dataset, check_img_size, check_requirements, check_version, - check_yaml, colorstr, file_size, get_default_args, print_args, url2file) + check_yaml, colorstr, file_size, get_default_args, print_args, url2file, yaml_save) from utils.torch_utils import select_device, smart_inference_mode @@ -85,7 +85,8 @@ def export_formats(): ['TensorFlow GraphDef', 'pb', '.pb', True, True], ['TensorFlow Lite', 'tflite', '.tflite', True, False], ['TensorFlow Edge TPU', 'edgetpu', '_edgetpu.tflite', False, False], - ['TensorFlow.js', 'tfjs', '_web_model', False, False],] + ['TensorFlow.js', 'tfjs', 
'_web_model', False, False], + ['PaddlePaddle', 'paddle', '_paddle_model', True, True],] return pd.DataFrame(x, columns=['Format', 'Argument', 'Suffix', 'CPU', 'GPU']) @@ -180,7 +181,7 @@ def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorst @try_export -def export_openvino(model, file, half, prefix=colorstr('OpenVINO:')): +def export_openvino(file, metadata, half, prefix=colorstr('OpenVINO:')): # YOLOv5 OpenVINO export check_requirements('openvino-dev') # requires openvino-dev: https://pypi.org/project/openvino-dev/ import openvino.inference_engine as ie @@ -189,9 +190,23 @@ def export_openvino(model, file, half, prefix=colorstr('OpenVINO:')): f = str(file).replace('.pt', f'_openvino_model{os.sep}') cmd = f"mo --input_model {file.with_suffix('.onnx')} --output_dir {f} --data_type {'FP16' if half else 'FP32'}" - subprocess.check_output(cmd.split()) # export - with open(Path(f) / file.with_suffix('.yaml').name, 'w') as g: - yaml.dump({'stride': int(max(model.stride)), 'names': model.names}, g) # add metadata.yaml + subprocess.run(cmd.split(), check=True, env=os.environ) # export + yaml_save(Path(f) / file.with_suffix('.yaml').name, metadata) # add metadata.yaml + return f, None + + +@try_export +def export_paddle(model, im, file, metadata, prefix=colorstr('PaddlePaddle:')): + # YOLOv5 Paddle export + check_requirements(('paddlepaddle', 'x2paddle')) + import x2paddle + from x2paddle.convert import pytorch2paddle + + LOGGER.info(f'\n{prefix} starting export with X2Paddle {x2paddle.__version__}...') + f = str(file).replace('.pt', f'_paddle_model{os.sep}') + + pytorch2paddle(module=model, save_dir=f, jit_type='trace', input_examples=[im]) # export + yaml_save(Path(f) / file.with_suffix('.yaml').name, metadata) # add metadata.yaml return f, None @@ -464,7 +479,7 @@ def run( fmts = tuple(export_formats()['Argument'][1:]) # --include arguments flags = [x in include for x in fmts] assert sum(flags) == len(include), f'ERROR: Invalid --include {include}, valid --include arguments are {fmts}' - jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs = flags # export booleans + jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle = flags # export booleans file = Path(url2file(weights) if str(weights).startswith(('http:/', 'https:/')) else weights) # PyTorch weights # Load PyTorch model @@ -497,47 +512,48 @@ def run( if half and not coreml: im, model = im.half(), model.half() # to FP16 shape = tuple((y[0] if isinstance(y, tuple) else y).shape) # model output shape + metadata = {'stride': int(max(model.stride)), 'names': model.names} # model metadata LOGGER.info(f"\n{colorstr('PyTorch:')} starting from {file} with output shape {shape} ({file_size(file):.1f} MB)") # Exports - f = [''] * 10 # exported filenames + f = [''] * len(fmts) # exported filenames warnings.filterwarnings(action='ignore', category=torch.jit.TracerWarning) # suppress TracerWarning - if jit: + if jit: # TorchScript f[0], _ = export_torchscript(model, im, file, optimize) if engine: # TensorRT required before ONNX f[1], _ = export_engine(model, im, file, half, dynamic, simplify, workspace, verbose) if onnx or xml: # OpenVINO requires ONNX f[2], _ = export_onnx(model, im, file, opset, train, dynamic, simplify) if xml: # OpenVINO - f[3], _ = export_openvino(model, file, half) - if coreml: + f[3], _ = export_openvino(file, metadata, half) + if coreml: # CoreML f[4], _ = export_coreml(model, im, file, int8, half) - - # TensorFlow Exports - if any((saved_model, pb, tflite, 
edgetpu, tfjs)): + if any((saved_model, pb, tflite, edgetpu, tfjs)): # TensorFlow formats if int8 or edgetpu: # TFLite --int8 bug https://github.com/ultralytics/yolov5/issues/5707 check_requirements('flatbuffers==1.12') # required before `import tensorflow` assert not tflite or not tfjs, 'TFLite and TF.js models must be exported separately, please pass only one type.' assert not isinstance(model, ClassificationModel), 'ClassificationModel export to TF formats not yet supported.' - f[5], model = export_saved_model(model.cpu(), - im, - file, - dynamic, - tf_nms=nms or agnostic_nms or tfjs, - agnostic_nms=agnostic_nms or tfjs, - topk_per_class=topk_per_class, - topk_all=topk_all, - iou_thres=iou_thres, - conf_thres=conf_thres, - keras=keras) + f[5], s_model = export_saved_model(model.cpu(), + im, + file, + dynamic, + tf_nms=nms or agnostic_nms or tfjs, + agnostic_nms=agnostic_nms or tfjs, + topk_per_class=topk_per_class, + topk_all=topk_all, + iou_thres=iou_thres, + conf_thres=conf_thres, + keras=keras) if pb or tfjs: # pb prerequisite to tfjs - f[6], _ = export_pb(model, file) + f[6], _ = export_pb(s_model, file) if tflite or edgetpu: - f[7], _ = export_tflite(model, im, file, int8 or edgetpu, data=data, nms=nms, agnostic_nms=agnostic_nms) + f[7], _ = export_tflite(s_model, im, file, int8 or edgetpu, data=data, nms=nms, agnostic_nms=agnostic_nms) if edgetpu: f[8], _ = export_edgetpu(file) if tfjs: f[9], _ = export_tfjs(file) + if paddle: # PaddlePaddle + f[10], _ = export_paddle(model, im, file, metadata) # Finish f = [str(x) for x in f if x] # filter out '' and None diff --git a/models/common.py b/models/common.py index 0e01b60e81e5..396b5de0b505 100644 --- a/models/common.py +++ b/models/common.py @@ -320,14 +320,16 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, # TensorFlow GraphDef: *.pb # TensorFlow Lite: *.tflite # TensorFlow Edge TPU: *_edgetpu.tflite + # PaddlePaddle: *_paddle_model from models.experimental import attempt_download, attempt_load # scoped to avoid circular import super().__init__() w = str(weights[0] if isinstance(weights, list) else weights) - pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs = self._model_type(w) # get backend + pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle = self._model_type(w) # type w = attempt_download(w) # download if not local fp16 &= pt or jit or onnx or engine # FP16 stride = 32 # default stride + cuda = torch.cuda.is_available() and device.type != 'cpu' # use CUDA if pt: # PyTorch model = attempt_load(weights if isinstance(weights, list) else w, device=device, inplace=True, fuse=fuse) @@ -351,7 +353,6 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, net = cv2.dnn.readNetFromONNX(w) elif onnx: # ONNX Runtime LOGGER.info(f'Loading {w} for ONNX Runtime inference...') - cuda = torch.cuda.is_available() and device.type != 'cpu' check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime')) import onnxruntime providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider'] @@ -408,48 +409,60 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, LOGGER.info(f'Loading {w} for CoreML inference...') import coremltools as ct model = ct.models.MLModel(w) - else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU) - if saved_model: # SavedModel - LOGGER.info(f'Loading {w} for TensorFlow SavedModel inference...') - import tensorflow as tf - keras = 
False # assume TF1 saved_model - model = tf.keras.models.load_model(w) if keras else tf.saved_model.load(w) - elif pb: # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt - LOGGER.info(f'Loading {w} for TensorFlow GraphDef inference...') + elif saved_model: # TF SavedModel + LOGGER.info(f'Loading {w} for TensorFlow SavedModel inference...') + import tensorflow as tf + keras = False # assume TF1 saved_model + model = tf.keras.models.load_model(w) if keras else tf.saved_model.load(w) + elif pb: # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt + LOGGER.info(f'Loading {w} for TensorFlow GraphDef inference...') + import tensorflow as tf + + def wrap_frozen_graph(gd, inputs, outputs): + x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=""), []) # wrapped + ge = x.graph.as_graph_element + return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs)) + + gd = tf.Graph().as_graph_def() # TF GraphDef + with open(w, 'rb') as f: + gd.ParseFromString(f.read()) + frozen_func = wrap_frozen_graph(gd, inputs="x:0", outputs="Identity:0") + elif tflite or edgetpu: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python + try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu + from tflite_runtime.interpreter import Interpreter, load_delegate + except ImportError: import tensorflow as tf - - def wrap_frozen_graph(gd, inputs, outputs): - x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=""), []) # wrapped - ge = x.graph.as_graph_element - return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs)) - - gd = tf.Graph().as_graph_def() # graph_def - with open(w, 'rb') as f: - gd.ParseFromString(f.read()) - frozen_func = wrap_frozen_graph(gd, inputs="x:0", outputs="Identity:0") - elif tflite or edgetpu: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python - try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu - from tflite_runtime.interpreter import Interpreter, load_delegate - except ImportError: - import tensorflow as tf - Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate, - if edgetpu: # Edge TPU https://coral.ai/software/#edgetpu-runtime - LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...') - delegate = { - 'Linux': 'libedgetpu.so.1', - 'Darwin': 'libedgetpu.1.dylib', - 'Windows': 'edgetpu.dll'}[platform.system()] - interpreter = Interpreter(model_path=w, experimental_delegates=[load_delegate(delegate)]) - else: # Lite - LOGGER.info(f'Loading {w} for TensorFlow Lite inference...') - interpreter = Interpreter(model_path=w) # load TFLite model - interpreter.allocate_tensors() # allocate - input_details = interpreter.get_input_details() # inputs - output_details = interpreter.get_output_details() # outputs - elif tfjs: - raise NotImplementedError('ERROR: YOLOv5 TF.js inference is not supported') - else: - raise NotImplementedError(f'ERROR: {w} is not a supported format') + Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate, + if edgetpu: # TF Edge TPU https://coral.ai/software/#edgetpu-runtime + LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...') + delegate = { + 'Linux': 'libedgetpu.so.1', + 'Darwin': 'libedgetpu.1.dylib', + 'Windows': 'edgetpu.dll'}[platform.system()] + interpreter = Interpreter(model_path=w, 
experimental_delegates=[load_delegate(delegate)]) + else: # TFLite + LOGGER.info(f'Loading {w} for TensorFlow Lite inference...') + interpreter = Interpreter(model_path=w) # load TFLite model + interpreter.allocate_tensors() # allocate + input_details = interpreter.get_input_details() # inputs + output_details = interpreter.get_output_details() # outputs + elif tfjs: # TF.js + raise NotImplementedError('ERROR: YOLOv5 TF.js inference is not supported') + elif paddle: # PaddlePaddle + LOGGER.info(f'Loading {w} for PaddlePaddle inference...') + check_requirements('paddlepaddle-gpu' if cuda else 'paddlepaddle') + import paddle.inference as pdi + if not Path(w).is_file(): # if not *.pdmodel + w = next(Path(w).rglob('*.pdmodel')) # get *.pdmodel file from *_paddle_model dir + weights = Path(w).with_suffix('.pdiparams') + config = pdi.Config(str(w), str(weights)) + if cuda: + config.enable_use_gpu(memory_pool_init_size_mb=2048, device_id=0) + predictor = pdi.create_predictor(config) + input_names = predictor.get_input_names() + input_handle = predictor.get_input_handle(input_names[0]) + else: + raise NotImplementedError(f'ERROR: {w} is not a supported format') # class names if 'names' not in locals(): @@ -502,6 +515,13 @@ def forward(self, im, augment=False, visualize=False): else: k = 'var_' + str(sorted(int(k.replace('var_', '')) for k in y)[-1]) # output key y = y[k] # output + elif self.paddle: # PaddlePaddle + im = im.cpu().numpy().astype("float32") + self.input_handle.copy_from_cpu(im) + self.predictor.run() + output_names = self.predictor.get_output_names() + output_handle = self.predictor.get_output_handle(output_names[0]) + y = output_handle.copy_to_cpu() else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU) im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3) if self.saved_model: # SavedModel @@ -542,13 +562,13 @@ def warmup(self, imgsz=(1, 3, 640, 640)): def _model_type(p='path/to/model.pt'): # Return model type from model path, i.e. 
path='path/to/model.onnx' -> type=onnx from export import export_formats - suffixes = list(export_formats().Suffix) + ['.xml'] # export suffixes - check_suffix(p, suffixes) # checks + sf = list(export_formats().Suffix) + ['.xml'] # export suffixes + check_suffix(p, sf) # checks p = Path(p).name # eliminate trailing separators - pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, xml2 = (s in p for s in suffixes) + pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, xml2 = (s in p for s in sf) xml |= xml2 # *_openvino_model or *.xml tflite &= not edgetpu # *.tflite - return pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs + return pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle @staticmethod def _load_metadata(f=Path('path/to/meta.yaml')): diff --git a/utils/benchmarks.py b/utils/benchmarks.py index d5f4c1d61fbe..9d5c7f2965d5 100644 --- a/utils/benchmarks.py +++ b/utils/benchmarks.py @@ -61,7 +61,7 @@ def run( device = select_device(device) for i, (name, f, suffix, cpu, gpu) in export.export_formats().iterrows(): # index, (name, file, suffix, CPU, GPU) try: - assert i not in (9, 10), 'inference not supported' # Edge TPU and TF.js are unsupported + assert i not in (9, 10, 11), 'inference not supported' # Edge TPU, TF.js and Paddle are unsupported assert i != 5 or platform.system() == 'Darwin', 'inference only supported on macOS>=10.13' # CoreML if 'cpu' in device.type: assert cpu, 'inference not supported on CPU' From 4e8504abd9c1a7287dfcf9f96dfa04f061086cca Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 10 Sep 2022 13:25:01 +0300 Subject: [PATCH 1415/1976] PaddlePaddle Usage examples (#9358) --- classify/predict.py | 1 + classify/val.py | 1 + detect.py | 1 + export.py | 1 + models/common.py | 2 +- val.py | 1 + 6 files changed, 6 insertions(+), 1 deletion(-) diff --git a/classify/predict.py b/classify/predict.py index 701b5b1ac92d..878cf48b6fef 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -22,6 +22,7 @@ yolov5s-cls.pb # TensorFlow GraphDef yolov5s-cls.tflite # TensorFlow Lite yolov5s-cls_edgetpu.tflite # TensorFlow Edge TPU + yolov5s-cls_paddle_model # PaddlePaddle """ import argparse diff --git a/classify/val.py b/classify/val.py index bf808bc21a84..3c16ec8092d8 100644 --- a/classify/val.py +++ b/classify/val.py @@ -17,6 +17,7 @@ yolov5s-cls.pb # TensorFlow GraphDef yolov5s-cls.tflite # TensorFlow Lite yolov5s-cls_edgetpu.tflite # TensorFlow Edge TPU + yolov5s-cls_paddle_model # PaddlePaddle """ import argparse diff --git a/detect.py b/detect.py index 69a1bf13aac6..a69606a3dff9 100644 --- a/detect.py +++ b/detect.py @@ -22,6 +22,7 @@ yolov5s.pb # TensorFlow GraphDef yolov5s.tflite # TensorFlow Lite yolov5s_edgetpu.tflite # TensorFlow Edge TPU + yolov5s_paddle_model # PaddlePaddle """ import argparse diff --git a/export.py b/export.py index 262b11a1a268..9d33024a9ca4 100644 --- a/export.py +++ b/export.py @@ -35,6 +35,7 @@ yolov5s.pb # TensorFlow GraphDef yolov5s.tflite # TensorFlow Lite yolov5s_edgetpu.tflite # TensorFlow Edge TPU + yolov5s_paddle_model # PaddlePaddle TensorFlow.js: $ cd .. 
&& git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example diff --git a/models/common.py b/models/common.py index 396b5de0b505..c601aacc885c 100644 --- a/models/common.py +++ b/models/common.py @@ -312,7 +312,7 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, # PyTorch: weights = *.pt # TorchScript: *.torchscript # ONNX Runtime: *.onnx - # ONNX OpenCV DNN: *.onnx with --dnn + # ONNX OpenCV DNN: *.onnx --dnn # OpenVINO: *.xml # CoreML: *.mlmodel # TensorRT: *.engine diff --git a/val.py b/val.py index fed5e21577e5..4b0bdddae3b1 100644 --- a/val.py +++ b/val.py @@ -16,6 +16,7 @@ yolov5s.pb # TensorFlow GraphDef yolov5s.tflite # TensorFlow Lite yolov5s_edgetpu.tflite # TensorFlow Edge TPU + yolov5s_paddle_model # PaddlePaddle """ import argparse From 2b5c9a83ec4953c68159a924b338a646554a4490 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 10 Sep 2022 22:24:46 +0300 Subject: [PATCH 1416/1976] labels.jpg names fix (#9361) Partially resolves https://github.com/ultralytics/yolov5/issues/9360 Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/plots.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/plots.py b/utils/plots.py index 0f322b6b5844..0530d0abdf48 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -364,7 +364,7 @@ def plot_labels(labels, names=(), save_dir=Path('')): ax[0].set_ylabel('instances') if 0 < len(names) < 30: ax[0].set_xticks(range(len(names))) - ax[0].set_xticklabels(names, rotation=90, fontsize=10) + ax[0].set_xticklabels(list(names.values()), rotation=90, fontsize=10) else: ax[0].set_xlabel('classes') sn.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9) From cafdd189397992cf93ec0ad6b76929c60ff09a17 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 10 Sep 2022 22:58:24 +0300 Subject: [PATCH 1417/1976] Exclude `ipython` from hubconf.py `check_requirements()` (#9362) Exclude ipython from hubconf.py `check_requirements()` Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- hubconf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hubconf.py b/hubconf.py index bffe2d588b4f..2f05565629a5 100644 --- a/hubconf.py +++ b/hubconf.py @@ -37,7 +37,7 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo if not verbose: LOGGER.setLevel(logging.WARNING) - check_requirements(exclude=('tensorboard', 'thop', 'opencv-python')) + check_requirements(exclude=('ipython', 'opencv-python', 'tensorboard', 'thop')) name = Path(name) path = name.with_suffix('.pt') if name.suffix == '' and not name.is_dir() else name # checkpoint path try: From 23d0456b08cac22f783d63292cc7c2bf87a19a60 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 10 Sep 2022 23:55:18 +0300 Subject: [PATCH 1418/1976] `torch.jit.trace()` fix (#9363) * Update common.py Signed-off-by: Glenn Jocher * Update ci-testing.yml Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- .github/workflows/ci-testing.yml | 3 +++ models/common.py | 1 + 2 files changed, 4 insertions(+) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 6fb277676959..a83f997cbfc2 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -119,9 +119,12 @@ jobs: python export.py --weights $m.pt --img 64 --include torchscript # export python - < Date: Sun, 11 Sep 2022 13:56:51 +0300 Subject: [PATCH 1419/1976] AMP Check fix (#9367) Resolves https://github.com/ultralytics/yolov5/issues/9365 Signed-off-by: Glenn Jocher 
Signed-off-by: Glenn Jocher --- utils/general.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index 33232efac9fd..f5fb2c93a3d5 100755 --- a/utils/general.py +++ b/utils/general.py @@ -17,6 +17,7 @@ import sys import time import urllib +from copy import deepcopy from datetime import datetime from itertools import repeat from multiprocessing.pool import ThreadPool @@ -535,7 +536,7 @@ def amp_allclose(model, im): f = ROOT / 'data' / 'images' / 'bus.jpg' # image to check im = f if f.exists() else 'https://ultralytics.com/images/bus.jpg' if check_online() else np.ones((640, 640, 3)) try: - assert amp_allclose(model, im) or amp_allclose(DetectMultiBackend('yolov5n.pt', device), im) + assert amp_allclose(deepcopy(model), im) or amp_allclose(DetectMultiBackend('yolov5n.pt', device), im) LOGGER.info(f'{prefix}checks passed ✅') return True except Exception: From a4ed9888938a090631ca4dba5be6363f8b66575c Mon Sep 17 00:00:00 2001 From: Jiacong Fang Date: Wed, 14 Sep 2022 05:50:23 +0800 Subject: [PATCH 1420/1976] Remove duplicate line in setup.cfg (#9380) --- setup.cfg | 1 - 1 file changed, 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index 020a75740e97..f12995da3e8e 100644 --- a/setup.cfg +++ b/setup.cfg @@ -34,7 +34,6 @@ ignore = F401 # module imported but unused W504 # line break after binary operator E127 # continuation line over-indented for visual indent - W504 # line break after binary operator E231 # missing whitespace after ‘,’, ‘;’, or ‘:’ E501 # line too long F403 # ‘from module import *’ used; unable to detect undefined names From 1323b4805319ca18e4ffd8f93f3e855b87093ad4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 15 Sep 2022 19:05:10 +0200 Subject: [PATCH 1421/1976] Remove `.train()` mode exports (#9429) * Remove `.train()` mode exports No common use cases. 
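As a sketch of the effect (assuming a local yolov5s.pt), a typical export such as

    python export.py --weights yolov5s.pt --include onnx --opset 12

now always runs with model.eval() and do_constant_folding=True, and the removed --train flag is simply no longer accepted.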
Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- export.py | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/export.py b/export.py index 9d33024a9ca4..1b25f3f8221b 100644 --- a/export.py +++ b/export.py @@ -126,7 +126,7 @@ def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:' @try_export -def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorstr('ONNX:')): +def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr('ONNX:')): # YOLOv5 ONNX export check_requirements('onnx') import onnx @@ -140,8 +140,7 @@ def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorst f, verbose=False, opset_version=opset, - training=torch.onnx.TrainingMode.TRAINING if train else torch.onnx.TrainingMode.EVAL, - do_constant_folding=not train, + do_constant_folding=True, input_names=['images'], output_names=['output'], dynamic_axes={ @@ -459,7 +458,6 @@ def run( include=('torchscript', 'onnx'), # include formats half=False, # FP16 half-precision export inplace=False, # set YOLOv5 Detect() inplace=True - train=False, # model.train() mode keras=False, # use Keras optimize=False, # TorchScript: optimize for mobile int8=False, # CoreML/TF INT8 quantization @@ -501,7 +499,7 @@ def run( im = torch.zeros(batch_size, 3, *imgsz).to(device) # image size(1,3,320,192) BCHW iDetection # Update model - model.train() if train else model.eval() # training mode = no Detect() layer grid construction + model.eval() for k, m in model.named_modules(): if isinstance(m, Detect): m.inplace = inplace @@ -524,7 +522,7 @@ def run( if engine: # TensorRT required before ONNX f[1], _ = export_engine(model, im, file, half, dynamic, simplify, workspace, verbose) if onnx or xml: # OpenVINO requires ONNX - f[2], _ = export_onnx(model, im, file, opset, train, dynamic, simplify) + f[2], _ = export_onnx(model, im, file, opset, dynamic, simplify) if xml: # OpenVINO f[3], _ = export_openvino(file, metadata, half) if coreml: # CoreML @@ -578,7 +576,6 @@ def parse_opt(): parser.add_argument('--device', default='cpu', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') parser.add_argument('--half', action='store_true', help='FP16 half-precision export') parser.add_argument('--inplace', action='store_true', help='set YOLOv5 Detect() inplace=True') - parser.add_argument('--train', action='store_true', help='model.train() mode') parser.add_argument('--keras', action='store_true', help='TF: use Keras') parser.add_argument('--optimize', action='store_true', help='TorchScript: optimize for mobile') parser.add_argument('--int8', action='store_true', help='CoreML/TF INT8 quantization') From 36cb05b7b211d4c5d99586dd49d3195de16e4485 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 15 Sep 2022 23:28:33 +0200 Subject: [PATCH 1422/1976] Continue on Docker arm64 failure (#9430) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- .github/workflows/docker.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index c89d0ada3219..67ef565474a4 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -30,6 +30,7 @@ jobs: - name: Build and push arm64 image uses: docker/build-push-action@v3 + continue-on-error: true with: context: . 
platforms: linux/arm64 From 65afaa78beaa3d68d457e9c49109dc6327003962 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 15 Sep 2022 23:53:36 +0200 Subject: [PATCH 1423/1976] Continue on Docker failure (all backends) (#9432) Continue on Docker failure (all) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- .github/workflows/docker.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 67ef565474a4..f9eec3bd839e 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -12,6 +12,7 @@ jobs: if: github.repository == 'ultralytics/yolov5' name: Push Docker image to Docker Hub runs-on: ubuntu-latest + continue-on-error: true steps: - name: Checkout repo uses: actions/checkout@v3 @@ -30,7 +31,6 @@ jobs: - name: Build and push arm64 image uses: docker/build-push-action@v3 - continue-on-error: true with: context: . platforms: linux/arm64 From abea53ea5b7d4eba6b58535d31e17336912d0d1f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 16 Sep 2022 00:10:10 +0200 Subject: [PATCH 1424/1976] Continue on Docker fail (all backends) fix (#9433) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- .github/workflows/docker.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index f9eec3bd839e..1d0bd30b22cb 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -12,7 +12,6 @@ jobs: if: github.repository == 'ultralytics/yolov5' name: Push Docker image to Docker Hub runs-on: ubuntu-latest - continue-on-error: true steps: - name: Checkout repo uses: actions/checkout@v3 @@ -31,6 +30,7 @@ jobs: - name: Build and push arm64 image uses: docker/build-push-action@v3 + continue-on-error: true with: context: . platforms: linux/arm64 @@ -40,6 +40,7 @@ jobs: - name: Build and push CPU image uses: docker/build-push-action@v3 + continue-on-error: true with: context: . file: utils/docker/Dockerfile-cpu @@ -48,6 +49,7 @@ jobs: - name: Build and push GPU image uses: docker/build-push-action@v3 + continue-on-error: true with: context: . 
file: utils/docker/Dockerfile From f9869f7ffdbce757f260d28a6b799c5fa50263ee Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Fri, 16 Sep 2022 03:42:46 +0530 Subject: [PATCH 1425/1976] YOLOv5 segmentation model support (#9052) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix duplicate plots.py * Fix check_font() * # torch.use_deterministic_algorithms(True) * update doc detect->predict * Resolve precommit for segment/train and segment/val * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Resolve precommit for utils/segment * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Resolve precommit min_wh * Resolve precommit utils/segment/plots * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Resolve precommit utils/segment/general * Align NMS-seg closer to NMS * restore deterministic init_seeds code * remove easydict dependency * update * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * restore output_to_target mask * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update * cleanup * Remove unused ImageFont import * Unified NMS * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * DetectMultiBackend compatibility * segment/predict.py update * update plot colors * fix bbox shifted * sort bbox by confidence * enable overlap by default * Merge detect/segment output_to_target() function * Start segmentation CI * fix plots * Update ci-testing.yml * fix training whitespace * optimize process mask functions (can we merge both?) 
* Update predict/detect * Update plot_images * Update plot_images_and_masks * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * Add train to CI * fix precommit * fix precommit CI * fix precommit pycocotools * fix val float issues * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix masks float float issues * suppress errors * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix no-predictions plotting bug * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add CSV Logger * fix val len(plot_masks) * speed up evaluation * fix process_mask * fix plots * update segment/utils build_targets * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * optimize utils/segment/general crop() * optimize utils/segment/general crop() 2 * minor updates * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * torch.where revert * downsample only if different shape * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * loss cleanup * loss cleanup 2 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * loss cleanup 3 * update project names * Rename -seg yamls from _underscore to -dash * prepare for yolov5n-seg.pt * precommit space fix * add coco128-seg.yaml * update coco128-seg comments * cleanup val.py * Major val.py cleanup * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * precommit fix * precommit fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * optional pycocotools * remove CI pip install pycocotools (auto-installed now) * seg yaml fix * optimize mask_iou() and masks_iou() * threaded fix * Major train.py update * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Major segments/val/process_batch() update * yolov5/val updates from segment * process_batch numpy/tensor fix * opt-in to pycocotools with --save-json * threaded pycocotools ops for 2x speed increase * Avoid permute contiguous if possible * Add max_det=300 argument to both val.py and segment/val.py * fix onnx_dynamic * speed up pycocotools ops * faster process_mask(upsample=True) for predict * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * eliminate permutations for process_mask(upsample=True) * eliminate permute-contiguous in crop(), use native dimension order * cleanup comment * Add Proto() module * fix class count * fix anchor order * broadcast mask_gti in loss for speed * Cleanup seg loss * faster indexing * faster indexing fix * faster indexing fix2 * revert faster indexing * fix validation plotting * Loss cleanup and mxyxy simplification * Loss cleanup and mxyxy simplification 2 * revert validation plotting * replace missing tanh * Eliminate last permutation * delete unneeded .float() * Remove MaskIOULoss and crop(if HWC) * Final v6.3 SegmentationModel architecture updates * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add support for TF export * remove debugger trace * add call * update * update * Merge master * Merge master * [pre-commit.ci] auto fixes 
from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dataloaders.py * Restore CI * Update dataloaders.py * Fix TF/TFLite export for segmentation model * Merge master * Cleanup predict.py mask plotting * cleanup scale_masks() * rename scale_masks to scale_image * cleanup/optimize plot_masks * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add Annotator.masks() * Annotator.masks() fix * Update plots.py * Annotator mask optimization * Rename crop() to crop_mask() * Do not crop in predict.py * crop always * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Merge master * Add vid-stride from master PR * Update seg model outputs * Update seg model outputs * Add segmentation benchmarks * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add segmentation benchmarks * Add segmentation benchmarks * Add segmentation benchmarks * Fix DetectMultiBackend for OpenVINO * update Annotator.masks * fix val plot * revert val plot * clean up * revert pil * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix CI error * fix predict log * remove upsample * update interpolate * fix validation plot logging * Annotator.masks() cleanup * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Remove segmentation_model definition * Restore 0.99999 decimals Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher Co-authored-by: Laughing-q <1185102784@qq.com> Co-authored-by: Jiacong Fang --- .github/workflows/ci-testing.yml | 22 +- utils/benchmarks.py => benchmarks.py | 20 +- data/coco128-seg.yaml | 101 ++++ detect.py | 4 +- models/common.py | 18 +- models/segment/yolov5l-seg.yaml | 48 ++ models/segment/yolov5m-seg.yaml | 48 ++ models/segment/yolov5n-seg.yaml | 48 ++ models/segment/yolov5s-seg.yaml | 48 ++ models/segment/yolov5x-seg.yaml | 48 ++ models/tf.py | 36 +- models/yolo.py | 58 ++- segment/predict.py | 266 +++++++++++ segment/train.py | 676 +++++++++++++++++++++++++++ segment/val.py | 471 +++++++++++++++++++ utils/dataloaders.py | 1 + utils/general.py | 45 +- utils/metrics.py | 10 +- utils/plots.py | 71 ++- utils/segment/__init__.py | 0 utils/segment/augmentations.py | 104 +++++ utils/segment/dataloaders.py | 330 +++++++++++++ utils/segment/general.py | 120 +++++ utils/segment/loss.py | 186 ++++++++ utils/segment/metrics.py | 210 +++++++++ utils/segment/plots.py | 143 ++++++ val.py | 30 +- 27 files changed, 3091 insertions(+), 71 deletions(-) rename utils/benchmarks.py => benchmarks.py (87%) create mode 100644 data/coco128-seg.yaml create mode 100644 models/segment/yolov5l-seg.yaml create mode 100644 models/segment/yolov5m-seg.yaml create mode 100644 models/segment/yolov5n-seg.yaml create mode 100644 models/segment/yolov5s-seg.yaml create mode 100644 models/segment/yolov5x-seg.yaml create mode 100644 segment/predict.py create mode 100644 segment/train.py create mode 100644 segment/val.py mode change 100755 => 100644 utils/dataloaders.py mode change 100755 => 100644 utils/general.py create mode 100644 utils/segment/__init__.py create mode 100644 utils/segment/augmentations.py create mode 100644 utils/segment/dataloaders.py create mode 100644 utils/segment/general.py create mode 100644 utils/segment/loss.py create mode 100644 utils/segment/metrics.py create mode 
100644 utils/segment/plots.py diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index a83f997cbfc2..537ba96e7225 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -15,6 +15,7 @@ jobs: Benchmarks: runs-on: ${{ matrix.os }} strategy: + fail-fast: false matrix: os: [ ubuntu-latest ] python-version: [ '3.9' ] # requires python<=3.9 @@ -37,9 +38,12 @@ jobs: python --version pip --version pip list - - name: Run benchmarks + - name: Benchmark DetectionModel + run: | + python benchmarks.py --data coco128.yaml --weights ${{ matrix.model }}.pt --img 320 --hard-fail 0.29 + - name: Benchmark SegmentationModel run: | - python utils/benchmarks.py --weights ${{ matrix.model }}.pt --img 320 --hard-fail 0.29 + python benchmarks.py --data coco128-seg.yaml --weights ${{ matrix.model }}-seg.pt --img 320 Tests: timeout-minutes: 60 @@ -126,6 +130,20 @@ jobs: model(im) # warmup, build grids for trace torch.jit.trace(model, [im]) EOF + - name: Test segmentation + shell: bash # for Windows compatibility + run: | + m=${{ matrix.model }}-seg # official weights + b=runs/train-seg/exp/weights/best # best.pt checkpoint + python segment/train.py --imgsz 64 --batch 32 --weights $m.pt --cfg $m.yaml --epochs 1 --device cpu # train + python segment/train.py --imgsz 64 --batch 32 --weights '' --cfg $m.yaml --epochs 1 --device cpu # train + for d in cpu; do # devices + for w in $m $b; do # weights + python segment/val.py --imgsz 64 --batch 32 --weights $w.pt --device $d # val + python segment/predict.py --imgsz 64 --weights $w.pt --device $d # predict + python export.py --weights $w.pt --img 64 --include torchscript --device $d # export + done + done - name: Test classification shell: bash # for Windows compatibility run: | diff --git a/utils/benchmarks.py b/benchmarks.py similarity index 87% rename from utils/benchmarks.py rename to benchmarks.py index 9d5c7f2965d5..58e083c95d55 100644 --- a/utils/benchmarks.py +++ b/benchmarks.py @@ -34,16 +34,19 @@ import pandas as pd FILE = Path(__file__).resolve() -ROOT = FILE.parents[1] # YOLOv5 root directory +ROOT = FILE.parents[0] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH # ROOT = ROOT.relative_to(Path.cwd()) # relative import export -import val +from models.experimental import attempt_load +from models.yolo import SegmentationModel +from segment.val import run as val_seg from utils import notebook_init from utils.general import LOGGER, check_yaml, file_size, print_args from utils.torch_utils import select_device +from val import run as val_det def run( @@ -59,6 +62,7 @@ def run( ): y, t = [], time.time() device = select_device(device) + model_type = type(attempt_load(weights, fuse=False)) # DetectionModel, SegmentationModel, etc. 
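# A minimal sketch of the type-based routing above, assuming yolov5s.pt and yolov5s-seg.pt
# checkpoints are available locally (hypothetical paths):
#
#   from models.experimental import attempt_load
#   from models.yolo import SegmentationModel
#   for w in ('yolov5s.pt', 'yolov5s-seg.pt'):
#       m = attempt_load(w, fuse=False)
#       runner = 'segment/val.py' if isinstance(m, SegmentationModel) else 'val.py'
#       print(f'{w} -> validate with {runner}')
#
# Note that model_type == SegmentationModel is an exact type comparison; the isinstance()
# check in this sketch would additionally accept subclasses.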
for i, (name, f, suffix, cpu, gpu) in export.export_formats().iterrows(): # index, (name, file, suffix, CPU, GPU) try: assert i not in (9, 10, 11), 'inference not supported' # Edge TPU, TF.js and Paddle are unsupported @@ -76,10 +80,14 @@ def run( assert suffix in str(w), 'export failed' # Validate - result = val.run(data, w, batch_size, imgsz, plots=False, device=device, task='benchmark', half=half) - metrics = result[0] # metrics (mp, mr, map50, map, *losses(box, obj, cls)) - speeds = result[2] # times (preprocess, inference, postprocess) - y.append([name, round(file_size(w), 1), round(metrics[3], 4), round(speeds[1], 2)]) # MB, mAP, t_inference + if model_type == SegmentationModel: + result = val_seg(data, w, batch_size, imgsz, plots=False, device=device, task='benchmark', half=half) + metric = result[0][7] # (box(p, r, map50, map), mask(p, r, map50, map), *loss(box, obj, cls)) + else: # DetectionModel: + result = val_det(data, w, batch_size, imgsz, plots=False, device=device, task='benchmark', half=half) + metric = result[0][3] # (p, r, map50, map, *loss(box, obj, cls)) + speed = result[2][1] # times (preprocess, inference, postprocess) + y.append([name, round(file_size(w), 1), round(metric, 4), round(speed, 2)]) # MB, mAP, t_inference except Exception as e: if hard_fail: assert type(e) is AssertionError, f'Benchmark --hard-fail for {name}: {e}' diff --git a/data/coco128-seg.yaml b/data/coco128-seg.yaml new file mode 100644 index 000000000000..5e81910cc456 --- /dev/null +++ b/data/coco128-seg.yaml @@ -0,0 +1,101 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# COCO128-seg dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics +# Example usage: python segment/train.py --data coco128-seg.yaml +# parent +# ├── yolov5 +# └── datasets +# └── coco128-seg ← downloads here (7 MB) + + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
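# A sketch of the on-disk layout this config assumes (example filenames, not part of the config):
#   ../datasets/coco128-seg/images/train2017/000000000009.jpg
#   ../datasets/coco128-seg/labels/train2017/000000000009.txt
# Each label row describes one instance as a class index followed by a normalized polygon,
# i.e. "cls x1 y1 x2 y2 ..." with x,y pairs in 0-1 image coordinates.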
+path: ../datasets/coco128-seg # dataset root dir +train: images/train2017 # train images (relative to 'path') 128 images +val: images/train2017 # val images (relative to 'path') 128 images +test: # test images (optional) + +# Classes +names: + 0: person + 1: bicycle + 2: car + 3: motorcycle + 4: airplane + 5: bus + 6: train + 7: truck + 8: boat + 9: traffic light + 10: fire hydrant + 11: stop sign + 12: parking meter + 13: bench + 14: bird + 15: cat + 16: dog + 17: horse + 18: sheep + 19: cow + 20: elephant + 21: bear + 22: zebra + 23: giraffe + 24: backpack + 25: umbrella + 26: handbag + 27: tie + 28: suitcase + 29: frisbee + 30: skis + 31: snowboard + 32: sports ball + 33: kite + 34: baseball bat + 35: baseball glove + 36: skateboard + 37: surfboard + 38: tennis racket + 39: bottle + 40: wine glass + 41: cup + 42: fork + 43: knife + 44: spoon + 45: bowl + 46: banana + 47: apple + 48: sandwich + 49: orange + 50: broccoli + 51: carrot + 52: hot dog + 53: pizza + 54: donut + 55: cake + 56: chair + 57: couch + 58: potted plant + 59: bed + 60: dining table + 61: toilet + 62: tv + 63: laptop + 64: mouse + 65: remote + 66: keyboard + 67: cell phone + 68: microwave + 69: oven + 70: toaster + 71: sink + 72: refrigerator + 73: book + 74: clock + 75: vase + 76: scissors + 77: teddy bear + 78: hair drier + 79: toothbrush + + +# Download script/URL (optional) +download: https://ultralytics.com/assets/coco128-seg.zip diff --git a/detect.py b/detect.py index a69606a3dff9..310d169281bf 100644 --- a/detect.py +++ b/detect.py @@ -149,8 +149,8 @@ def run( det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round() # Print results - for c in det[:, -1].unique(): - n = (det[:, -1] == c).sum() # detections per class + for c in det[:, 5].unique(): + n = (det[:, 5] == c).sum() # detections per class s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string # Write results diff --git a/models/common.py b/models/common.py index 8b7dbbfa95fe..0d90ff4f8827 100644 --- a/models/common.py +++ b/models/common.py @@ -375,7 +375,6 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, if batch_dim.is_static: batch_size = batch_dim.get_length() executable_network = ie.compile_model(network, device_name="CPU") # device_name="MYRIAD" for Intel NCS2 - output_layer = next(iter(executable_network.outputs)) stride, names = self._load_metadata(Path(w).with_suffix('.yaml')) # load metadata elif engine: # TensorRT LOGGER.info(f'Loading {w} for TensorRT inference...') @@ -491,7 +490,7 @@ def forward(self, im, augment=False, visualize=False): y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im}) elif self.xml: # OpenVINO im = im.cpu().numpy() # FP32 - y = self.executable_network([im])[self.output_layer] + y = list(self.executable_network([im]).values()) elif self.engine: # TensorRT if self.dynamic and im.shape != self.bindings['images'].shape: i_in, i_out = (self.model.get_binding_index(x) for x in ('images', 'output')) @@ -786,8 +785,21 @@ def __str__(self): return '' +class Proto(nn.Module): + # YOLOv5 mask Proto module for segmentation models + def __init__(self, c1, c_=256, c2=32): # ch_in, number of protos, number of masks + super().__init__() + self.cv1 = Conv(c1, c_, k=3) + self.upsample = nn.Upsample(scale_factor=2, mode='nearest') + self.cv2 = Conv(c_, c_, k=3) + self.cv3 = Conv(c_, c2) + + def forward(self, x): + return self.cv3(self.cv2(self.upsample(self.cv1(x)))) + + class Classify(nn.Module): - # Classification head, i.e. 
x(b,c1,20,20) to x(b,c2) + # YOLOv5 classification head, i.e. x(b,c1,20,20) to x(b,c2) def __init__(self, c1, c2, k=1, s=1, p=None, g=1): # ch_in, ch_out, kernel, stride, padding, groups super().__init__() c_ = 1280 # efficientnet_b0 size diff --git a/models/segment/yolov5l-seg.yaml b/models/segment/yolov5l-seg.yaml new file mode 100644 index 000000000000..4782de11dd2d --- /dev/null +++ b/models/segment/yolov5l-seg.yaml @@ -0,0 +1,48 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Detect(P3, P4, P5) + ] diff --git a/models/segment/yolov5m-seg.yaml b/models/segment/yolov5m-seg.yaml new file mode 100644 index 000000000000..f73d1992ac19 --- /dev/null +++ b/models/segment/yolov5m-seg.yaml @@ -0,0 +1,48 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 0.67 # model depth multiple +width_multiple: 0.75 # layer channel multiple +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Detect(P3, P4, P5) + ] \ No newline at end of file diff --git a/models/segment/yolov5n-seg.yaml b/models/segment/yolov5n-seg.yaml new file mode 100644 index 
000000000000..c28225ab4a50 --- /dev/null +++ b/models/segment/yolov5n-seg.yaml @@ -0,0 +1,48 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 0.33 # model depth multiple +width_multiple: 0.25 # layer channel multiple +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Detect(P3, P4, P5) + ] diff --git a/models/segment/yolov5s-seg.yaml b/models/segment/yolov5s-seg.yaml new file mode 100644 index 000000000000..7cbdb36b425c --- /dev/null +++ b/models/segment/yolov5s-seg.yaml @@ -0,0 +1,48 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 0.33 # model depth multiple +width_multiple: 0.5 # layer channel multiple +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Detect(P3, P4, P5) + ] \ No newline at end of file diff --git a/models/segment/yolov5x-seg.yaml b/models/segment/yolov5x-seg.yaml new file mode 100644 index 000000000000..5d0c4524a99c --- /dev/null +++ b/models/segment/yolov5x-seg.yaml @@ -0,0 +1,48 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 1.33 # model depth multiple +width_multiple: 1.25 # layer channel multiple +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - 
[116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Detect(P3, P4, P5) + ] diff --git a/models/tf.py b/models/tf.py index ecb0d4d79c78..8cce147059d3 100644 --- a/models/tf.py +++ b/models/tf.py @@ -30,7 +30,7 @@ from models.common import (C3, SPP, SPPF, Bottleneck, BottleneckCSP, C3x, Concat, Conv, CrossConv, DWConv, DWConvTranspose2d, Focus, autopad) from models.experimental import MixConv2d, attempt_load -from models.yolo import Detect +from models.yolo import Detect, Segment from utils.activations import SiLU from utils.general import LOGGER, make_divisible, print_args @@ -320,6 +320,36 @@ def _make_grid(nx=20, ny=20): return tf.cast(tf.reshape(tf.stack([xv, yv], 2), [1, 1, ny * nx, 2]), dtype=tf.float32) +class TFSegment(TFDetect): + # YOLOv5 Segment head for segmentation models + def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), imgsz=(640, 640), w=None): + super().__init__(nc, anchors, ch, imgsz, w) + self.nm = nm # number of masks + self.npr = npr # number of protos + self.no = 5 + nc + self.nm # number of outputs per anchor + self.m = [TFConv2d(x, self.no * self.na, 1, w=w.m[i]) for i, x in enumerate(ch)] # output conv + self.proto = TFProto(ch[0], self.npr, self.nm, w=w.proto) # protos + self.detect = TFDetect.call + + def call(self, x): + p = self.proto(x[0]) + x = self.detect(self, x) + return (x, p) if self.training else ((x[0], p),) + + +class TFProto(keras.layers.Layer): + + def __init__(self, c1, c_=256, c2=32, w=None): + super().__init__() + self.cv1 = TFConv(c1, c_, k=3, w=w.cv1) + self.upsample = TFUpsample(None, scale_factor=2, mode='nearest') + self.cv2 = TFConv(c_, c_, k=3, w=w.cv2) + self.cv3 = TFConv(c_, c2, w=w.cv3) + + def call(self, inputs): + return self.cv3(self.cv2(self.upsample(self.cv1(inputs)))) + + class TFUpsample(keras.layers.Layer): # TF version of torch.nn.Upsample() def __init__(self, size, scale_factor, mode, w=None): # warning: all arguments needed including 'w' @@ -377,10 +407,12 @@ def parse_model(d, ch, model, imgsz): # model_dict, input_channels(3) args = [ch[f]] elif m is Concat: c2 = sum(ch[-1 if x == -1 else x + 1] for x in f) - elif m is Detect: + elif m in [Detect, Segment]: args.append([ch[x + 1] for x in f]) if isinstance(args[1], int): # number of anchors args[1] = [list(range(args[1] * 2))] * len(f) + if m is Segment: + args[3] = make_divisible(args[3] * gw, 8) args.append(imgsz) else: c2 = ch[f] diff --git a/models/yolo.py b/models/yolo.py index fa05fcf9a8d9..a0702a7c0257 100644 --- a/models/yolo.py 
+++ b/models/yolo.py @@ -36,6 +36,7 @@ class Detect(nn.Module): + # YOLOv5 Detect head for detection models stride = None # strides computed during build dynamic = False # force grid reconstruction export = False # export mode @@ -63,15 +64,16 @@ def forward(self, x): if self.dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]: self.grid[i], self.anchor_grid[i] = self._make_grid(nx, ny, i) - y = x[i].sigmoid() + y = x[i].clone() + y[..., :5 + self.nc].sigmoid_() if self.inplace: y[..., 0:2] = (y[..., 0:2] * 2 + self.grid[i]) * self.stride[i] # xy y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh else: # for YOLOv5 on AWS Inferentia https://github.com/ultralytics/yolov5/pull/2953 - xy, wh, conf = y.split((2, 2, self.nc + 1), 4) # y.tensor_split((2, 4, 5), 4) # torch 1.8.0 + xy, wh, etc = y.split((2, 2, self.no - 4), 4) # tensor_split((2, 4, 5), 4) if torch 1.8.0 xy = (xy * 2 + self.grid[i]) * self.stride[i] # xy wh = (wh * 2) ** 2 * self.anchor_grid[i] # wh - y = torch.cat((xy, wh, conf), 4) + y = torch.cat((xy, wh, etc), 4) z.append(y.view(bs, -1, self.no)) return x if self.training else (torch.cat(z, 1),) if self.export else (torch.cat(z, 1), x) @@ -87,6 +89,23 @@ def _make_grid(self, nx=20, ny=20, i=0, torch_1_10=check_version(torch.__version return grid, anchor_grid +class Segment(Detect): + # YOLOv5 Segment head for segmentation models + def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), inplace=True): + super().__init__(nc, anchors, ch, inplace) + self.nm = nm # number of masks + self.npr = npr # number of protos + self.no = 5 + nc + self.nm # number of outputs per anchor + self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv + self.proto = Proto(ch[0], self.npr, self.nm) # protos + self.detect = Detect.forward + + def forward(self, x): + p = self.proto(x[0]) + x = self.detect(self, x) + return (x, p) if self.training else (x[0], p) if self.export else (x[0], p, x[1]) + + class BaseModel(nn.Module): # YOLOv5 base model def forward(self, x, profile=False, visualize=False): @@ -135,7 +154,7 @@ def _apply(self, fn): # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers self = super()._apply(fn) m = self.model[-1] # Detect() - if isinstance(m, Detect): + if isinstance(m, (Detect, Segment)): m.stride = fn(m.stride) m.grid = list(map(fn, m.grid)) if isinstance(m.anchor_grid, list): @@ -169,11 +188,12 @@ def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, i # Build strides, anchors m = self.model[-1] # Detect() - if isinstance(m, Detect): + if isinstance(m, (Detect, Segment)): s = 256 # 2x min stride m.inplace = self.inplace - m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.empty(1, ch, s, s))]) # forward - check_anchor_order(m) # must be in pixel-space (not grid-space) + forward = lambda x: self.forward(x)[0] if isinstance(m, Segment) else self.forward(x) + m.stride = torch.tensor([s / x.shape[-2] for x in forward(torch.zeros(1, ch, s, s))]) # forward + check_anchor_order(m) m.anchors /= m.stride.view(-1, 1, 1) self.stride = m.stride self._initialize_biases() # only run once @@ -235,15 +255,21 @@ def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1. 
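# A quick numerical check of the priors below (illustrative values, nc=80, stride s=8):
#   import math
#   math.log(8 / (640 / 8) ** 2)    # objectness bias: ln(8/6400) ≈ -6.7
#   math.log(0.6 / (80 - 0.99999))  # class bias: ln(0.6/79.0) ≈ -4.9
# Subtracting 0.99999 rather than 1.0 simply avoids a divide-by-zero when nc=1.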
m = self.model[-1] # Detect() module for mi, s in zip(m.m, m.stride): # from - b = mi.bias.view(m.na, -1).detach() # conv.bias(255) to (3,85) - b[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) - b[:, 5:] += math.log(0.6 / (m.nc - 0.999999)) if cf is None else torch.log(cf / cf.sum()) # cls + b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85) + b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) + b.data[:, 5:5 + m.nc] += math.log(0.6 / (m.nc - 0.99999)) if cf is None else torch.log(cf / cf.sum()) # cls mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) Model = DetectionModel # retain YOLOv5 'Model' class for backwards compatibility +class SegmentationModel(DetectionModel): + # YOLOv5 segmentation model + def __init__(self, cfg='yolov5s-seg.yaml', ch=3, nc=None, anchors=None): + super().__init__(cfg, ch, nc, anchors) + + class ClassificationModel(BaseModel): # YOLOv5 classification model def __init__(self, cfg=None, model=None, nc=1000, cutoff=10): # yaml, model, number of classes, cutoff index @@ -284,24 +310,28 @@ def parse_model(d, ch): # model_dict, input_channels(3) args[j] = eval(a) if isinstance(a, str) else a # eval strings n = n_ = max(round(n * gd), 1) if n > 1 else n # depth gain - if m in (Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv, MixConv2d, Focus, CrossConv, - BottleneckCSP, C3, C3TR, C3SPP, C3Ghost, nn.ConvTranspose2d, DWConvTranspose2d, C3x): + if m in { + Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv, MixConv2d, Focus, CrossConv, + BottleneckCSP, C3, C3TR, C3SPP, C3Ghost, nn.ConvTranspose2d, DWConvTranspose2d, C3x}: c1, c2 = ch[f], args[0] if c2 != no: # if not output c2 = make_divisible(c2 * gw, 8) args = [c1, c2, *args[1:]] - if m in [BottleneckCSP, C3, C3TR, C3Ghost, C3x]: + if m in {BottleneckCSP, C3, C3TR, C3Ghost, C3x}: args.insert(2, n) # number of repeats n = 1 elif m is nn.BatchNorm2d: args = [ch[f]] elif m is Concat: c2 = sum(ch[x] for x in f) - elif m is Detect: + # TODO: channel, gw, gd + elif m in {Detect, Segment}: args.append([ch[x] for x in f]) if isinstance(args[1], int): # number of anchors args[1] = [list(range(args[1] * 2))] * len(f) + if m is Segment: + args[3] = make_divisible(args[3] * gw, 8) elif m is Contract: c2 = ch[f] * args[0] ** 2 elif m is Expand: diff --git a/segment/predict.py b/segment/predict.py new file mode 100644 index 000000000000..ba4cf2905255 --- /dev/null +++ b/segment/predict.py @@ -0,0 +1,266 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Run YOLOv5 segmentation inference on images, videos, directories, streams, etc. 
+ +Usage - sources: + $ python segment/predict.py --weights yolov5s-seg.pt --source 0 # webcam + img.jpg # image + vid.mp4 # video + path/ # directory + 'path/*.jpg' # glob + 'https://youtu.be/Zgi9g1ksQHc' # YouTube + 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream + +Usage - formats: + $ python segment/predict.py --weights yolov5s-seg.pt # PyTorch + yolov5s-seg.torchscript # TorchScript + yolov5s-seg.onnx # ONNX Runtime or OpenCV DNN with --dnn + yolov5s-seg.xml # OpenVINO + yolov5s-seg.engine # TensorRT + yolov5s-seg.mlmodel # CoreML (macOS-only) + yolov5s-seg_saved_model # TensorFlow SavedModel + yolov5s-seg.pb # TensorFlow GraphDef + yolov5s-seg.tflite # TensorFlow Lite + yolov5s-seg_edgetpu.tflite # TensorFlow Edge TPU + yolov5s-seg_paddle_model # PaddlePaddle +""" + +import argparse +import os +import platform +import sys +from pathlib import Path + +import torch + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[1] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative + +from models.common import DetectMultiBackend +from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams +from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, + increment_path, non_max_suppression, print_args, scale_coords, strip_optimizer, xyxy2xywh) +from utils.plots import Annotator, colors, save_one_box +from utils.segment.general import process_mask +from utils.torch_utils import select_device, smart_inference_mode + + +@smart_inference_mode() +def run( + weights=ROOT / 'yolov5s-seg.pt', # model.pt path(s) + source=ROOT / 'data/images', # file/dir/URL/glob, 0 for webcam + data=ROOT / 'data/coco128.yaml', # dataset.yaml path + imgsz=(640, 640), # inference size (height, width) + conf_thres=0.25, # confidence threshold + iou_thres=0.45, # NMS IOU threshold + max_det=1000, # maximum detections per image + device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu + view_img=False, # show results + save_txt=False, # save results to *.txt + save_conf=False, # save confidences in --save-txt labels + save_crop=False, # save cropped prediction boxes + nosave=False, # do not save images/videos + classes=None, # filter by class: --class 0, or --class 0 2 3 + agnostic_nms=False, # class-agnostic NMS + augment=False, # augmented inference + visualize=False, # visualize features + update=False, # update all models + project=ROOT / 'runs/predict-seg', # save results to project/name + name='exp', # save results to project/name + exist_ok=False, # existing project/name ok, do not increment + line_thickness=3, # bounding box thickness (pixels) + hide_labels=False, # hide labels + hide_conf=False, # hide confidences + half=False, # use FP16 half-precision inference + dnn=False, # use OpenCV DNN for ONNX inference + vid_stride=1, # video frame-rate stride + retina_masks=False, +): + source = str(source) + save_img = not nosave and not source.endswith('.txt') # save inference images + is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) + is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) + webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file) + if is_url and is_file: + source = check_file(source) # download + + # Directories + save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run + (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir + + # Load model + device = select_device(device) + model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) + stride, names, pt = model.stride, model.names, model.pt + imgsz = check_img_size(imgsz, s=stride) # check image size + + # Dataloader + if webcam: + view_img = check_imshow() + dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) + bs = len(dataset) # batch_size + else: + dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) + bs = 1 # batch_size + vid_path, vid_writer = [None] * bs, [None] * bs + + # Run inference + model.warmup(imgsz=(1 if pt else bs, 3, *imgsz)) # warmup + seen, windows, dt = 0, [], (Profile(), Profile(), Profile()) + for path, im, im0s, vid_cap, s in dataset: + with dt[0]: + im = torch.from_numpy(im).to(device) + im = im.half() if model.fp16 else im.float() # uint8 to fp16/32 + im /= 255 # 0 - 255 to 0.0 - 1.0 + if len(im.shape) == 3: + im = im[None] # expand for batch dim + + # Inference + with dt[1]: + visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False + pred, proto = model(im, augment=augment, visualize=visualize)[:2] + + # NMS + with dt[2]: + pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det, nm=32) + + # Second-stage classifier (optional) + # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s) + + # Process predictions + for i, det in enumerate(pred): # per image + seen += 1 + if webcam: # batch_size >= 1 + p, im0, frame = path[i], im0s[i].copy(), dataset.count + s += f'{i}: ' + else: + p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0) + + p = Path(p) # to Path + save_path = str(save_dir / p.name) # im.jpg + txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt + s += '%gx%g ' % im.shape[2:] # print string + gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain 
whwh + imc = im0.copy() if save_crop else im0 # for save_crop + annotator = Annotator(im0, line_width=line_thickness, example=str(names)) + if len(det): + masks = process_mask(proto[i], det[:, 6:], det[:, :4], im.shape[2:], upsample=True) # HWC + + # Rescale boxes from img_size to im0 size + det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round() + + # Print results + for c in det[:, 5].unique(): + n = (det[:, 5] == c).sum() # detections per class + s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string + + # Mask plotting + annotator.masks(masks, + colors=[colors(x, True) for x in det[:, 5]], + im_gpu=None if retina_masks else im[i]) + + # Write results + for *xyxy, conf, cls in reversed(det[:, :6]): + if save_txt: # Write to file + xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh + line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format + with open(f'{txt_path}.txt', 'a') as f: + f.write(('%g ' * len(line)).rstrip() % line + '\n') + + if save_img or save_crop or view_img: # Add bbox to image + c = int(cls) # integer class + label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') + annotator.box_label(xyxy, label, color=colors(c, True)) + if save_crop: + save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) + + # Stream results + im0 = annotator.result() + if view_img: + if platform.system() == 'Linux' and p not in windows: + windows.append(p) + cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux) + cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0]) + cv2.imshow(str(p), im0) + if cv2.waitKey(1) == ord('q'): # 1 millisecond + exit() + + # Save results (image with detections) + if save_img: + if dataset.mode == 'image': + cv2.imwrite(save_path, im0) + else: # 'video' or 'stream' + if vid_path[i] != save_path: # new video + vid_path[i] = save_path + if isinstance(vid_writer[i], cv2.VideoWriter): + vid_writer[i].release() # release previous video writer + if vid_cap: # video + fps = vid_cap.get(cv2.CAP_PROP_FPS) + w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + else: # stream + fps, w, h = 30, im0.shape[1], im0.shape[0] + save_path = str(Path(save_path).with_suffix('.mp4')) # force *.mp4 suffix on results videos + vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) + vid_writer[i].write(im0) + + # Print time (inference-only) + LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms") + + # Print results + t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image + LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) + if save_txt or save_img: + s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' + LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") + if update: + strip_optimizer(weights[0]) # update model (to fix SourceChangeWarning) + + +def parse_opt(): + parser = argparse.ArgumentParser() + parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-seg.pt', help='model path(s)') + parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob, 0 for webcam') + parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path') + parser.add_argument('--imgsz', '--img', 
'--img-size', nargs='+', type=int, default=[640], help='inference size h,w') + parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold') + parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold') + parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image') + parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + parser.add_argument('--view-img', action='store_true', help='show results') + parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') + parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') + parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes') + parser.add_argument('--nosave', action='store_true', help='do not save images/videos') + parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3') + parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') + parser.add_argument('--augment', action='store_true', help='augmented inference') + parser.add_argument('--visualize', action='store_true', help='visualize features') + parser.add_argument('--update', action='store_true', help='update all models') + parser.add_argument('--project', default=ROOT / 'runs/predict-seg', help='save results to project/name') + parser.add_argument('--name', default='exp', help='save results to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)') + parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels') + parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences') + parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') + parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') + parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride') + parser.add_argument('--retina-masks', action='store_true', help='whether to plot masks in native resolution') + opt = parser.parse_args() + opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand + print_args(vars(opt)) + return opt + + +def main(opt): + check_requirements(exclude=('tensorboard', 'thop')) + run(**vars(opt)) + + +if __name__ == "__main__": + opt = parse_opt() + main(opt) diff --git a/segment/train.py b/segment/train.py new file mode 100644 index 000000000000..bda379176151 --- /dev/null +++ b/segment/train.py @@ -0,0 +1,676 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Train a YOLOv5 segment model on a segment dataset +Models and datasets download automatically from the latest YOLOv5 release. 
+ +Usage - Single-GPU training: + $ python segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 # from pretrained (recommended) + $ python segment/train.py --data coco128-seg.yaml --weights '' --cfg yolov5s-seg.yaml --img 640 # from scratch + +Usage - Multi-GPU DDP training: + $ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --device 0,1,2,3 + +Models: https://github.com/ultralytics/yolov5/tree/master/models +Datasets: https://github.com/ultralytics/yolov5/tree/master/data +Tutorial: https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data +""" + +import argparse +import math +import os +import random +import sys +import time +from copy import deepcopy +from datetime import datetime +from pathlib import Path + +import numpy as np +import torch +import torch.distributed as dist +import torch.nn as nn +import yaml +from torch.optim import lr_scheduler +from tqdm import tqdm + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[1] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative + +import torch.nn.functional as F + +import segment.val as validate # for end-of-epoch mAP +from models.experimental import attempt_load +from models.yolo import SegmentationModel +from utils.autoanchor import check_anchors +from utils.autobatch import check_train_batch_size +from utils.callbacks import Callbacks +from utils.downloads import attempt_download, is_url +from utils.general import (LOGGER, check_amp, check_dataset, check_file, check_git_status, check_img_size, + check_requirements, check_suffix, check_yaml, colorstr, get_latest_run, increment_path, + init_seeds, intersect_dicts, labels_to_class_weights, labels_to_image_weights, one_cycle, + print_args, print_mutation, strip_optimizer, yaml_save) +from utils.loggers import GenericLogger +from utils.plots import plot_evolve, plot_labels +from utils.segment.dataloaders import create_dataloader +from utils.segment.loss import ComputeLoss +from utils.segment.metrics import KEYS, fitness +from utils.segment.plots import plot_images_and_masks, plot_results_with_masks +from utils.torch_utils import (EarlyStopping, ModelEMA, de_parallel, select_device, smart_DDP, smart_optimizer, + smart_resume, torch_distributed_zero_first) + +LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html +RANK = int(os.getenv('RANK', -1)) +WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) + + +def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictionary + save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze, mask_ratio = \ + Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \ + opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze, opt.mask_ratio + # callbacks.run('on_pretrain_routine_start') + + # Directories + w = save_dir / 'weights' # weights dir + (w.parent if evolve else w).mkdir(parents=True, exist_ok=True) # make dir + last, best = w / 'last.pt', w / 'best.pt' + + # Hyperparameters + if isinstance(hyp, str): + with open(hyp, errors='ignore') as f: + hyp = yaml.safe_load(f) # load hyps dict + LOGGER.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items())) + opt.hyp = hyp.copy() # for saving hyps to checkpoints + + # Save run settings + if not 
evolve: + yaml_save(save_dir / 'hyp.yaml', hyp) + yaml_save(save_dir / 'opt.yaml', vars(opt)) + + # Loggers + data_dict = None + if RANK in {-1, 0}: + logger = GenericLogger(opt=opt, console_logger=LOGGER) + # loggers = Loggers(save_dir, weights, opt, hyp, LOGGER) # loggers instance + # if loggers.clearml: + # data_dict = loggers.clearml.data_dict # None if no ClearML dataset or filled in by ClearML + # if loggers.wandb: + # data_dict = loggers.wandb.data_dict + # if resume: + # weights, epochs, hyp, batch_size = opt.weights, opt.epochs, opt.hyp, opt.batch_size + # + # # Register actions + # for k in methods(loggers): + # callbacks.register_action(k, callback=getattr(loggers, k)) + + # Config + plots = not evolve and not opt.noplots # create plots + overlap = not opt.no_overlap + cuda = device.type != 'cpu' + init_seeds(opt.seed + 1 + RANK, deterministic=True) + with torch_distributed_zero_first(LOCAL_RANK): + data_dict = data_dict or check_dataset(data) # check if None + train_path, val_path = data_dict['train'], data_dict['val'] + nc = 1 if single_cls else int(data_dict['nc']) # number of classes + names = {0: 'item'} if single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names + is_coco = isinstance(val_path, str) and val_path.endswith('coco/val2017.txt') # COCO dataset + + # Model + check_suffix(weights, '.pt') # check weights + pretrained = weights.endswith('.pt') + if pretrained: + with torch_distributed_zero_first(LOCAL_RANK): + weights = attempt_download(weights) # download if not found locally + ckpt = torch.load(weights, map_location='cpu') # load checkpoint to CPU to avoid CUDA memory leak + model = SegmentationModel(cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) + exclude = ['anchor'] if (cfg or hyp.get('anchors')) and not resume else [] # exclude keys + csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 + csd = intersect_dicts(csd, model.state_dict(), exclude=exclude) # intersect + model.load_state_dict(csd, strict=False) # load + LOGGER.info(f'Transferred {len(csd)}/{len(model.state_dict())} items from {weights}') # report + else: + model = SegmentationModel(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create + amp = check_amp(model) # check AMP + + # Freeze + freeze = [f'model.{x}.' 
for x in (freeze if len(freeze) > 1 else range(freeze[0]))] # layers to freeze + for k, v in model.named_parameters(): + v.requires_grad = True # train all layers + # v.register_hook(lambda x: torch.nan_to_num(x)) # NaN to 0 (commented for erratic training results) + if any(x in k for x in freeze): + LOGGER.info(f'freezing {k}') + v.requires_grad = False + + # Image size + gs = max(int(model.stride.max()), 32) # grid size (max stride) + imgsz = check_img_size(opt.imgsz, gs, floor=gs * 2) # verify imgsz is gs-multiple + + # Batch size + if RANK == -1 and batch_size == -1: # single-GPU only, estimate best batch size + batch_size = check_train_batch_size(model, imgsz, amp) + logger.update_params({"batch_size": batch_size}) + # loggers.on_params_update({"batch_size": batch_size}) + + # Optimizer + nbs = 64 # nominal batch size + accumulate = max(round(nbs / batch_size), 1) # accumulate loss before optimizing + hyp['weight_decay'] *= batch_size * accumulate / nbs # scale weight_decay + optimizer = smart_optimizer(model, opt.optimizer, hyp['lr0'], hyp['momentum'], hyp['weight_decay']) + + # Scheduler + if opt.cos_lr: + lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf'] + else: + lf = lambda x: (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear + scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) # plot_lr_scheduler(optimizer, scheduler, epochs) + + # EMA + ema = ModelEMA(model) if RANK in {-1, 0} else None + + # Resume + best_fitness, start_epoch = 0.0, 0 + if pretrained: + if resume: + best_fitness, start_epoch, epochs = smart_resume(ckpt, optimizer, ema, weights, epochs, resume) + del ckpt, csd + + # DP mode + if cuda and RANK == -1 and torch.cuda.device_count() > 1: + LOGGER.warning('WARNING: DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n' + 'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.') + model = torch.nn.DataParallel(model) + + # SyncBatchNorm + if opt.sync_bn and cuda and RANK != -1: + model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device) + LOGGER.info('Using SyncBatchNorm()') + + # Trainloader + train_loader, dataset = create_dataloader( + train_path, + imgsz, + batch_size // WORLD_SIZE, + gs, + single_cls, + hyp=hyp, + augment=True, + cache=None if opt.cache == 'val' else opt.cache, + rect=opt.rect, + rank=LOCAL_RANK, + workers=workers, + image_weights=opt.image_weights, + quad=opt.quad, + prefix=colorstr('train: '), + shuffle=True, + mask_downsample_ratio=mask_ratio, + overlap_mask=overlap, + ) + labels = np.concatenate(dataset.labels, 0) + mlc = int(labels[:, 0].max()) # max label class + assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. 
Possible class labels are 0-{nc - 1}' + + # Process 0 + if RANK in {-1, 0}: + val_loader = create_dataloader(val_path, + imgsz, + batch_size // WORLD_SIZE * 2, + gs, + single_cls, + hyp=hyp, + cache=None if noval else opt.cache, + rect=True, + rank=-1, + workers=workers * 2, + pad=0.5, + mask_downsample_ratio=mask_ratio, + overlap_mask=overlap, + prefix=colorstr('val: '))[0] + + if not resume: + if not opt.noautoanchor: + check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz) # run AutoAnchor + model.half().float() # pre-reduce anchor precision + + if plots: + plot_labels(labels, names, save_dir) + # callbacks.run('on_pretrain_routine_end', labels, names) + + # DDP mode + if cuda and RANK != -1: + model = smart_DDP(model) + + # Model attributes + nl = de_parallel(model).model[-1].nl # number of detection layers (to scale hyps) + hyp['box'] *= 3 / nl # scale to layers + hyp['cls'] *= nc / 80 * 3 / nl # scale to classes and layers + hyp['obj'] *= (imgsz / 640) ** 2 * 3 / nl # scale to image size and layers + hyp['label_smoothing'] = opt.label_smoothing + model.nc = nc # attach number of classes to model + model.hyp = hyp # attach hyperparameters to model + model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc # attach class weights + model.names = names + + # Start training + t0 = time.time() + nb = len(train_loader) # number of batches + nw = max(round(hyp['warmup_epochs'] * nb), 100) # number of warmup iterations, max(3 epochs, 100 iterations) + # nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training + last_opt_step = -1 + maps = np.zeros(nc) # mAP per class + results = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls) + scheduler.last_epoch = start_epoch - 1 # do not move + scaler = torch.cuda.amp.GradScaler(enabled=amp) + stopper, stop = EarlyStopping(patience=opt.patience), False + compute_loss = ComputeLoss(model, overlap=overlap) # init loss class + # callbacks.run('on_train_start') + LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n' + f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n' + f"Logging results to {colorstr('bold', save_dir)}\n" + f'Starting training for {epochs} epochs...') + for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------ + # callbacks.run('on_train_epoch_start') + model.train() + + # Update image weights (optional, single-GPU only) + if opt.image_weights: + cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights + iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights + dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx + + # Update mosaic border (optional) + # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs) + # dataset.mosaic_border = [b - imgsz, -b] # height, width borders + + mloss = torch.zeros(4, device=device) # mean losses + if RANK != -1: + train_loader.sampler.set_epoch(epoch) + pbar = enumerate(train_loader) + LOGGER.info(('\n' + '%11s' * 8) % + ('Epoch', 'GPU_mem', 'box_loss', 'seg_loss', 'obj_loss', 'cls_loss', 'Instances', 'Size')) + if RANK in {-1, 0}: + pbar = tqdm(pbar, total=nb, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar + optimizer.zero_grad() + for i, (imgs, targets, paths, _, masks) in pbar: # batch ------------------------------------------------------ + # callbacks.run('on_train_batch_start') + ni = i + nb * epoch # 
number integrated batches (since train start) + imgs = imgs.to(device, non_blocking=True).float() / 255 # uint8 to float32, 0-255 to 0.0-1.0 + + # Warmup + if ni <= nw: + xi = [0, nw] # x interp + # compute_loss.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou) + accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round()) + for j, x in enumerate(optimizer.param_groups): + # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0 + x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 0 else 0.0, x['initial_lr'] * lf(epoch)]) + if 'momentum' in x: + x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']]) + + # Multi-scale + if opt.multi_scale: + sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size + sf = sz / max(imgs.shape[2:]) # scale factor + if sf != 1: + ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple) + imgs = nn.functional.interpolate(imgs, size=ns, mode='bilinear', align_corners=False) + + # Forward + with torch.cuda.amp.autocast(amp): + pred = model(imgs) # forward + loss, loss_items = compute_loss(pred, targets.to(device), masks=masks.to(device).float()) + if RANK != -1: + loss *= WORLD_SIZE # gradient averaged between devices in DDP mode + if opt.quad: + loss *= 4. + + # Backward + scaler.scale(loss).backward() + + # Optimize - https://pytorch.org/docs/master/notes/amp_examples.html + if ni - last_opt_step >= accumulate: + scaler.unscale_(optimizer) # unscale gradients + torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0) # clip gradients + scaler.step(optimizer) # optimizer.step + scaler.update() + optimizer.zero_grad() + if ema: + ema.update(model) + last_opt_step = ni + + # Log + if RANK in {-1, 0}: + mloss = (mloss * i + loss_items) / (i + 1) # update mean losses + mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G' # (GB) + pbar.set_description(('%11s' * 2 + '%11.4g' * 6) % + (f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1])) + # callbacks.run('on_train_batch_end', model, ni, imgs, targets, paths) + # if callbacks.stop_training: + # return + + # Mosaic plots + if plots: + if ni < 3: + plot_images_and_masks(imgs, targets, masks, paths, save_dir / f"train_batch{ni}.jpg") + if ni == 10: + files = sorted(save_dir.glob('train*.jpg')) + logger.log_images(files, "Mosaics", epoch) + # end batch ------------------------------------------------------------------------------------------------ + + # Scheduler + lr = [x['lr'] for x in optimizer.param_groups] # for loggers + scheduler.step() + + if RANK in {-1, 0}: + # mAP + # callbacks.run('on_train_epoch_end', epoch=epoch) + ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride', 'class_weights']) + final_epoch = (epoch + 1 == epochs) or stopper.possible_stop + if not noval or final_epoch: # Calculate mAP + results, maps, _ = validate.run(data_dict, + batch_size=batch_size // WORLD_SIZE * 2, + imgsz=imgsz, + half=amp, + model=ema.ema, + single_cls=single_cls, + dataloader=val_loader, + save_dir=save_dir, + plots=False, + callbacks=callbacks, + compute_loss=compute_loss, + mask_downsample_ratio=mask_ratio, + overlap=overlap) + + # Update best mAP + fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95] + stop = stopper(epoch=epoch, fitness=fi) # early stop check + if fi > best_fitness: + best_fitness = fi + log_vals = list(mloss) + list(results) + lr + # 
callbacks.run('on_fit_epoch_end', log_vals, epoch, best_fitness, fi) + # Log val metrics and media + metrics_dict = dict(zip(KEYS, log_vals)) + logger.log_metrics(metrics_dict, epoch) + + # Save model + if (not nosave) or (final_epoch and not evolve): # if save + ckpt = { + 'epoch': epoch, + 'best_fitness': best_fitness, + 'model': deepcopy(de_parallel(model)).half(), + 'ema': deepcopy(ema.ema).half(), + 'updates': ema.updates, + 'optimizer': optimizer.state_dict(), + # 'wandb_id': loggers.wandb.wandb_run.id if loggers.wandb else None, + 'opt': vars(opt), + 'date': datetime.now().isoformat()} + + # Save last, best and delete + torch.save(ckpt, last) + if best_fitness == fi: + torch.save(ckpt, best) + if opt.save_period > 0 and epoch % opt.save_period == 0: + torch.save(ckpt, w / f'epoch{epoch}.pt') + logger.log_model(w / f'epoch{epoch}.pt') + del ckpt + # callbacks.run('on_model_save', last, epoch, final_epoch, best_fitness, fi) + + # EarlyStopping + if RANK != -1: # if DDP training + broadcast_list = [stop if RANK == 0 else None] + dist.broadcast_object_list(broadcast_list, 0) # broadcast 'stop' to all ranks + if RANK != 0: + stop = broadcast_list[0] + if stop: + break # must break all DDP ranks + + # end epoch ---------------------------------------------------------------------------------------------------- + # end training ----------------------------------------------------------------------------------------------------- + if RANK in {-1, 0}: + LOGGER.info(f'\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.') + for f in last, best: + if f.exists(): + strip_optimizer(f) # strip optimizers + if f is best: + LOGGER.info(f'\nValidating {f}...') + results, _, _ = validate.run( + data_dict, + batch_size=batch_size // WORLD_SIZE * 2, + imgsz=imgsz, + model=attempt_load(f, device).half(), + iou_thres=0.65 if is_coco else 0.60, # best pycocotools at iou 0.65 + single_cls=single_cls, + dataloader=val_loader, + save_dir=save_dir, + save_json=is_coco, + verbose=True, + plots=plots, + callbacks=callbacks, + compute_loss=compute_loss, + mask_downsample_ratio=mask_ratio, + overlap=overlap) # val best model with plots + if is_coco: + # callbacks.run('on_fit_epoch_end', list(mloss) + list(results) + lr, epoch, best_fitness, fi) + metrics_dict = dict(zip(KEYS, list(mloss) + list(results) + lr)) + logger.log_metrics(metrics_dict, epoch) + + # callbacks.run('on_train_end', last, best, epoch, results) + # on train end callback using genericLogger + logger.log_metrics(dict(zip(KEYS[4:16], results)), epochs) + if not opt.evolve: + logger.log_model(best, epoch) + if plots: + plot_results_with_masks(file=save_dir / 'results.csv') # save results.png + files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))] + files = [(save_dir / f) for f in files if (save_dir / f).exists()] # filter + LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}") + logger.log_images(files, "Results", epoch + 1) + logger.log_images(sorted(save_dir.glob('val*.jpg')), "Validation", epoch + 1) + torch.cuda.empty_cache() + return results + + +def parse_opt(known=False): + parser = argparse.ArgumentParser() + parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s-seg.pt', help='initial weights path') + parser.add_argument('--cfg', type=str, default='', help='model.yaml path') + parser.add_argument('--data', type=str, default=ROOT / 'data/coco128-seg.yaml', help='dataset.yaml path') + parser.add_argument('--hyp', type=str, default=ROOT / 
'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path')
+    parser.add_argument('--epochs', type=int, default=300, help='total training epochs')
+    parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch')
+    parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)')
+    parser.add_argument('--rect', action='store_true', help='rectangular training')
+    parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training')
+    parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
+    parser.add_argument('--noval', action='store_true', help='only validate final epoch')
+    parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor')
+    parser.add_argument('--noplots', action='store_true', help='save no plot files')
+    parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations')
+    parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
+    parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"')
+    parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training')
+    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
+    parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
+    parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class')
+    parser.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW'], default='SGD', help='optimizer')
+    parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
+    parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
+    parser.add_argument('--project', default=ROOT / 'runs/train-seg', help='save to project/name')
+    parser.add_argument('--name', default='exp', help='save to project/name')
+    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
+    parser.add_argument('--quad', action='store_true', help='quad dataloader')
+    parser.add_argument('--cos-lr', action='store_true', help='cosine LR scheduler')
+    parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon')
+    parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)')
+    parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2')
+    parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)')
+    parser.add_argument('--seed', type=int, default=0, help='Global training seed')
+    parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify')
+
+    # Instance Segmentation Args
+    parser.add_argument('--mask-ratio', type=int, default=4, help='Downsample the ground-truth masks by this ratio to save memory')
+    parser.add_argument('--no-overlap', action='store_true', help='Disable overlapping masks (overlap trains faster at slightly lower mAP)')
+
+    # Weights & Biases arguments
+    # parser.add_argument('--entity', default=None, help='W&B: Entity')
+    # parser.add_argument('--upload_dataset', nargs='?', const=True, default=False, help='W&B: Upload data, "val" option')
+    # 
parser.add_argument('--bbox_interval', type=int, default=-1, help='W&B: Set bounding-box image logging interval') + # parser.add_argument('--artifact_alias', type=str, default='latest', help='W&B: Version of dataset artifact to use') + + return parser.parse_known_args()[0] if known else parser.parse_args() + + +def main(opt, callbacks=Callbacks()): + # Checks + if RANK in {-1, 0}: + print_args(vars(opt)) + check_git_status() + check_requirements() + + # Resume + if opt.resume and not opt.evolve: # resume from specified or most recent last.pt + last = Path(check_file(opt.resume) if isinstance(opt.resume, str) else get_latest_run()) + opt_yaml = last.parent.parent / 'opt.yaml' # train options yaml + opt_data = opt.data # original dataset + if opt_yaml.is_file(): + with open(opt_yaml, errors='ignore') as f: + d = yaml.safe_load(f) + else: + d = torch.load(last, map_location='cpu')['opt'] + opt = argparse.Namespace(**d) # replace + opt.cfg, opt.weights, opt.resume = '', str(last), True # reinstate + if is_url(opt_data): + opt.data = check_file(opt_data) # avoid HUB resume auth timeout + else: + opt.data, opt.cfg, opt.hyp, opt.weights, opt.project = \ + check_file(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp), str(opt.weights), str(opt.project) # checks + assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified' + if opt.evolve: + if opt.project == str(ROOT / 'runs/train'): # if default project name, rename to runs/evolve + opt.project = str(ROOT / 'runs/evolve') + opt.exist_ok, opt.resume = opt.resume, False # pass resume to exist_ok and disable resume + if opt.name == 'cfg': + opt.name = Path(opt.cfg).stem # use model.yaml as name + opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) + + # DDP mode + device = select_device(opt.device, batch_size=opt.batch_size) + if LOCAL_RANK != -1: + msg = 'is not compatible with YOLOv5 Multi-GPU DDP training' + assert not opt.image_weights, f'--image-weights {msg}' + assert not opt.evolve, f'--evolve {msg}' + assert opt.batch_size != -1, f'AutoBatch with --batch-size -1 {msg}, please pass a valid --batch-size' + assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE' + assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' + torch.cuda.set_device(LOCAL_RANK) + device = torch.device('cuda', LOCAL_RANK) + dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") + + # Train + if not opt.evolve: + train(opt.hyp, opt, device, callbacks) + + # Evolve hyperparameters (optional) + else: + # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit) + meta = { + 'lr0': (1, 1e-5, 1e-1), # initial learning rate (SGD=1E-2, Adam=1E-3) + 'lrf': (1, 0.01, 1.0), # final OneCycleLR learning rate (lr0 * lrf) + 'momentum': (0.3, 0.6, 0.98), # SGD momentum/Adam beta1 + 'weight_decay': (1, 0.0, 0.001), # optimizer weight decay + 'warmup_epochs': (1, 0.0, 5.0), # warmup epochs (fractions ok) + 'warmup_momentum': (1, 0.0, 0.95), # warmup initial momentum + 'warmup_bias_lr': (1, 0.0, 0.2), # warmup initial bias lr + 'box': (1, 0.02, 0.2), # box loss gain + 'cls': (1, 0.2, 4.0), # cls loss gain + 'cls_pw': (1, 0.5, 2.0), # cls BCELoss positive_weight + 'obj': (1, 0.2, 4.0), # obj loss gain (scale with pixels) + 'obj_pw': (1, 0.5, 2.0), # obj BCELoss positive_weight + 'iou_t': (0, 0.1, 0.7), # IoU training threshold + 'anchor_t': (1, 2.0, 8.0), # anchor-multiple threshold + 'anchors': 
(2, 2.0, 10.0),  # anchors per output grid (0 to ignore)
+            'fl_gamma': (0, 0.0, 2.0),  # focal loss gamma (efficientDet default gamma=1.5)
+            'hsv_h': (1, 0.0, 0.1),  # image HSV-Hue augmentation (fraction)
+            'hsv_s': (1, 0.0, 0.9),  # image HSV-Saturation augmentation (fraction)
+            'hsv_v': (1, 0.0, 0.9),  # image HSV-Value augmentation (fraction)
+            'degrees': (1, 0.0, 45.0),  # image rotation (+/- deg)
+            'translate': (1, 0.0, 0.9),  # image translation (+/- fraction)
+            'scale': (1, 0.0, 0.9),  # image scale (+/- gain)
+            'shear': (1, 0.0, 10.0),  # image shear (+/- deg)
+            'perspective': (0, 0.0, 0.001),  # image perspective (+/- fraction), range 0-0.001
+            'flipud': (1, 0.0, 1.0),  # image flip up-down (probability)
+            'fliplr': (0, 0.0, 1.0),  # image flip left-right (probability)
+            'mosaic': (1, 0.0, 1.0),  # image mosaic (probability)
+            'mixup': (1, 0.0, 1.0),  # image mixup (probability)
+            'copy_paste': (1, 0.0, 1.0)}  # segment copy-paste (probability)
+
+        with open(opt.hyp, errors='ignore') as f:
+            hyp = yaml.safe_load(f)  # load hyps dict
+            if 'anchors' not in hyp:  # anchors commented in hyp.yaml
+                hyp['anchors'] = 3
+        if opt.noautoanchor:
+            del hyp['anchors'], meta['anchors']
+        opt.noval, opt.nosave, save_dir = True, True, Path(opt.save_dir)  # only val/save final epoch
+        # ei = [isinstance(x, (int, float)) for x in hyp.values()]  # evolvable indices
+        evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv'
+        if opt.bucket:
+            os.system(f'gsutil cp gs://{opt.bucket}/evolve.csv {evolve_csv}')  # download evolve.csv if exists
+
+        for _ in range(opt.evolve):  # generations to evolve
+            if evolve_csv.exists():  # if evolve.csv exists: select best hyps and mutate
+                # Select parent(s)
+                parent = 'single'  # parent selection method: 'single' or 'weighted'
+                x = np.loadtxt(evolve_csv, ndmin=2, delimiter=',', skiprows=1)
+                n = min(5, len(x))  # number of previous results to consider
+                x = x[np.argsort(-fitness(x))][:n]  # top n mutations
+                w = fitness(x) - fitness(x).min() + 1E-6  # weights (sum > 0)
+                if parent == 'single' or len(x) == 1:
+                    # x = x[random.randint(0, n - 1)]  # random selection
+                    x = x[random.choices(range(n), weights=w)[0]]  # weighted selection
+                elif parent == 'weighted':
+                    x = (x * w.reshape(n, 1)).sum(0) / w.sum()  # weighted combination
+
+                # Mutate
+                mp, s = 0.8, 0.2  # mutation probability, sigma
+                npr = np.random
+                npr.seed(int(time.time()))
+                g = np.array([meta[k][0] for k in hyp.keys()])  # gains 0-1
+                ng = len(meta)
+                v = np.ones(ng)
+                while all(v == 1):  # mutate until a change occurs (prevent duplicates)
+                    v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0)
+                for i, k in enumerate(hyp.keys()):  # plt.hist(v.ravel(), 300)
+                    hyp[k] = float(x[i + 7] * v[i])  # mutate
+
+            # Constrain to limits
+            for k, v in meta.items():
+                hyp[k] = max(hyp[k], v[1])  # lower limit
+                hyp[k] = min(hyp[k], v[2])  # upper limit
+                hyp[k] = round(hyp[k], 5)  # significant digits
+
+            # Train mutation
+            results = train(hyp.copy(), opt, device, callbacks)
+            callbacks = Callbacks()
+            # Write mutation results
+            print_mutation(results, hyp.copy(), save_dir, opt.bucket)
+
+        # Plot results
+        plot_evolve(evolve_csv)
+        LOGGER.info(f'Hyperparameter evolution finished {opt.evolve} generations\n'
+                    f"Results saved to {colorstr('bold', save_dir)}\n"
+                    f'Usage example: $ python train.py --hyp {evolve_yaml}')
+
+
+def run(**kwargs):
+    # Usage: import train; train.run(data='coco128.yaml', imgsz=320, weights='yolov5m.pt')
+    opt = parse_opt(True)
+    for k, v in kwargs.items():
+        setattr(opt, k, v)
+    main(opt)
+    return opt
+
+
+if __name__ == "__main__":
+    opt = parse_opt()
+    main(opt)
diff --git a/segment/val.py b/segment/val.py
new file mode 100644
index 000000000000..138aa00aaed3
--- /dev/null
+++ b/segment/val.py
@@ -0,0 +1,471 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+"""
+Validate a trained YOLOv5 segment model on a segment dataset
+
+Usage:
+    $ bash data/scripts/get_coco.sh --val --segments  # download COCO-segments val split (1G, 5000 images)
+    $ python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640  # validate COCO-segments
+
+Usage - formats:
+    $ python segment/val.py --weights yolov5s-seg.pt                 # PyTorch
+                                      yolov5s-seg.torchscript        # TorchScript
+                                      yolov5s-seg.onnx               # ONNX Runtime or OpenCV DNN with --dnn
+                                      yolov5s-seg.xml                # OpenVINO
+                                      yolov5s-seg.engine             # TensorRT
+                                      yolov5s-seg.mlmodel            # CoreML (macOS-only)
+                                      yolov5s-seg_saved_model        # TensorFlow SavedModel
+                                      yolov5s-seg.pb                 # TensorFlow GraphDef
+                                      yolov5s-seg.tflite             # TensorFlow Lite
+                                      yolov5s-seg_edgetpu.tflite     # TensorFlow Edge TPU
+                                      yolov5s-seg_paddle_model       # PaddlePaddle
+"""
+
+import argparse
+import json
+import os
+import sys
+from multiprocessing.pool import ThreadPool
+from pathlib import Path
+
+import numpy as np
+import torch
+from tqdm import tqdm
+
+FILE = Path(__file__).resolve()
+ROOT = FILE.parents[1]  # YOLOv5 root directory
+if str(ROOT) not in sys.path:
+    sys.path.append(str(ROOT))  # add ROOT to PATH
+ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
+
+import torch.nn.functional as F
+
+from models.common import DetectMultiBackend
+from models.yolo import SegmentationModel
+from utils.callbacks import Callbacks
+from utils.general import (LOGGER, NUM_THREADS, Profile, check_dataset, check_img_size, check_requirements, check_yaml,
+                           coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, print_args,
+                           scale_coords, xywh2xyxy, xyxy2xywh)
+from utils.metrics import ConfusionMatrix, box_iou
+from utils.plots import output_to_target, plot_val_study
+from utils.segment.dataloaders import create_dataloader
+from utils.segment.general import mask_iou, process_mask, process_mask_upsample, scale_image
+from utils.segment.metrics import Metrics, ap_per_class_box_and_mask
+from utils.segment.plots import plot_images_and_masks
+from utils.torch_utils import de_parallel, select_device, smart_inference_mode
+
+
+def save_one_txt(predn, save_conf, shape, file):
+    # Save one txt result
+    gn = torch.tensor(shape)[[1, 0, 1, 0]]  # normalization gain whwh
+    for *xyxy, conf, cls in predn.tolist():
+        xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
+        line = (cls, *xywh, conf) if save_conf else (cls, *xywh)  # label format
+        with open(file, 'a') as f:
+            f.write(('%g ' * len(line)).rstrip() % line + '\n')
+
+
+def save_one_json(predn, jdict, path, class_map, pred_masks):
+    # Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}
+    from pycocotools.mask import encode
+
+    def single_encode(x):
+        rle = encode(np.asarray(x[:, :, None], order="F", dtype="uint8"))[0]
+        rle["counts"] = rle["counts"].decode("utf-8")
+        return rle
+
+    image_id = int(path.stem) if path.stem.isnumeric() else path.stem
+    box = xyxy2xywh(predn[:, :4])  # xywh
+    box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
+    pred_masks = np.transpose(pred_masks, (2, 0, 1))
+    with ThreadPool(NUM_THREADS) as pool:
+        rles = pool.map(single_encode, pred_masks)
+    for i, (p, b) in enumerate(zip(predn.tolist(), 
box.tolist())): + jdict.append({ + 'image_id': image_id, + 'category_id': class_map[int(p[5])], + 'bbox': [round(x, 3) for x in b], + 'score': round(p[4], 5), + 'segmentation': rles[i]}) + + +def process_batch(detections, labels, iouv, pred_masks=None, gt_masks=None, overlap=False, masks=False): + """ + Return correct prediction matrix + Arguments: + detections (array[N, 6]), x1, y1, x2, y2, conf, class + labels (array[M, 5]), class, x1, y1, x2, y2 + Returns: + correct (array[N, 10]), for 10 IoU levels + """ + if masks: + if overlap: + nl = len(labels) + index = torch.arange(nl, device=gt_masks.device).view(nl, 1, 1) + 1 + gt_masks = gt_masks.repeat(nl, 1, 1) # shape(1,640,640) -> (n,640,640) + gt_masks = torch.where(gt_masks == index, 1.0, 0.0) + if gt_masks.shape[1:] != pred_masks.shape[1:]: + gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode="bilinear", align_corners=False)[0] + gt_masks = gt_masks.gt_(0.5) + iou = mask_iou(gt_masks.view(gt_masks.shape[0], -1), pred_masks.view(pred_masks.shape[0], -1)) + else: # boxes + iou = box_iou(labels[:, 1:], detections[:, :4]) + + correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool) + correct_class = labels[:, 0:1] == detections[:, 5] + for i in range(len(iouv)): + x = torch.where((iou >= iouv[i]) & correct_class) # IoU > threshold and classes match + if x[0].shape[0]: + matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() # [label, detect, iou] + if x[0].shape[0] > 1: + matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 1], return_index=True)[1]] + # matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 0], return_index=True)[1]] + correct[matches[:, 1].astype(int), i] = True + return torch.tensor(correct, dtype=torch.bool, device=iouv.device) + + +@smart_inference_mode() +def run( + data, + weights=None, # model.pt path(s) + batch_size=32, # batch size + imgsz=640, # inference size (pixels) + conf_thres=0.001, # confidence threshold + iou_thres=0.6, # NMS IoU threshold + max_det=300, # maximum detections per image + task='val', # train, val, test, speed or study + device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu + workers=8, # max dataloader workers (per RANK in DDP mode) + single_cls=False, # treat as single-class dataset + augment=False, # augmented inference + verbose=False, # verbose output + save_txt=False, # save results to *.txt + save_hybrid=False, # save label+prediction hybrid results to *.txt + save_conf=False, # save confidences in --save-txt labels + save_json=False, # save a COCO-JSON results file + project=ROOT / 'runs/val-seg', # save to project/name + name='exp', # save to project/name + exist_ok=False, # existing project/name ok, do not increment + half=True, # use FP16 half-precision inference + dnn=False, # use OpenCV DNN for ONNX inference + model=None, + dataloader=None, + save_dir=Path(''), + plots=True, + overlap=False, + mask_downsample_ratio=1, + compute_loss=None, + callbacks=Callbacks(), +): + if save_json: + check_requirements(['pycocotools']) + process = process_mask_upsample # more accurate + else: + process = process_mask # faster + + # Initialize/load model and set device + training = model is not None + if training: # called by train.py + device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model + half &= device.type != 'cpu' # half precision only supported on CUDA + model.half() if half else model.float() + nm = de_parallel(model).model[-1].nm # number of masks + else: # called directly + device = select_device(device, batch_size=batch_size) + + # Directories + save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run + (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir + + # Load model + model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) + stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine + imgsz = check_img_size(imgsz, s=stride) # check image size + half = model.fp16 # FP16 supported on limited backends with CUDA + nm = de_parallel(model).model.model[-1].nm if isinstance(model, SegmentationModel) else 32 # number of masks + if engine: + batch_size = model.batch_size + else: + device = model.device + if not (pt or jit): + batch_size = 1 # export.py models default to batch-size 1 + LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models') + + # Data + data = check_dataset(data) # check + + # Configure + model.eval() + cuda = device.type != 'cpu' + is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'coco{os.sep}val2017.txt') # COCO dataset + nc = 1 if single_cls else int(data['nc']) # number of classes + iouv = torch.linspace(0.5, 0.95, 10, device=device) # iou vector for mAP@0.5:0.95 + niou = iouv.numel() + + # Dataloader + if not training: + if pt and not single_cls: # check --weights are trained on --data + ncm = model.model.nc + assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \ + f'classes). Pass correct combination of --weights and --data that are trained together.' 
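+        # Editor's note: an illustrative, commented-out sketch of calling this entry point
+        # programmatically; the keyword values below are hypothetical examples, not part of this patch:
+        #   import segment.val as val
+        #   metrics, maps, times = val.run(data='data/coco128-seg.yaml', weights='yolov5s-seg.pt', imgsz=640)
+        #   # metrics = 8 values (box P, R, mAP50, mAP50-95, then the same for masks) followed by the
+        #   # 4 validation loss terms (zeros unless compute_loss is passed); maps = per-class mAPs;
+        #   # times = per-image pre-process/inference/NMS speeds in ms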
+ model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz)) # warmup + pad = 0.0 if task in ('speed', 'benchmark') else 0.5 + rect = False if task == 'benchmark' else pt # square inference for benchmarks + task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images + dataloader = create_dataloader(data[task], + imgsz, + batch_size, + stride, + single_cls, + pad=pad, + rect=rect, + workers=workers, + prefix=colorstr(f'{task}: '), + overlap_mask=overlap, + mask_downsample_ratio=mask_downsample_ratio)[0] + + seen = 0 + confusion_matrix = ConfusionMatrix(nc=nc) + names = model.names if hasattr(model, 'names') else model.module.names # get class names + if isinstance(names, (list, tuple)): # old format + names = dict(enumerate(names)) + class_map = coco80_to_coco91_class() if is_coco else list(range(1000)) + s = ('%22s' + '%11s' * 10) % ('Class', 'Images', 'Instances', 'Box(P', "R", "mAP50", "mAP50-95)", "Mask(P", "R", + "mAP50", "mAP50-95)") + dt = Profile(), Profile(), Profile() + metrics = Metrics() + loss = torch.zeros(4, device=device) + jdict, stats = [], [] + # callbacks.run('on_val_start') + pbar = tqdm(dataloader, desc=s, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar + for batch_i, (im, targets, paths, shapes, masks) in enumerate(pbar): + # callbacks.run('on_val_batch_start') + with dt[0]: + if cuda: + im = im.to(device, non_blocking=True) + targets = targets.to(device) + masks = masks.to(device) + masks = masks.float() + im = im.half() if half else im.float() # uint8 to fp16/32 + im /= 255 # 0 - 255 to 0.0 - 1.0 + nb, _, height, width = im.shape # batch size, channels, height, width + + # Inference + with dt[1]: + preds, protos, train_out = model(im) if compute_loss else (*model(im, augment=augment)[:2], None) + + # Loss + if compute_loss: + loss += compute_loss((train_out, protos), targets, masks)[1] # box, obj, cls + + # NMS + targets[:, 2:] *= torch.tensor((width, height, width, height), device=device) # to pixels + lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling + with dt[2]: + preds = non_max_suppression(preds, + conf_thres, + iou_thres, + labels=lb, + multi_label=True, + agnostic=single_cls, + max_det=max_det, + nm=nm) + + # Metrics + plot_masks = [] # masks for plotting + for si, (pred, proto) in enumerate(zip(preds, protos)): + labels = targets[targets[:, 0] == si, 1:] + nl, npr = labels.shape[0], pred.shape[0] # number of labels, predictions + path, shape = Path(paths[si]), shapes[si][0] + correct_masks = torch.zeros(npr, niou, dtype=torch.bool, device=device) # init + correct_bboxes = torch.zeros(npr, niou, dtype=torch.bool, device=device) # init + seen += 1 + + if npr == 0: + if nl: + stats.append((correct_masks, correct_bboxes, *torch.zeros((2, 0), device=device), labels[:, 0])) + if plots: + confusion_matrix.process_batch(detections=None, labels=labels[:, 0]) + continue + + # Masks + midx = [si] if overlap else targets[:, 0] == si + gt_masks = masks[midx] + pred_masks = process(proto, pred[:, 6:], pred[:, :4], shape=im[si].shape[1:]) + + # Predictions + if single_cls: + pred[:, 5] = 0 + predn = pred.clone() + scale_coords(im[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred + + # Evaluate + if nl: + tbox = xywh2xyxy(labels[:, 1:5]) # target boxes + scale_coords(im[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels + labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels + correct_bboxes = process_batch(predn, labelsn, 
iouv) + correct_masks = process_batch(predn, labelsn, iouv, pred_masks, gt_masks, overlap=overlap, masks=True) + if plots: + confusion_matrix.process_batch(predn, labelsn) + stats.append((correct_masks, correct_bboxes, pred[:, 4], pred[:, 5], labels[:, 0])) # (conf, pcls, tcls) + + pred_masks = torch.as_tensor(pred_masks, dtype=torch.uint8) + if plots and batch_i < 3: + plot_masks.append(pred_masks[:15].cpu()) # filter top 15 to plot + + # Save/log + if save_txt: + save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt') + if save_json: + pred_masks = scale_image(im[si].shape[1:], + pred_masks.permute(1, 2, 0).contiguous().cpu().numpy(), shape, shapes[si][1]) + save_one_json(predn, jdict, path, class_map, pred_masks) # append to COCO-JSON dictionary + # callbacks.run('on_val_image_end', pred, predn, path, names, im[si]) + + # Plot images + if plots and batch_i < 3: + if len(plot_masks): + plot_masks = torch.cat(plot_masks, dim=0) + plot_images_and_masks(im, targets, masks, paths, save_dir / f'val_batch{batch_i}_labels.jpg', names) + plot_images_and_masks(im, output_to_target(preds, max_det=15), plot_masks, paths, + save_dir / f'val_batch{batch_i}_pred.jpg', names) # pred + + # callbacks.run('on_val_batch_end') + + # Compute metrics + stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)] # to numpy + if len(stats) and stats[0].any(): + results = ap_per_class_box_and_mask(*stats, plot=plots, save_dir=save_dir, names=names) + metrics.update(results) + nt = np.bincount(stats[4].astype(int), minlength=nc) # number of targets per class + + # Print results + pf = '%22s' + '%11i' * 2 + '%11.3g' * 8 # print format + LOGGER.info(pf % ("all", seen, nt.sum(), *metrics.mean_results())) + if nt.sum() == 0: + LOGGER.warning(f'WARNING: no labels found in {task} set, can not compute metrics without labels ⚠️') + + # Print results per class + if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats): + for i, c in enumerate(metrics.ap_class_index): + LOGGER.info(pf % (names[c], seen, nt[c], *metrics.class_result(i))) + + # Print speeds + t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image + if not training: + shape = (batch_size, 3, imgsz, imgsz) + LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t) + + # Plots + if plots: + confusion_matrix.plot(save_dir=save_dir, names=list(names.values())) + # callbacks.run('on_val_end') + + mp_bbox, mr_bbox, map50_bbox, map_bbox, mp_mask, mr_mask, map50_mask, map_mask = metrics.mean_results() + + # Save JSON + if save_json and len(jdict): + w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights + anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json') # annotations json + pred_json = str(save_dir / f"{w}_predictions.json") # predictions json + LOGGER.info(f'\nEvaluating pycocotools mAP... 
saving {pred_json}...') + with open(pred_json, 'w') as f: + json.dump(jdict, f) + + try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb + from pycocotools.coco import COCO + from pycocotools.cocoeval import COCOeval + + anno = COCO(anno_json) # init annotations api + pred = anno.loadRes(pred_json) # init predictions api + results = [] + for eval in COCOeval(anno, pred, 'bbox'), COCOeval(anno, pred, 'segm'): + if is_coco: + eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.im_files] # img ID to evaluate + eval.evaluate() + eval.accumulate() + eval.summarize() + results.extend(eval.stats[:2]) # update results (mAP@0.5:0.95, mAP@0.5) + map_bbox, map50_bbox, map_mask, map50_mask = results + except Exception as e: + LOGGER.info(f'pycocotools unable to run: {e}') + + # Return results + model.float() # for training + if not training: + s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' + LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") + final_metric = mp_bbox, mr_bbox, map50_bbox, map_bbox, mp_mask, mr_mask, map50_mask, map_mask + return (*final_metric, *(loss.cpu() / len(dataloader)).tolist()), metrics.get_maps(nc), t + + +def parse_opt(): + parser = argparse.ArgumentParser() + parser.add_argument('--data', type=str, default=ROOT / 'data/coco128-seg.yaml', help='dataset.yaml path') + parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-seg.pt', help='model path(s)') + parser.add_argument('--batch-size', type=int, default=32, help='batch size') + parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)') + parser.add_argument('--conf-thres', type=float, default=0.001, help='confidence threshold') + parser.add_argument('--iou-thres', type=float, default=0.6, help='NMS IoU threshold') + parser.add_argument('--max-det', type=int, default=300, help='maximum detections per image') + parser.add_argument('--task', default='val', help='train, val, test, speed or study') + parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') + parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') + parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset') + parser.add_argument('--augment', action='store_true', help='augmented inference') + parser.add_argument('--verbose', action='store_true', help='report mAP by class') + parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') + parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt') + parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') + parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file') + parser.add_argument('--project', default=ROOT / 'runs/val-seg', help='save results to project/name') + parser.add_argument('--name', default='exp', help='save to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') + parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') + opt = parser.parse_args() + opt.data = check_yaml(opt.data) # check YAML + # opt.save_json |= opt.data.endswith('coco.yaml') + opt.save_txt |= opt.save_hybrid + print_args(vars(opt)) + return opt + + +def main(opt): + check_requirements(requirements=ROOT / 'requirements.txt', exclude=('tensorboard', 'thop')) + + if opt.task in ('train', 'val', 'test'): # run normally + if opt.conf_thres > 0.001: # https://github.com/ultralytics/yolov5/issues/1466 + LOGGER.info(f'WARNING: confidence threshold {opt.conf_thres} > 0.001 produces invalid results ⚠️') + if opt.save_hybrid: + LOGGER.info('WARNING: --save-hybrid will return high mAP from hybrid labels, not from predictions alone ⚠️') + run(**vars(opt)) + + else: + weights = opt.weights if isinstance(opt.weights, list) else [opt.weights] + opt.half = True # FP16 for fastest results + if opt.task == 'speed': # speed benchmarks + # python val.py --task speed --data coco.yaml --batch 1 --weights yolov5n.pt yolov5s.pt... + opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False + for opt.weights in weights: + run(**vars(opt), plots=False) + + elif opt.task == 'study': # speed vs mAP benchmarks + # python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n.pt yolov5s.pt... 
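+            # Editor's note (illustrative sketch, not part of the original patch): each sweep below
+            # writes study_<data>_<weights>.txt with one row per image size, holding the 12 metric/loss
+            # values returned by run() followed by the 3 per-image speed measurements (ms), e.g.:
+            #   import numpy as np
+            #   y = np.loadtxt('study_coco_yolov5s-seg.txt', ndmin=2)  # hypothetical filename
+            #   best = y[y[:, 3].argmax()]  # row with the highest box mAP50-95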
+ for opt.weights in weights: + f = f'study_{Path(opt.data).stem}_{Path(opt.weights).stem}.txt' # filename to save to + x, y = list(range(256, 1536 + 128, 128)), [] # x axis (image sizes), y axis + for opt.imgsz in x: # img-size + LOGGER.info(f'\nRunning {f} --imgsz {opt.imgsz}...') + r, _, t = run(**vars(opt), plots=False) + y.append(r + t) # results and times + np.savetxt(f, y, fmt='%10.4g') # save + os.system('zip -r study.zip study_*.txt') + plot_val_study(x=x) # plot + + +if __name__ == "__main__": + opt = parse_opt() + main(opt) diff --git a/utils/dataloaders.py b/utils/dataloaders.py old mode 100755 new mode 100644 index d8ef11fd94b4..c04be853c580 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -484,6 +484,7 @@ def __init__(self, self.im_files = [self.im_files[i] for i in irect] self.label_files = [self.label_files[i] for i in irect] self.labels = [self.labels[i] for i in irect] + self.segments = [self.segments[i] for i in irect] self.shapes = s[irect] # wh ar = ar[irect] diff --git a/utils/general.py b/utils/general.py old mode 100755 new mode 100644 index f5fb2c93a3d5..8633511f89f5 --- a/utils/general.py +++ b/utils/general.py @@ -798,15 +798,18 @@ def clip_coords(boxes, shape): boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, shape[0]) # y1, y2 -def non_max_suppression(prediction, - conf_thres=0.25, - iou_thres=0.45, - classes=None, - agnostic=False, - multi_label=False, - labels=(), - max_det=300): - """Non-Maximum Suppression (NMS) on inference results to reject overlapping bounding boxes +def non_max_suppression( + prediction, + conf_thres=0.25, + iou_thres=0.45, + classes=None, + agnostic=False, + multi_label=False, + labels=(), + max_det=300, + nm=0, # number of masks +): + """Non-Maximum Suppression (NMS) on inference results to reject overlapping detections Returns: list of detections, on (n,6) tensor per image [xyxy, conf, cls] @@ -816,7 +819,7 @@ def non_max_suppression(prediction, prediction = prediction[0] # select only inference output bs = prediction.shape[0] # batch size - nc = prediction.shape[2] - 5 # number of classes + nc = prediction.shape[2] - nm - 5 # number of classes xc = prediction[..., 4] > conf_thres # candidates # Checks @@ -827,13 +830,14 @@ def non_max_suppression(prediction, # min_wh = 2 # (pixels) minimum box width and height max_wh = 7680 # (pixels) maximum box width and height max_nms = 30000 # maximum number of boxes into torchvision.ops.nms() - time_limit = 0.3 + 0.03 * bs # seconds to quit after + time_limit = 0.5 + 0.05 * bs # seconds to quit after redundant = True # require redundant detections multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img) merge = False # use merge-NMS t = time.time() - output = [torch.zeros((0, 6), device=prediction.device)] * bs + mi = 5 + nc # mask start index + output = [torch.zeros((0, 6 + nm), device=prediction.device)] * bs for xi, x in enumerate(prediction): # image index, image inference # Apply constraints # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height @@ -842,7 +846,7 @@ def non_max_suppression(prediction, # Cat apriori labels if autolabelling if labels and len(labels[xi]): lb = labels[xi] - v = torch.zeros((len(lb), nc + 5), device=x.device) + v = torch.zeros((len(lb), nc + nm + 5), device=x.device) v[:, :4] = lb[:, 1:5] # box v[:, 4] = 1.0 # conf v[range(len(lb)), lb[:, 0].long() + 5] = 1.0 # cls @@ -855,16 +859,17 @@ def non_max_suppression(prediction, # Compute conf x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf - # Box (center x, center y, width, 
height) to (x1, y1, x2, y2) - box = xywh2xyxy(x[:, :4]) + # Box/Mask + box = xywh2xyxy(x[:, :4]) # center_x, center_y, width, height) to (x1, y1, x2, y2) + mask = x[:, mi:] # zero columns if no masks # Detections matrix nx6 (xyxy, conf, cls) if multi_label: - i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T - x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1) + i, j = (x[:, 5:mi] > conf_thres).nonzero(as_tuple=False).T + x = torch.cat((box[i], x[i, 5 + j, None], j[:, None].float(), mask[i]), 1) else: # best class only - conf, j = x[:, 5:].max(1, keepdim=True) - x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres] + conf, j = x[:, 5:mi].max(1, keepdim=True) + x = torch.cat((box, conf, j.float(), mask), 1)[conf.view(-1) > conf_thres] # Filter by class if classes is not None: @@ -880,6 +885,8 @@ def non_max_suppression(prediction, continue elif n > max_nms: # excess boxes x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence + else: + x = x[x[:, 4].argsort(descending=True)] # sort by confidence # Batched NMS c = x[:, 5:6] * (0 if agnostic else max_wh) # classes diff --git a/utils/metrics.py b/utils/metrics.py index ee7d33982cfc..001813cbcd65 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -28,7 +28,7 @@ def smooth(y, f=0.05): return np.convolve(yp, np.ones(nf) / nf, mode='valid') # y-smoothed -def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=(), eps=1e-16): +def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=(), eps=1e-16, prefix=""): """ Compute the average precision, given the recall and precision curves. Source: https://github.com/rafaelpadilla/Object-Detection-Metrics. # Arguments @@ -83,10 +83,10 @@ def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names names = [v for k, v in names.items() if k in unique_classes] # list: only classes that have data names = dict(enumerate(names)) # to dict if plot: - plot_pr_curve(px, py, ap, Path(save_dir) / 'PR_curve.png', names) - plot_mc_curve(px, f1, Path(save_dir) / 'F1_curve.png', names, ylabel='F1') - plot_mc_curve(px, p, Path(save_dir) / 'P_curve.png', names, ylabel='Precision') - plot_mc_curve(px, r, Path(save_dir) / 'R_curve.png', names, ylabel='Recall') + plot_pr_curve(px, py, ap, Path(save_dir) / f'{prefix}PR_curve.png', names) + plot_mc_curve(px, f1, Path(save_dir) / f'{prefix}F1_curve.png', names, ylabel='F1') + plot_mc_curve(px, p, Path(save_dir) / f'{prefix}P_curve.png', names, ylabel='Precision') + plot_mc_curve(px, r, Path(save_dir) / f'{prefix}R_curve.png', names, ylabel='Recall') i = smooth(f1.mean(0), 0.1).argmax() # max F1 index p, r, f1 = p[:, i], r[:, i], f1[:, i] diff --git a/utils/plots.py b/utils/plots.py index 0530d0abdf48..d8d5b225a774 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -23,6 +23,7 @@ from utils.general import (CONFIG_DIR, FONT, LOGGER, check_font, check_requirements, clip_coords, increment_path, is_ascii, xywh2xyxy, xyxy2xywh) from utils.metrics import fitness +from utils.segment.general import scale_image # Settings RANK = int(os.getenv('RANK', -1)) @@ -113,6 +114,52 @@ def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 2 thickness=tf, lineType=cv2.LINE_AA) + def masks(self, masks, colors, im_gpu=None, alpha=0.5): + """Plot masks at once. 
+ Args: + masks (tensor): predicted masks on cuda, shape: [n, h, w] + colors (List[List[Int]]): colors for predicted masks, [[r, g, b] * n] + im_gpu (tensor): img is in cuda, shape: [3, h, w], range: [0, 1] + alpha (float): mask transparency: 0.0 fully transparent, 1.0 opaque + """ + if self.pil: + # convert to numpy first + self.im = np.asarray(self.im).copy() + if im_gpu is None: + # Add multiple masks of shape(h,w,n) with colors list([r,g,b], [r,g,b], ...) + if len(masks) == 0: + return + if isinstance(masks, torch.Tensor): + masks = torch.as_tensor(masks, dtype=torch.uint8) + masks = masks.permute(1, 2, 0).contiguous() + masks = masks.cpu().numpy() + # masks = np.ascontiguousarray(masks.transpose(1, 2, 0)) + masks = scale_image(masks.shape[:2], masks, self.im.shape) + masks = np.asarray(masks, dtype=np.float32) + colors = np.asarray(colors, dtype=np.float32) # shape(n,3) + s = masks.sum(2, keepdims=True).clip(0, 1) # add all masks together + masks = (masks @ colors).clip(0, 255) # (h,w,n) @ (n,3) = (h,w,3) + self.im[:] = masks * alpha + self.im * (1 - s * alpha) + else: + if len(masks) == 0: + self.im[:] = im_gpu.permute(1, 2, 0).contiguous().cpu().numpy() * 255 + colors = torch.tensor(colors, device=im_gpu.device, dtype=torch.float32) / 255.0 + colors = colors[:, None, None] # shape(n,1,1,3) + masks = masks.unsqueeze(3) # shape(n,h,w,1) + masks_color = masks * (colors * alpha) # shape(n,h,w,3) + + inv_alph_masks = (1 - masks * alpha).cumprod(0) # shape(n,h,w,1) + mcs = (masks_color * inv_alph_masks).sum(0) * 2 # mask color summand shape(n,h,w,3) + + im_gpu = im_gpu.flip(dims=[0]) # flip channel + im_gpu = im_gpu.permute(1, 2, 0).contiguous() # shape(h,w,3) + im_gpu = im_gpu * inv_alph_masks[-1] + mcs + im_mask = (im_gpu * 255).byte().cpu().numpy() + self.im[:] = scale_image(im_gpu.shape, im_mask, self.im.shape) + if self.pil: + # convert im back to PIL and update draw + self.fromarray(self.im) + def rectangle(self, xy, fill=None, outline=None, width=1): # Add rectangle to image (PIL-only) self.draw.rectangle(xy, fill, outline, width) @@ -124,6 +171,11 @@ def text(self, xy, text, txt_color=(255, 255, 255), anchor='top'): xy[1] += 1 - h self.draw.text(xy, text, fill=txt_color, font=self.font) + def fromarray(self, im): + # Update self.im from a numpy array + self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) + self.draw = ImageDraw.Draw(self.im) + def result(self): # Return annotated image as array return np.asarray(self.im) @@ -180,26 +232,31 @@ def butter_lowpass(cutoff, fs, order): return filtfilt(b, a, data) # forward-backward filter -def output_to_target(output): - # Convert model output to target format [batch_id, class_id, x, y, w, h, conf] +def output_to_target(output, max_det=300): + # Convert model output to target format [batch_id, class_id, x, y, w, h, conf] for plotting targets = [] for i, o in enumerate(output): - targets.extend([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf] for *box, conf, cls in o.cpu().numpy()) - return np.array(targets) + box, conf, cls = o[:max_det, :6].cpu().split((4, 1, 1), 1) + j = torch.full((conf.shape[0], 1), i) + targets.append(torch.cat((j, cls, xyxy2xywh(box), conf), 1)) + return torch.cat(targets, 0).numpy() @threaded -def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=1920, max_subplots=16): +def plot_images(images, targets, paths=None, fname='images.jpg', names=None): # Plot image grid with labels if isinstance(images, torch.Tensor): images = images.cpu().float().numpy() if 
isinstance(targets, torch.Tensor): targets = targets.cpu().numpy() - if np.max(images[0]) <= 1: - images *= 255 # de-normalise (optional) + + max_size = 1920 # max image size + max_subplots = 16 # max image subplots, i.e. 4x4 bs, _, h, w = images.shape # batch size, _, height, width bs = min(bs, max_subplots) # limit plot images ns = np.ceil(bs ** 0.5) # number of subplots (square) + if np.max(images[0]) <= 1: + images *= 255 # de-normalise (optional) # Build Image mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init diff --git a/utils/segment/__init__.py b/utils/segment/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/utils/segment/augmentations.py b/utils/segment/augmentations.py new file mode 100644 index 000000000000..169addedf0f5 --- /dev/null +++ b/utils/segment/augmentations.py @@ -0,0 +1,104 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Image augmentation functions +""" + +import math +import random + +import cv2 +import numpy as np + +from ..augmentations import box_candidates +from ..general import resample_segments, segment2box + + +def mixup(im, labels, segments, im2, labels2, segments2): + # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf + r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0 + im = (im * r + im2 * (1 - r)).astype(np.uint8) + labels = np.concatenate((labels, labels2), 0) + segments = np.concatenate((segments, segments2), 0) + return im, labels, segments + + +def random_perspective(im, + targets=(), + segments=(), + degrees=10, + translate=.1, + scale=.1, + shear=10, + perspective=0.0, + border=(0, 0)): + # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10)) + # targets = [cls, xyxy] + + height = im.shape[0] + border[0] * 2 # shape(h,w,c) + width = im.shape[1] + border[1] * 2 + + # Center + C = np.eye(3) + C[0, 2] = -im.shape[1] / 2 # x translation (pixels) + C[1, 2] = -im.shape[0] / 2 # y translation (pixels) + + # Perspective + P = np.eye(3) + P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y) + P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x) + + # Rotation and Scale + R = np.eye(3) + a = random.uniform(-degrees, degrees) + # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations + s = random.uniform(1 - scale, 1 + scale) + # s = 2 ** random.uniform(-scale, scale) + R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) + + # Shear + S = np.eye(3) + S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg) + S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg) + + # Translation + T = np.eye(3) + T[0, 2] = (random.uniform(0.5 - translate, 0.5 + translate) * width) # x translation (pixels) + T[1, 2] = (random.uniform(0.5 - translate, 0.5 + translate) * height) # y translation (pixels) + + # Combined rotation matrix + M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT + if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed + if perspective: + im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114)) + else: # affine + im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114)) + + # Visualize + # import matplotlib.pyplot as plt + # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel() + # ax[0].imshow(im[:, :, ::-1]) # base + # ax[1].imshow(im2[:, :, ::-1]) # warped + + # Transform 
label coordinates + n = len(targets) + new_segments = [] + if n: + new = np.zeros((n, 4)) + segments = resample_segments(segments) # upsample + for i, segment in enumerate(segments): + xy = np.ones((len(segment), 3)) + xy[:, :2] = segment + xy = xy @ M.T # transform + xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]) # perspective rescale or affine + + # clip + new[i] = segment2box(xy, width, height) + new_segments.append(xy) + + # filter candidates + i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01) + targets = targets[i] + targets[:, 1:5] = new[i] + new_segments = np.array(new_segments)[i] + + return im, targets, new_segments diff --git a/utils/segment/dataloaders.py b/utils/segment/dataloaders.py new file mode 100644 index 000000000000..f6fe642d077f --- /dev/null +++ b/utils/segment/dataloaders.py @@ -0,0 +1,330 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Dataloaders +""" + +import os +import random + +import cv2 +import numpy as np +import torch +from torch.utils.data import DataLoader, distributed + +from ..augmentations import augment_hsv, copy_paste, letterbox +from ..dataloaders import InfiniteDataLoader, LoadImagesAndLabels, seed_worker +from ..general import LOGGER, xyn2xy, xywhn2xyxy, xyxy2xywhn +from ..torch_utils import torch_distributed_zero_first +from .augmentations import mixup, random_perspective + + +def create_dataloader(path, + imgsz, + batch_size, + stride, + single_cls=False, + hyp=None, + augment=False, + cache=False, + pad=0.0, + rect=False, + rank=-1, + workers=8, + image_weights=False, + quad=False, + prefix='', + shuffle=False, + mask_downsample_ratio=1, + overlap_mask=False): + if rect and shuffle: + LOGGER.warning('WARNING: --rect is incompatible with DataLoader shuffle, setting shuffle=False') + shuffle = False + with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP + dataset = LoadImagesAndLabelsAndMasks( + path, + imgsz, + batch_size, + augment=augment, # augmentation + hyp=hyp, # hyperparameters + rect=rect, # rectangular batches + cache_images=cache, + single_cls=single_cls, + stride=int(stride), + pad=pad, + image_weights=image_weights, + prefix=prefix, + downsample_ratio=mask_downsample_ratio, + overlap=overlap_mask) + + batch_size = min(batch_size, len(dataset)) + nd = torch.cuda.device_count() # number of CUDA devices + nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) # number of workers + sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) + loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates + # generator = torch.Generator() + # generator.manual_seed(0) + return loader( + dataset, + batch_size=batch_size, + shuffle=shuffle and sampler is None, + num_workers=nw, + sampler=sampler, + pin_memory=True, + collate_fn=LoadImagesAndLabelsAndMasks.collate_fn4 if quad else LoadImagesAndLabelsAndMasks.collate_fn, + worker_init_fn=seed_worker, + # generator=generator, + ), dataset + + +class LoadImagesAndLabelsAndMasks(LoadImagesAndLabels): # for training/testing + + def __init__( + self, + path, + img_size=640, + batch_size=16, + augment=False, + hyp=None, + rect=False, + image_weights=False, + cache_images=False, + single_cls=False, + stride=32, + pad=0, + prefix="", + downsample_ratio=1, + overlap=False, + ): + super().__init__(path, img_size, batch_size, augment, hyp, rect, image_weights, cache_images, single_cls, + stride, pad, prefix) + 
self.downsample_ratio = downsample_ratio
+        self.overlap = overlap
+
+    def __getitem__(self, index):
+        index = self.indices[index]  # linear, shuffled, or image_weights
+
+        hyp = self.hyp
+        mosaic = self.mosaic and random.random() < hyp['mosaic']
+        masks = []
+        if mosaic:
+            # Load mosaic
+            img, labels, segments = self.load_mosaic(index)
+            shapes = None
+
+            # MixUp augmentation
+            if random.random() < hyp["mixup"]:
+                img, labels, segments = mixup(img, labels, segments, *self.load_mosaic(random.randint(0, self.n - 1)))
+
+        else:
+            # Load image
+            img, (h0, w0), (h, w) = self.load_image(index)
+
+            # Letterbox
+            shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size  # final letterboxed shape
+            img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
+            shapes = (h0, w0), ((h / h0, w / w0), pad)  # for COCO mAP rescaling
+
+            labels = self.labels[index].copy()
+            # [array, array, ....], array.shape=(num_points, 2), xyxyxyxy
+            segments = self.segments[index].copy()
+            if len(segments):
+                for i_s in range(len(segments)):
+                    segments[i_s] = xyn2xy(
+                        segments[i_s],
+                        ratio[0] * w,
+                        ratio[1] * h,
+                        padw=pad[0],
+                        padh=pad[1],
+                    )
+            if labels.size:  # normalized xywh to pixel xyxy format
+                labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])
+
+            if self.augment:
+                img, labels, segments = random_perspective(
+                    img,
+                    labels,
+                    segments=segments,
+                    degrees=hyp["degrees"],
+                    translate=hyp["translate"],
+                    scale=hyp["scale"],
+                    shear=hyp["shear"],
+                    perspective=hyp["perspective"],
+                )
+
+        nl = len(labels)  # number of labels
+        if nl:
+            labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1e-3)
+            if self.overlap:
+                masks, sorted_idx = polygons2masks_overlap(img.shape[:2],
+                                                           segments,
+                                                           downsample_ratio=self.downsample_ratio)
+                masks = masks[None]  # (640, 640) -> (1, 640, 640)
+                labels = labels[sorted_idx]
+            else:
+                masks = polygons2masks(img.shape[:2], segments, color=1, downsample_ratio=self.downsample_ratio)
+
+        masks = (torch.from_numpy(masks) if len(masks) else torch.zeros(1 if self.overlap else nl, img.shape[0] //
+                                                                        self.downsample_ratio, img.shape[1] //
+                                                                        self.downsample_ratio))
+        # TODO: albumentations support
+        if self.augment:
+            # Albumentations
+            # some augmentations do not alter boxes or masks, so apply them as-is for now
+            img, labels = self.albumentations(img, labels)
+            nl = len(labels)  # update after albumentations
+
+            # HSV color-space
+            augment_hsv(img, hgain=hyp["hsv_h"], sgain=hyp["hsv_s"], vgain=hyp["hsv_v"])
+
+            # Flip up-down
+            if random.random() < hyp["flipud"]:
+                img = np.flipud(img)
+                if nl:
+                    labels[:, 2] = 1 - labels[:, 2]
+                    masks = torch.flip(masks, dims=[1])
+
+            # Flip left-right
+            if random.random() < hyp["fliplr"]:
+                img = np.fliplr(img)
+                if nl:
+                    labels[:, 1] = 1 - labels[:, 1]
+                    masks = torch.flip(masks, dims=[2])
+
+            # Cutouts  # labels = cutout(img, labels, p=0.5)
+
+        labels_out = torch.zeros((nl, 6))
+        if nl:
+            labels_out[:, 1:] = torch.from_numpy(labels)
+
+        # Convert
+        img = img.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
+        img = np.ascontiguousarray(img)
+
+        return (torch.from_numpy(img), labels_out, self.im_files[index], shapes, masks)
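
Taken together with `collate_fn` further below, `__getitem__` defines the batch contract for segmentation training: a detection batch plus one extra `masks` tensor whose leading dimension depends on `overlap`. A minimal sketch of inspecting one batch (the dataset path is illustrative and assumes a local `coco128-seg` checkout; this is an editor's example, not part of the patch):

```python
# Sketch: shapes of one segmentation batch, assuming overlap_mask=True and ratio 4
from utils.segment.dataloaders import create_dataloader

loader, dataset = create_dataloader('../datasets/coco128-seg/images/train2017', imgsz=640, batch_size=4,
                                    stride=32, mask_downsample_ratio=4, overlap_mask=True)
imgs, targets, paths, shapes, masks = next(iter(loader))
print(imgs.shape)     # torch.Size([4, 3, 640, 640])   BCHW
print(targets.shape)  # torch.Size([n, 6])             (image_index, class, x, y, w, h)
print(masks.shape)    # torch.Size([4, 160, 160])      one index-encoded mask per image
# with overlap_mask=False, masks would instead stack to (total_instances, 160, 160)
```
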
+
+    def load_mosaic(self, index):
+        # YOLOv5 4-mosaic loader. Loads 1 image + 3 random images into a 4-image mosaic
+        labels4, segments4 = [], []
+        s = self.img_size
+        yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border)  # mosaic center x, y
+
+        # 3 additional image indices
+        indices = [index] + random.choices(self.indices, k=3)  # 3 additional image indices
+        for i, index in enumerate(indices):
+            # Load image
+            img, _, (h, w) = self.load_image(index)
+
+            # place img in img4
+            if i == 0:  # top left
+                img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles
+                x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc  # xmin, ymin, xmax, ymax (large image)
+                x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h  # xmin, ymin, xmax, ymax (small image)
+            elif i == 1:  # top right
+                x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
+                x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
+            elif i == 2:  # bottom left
+                x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
+                x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
+            elif i == 3:  # bottom right
+                x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
+                x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
+
+            img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # img4[ymin:ymax, xmin:xmax]
+            padw = x1a - x1b
+            padh = y1a - y1b
+
+            labels, segments = self.labels[index].copy(), self.segments[index].copy()
+
+            if labels.size:
+                labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh)  # normalized xywh to pixel xyxy format
+                segments = [xyn2xy(x, w, h, padw, padh) for x in segments]
+            labels4.append(labels)
+            segments4.extend(segments)
+
+        # Concat/clip labels
+        labels4 = np.concatenate(labels4, 0)
+        for x in (labels4[:, 1:], *segments4):
+            np.clip(x, 0, 2 * s, out=x)  # clip when using random_perspective()
+        # img4, labels4 = replicate(img4, labels4)  # replicate
+
+        # Augment
+        img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp["copy_paste"])
+        img4, labels4, segments4 = random_perspective(img4,
+                                                      labels4,
+                                                      segments4,
+                                                      degrees=self.hyp["degrees"],
+                                                      translate=self.hyp["translate"],
+                                                      scale=self.hyp["scale"],
+                                                      shear=self.hyp["shear"],
+                                                      perspective=self.hyp["perspective"],
+                                                      border=self.mosaic_border)  # border to remove
+        return img4, labels4, segments4
+
+    @staticmethod
+    def collate_fn(batch):
+        img, label, path, shapes, masks = zip(*batch)  # transposed
+        batched_masks = torch.cat(masks, 0)
+        for i, l in enumerate(label):
+            l[:, 0] = i  # add target image index for build_targets()
+        return torch.stack(img, 0), torch.cat(label, 0), path, shapes, batched_masks
+
+
+def polygon2mask(img_size, polygons, color=1, downsample_ratio=1):
+    """
+    Args:
+        img_size (tuple): The image size.
+        polygons (np.ndarray): [N, M], N is the number of polygons,
+            M is the number of coordinate values (2 per point).
+    """
+    mask = np.zeros(img_size, dtype=np.uint8)
+    polygons = np.asarray(polygons)
+    polygons = polygons.astype(np.int32)
+    shape = polygons.shape
+    polygons = polygons.reshape(shape[0], -1, 2)
+    cv2.fillPoly(mask, polygons, color=color)
+    nh, nw = (img_size[0] // downsample_ratio, img_size[1] // downsample_ratio)
+    # NOTE: filling the polygon first and resizing second keeps the loss
+    # calculation consistent with the mask_ratio=1 case.
+    mask = cv2.resize(mask, (nw, nh))
+    return mask
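
To make the flattened-coordinate convention concrete, here is a toy call of the helper just defined (the triangle coordinates are made up for illustration):

```python
import numpy as np

from utils.segment.dataloaders import polygon2mask  # the helper defined above

# One triangle in a 640x640 image, rasterized with 4x mask downsampling
triangle = np.array([100, 100, 500, 120, 300, 480], dtype=np.float32)  # x1,y1, x2,y2, x3,y3
mask = polygon2mask((640, 640), [triangle], color=1, downsample_ratio=4)
print(mask.shape, mask.dtype, mask.max())  # (160, 160) uint8 1
```
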
+
+
+def polygons2masks(img_size, polygons, color, downsample_ratio=1):
+    """
+    Args:
+        img_size (tuple): The image size.
+        polygons (list[np.ndarray]): each polygon is [N, M],
+            N is the number of polygons,
+            M is the number of coordinate values (2 per point).
+    """
+    masks = []
+    for si in range(len(polygons)):
+        mask = polygon2mask(img_size, [polygons[si].reshape(-1)], color, downsample_ratio)
+        masks.append(mask)
+    return np.array(masks)
+
+
+def polygons2masks_overlap(img_size, segments, downsample_ratio=1):
+    """Return an (img_size[0] // downsample_ratio, img_size[1] // downsample_ratio) overlap mask,
+    with instances encoded as indices 1..n, largest area drawn first."""
+    masks = np.zeros((img_size[0] // downsample_ratio, img_size[1] // downsample_ratio), dtype=np.uint8)
+    areas = []
+    ms = []
+    for si in range(len(segments)):
+        mask = polygon2mask(
+            img_size,
+            [segments[si].reshape(-1)],
+            downsample_ratio=downsample_ratio,
+            color=1,
+        )
+        ms.append(mask)
+        areas.append(mask.sum())
+    areas = np.asarray(areas)
+    index = np.argsort(-areas)
+    ms = np.array(ms)[index]
+    for i in range(len(segments)):
+        mask = ms[i] * (i + 1)
+        masks = masks + mask
+        masks = np.clip(masks, a_min=0, a_max=i + 1)
+    return masks, index
diff --git a/utils/segment/general.py b/utils/segment/general.py
new file mode 100644
index 000000000000..36547ed0889c
--- /dev/null
+++ b/utils/segment/general.py
@@ -0,0 +1,120 @@
+import cv2
+import torch
+import torch.nn.functional as F
+
+
+def crop_mask(masks, boxes):
+    """
+    "Crop" predicted masks by zeroing out everything not in the predicted bbox.
+    Vectorized by Chong (thanks Chong).
+
+    Args:
+        - masks should be a size [n, h, w] tensor of masks
+        - boxes should be a size [n, 4] tensor of bbox coords in relative point form
+    """
+
+    n, h, w = masks.shape
+    x1, y1, x2, y2 = torch.chunk(boxes[:, :, None], 4, 1)  # x1 shape(n,1,1)
+    r = torch.arange(w, device=masks.device, dtype=x1.dtype)[None, None, :]  # x coords shape(1,1,w)
+    c = torch.arange(h, device=masks.device, dtype=x1.dtype)[None, :, None]  # y coords shape(1,h,1)
+
+    return masks * ((r >= x1) * (r < x2) * (c >= y1) * (c < y2))
+
+
+def process_mask_upsample(protos, masks_in, bboxes, shape):
+    """
+    Crop after upsample.
+    protos: [mask_dim, mask_h, mask_w]
+    masks_in: [n, mask_dim], n is the number of masks after nms
+    bboxes: [n, 4], n is the number of masks after nms
+    shape: input image size, (h, w)
+
+    return: binarized masks, [n, h, w]
+    """
+
+    c, mh, mw = protos.shape  # CHW
+    masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw)
+    masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0]  # CHW
+    masks = crop_mask(masks, bboxes)  # CHW
+    return masks.gt_(0.5)
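
A hedged sketch of how this slots into an inference loop. It assumes the segmentation model's forward pass yields `(pred, proto)` and that NMS is run with `nm=32`, so each surviving row is `(xyxy, conf, cls, 32 coefficients)`; that output layout is an assumption of this example, not something pinned down by the code above:

```python
# Sketch: raw segmentation output -> per-detection binary masks (assumed output layout)
pred, proto = model(im)[:2]          # proto assumed (bs, 32, mask_h, mask_w)
pred = non_max_suppression(pred, conf_thres=0.25, iou_thres=0.45, nm=32)
for i, det in enumerate(pred):       # det: (n, 6 + 32) per image
    if len(det):
        masks = process_mask_upsample(proto[i], det[:, 6:], det[:, :4], im.shape[2:])
        # masks: (n, h, w) of 0/1 values at network input resolution
```
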
+
+
+def process_mask(protos, masks_in, bboxes, shape, upsample=False):
+    """
+    Crop before upsample.
+    protos: [mask_dim, mask_h, mask_w]
+    masks_in: [n, mask_dim], n is the number of masks after nms
+    bboxes: [n, 4], n is the number of masks after nms
+    shape: input image size, (h, w)
+
+    return: binarized masks, [n, h, w] if upsample else [n, mask_h, mask_w]
+    """
+
+    c, mh, mw = protos.shape  # CHW
+    ih, iw = shape
+    masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw)  # CHW
+
+    downsampled_bboxes = bboxes.clone()
+    downsampled_bboxes[:, 0] *= mw / iw
+    downsampled_bboxes[:, 2] *= mw / iw
+    downsampled_bboxes[:, 3] *= mh / ih
+    downsampled_bboxes[:, 1] *= mh / ih
+
+    masks = crop_mask(masks, downsampled_bboxes)  # CHW
+    if upsample:
+        masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0]  # CHW
+    return masks.gt_(0.5)
+
+
+def scale_image(im1_shape, masks, im0_shape, ratio_pad=None):
+    """
+    im1_shape: model input shape, [h, w]
+    im0_shape: original image shape, [h, w, 3]
+    masks: [h, w, num]
+    """
+    # Rescale coordinates (xyxy) from im1_shape to im0_shape
+    if ratio_pad is None:  # calculate from im0_shape
+        gain = min(im1_shape[0] / im0_shape[0], im1_shape[1] / im0_shape[1])  # gain = old / new
+        pad = (im1_shape[1] - im0_shape[1] * gain) / 2, (im1_shape[0] - im0_shape[0] * gain) / 2  # wh padding
+    else:
+        pad = ratio_pad[1]
+    top, left = int(pad[1]), int(pad[0])  # y, x
+    bottom, right = int(im1_shape[0] - pad[1]), int(im1_shape[1] - pad[0])
+
+    if len(masks.shape) < 2:
+        raise ValueError(f'len(masks.shape) should be 2 or 3, but got {len(masks.shape)}')
+    masks = masks[top:bottom, left:right]
+    # masks = masks.permute(2, 0, 1).contiguous()
+    # masks = F.interpolate(masks[None], im0_shape[:2], mode='bilinear', align_corners=False)[0]
+    # masks = masks.permute(1, 2, 0).contiguous()
+    masks = cv2.resize(masks, (im0_shape[1], im0_shape[0]))
+
+    if len(masks.shape) == 2:
+        masks = masks[:, :, None]
+    return masks
+
+
+def mask_iou(mask1, mask2, eps=1e-7):
+    """
+    mask1: [N, n], N is the number of predicted objects
+    mask2: [M, n], M is the number of gt objects
+    Note: n = image_w x image_h (flattened pixels)
+
+    return: mask IoU, [N, M]
+    """
+    intersection = torch.matmul(mask1, mask2.t()).clamp(0)
+    union = (mask1.sum(1)[:, None] + mask2.sum(1)[None]) - intersection  # (area1 + area2) - intersection
+    return intersection / (union + eps)
+
+
+def masks_iou(mask1, mask2, eps=1e-7):
+    """
+    mask1: [N, n], N is the number of predicted objects
+    mask2: [N, n], gt masks paired one-to-one with mask1
+    Note: n = image_w x image_h (flattened pixels)
+
+    return: mask IoU, (N, )
+    """
+    intersection = (mask1 * mask2).sum(1).clamp(0)  # (N, )
+    union = (mask1.sum(1) + mask2.sum(1)) - intersection  # (area1 + area2) - intersection, (N, )
+    return intersection / (union + eps)
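
Both IoU helpers take masks flattened to vectors, so the whole computation reduces to one matmul (or elementwise product) plus row sums. A toy sanity check with hypothetical 2x2 masks flattened to length 4:

```python
import torch

from utils.segment.general import mask_iou  # the helper defined above

a = torch.tensor([[1., 1., 0., 0.]])  # one predicted mask, flattened
b = torch.tensor([[1., 0., 0., 0.],   # gt 1: intersection 1, union 2 -> IoU 0.5
                  [0., 0., 1., 1.]])  # gt 2: intersection 0, union 4 -> IoU 0.0
print(mask_iou(a, b))                 # tensor([[0.5000, 0.0000]]), up to eps
```
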
diff --git a/utils/segment/loss.py b/utils/segment/loss.py
new file mode 100644
index 000000000000..b45b2c27e0a0
--- /dev/null
+++ b/utils/segment/loss.py
@@ -0,0 +1,186 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from ..general import xywh2xyxy
+from ..loss import FocalLoss, smooth_BCE
+from ..metrics import bbox_iou
+from ..torch_utils import de_parallel
+from .general import crop_mask
+
+
+class ComputeLoss:
+    # Compute losses
+    def __init__(self, model, autobalance=False, overlap=False):
+        self.sort_obj_iou = False
+        self.overlap = overlap
+        device = next(model.parameters()).device  # get model device
+        h = model.hyp  # hyperparameters
+        self.device = device
+
+        # Define criteria
+        BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device))
+        BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device))
+
+        # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
+        self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0))  # positive, negative BCE targets
+
+        # Focal loss
+        g = h['fl_gamma']  # focal loss gamma
+        if g > 0:
+            BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)
+
+        m = de_parallel(model).model[-1]  # Detect() module
+        self.balance = {3: [4.0, 1.0, 0.4]}.get(m.nl, [4.0, 1.0, 0.25, 0.06, 0.02])  # P3-P7
+        self.ssi = list(m.stride).index(16) if autobalance else 0  # stride 16 index
+        self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance
+        self.na = m.na  # number of anchors
+        self.nc = m.nc  # number of classes
+        self.nl = m.nl  # number of layers
+        self.nm = m.nm  # number of masks
+        self.anchors = m.anchors
+
+    def __call__(self, preds, targets, masks):  # predictions, targets, masks
+        p, proto = preds
+        bs, nm, mask_h, mask_w = proto.shape  # batch size, number of masks, mask height, mask width
+        lcls = torch.zeros(1, device=self.device)
+        lbox = torch.zeros(1, device=self.device)
+        lobj = torch.zeros(1, device=self.device)
+        lseg = torch.zeros(1, device=self.device)
+        tcls, tbox, indices, anchors, tidxs, xywhn = self.build_targets(p, targets)  # targets
+
+        # Losses
+        for i, pi in enumerate(p):  # layer index, layer predictions
+            b, a, gj, gi = indices[i]  # image, anchor, gridy, gridx
+            tobj = torch.zeros(pi.shape[:4], dtype=pi.dtype, device=self.device)  # target obj
+
+            n = b.shape[0]  # number of targets
+            if n:
+                pxy, pwh, _, pcls, pmask = pi[b, a, gj, gi].split((2, 2, 1, self.nc, nm), 1)  # subset of predictions
+
+                # Box regression
+                pxy = pxy.sigmoid() * 2 - 0.5
+                pwh = (pwh.sigmoid() * 2) ** 2 * anchors[i]
+                pbox = torch.cat((pxy, pwh), 1)  # predicted box
+                iou = bbox_iou(pbox, tbox[i], CIoU=True).squeeze()  # iou(prediction, target)
+                lbox += (1.0 - iou).mean()  # iou loss
+
+                # Objectness
+                iou = iou.detach().clamp(0).type(tobj.dtype)
+                if self.sort_obj_iou:
+                    j = iou.argsort()
+                    b, a, gj, gi, iou = b[j], a[j], gj[j], gi[j], iou[j]
+                if self.gr < 1:
+                    iou = (1.0 - self.gr) + self.gr * iou
+                tobj[b, a, gj, gi] = iou  # iou ratio
+
+                # Classification
+                if self.nc > 1:  # cls loss (only if multiple classes)
+                    t = torch.full_like(pcls, self.cn, device=self.device)  # targets
+                    t[range(n), tcls[i]] = self.cp
+                    lcls += self.BCEcls(pcls, t)  # BCE
+
+                # Mask regression
+                if tuple(masks.shape[-2:]) != (mask_h, mask_w):  # downsample
+                    masks = F.interpolate(masks[None], (mask_h, mask_w), mode="nearest")[0]
+                marea = xywhn[i][:, 2:].prod(1)  # normalized mask area (w * h)
+                mxyxy = xywh2xyxy(xywhn[i] * torch.tensor([mask_w, mask_h, mask_w, mask_h], device=self.device))
+                for bi in b.unique():
+                    j = b == bi  # matching index
+                    if self.overlap:
+                        mask_gti = torch.where(masks[bi][None] == tidxs[i][j].view(-1, 1, 1), 1.0, 0.0)
+                    else:
+                        mask_gti = masks[tidxs[i]][j]
+                    lseg += self.single_mask_loss(mask_gti, pmask[j], proto[bi], mxyxy[j], marea[j])
+
+            obji = self.BCEobj(pi[..., 4], tobj)
+            lobj += obji * self.balance[i]  # obj loss
+            if self.autobalance:
+                self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item()
+
+        if self.autobalance:
+            self.balance = [x / self.balance[self.ssi] for x in self.balance]
+        lbox *= self.hyp["box"]
+        lobj *= self.hyp["obj"]
+        lcls *= self.hyp["cls"]
+        lseg *= self.hyp["box"] / bs
+
+        loss = lbox + lobj + lcls + lseg
+        return loss * bs, torch.cat((lbox, lseg, lobj, lcls)).detach()
+
+    def single_mask_loss(self, gt_mask, pred, proto, xyxy, area):
+        # Mask loss for 
one image + pred_mask = (pred @ proto.view(self.nm, -1)).view(-1, *proto.shape[1:]) # (n,32) @ (32,80,80) -> (n,80,80) + loss = F.binary_cross_entropy_with_logits(pred_mask, gt_mask, reduction="none") + return (crop_mask(loss, xyxy).mean(dim=(1, 2)) / area).mean() + + def build_targets(self, p, targets): + # Build targets for compute_loss(), input targets(image,class,x,y,w,h) + na, nt = self.na, targets.shape[0] # number of anchors, targets + tcls, tbox, indices, anch, tidxs, xywhn = [], [], [], [], [], [] + gain = torch.ones(8, device=self.device) # normalized to gridspace gain + ai = torch.arange(na, device=self.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt) + if self.overlap: + batch = p[0].shape[0] + ti = [] + for i in range(batch): + num = (targets[:, 0] == i).sum() # find number of targets of each image + ti.append(torch.arange(num, device=self.device).float().view(1, num).repeat(na, 1) + 1) # (na, num) + ti = torch.cat(ti, 1) # (na, nt) + else: + ti = torch.arange(nt, device=self.device).float().view(1, nt).repeat(na, 1) + targets = torch.cat((targets.repeat(na, 1, 1), ai[..., None], ti[..., None]), 2) # append anchor indices + + g = 0.5 # bias + off = torch.tensor( + [ + [0, 0], + [1, 0], + [0, 1], + [-1, 0], + [0, -1], # j,k,l,m + # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm + ], + device=self.device).float() * g # offsets + + for i in range(self.nl): + anchors, shape = self.anchors[i], p[i].shape + gain[2:6] = torch.tensor(shape)[[3, 2, 3, 2]] # xyxy gain + + # Match targets to anchors + t = targets * gain # shape(3,n,7) + if nt: + # Matches + r = t[..., 4:6] / anchors[:, None] # wh ratio + j = torch.max(r, 1 / r).max(2)[0] < self.hyp['anchor_t'] # compare + # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2)) + t = t[j] # filter + + # Offsets + gxy = t[:, 2:4] # grid xy + gxi = gain[[2, 3]] - gxy # inverse + j, k = ((gxy % 1 < g) & (gxy > 1)).T + l, m = ((gxi % 1 < g) & (gxi > 1)).T + j = torch.stack((torch.ones_like(j), j, k, l, m)) + t = t.repeat((5, 1, 1))[j] + offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j] + else: + t = targets[0] + offsets = 0 + + # Define + bc, gxy, gwh, at = t.chunk(4, 1) # (image, class), grid xy, grid wh, anchors + (a, tidx), (b, c) = at.long().T, bc.long().T # anchors, image, class + gij = (gxy - offsets).long() + gi, gj = gij.T # grid indices + + # Append + indices.append((b, a, gj.clamp_(0, shape[2] - 1), gi.clamp_(0, shape[3] - 1))) # image, anchor, grid + tbox.append(torch.cat((gxy - gij, gwh), 1)) # box + anch.append(anchors[a]) # anchors + tcls.append(c) # class + tidxs.append(tidx) + xywhn.append(torch.cat((gxy, gwh), 1) / gain[2:6]) # xywh normalized + + return tcls, tbox, indices, anch, tidxs, xywhn diff --git a/utils/segment/metrics.py b/utils/segment/metrics.py new file mode 100644 index 000000000000..b09ce23fb9e3 --- /dev/null +++ b/utils/segment/metrics.py @@ -0,0 +1,210 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Model validation metrics +""" + +import numpy as np + +from ..metrics import ap_per_class + + +def fitness(x): + # Model fitness as a weighted combination of metrics + w = [0.0, 0.0, 0.1, 0.9, 0.0, 0.0, 0.1, 0.9] + return (x[:, :8] * w).sum(1) + + +def ap_per_class_box_and_mask( + tp_m, + tp_b, + conf, + pred_cls, + target_cls, + plot=False, + save_dir=".", + names=(), +): + """ + Args: + tp_b: tp of boxes. + tp_m: tp of masks. + other arguments see `func: ap_per_class`. 
+ """ + results_boxes = ap_per_class(tp_b, + conf, + pred_cls, + target_cls, + plot=plot, + save_dir=save_dir, + names=names, + prefix="Box")[2:] + results_masks = ap_per_class(tp_m, + conf, + pred_cls, + target_cls, + plot=plot, + save_dir=save_dir, + names=names, + prefix="Mask")[2:] + + results = { + "boxes": { + "p": results_boxes[0], + "r": results_boxes[1], + "ap": results_boxes[3], + "f1": results_boxes[2], + "ap_class": results_boxes[4]}, + "masks": { + "p": results_masks[0], + "r": results_masks[1], + "ap": results_masks[3], + "f1": results_masks[2], + "ap_class": results_masks[4]}} + return results + + +class Metric: + + def __init__(self) -> None: + self.p = [] # (nc, ) + self.r = [] # (nc, ) + self.f1 = [] # (nc, ) + self.all_ap = [] # (nc, 10) + self.ap_class_index = [] # (nc, ) + + @property + def ap50(self): + """AP@0.5 of all classes. + Return: + (nc, ) or []. + """ + return self.all_ap[:, 0] if len(self.all_ap) else [] + + @property + def ap(self): + """AP@0.5:0.95 + Return: + (nc, ) or []. + """ + return self.all_ap.mean(1) if len(self.all_ap) else [] + + @property + def mp(self): + """mean precision of all classes. + Return: + float. + """ + return self.p.mean() if len(self.p) else 0.0 + + @property + def mr(self): + """mean recall of all classes. + Return: + float. + """ + return self.r.mean() if len(self.r) else 0.0 + + @property + def map50(self): + """Mean AP@0.5 of all classes. + Return: + float. + """ + return self.all_ap[:, 0].mean() if len(self.all_ap) else 0.0 + + @property + def map(self): + """Mean AP@0.5:0.95 of all classes. + Return: + float. + """ + return self.all_ap.mean() if len(self.all_ap) else 0.0 + + def mean_results(self): + """Mean of results, return mp, mr, map50, map""" + return (self.mp, self.mr, self.map50, self.map) + + def class_result(self, i): + """class-aware result, return p[i], r[i], ap50[i], ap[i]""" + return (self.p[i], self.r[i], self.ap50[i], self.ap[i]) + + def get_maps(self, nc): + maps = np.zeros(nc) + self.map + for i, c in enumerate(self.ap_class_index): + maps[c] = self.ap[i] + return maps + + def update(self, results): + """ + Args: + results: tuple(p, r, ap, f1, ap_class) + """ + p, r, all_ap, f1, ap_class_index = results + self.p = p + self.r = r + self.all_ap = all_ap + self.f1 = f1 + self.ap_class_index = ap_class_index + + +class Metrics: + """Metric for boxes and masks.""" + + def __init__(self) -> None: + self.metric_box = Metric() + self.metric_mask = Metric() + + def update(self, results): + """ + Args: + results: Dict{'boxes': Dict{}, 'masks': Dict{}} + """ + self.metric_box.update(list(results["boxes"].values())) + self.metric_mask.update(list(results["masks"].values())) + + def mean_results(self): + return self.metric_box.mean_results() + self.metric_mask.mean_results() + + def class_result(self, i): + return self.metric_box.class_result(i) + self.metric_mask.class_result(i) + + def get_maps(self, nc): + return self.metric_box.get_maps(nc) + self.metric_mask.get_maps(nc) + + @property + def ap_class_index(self): + # boxes and masks have the same ap_class_index + return self.metric_box.ap_class_index + + +KEYS = [ + "train/box_loss", + "train/seg_loss", # train loss + "train/obj_loss", + "train/cls_loss", + "metrics/precision(B)", + "metrics/recall(B)", + "metrics/mAP_0.5(B)", + "metrics/mAP_0.5:0.95(B)", # metrics + "metrics/precision(M)", + "metrics/recall(M)", + "metrics/mAP_0.5(M)", + "metrics/mAP_0.5:0.95(M)", # metrics + "val/box_loss", + "val/seg_loss", # val loss + "val/obj_loss", + "val/cls_loss", + 
"x/lr0", + "x/lr1", + "x/lr2",] + +BEST_KEYS = [ + "best/epoch", + "best/precision(B)", + "best/recall(B)", + "best/mAP_0.5(B)", + "best/mAP_0.5:0.95(B)", + "best/precision(M)", + "best/recall(M)", + "best/mAP_0.5(M)", + "best/mAP_0.5:0.95(M)",] diff --git a/utils/segment/plots.py b/utils/segment/plots.py new file mode 100644 index 000000000000..e882c14390f0 --- /dev/null +++ b/utils/segment/plots.py @@ -0,0 +1,143 @@ +import contextlib +import math +from pathlib import Path + +import cv2 +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd +import torch + +from .. import threaded +from ..general import xywh2xyxy +from ..plots import Annotator, colors + + +@threaded +def plot_images_and_masks(images, targets, masks, paths=None, fname='images.jpg', names=None): + # Plot image grid with labels + if isinstance(images, torch.Tensor): + images = images.cpu().float().numpy() + if isinstance(targets, torch.Tensor): + targets = targets.cpu().numpy() + if isinstance(masks, torch.Tensor): + masks = masks.cpu().numpy().astype(int) + + max_size = 1920 # max image size + max_subplots = 16 # max image subplots, i.e. 4x4 + bs, _, h, w = images.shape # batch size, _, height, width + bs = min(bs, max_subplots) # limit plot images + ns = np.ceil(bs ** 0.5) # number of subplots (square) + if np.max(images[0]) <= 1: + images *= 255 # de-normalise (optional) + + # Build Image + mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init + for i, im in enumerate(images): + if i == max_subplots: # if last batch has fewer images than we expect + break + x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin + im = im.transpose(1, 2, 0) + mosaic[y:y + h, x:x + w, :] = im + + # Resize (optional) + scale = max_size / ns / max(h, w) + if scale < 1: + h = math.ceil(scale * h) + w = math.ceil(scale * w) + mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h))) + + # Annotate + fs = int((h + w) * ns * 0.01) # font size + annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True, example=names) + for i in range(i + 1): + x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin + annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders + if paths: + annotator.text((x + 5, y + 5 + h), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames + if len(targets) > 0: + idx = targets[:, 0] == i + ti = targets[idx] # image targets + + boxes = xywh2xyxy(ti[:, 2:6]).T + classes = ti[:, 1].astype('int') + labels = ti.shape[1] == 6 # labels if no conf column + conf = None if labels else ti[:, 6] # check for confidence presence (label vs pred) + + if boxes.shape[1]: + if boxes.max() <= 1.01: # if normalized with tolerance 0.01 + boxes[[0, 2]] *= w # scale to pixels + boxes[[1, 3]] *= h + elif scale < 1: # absolute coords need scale if image scales + boxes *= scale + boxes[[0, 2]] += x + boxes[[1, 3]] += y + for j, box in enumerate(boxes.T.tolist()): + cls = classes[j] + color = colors(cls) + cls = names[cls] if names else cls + if labels or conf[j] > 0.25: # 0.25 conf thresh + label = f'{cls}' if labels else f'{cls} {conf[j]:.1f}' + annotator.box_label(box, label, color=color) + + # Plot masks + if len(masks): + if masks.max() > 1.0: # mean that masks are overlap + image_masks = masks[[i]] # (1, 640, 640) + nl = len(ti) + index = np.arange(nl).reshape(nl, 1, 1) + 1 + image_masks = np.repeat(image_masks, nl, axis=0) + image_masks = np.where(image_masks == index, 1.0, 0.0) + else: + image_masks = masks[idx] + + im = 
np.asarray(annotator.im).copy() + for j, box in enumerate(boxes.T.tolist()): + if labels or conf[j] > 0.25: # 0.25 conf thresh + color = colors(classes[j]) + mh, mw = image_masks[j].shape + if mh != h or mw != w: + mask = image_masks[j].astype(np.uint8) + mask = cv2.resize(mask, (w, h)) + mask = mask.astype(np.bool) + else: + mask = image_masks[j].astype(np.bool) + with contextlib.suppress(Exception): + im[y:y + h, x:x + w, :][mask] = im[y:y + h, x:x + w, :][mask] * 0.4 + np.array(color) * 0.6 + annotator.fromarray(im) + annotator.im.save(fname) # save + + +def plot_results_with_masks(file="path/to/results.csv", dir="", best=True): + # Plot training results.csv. Usage: from utils.plots import *; plot_results('path/to/results.csv') + save_dir = Path(file).parent if file else Path(dir) + fig, ax = plt.subplots(2, 8, figsize=(18, 6), tight_layout=True) + ax = ax.ravel() + files = list(save_dir.glob("results*.csv")) + assert len(files), f"No results.csv files found in {save_dir.resolve()}, nothing to plot." + for f in files: + try: + data = pd.read_csv(f) + index = np.argmax(0.9 * data.values[:, 8] + 0.1 * data.values[:, 7] + 0.9 * data.values[:, 12] + + 0.1 * data.values[:, 11]) + s = [x.strip() for x in data.columns] + x = data.values[:, 0] + for i, j in enumerate([1, 2, 3, 4, 5, 6, 9, 10, 13, 14, 15, 16, 7, 8, 11, 12]): + y = data.values[:, j] + # y[y == 0] = np.nan # don't show zero values + ax[i].plot(x, y, marker=".", label=f.stem, linewidth=2, markersize=2) + if best: + # best + ax[i].scatter(index, y[index], color="r", label=f"best:{index}", marker="*", linewidth=3) + ax[i].set_title(s[j] + f"\n{round(y[index], 5)}") + else: + # last + ax[i].scatter(x[-1], y[-1], color="r", label="last", marker="*", linewidth=3) + ax[i].set_title(s[j] + f"\n{round(y[-1], 5)}") + # if j in [8, 9, 10]: # share train and val loss y axes + # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5]) + except Exception as e: + print(f"Warning: Plotting error for {f}: {e}") + ax[1].legend() + fig.savefig(save_dir / "results.png", dpi=200) + plt.close() diff --git a/val.py b/val.py index 4b0bdddae3b1..6a0f18e28392 100644 --- a/val.py +++ b/val.py @@ -71,12 +71,12 @@ def save_one_json(predn, jdict, path, class_map): def process_batch(detections, labels, iouv): """ - Return correct predictions matrix. Both sets of boxes are in (x1, y1, x2, y2) format. + Return correct prediction matrix Arguments: - detections (Array[N, 6]), x1, y1, x2, y2, conf, class - labels (Array[M, 5]), class, x1, y1, x2, y2 + detections (array[N, 6]), x1, y1, x2, y2, conf, class + labels (array[M, 5]), class, x1, y1, x2, y2 Returns: - correct (Array[N, 10]), for 10 IoU levels + correct (array[N, 10]), for 10 IoU levels """ correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool) iou = box_iou(labels[:, 1:], detections[:, :4]) @@ -102,6 +102,7 @@ def run( imgsz=640, # inference size (pixels) conf_thres=0.001, # confidence threshold iou_thres=0.6, # NMS IoU threshold + max_det=300, # maximum detections per image task='val', # train, val, test, speed or study device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu workers=8, # max dataloader workers (per RANK in DDP mode) @@ -187,7 +188,7 @@ def run( if isinstance(names, (list, tuple)): # old format names = dict(enumerate(names)) class_map = coco80_to_coco91_class() if is_coco else list(range(1000)) - s = ('%22s' + '%11s' * 6) % ('Class', 'Images', 'Instances', 'P', 'R', 'mAP@.5', 'mAP@.5:.95') + s = ('%22s' + '%11s' * 6) % ('Class', 'Images', 'Instances', 'P', 'R', 'mAP50', 'mAP50-95') dt, p, r, f1, mp, mr, map50, map = (Profile(), Profile(), Profile()), 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 loss = torch.zeros(3, device=device) jdict, stats, ap, ap_class = [], [], [], [] @@ -205,7 +206,7 @@ def run( # Inference with dt[1]: - out, train_out = model(im) if compute_loss else (model(im, augment=augment), None) + preds, train_out = model(im) if compute_loss else (model(im, augment=augment), None) # Loss if compute_loss: @@ -215,10 +216,16 @@ def run( targets[:, 2:] *= torch.tensor((width, height, width, height), device=device) # to pixels lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling with dt[2]: - out = non_max_suppression(out, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls) + preds = non_max_suppression(preds, + conf_thres, + iou_thres, + labels=lb, + multi_label=True, + agnostic=single_cls, + max_det=max_det) # Metrics - for si, pred in enumerate(out): + for si, pred in enumerate(preds): labels = targets[targets[:, 0] == si, 1:] nl, npr = labels.shape[0], pred.shape[0] # number of labels, predictions path, shape = Path(paths[si]), shapes[si][0] @@ -258,9 +265,9 @@ def run( # Plot images if plots and batch_i < 3: plot_images(im, targets, paths, save_dir / f'val_batch{batch_i}_labels.jpg', names) # labels - plot_images(im, output_to_target(out), paths, save_dir / f'val_batch{batch_i}_pred.jpg', names) # pred + plot_images(im, output_to_target(preds), paths, save_dir / f'val_batch{batch_i}_pred.jpg', names) # pred - callbacks.run('on_val_batch_end', batch_i, im, targets, paths, shapes, out) + callbacks.run('on_val_batch_end', batch_i, im, targets, paths, shapes, preds) # Compute metrics stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)] # to numpy @@ -332,11 +339,12 @@ def run( def parse_opt(): parser = argparse.ArgumentParser() parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') - parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model.pt path(s)') + parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path(s)') parser.add_argument('--batch-size', type=int, default=32, help='batch size') parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)') parser.add_argument('--conf-thres', type=float, default=0.001, help='confidence threshold') parser.add_argument('--iou-thres', type=float, default=0.6, help='NMS IoU threshold') + parser.add_argument('--max-det', type=int, default=300, help='maximum detections per image') parser.add_argument('--task', default='val', help='train, val, test, speed or study') parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') From 58ad5ca5ce6b4fb3da6420bcc7b11a09e20674fd Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 16 Sep 2022 00:21:13 +0200 Subject: [PATCH 1426/1976] Fix val.py zero-TP bug (#9431) Resolves https://github.com/ultralytics/yolov5/issues/9400 Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- val.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/val.py b/val.py index 6a0f18e28392..e003d2144b7f 100644 --- a/val.py +++ b/val.py @@ -189,7 +189,8 @@ def run( names = dict(enumerate(names)) class_map = coco80_to_coco91_class() if is_coco else list(range(1000)) s = ('%22s' + '%11s' * 6) % ('Class', 'Images', 'Instances', 'P', 'R', 'mAP50', 'mAP50-95') - dt, p, r, f1, mp, mr, map50, map = (Profile(), Profile(), Profile()), 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 + tp, fp, p, r, f1, mp, mr, map50, ap50, map = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 + dt = Profile(), Profile(), Profile() # profiling times loss = torch.zeros(3, device=device) jdict, stats, ap, ap_class = [], [], [], [] callbacks.run('on_val_start') From a1e5f9a97de2a3ace012315208c686744ced2782 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 16 Sep 2022 00:55:21 +0200 Subject: [PATCH 1427/1976] New model.yaml `activation:` field (#9371) * New model.yaml `activation:` field Add optional model yaml activation field to define model-wide activations, i.e.: ```yaml activation: nn.LeakyReLU(0.1) # activation with arguments activation: nn.SiLU() # activation with no arguments ``` Signed-off-by: Glenn Jocher * Update yolo.py Signed-off-by: Glenn Jocher * Add example models * l to m models * update * Add yolov5s-LeakyReLU.yaml * Update yolov5s-LeakyReLU.yaml Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- models/common.py | 8 +++-- models/hub/yolov5s-LeakyReLU.yaml | 49 +++++++++++++++++++++++++++++++ models/yolo.py | 6 +++- 3 files changed, 59 insertions(+), 4 deletions(-) create mode 100644 models/hub/yolov5s-LeakyReLU.yaml diff --git a/models/common.py b/models/common.py index 0d90ff4f8827..debbc2d03f60 100644 --- a/models/common.py +++ b/models/common.py @@ -39,11 +39,13 @@ def autopad(k, p=None, d=1): # kernel, padding, dilation class Conv(nn.Module): # Standard convolution with args(ch_in, ch_out, kernel, stride, padding, groups, dilation, activation) + act = nn.SiLU() # default activation + def __init__(self, c1, c2, k=1, s=1, p=None, g=1, d=1, act=True): super().__init__() self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p, d), groups=g, dilation=d, bias=False) self.bn = nn.BatchNorm2d(c2) - self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity()) + self.act = self.act if act is True else act if isinstance(act, nn.Module) else nn.Identity() def forward(self, x): return self.act(self.bn(self.conv(x))) @@ -54,8 +56,8 @@ def forward_fuse(self, x): class DWConv(Conv): # Depth-wise convolution - def __init__(self, c1, c2, k=1, s=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups - super().__init__(c1, c2, k, s, g=math.gcd(c1, c2), act=act) + def __init__(self, c1, c2, k=1, s=1, d=1, act=True): # ch_in, ch_out, kernel, stride, dilation, activation + super().__init__(c1, c2, k, s, g=math.gcd(c1, c2), d=d, act=act) class DWConvTranspose2d(nn.ConvTranspose2d): diff --git a/models/hub/yolov5s-LeakyReLU.yaml b/models/hub/yolov5s-LeakyReLU.yaml new file mode 100644 index 000000000000..3a179bf3311c --- /dev/null +++ 
b/models/hub/yolov5s-LeakyReLU.yaml @@ -0,0 +1,49 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +activation: nn.LeakyReLU(0.1) # <----- Conv() activation used throughout entire YOLOv5 model +depth_multiple: 0.33 # model depth multiple +width_multiple: 0.50 # layer channel multiple +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/models/yolo.py b/models/yolo.py index a0702a7c0257..46039c36d7e1 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -297,8 +297,12 @@ def _from_yaml(self, cfg): def parse_model(d, ch): # model_dict, input_channels(3) + # Parse a YOLOv5 model.yaml dictionary LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}") - anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'] + anchors, nc, gd, gw, act = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'], d.get('activation') + if act: + Conv.act = eval(act) # redefine default activation, i.e. Conv.act = nn.SiLU() + LOGGER.info(f"{colorstr('activation:')} {act}") # print na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors no = na * (nc + 5) # number of outputs = anchors * (classes + 5) From c7a2d6bcf4f7e88db53f3d09a8484391dac7bc89 Mon Sep 17 00:00:00 2001 From: Hoyeong-GenGenAI <5404902+hotohoto@users.noreply.github.com> Date: Fri, 16 Sep 2022 18:53:18 +0900 Subject: [PATCH 1428/1976] Fix tick labels for background FN/FP (#9414) * Fix tick labels for background FN/FP In the confusion matrix. 
* Remove FP/FN from the background labels of the confusion matrix

* Update metrics.py

Signed-off-by: Glenn Jocher 

Signed-off-by: Glenn Jocher 
Co-authored-by: Glenn Jocher 
---
 utils/metrics.py | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/utils/metrics.py b/utils/metrics.py
index 001813cbcd65..021a46ce5d37 100644
--- a/utils/metrics.py
+++ b/utils/metrics.py
@@ -170,12 +170,12 @@ def process_batch(self, detections, labels):
                 if n and sum(j) == 1:
                     self.matrix[detection_classes[m1[j]], gc] += 1  # correct
                 else:
-                    self.matrix[self.nc, gc] += 1  # background FP
+                    self.matrix[self.nc, gc] += 1  # true background
 
         if n:
             for i, dc in enumerate(detection_classes):
                 if not any(m1 == i):
-                    self.matrix[dc, self.nc] += 1  # background FN
+                    self.matrix[dc, self.nc] += 1  # predicted background
 
     def matrix(self):
         return self.matrix
@@ -197,6 +197,7 @@ def plot(self, normalize=True, save_dir='', names=()):
             nc, nn = self.nc, len(names)  # number of classes, names
             sn.set(font_scale=1.0 if nc < 50 else 0.8)  # for label size
             labels = (0 < nn < 99) and (nn == nc)  # apply names to ticklabels
+            ticklabels = (names + ['background']) if labels else "auto"
             with warnings.catch_warnings():
                 warnings.simplefilter('ignore')  # suppress empty matrix RuntimeWarning: All-NaN slice encountered
                 sn.heatmap(array,
@@ -208,8 +209,8 @@ def plot(self, normalize=True, save_dir='', names=()):
                            fmt='.2f',
                            square=True,
                            vmin=0.0,
-                           xticklabels=names + ['background FP'] if labels else "auto",
-                           yticklabels=names + ['background FN'] if labels else "auto").set_facecolor((1, 1, 1))
+                           xticklabels=ticklabels,
+                           yticklabels=ticklabels).set_facecolor((1, 1, 1))
             ax.set_xlabel('True')
             ax.set_ylabel('Predicted')
             ax.set_title('Confusion Matrix')

From 03f2ca8eff8918b98169256d055353a1f15b8e32 Mon Sep 17 00:00:00 2001
From: Glenn Jocher 
Date: Fri, 16 Sep 2022 12:31:43 +0200
Subject: [PATCH 1429/1976] Fix TensorRT exports to ONNX opset 12 (#9441)

* Fix TensorRT exports to ONNX opset 12

Signed-off-by: Glenn Jocher 

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

Signed-off-by: Glenn Jocher 
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 export.py | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/export.py b/export.py
index 1b25f3f8221b..cc4386ae4916 100644
--- a/export.py
+++ b/export.py
@@ -251,7 +251,7 @@ def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose
         model.model[-1].anchor_grid = grid
     else:  # TensorRT >= 8
         check_version(trt.__version__, '8.0.0', hard=True)  # require tensorrt>=8.0.0
-        export_onnx(model, im, file, 13, False, dynamic, simplify)  # opset 13
+        export_onnx(model, im, file, 12, False, dynamic, simplify)  # opset 12
     onnx = file.with_suffix('.onnx')
 
     LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...')
@@ -274,11 +274,10 @@ def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose
 
     inputs = [network.get_input(i) for i in range(network.num_inputs)]
     outputs = [network.get_output(i) for i in range(network.num_outputs)]
-    LOGGER.info(f'{prefix} Network Description:')
     for inp in inputs:
-        LOGGER.info(f'{prefix}\tinput "{inp.name}" with shape {inp.shape} and dtype {inp.dtype}')
+        LOGGER.info(f'{prefix} input "{inp.name}" with shape{inp.shape} {inp.dtype}')
     for out in outputs:
-        LOGGER.info(f'{prefix}\toutput "{out.name}" with shape {out.shape} and dtype {out.dtype}')
+        LOGGER.info(f'{prefix} output "{out.name}" with 
shape{out.shape} {out.dtype}') if dynamic: if im.shape[0] <= 1: @@ -288,7 +287,7 @@ def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose profile.set_shape(inp.name, (1, *im.shape[1:]), (max(1, im.shape[0] // 2), *im.shape[1:]), im.shape) config.add_optimization_profile(profile) - LOGGER.info(f'{prefix} building FP{16 if builder.platform_has_fast_fp16 and half else 32} engine in {f}') + LOGGER.info(f'{prefix} building FP{16 if builder.platform_has_fast_fp16 and half else 32} engine as {f}') if builder.platform_has_fast_fp16 and half: config.set_flag(trt.BuilderFlag.FP16) with builder.build_engine(network, config) as engine, open(f, 'wb') as t: From 2ac4b634c745cc46c4728e682c6da66f79f6416a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 16 Sep 2022 17:25:44 +0200 Subject: [PATCH 1430/1976] AutoShape explicit arguments fix (#9443) * AutoShape explicit arguments fix Signed-off-by: Glenn Jocher * Update common.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- models/common.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/models/common.py b/models/common.py index debbc2d03f60..85b82e10a4e1 100644 --- a/models/common.py +++ b/models/common.py @@ -633,7 +633,7 @@ def forward(self, ims, size=640, augment=False, profile=False): autocast = self.amp and (p.device.type != 'cpu') # Automatic Mixed Precision (AMP) inference if isinstance(ims, torch.Tensor): # torch with amp.autocast(autocast): - return self.model(ims.to(p.device).type_as(p), augment, profile) # inference + return self.model(ims.to(p.device).type_as(p), augment=augment) # inference # Pre-process n, ims = (len(ims), list(ims)) if isinstance(ims, (list, tuple)) else (1, [ims]) # number, list of images @@ -662,7 +662,7 @@ def forward(self, ims, size=640, augment=False, profile=False): with amp.autocast(autocast): # Inference with dt[1]: - y = self.model(x, augment, profile) # forward + y = self.model(x, augment=augment) # forward # Post-process with dt[2]: @@ -696,7 +696,7 @@ def __init__(self, ims, pred, files, times=(0, 0, 0), names=None, shape=None): self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized self.n = len(self.pred) # number of images (batch size) self.t = tuple(x.t / self.n * 1E3 for x in times) # timestamps (ms) - self.s = shape # inference BCHW shape + self.s = tuple(shape) # inference BCHW shape def display(self, pprint=False, show=False, save=False, crop=False, render=False, labels=True, save_dir=Path('')): crops = [] @@ -726,7 +726,7 @@ def display(self, pprint=False, show=False, save=False, crop=False, render=False im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np if pprint: - print(s.rstrip(', ')) + LOGGER.info(s.rstrip(', ')) if show: im.show(self.files[i]) # show if save: @@ -743,7 +743,7 @@ def display(self, pprint=False, show=False, save=False, crop=False, render=False def print(self): self.display(pprint=True) # print results - print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' % self.t) + LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {self.s}' % self.t) def show(self, labels=True): self.display(show=True, labels=labels) # show results From fe10b4abc054cba1b5fab1d3598b3caf77b53859 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 16 Sep 2022 18:36:55 +0200 Subject: [PATCH 1431/1976] Update Detections() instance printing (#9445) * Update Detections() instance printing Signed-off-by: Glenn 
Jocher * Update common.py Signed-off-by: Glenn Jocher * Update common.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- models/common.py | 39 +++++++++++++++++++++------------------ 1 file changed, 21 insertions(+), 18 deletions(-) diff --git a/models/common.py b/models/common.py index 85b82e10a4e1..9c08120fe7f6 100644 --- a/models/common.py +++ b/models/common.py @@ -698,14 +698,15 @@ def __init__(self, ims, pred, files, times=(0, 0, 0), names=None, shape=None): self.t = tuple(x.t / self.n * 1E3 for x in times) # timestamps (ms) self.s = tuple(shape) # inference BCHW shape - def display(self, pprint=False, show=False, save=False, crop=False, render=False, labels=True, save_dir=Path('')): - crops = [] + def _run(self, pprint=False, show=False, save=False, crop=False, render=False, labels=True, save_dir=Path('')): + s, crops = '', [] for i, (im, pred) in enumerate(zip(self.ims, self.pred)): - s = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} ' # string + s += f'\nimage {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} ' # string if pred.shape[0]: for c in pred[:, -1].unique(): n = (pred[:, -1] == c).sum() # detections per class s += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string + s = s.rstrip(', ') if show or save or render or crop: annotator = Annotator(im, example=str(self.names)) for *box, conf, cls in reversed(pred): # xyxy, confidence, class @@ -725,8 +726,6 @@ def display(self, pprint=False, show=False, save=False, crop=False, render=False s += '(no detections)' im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np - if pprint: - LOGGER.info(s.rstrip(', ')) if show: im.show(self.files[i]) # show if save: @@ -736,28 +735,27 @@ def display(self, pprint=False, show=False, save=False, crop=False, render=False LOGGER.info(f"Saved {self.n} image{'s' * (self.n > 1)} to {colorstr('bold', save_dir)}") if render: self.ims[i] = np.asarray(im) + if pprint: + s = s.lstrip('\n') + return f'{s}\nSpeed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {self.s}' % self.t if crop: if save: LOGGER.info(f'Saved results to {save_dir}\n') return crops - def print(self): - self.display(pprint=True) # print results - LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {self.s}' % self.t) - def show(self, labels=True): - self.display(show=True, labels=labels) # show results + self._run(show=True, labels=labels) # show results def save(self, labels=True, save_dir='runs/detect/exp'): save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) # increment save_dir - self.display(save=True, labels=labels, save_dir=save_dir) # save results + self._run(save=True, labels=labels, save_dir=save_dir) # save results def crop(self, save=True, save_dir='runs/detect/exp'): save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) if save else None - return self.display(crop=True, save=save, save_dir=save_dir) # crop results + return self._run(crop=True, save=save, save_dir=save_dir) # crop results def render(self, labels=True): - self.display(render=True, labels=labels) # render results + self._run(render=True, labels=labels) # render results return self.ims def pandas(self): @@ -779,12 +777,17 @@ def tolist(self): # setattr(d, k, getattr(d, k)[0]) # pop out of list return x - def __len__(self): - return self.n # override len(results) + def print(self): + LOGGER.info(self.__str__()) + + def __len__(self): # override len(results) 
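The refactor above funnels every output mode through one private `_run()` dispatcher, with `print`/`show`/`save`/`crop`/`render` reduced to thin wrappers and `__str__` returning the summary string so `print(results)` works natively. A condensed sketch of the pattern, using a hypothetical stand-in class rather than the actual Detections API:

    class Results:
        def __init__(self, items):
            self.items = items

        def _run(self, pprint=False, show=False, save=False):
            s = f'{len(self.items)} result(s)'
            if show:
                pass  # display images here
            if save:
                pass  # write files here
            if pprint:
                return s  # only the printing path returns the summary string

        def show(self):
            self._run(show=True)

        def save(self):
            self._run(save=True)

        def __len__(self):  # len(results)
            return len(self.items)

        def __str__(self):  # print(results), str(results)
            return self._run(pprint=True)

        def __repr__(self):
            return f'{self.__class__.__name__} instance\n{self}'

    r = Results([1, 2, 3])
    print(r)       # 3 result(s)
    print(len(r))  # 3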
+ return self.n + + def __str__(self): # override print(results) + return self._run(pprint=True) # print results - def __str__(self): - self.print() # override print(results) - return '' + def __repr__(self): + return f'YOLOv5 {self.__class__} instance\n' + self.__str__() class Proto(nn.Module): From db06f495db02501ef94efe46171d952642dec880 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 16 Sep 2022 20:44:56 +0200 Subject: [PATCH 1432/1976] AutoUpdate TensorFlow in export.py (#9447) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- export.py | 1 + 1 file changed, 1 insertion(+) diff --git a/export.py b/export.py index cc4386ae4916..a575c73e375f 100644 --- a/export.py +++ b/export.py @@ -309,6 +309,7 @@ def export_saved_model(model, keras=False, prefix=colorstr('TensorFlow SavedModel:')): # YOLOv5 TensorFlow SavedModel export + check_requirements('tensorflow' if torch.cuda.is_available() else 'tensorflow-cpu') import tensorflow as tf from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 From 5e1a9553fbed73995c9b81e63ba41cc70fdf89de Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 16 Sep 2022 21:46:07 +0200 Subject: [PATCH 1433/1976] AutoBatch `cudnn.benchmark=True` fix (#9448) * AutoBatch `cudnn.benchmark=True` fix May resolve https://github.com/ultralytics/yolov5/issues/9287 Signed-off-by: Glenn Jocher * Update autobatch.py Signed-off-by: Glenn Jocher * Update autobatch.py Signed-off-by: Glenn Jocher * Update general.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/autobatch.py | 3 +++ utils/general.py | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/utils/autobatch.py b/utils/autobatch.py index 641b055b9fe3..3204fd26fc41 100644 --- a/utils/autobatch.py +++ b/utils/autobatch.py @@ -33,6 +33,9 @@ def autobatch(model, imgsz=640, fraction=0.8, batch_size=16): if device.type == 'cpu': LOGGER.info(f'{prefix}CUDA not detected, using default CPU batch-size {batch_size}') return batch_size + if torch.backends.cudnn.benchmark: + LOGGER.info(f'{prefix} ⚠️ Requires torch.backends.cudnn.benchmark=False, using default batch-size {batch_size}') + return batch_size # Inspect CUDA memory gb = 1 << 30 # bytes to GiB (1024 ** 3) diff --git a/utils/general.py b/utils/general.py index 8633511f89f5..af95b3dc2b8b 100644 --- a/utils/general.py +++ b/utils/general.py @@ -223,7 +223,7 @@ def init_seeds(seed=0, deterministic=False): torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.cuda.manual_seed_all(seed) # for Multi-GPU, exception safe - torch.backends.cudnn.benchmark = True # for faster training + # torch.backends.cudnn.benchmark = True # AutoBatch problem https://github.com/ultralytics/yolov5/issues/9287 if deterministic and check_version(torch.__version__, '1.12.0'): # https://github.com/ultralytics/yolov5/pull/8213 torch.use_deterministic_algorithms(True) torch.backends.cudnn.deterministic = True From 4a4308001ce1699fca2d9566b652e2388a088973 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 17 Sep 2022 15:19:43 +0200 Subject: [PATCH 1434/1976] Do not move downloaded zips (#9455) * Do not move downloaded zips Prevent multiple downloads on HUB of same dataset @kalenmike Signed-off-by: Glenn Jocher * Update general.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/general.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/utils/general.py b/utils/general.py index af95b3dc2b8b..4d080f282ed0 100644 --- a/utils/general.py +++ b/utils/general.py @@ 
-568,10 +568,10 @@ def download(url, dir='.', unzip=True, delete=True, curl=False, threads=1, retry def download_one(url, dir): # Download 1 file success = True - f = dir / Path(url).name # filename - if Path(url).is_file(): # exists in current path - Path(url).rename(f) # move to dir - elif not f.exists(): + if Path(url).is_file(): + f = Path(url) # filename + else: # does not exist + f = dir / Path(url).name LOGGER.info(f'Downloading {url} to {f}...') for i in range(retry + 1): if curl: From 6a9fffd19a96799c683c94d2d4da8c453e819116 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 17 Sep 2022 15:42:24 +0200 Subject: [PATCH 1435/1976] Update general.py (#9454) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/general.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/utils/general.py b/utils/general.py index 4d080f282ed0..38856b6bfa1c 100644 --- a/utils/general.py +++ b/utils/general.py @@ -469,8 +469,7 @@ def check_dataset(data, autodownload=True): # Read yaml (optional) if isinstance(data, (str, Path)): - with open(data, errors='ignore') as f: - data = yaml.safe_load(f) # dictionary + data = yaml_load(data) # dictionary # Checks for k in 'train', 'val', 'names': @@ -485,7 +484,13 @@ def check_dataset(data, autodownload=True): path = (ROOT / path).resolve() for k in 'train', 'val', 'test': if data.get(k): # prepend path - data[k] = str(path / data[k]) if isinstance(data[k], str) else [str(path / x) for x in data[k]] + if isinstance(data[k], str): + x = (path / data[k]).resolve() + if not x.exists() and data[k].startswith('../'): + x = (path / data[k][3:]).resolve() + data[k] = str(x) + else: + data[k] = [str((path / x).resolve()) for x in data[k]] # Parse yaml train, val, test, s = (data.get(x) for x in ('train', 'val', 'test', 'download')) @@ -496,13 +501,12 @@ def check_dataset(data, autodownload=True): if not s or not autodownload: raise Exception('Dataset not found ❌') t = time.time() - root = path.parent if 'path' in data else '..' # unzip directory i.e. 
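The `download_one()` rework above (#9455) stops renaming a file that already exists at the given path into `dir`, so a HUB job pointed at a local zip reuses it in place instead of re-downloading it on the next run. A stdlib-only sketch of the same branching, as a simplified hypothetical helper (the real function also supports curl, threads, and unzipping):

    from pathlib import Path
    from urllib.request import urlretrieve

    def download_one(url, dir, retry=3):
        if Path(url).is_file():
            return Path(url)  # already on disk - leave it where it is
        f = Path(dir) / Path(url).name
        if not f.exists():
            for i in range(retry):
                try:
                    urlretrieve(url, str(f))  # download into dir
                    break
                except OSError:
                    print(f'Download failure, retrying {i + 1}/{retry} {url}...')
        return f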
'../' if s.startswith('http') and s.endswith('.zip'): # URL f = Path(s).name # filename LOGGER.info(f'Downloading {s} to {f}...') torch.hub.download_url_to_file(s, f) - Path(root).mkdir(parents=True, exist_ok=True) # create root - ZipFile(f).extractall(path=root) # unzip + Path(DATASETS_DIR).mkdir(parents=True, exist_ok=True) # create root + ZipFile(f).extractall(path=DATASETS_DIR) # unzip Path(f).unlink() # remove zip r = None # success elif s.startswith('bash '): # bash script @@ -511,7 +515,7 @@ def check_dataset(data, autodownload=True): else: # python script r = exec(s, {'yaml': data}) # return None dt = f'({round(time.time() - t, 1)}s)' - s = f"success ✅ {dt}, saved to {colorstr('bold', root)}" if r in (0, None) else f"failure {dt} ❌" + s = f"success ✅ {dt}, saved to {colorstr('bold', DATASETS_DIR)}" if r in (0, None) else f"failure {dt} ❌" LOGGER.info(f"Dataset download {s}") check_font('Arial.ttf' if is_ascii(data['names']) else 'Arial.Unicode.ttf', progress=True) # download fonts return data # dictionary From 060837406542c5c65301b8fde641f4d92a1f395e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 17 Sep 2022 23:17:59 +0200 Subject: [PATCH 1436/1976] `Detect()` and `Segment()` fixes for CoreML and Paddle (#9458) * Detect() and Segment() fixes for CoreML and Paddle Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- models/yolo.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/models/yolo.py b/models/yolo.py index 46039c36d7e1..0dca6353a356 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -64,17 +64,17 @@ def forward(self, x): if self.dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]: self.grid[i], self.anchor_grid[i] = self._make_grid(nx, ny, i) - y = x[i].clone() - y[..., :5 + self.nc].sigmoid_() - if self.inplace: - y[..., 0:2] = (y[..., 0:2] * 2 + self.grid[i]) * self.stride[i] # xy - y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh - else: # for YOLOv5 on AWS Inferentia https://github.com/ultralytics/yolov5/pull/2953 - xy, wh, etc = y.split((2, 2, self.no - 4), 4) # tensor_split((2, 4, 5), 4) if torch 1.8.0 + if isinstance(self, Segment): # (boxes + masks) + xy, wh, conf, mask = x[i].split((2, 2, self.nc + 1, self.no - self.nc - 5), 4) + xy = (xy.sigmoid() * 2 + self.grid[i]) * self.stride[i] # xy + wh = (wh.sigmoid() * 2) ** 2 * self.anchor_grid[i] # wh + y = torch.cat((xy, wh, conf.sigmoid(), mask), 4) + else: # Detect (boxes only) + xy, wh, conf = x[i].sigmoid().split((2, 2, self.nc + 1), 4) xy = (xy * 2 + self.grid[i]) * self.stride[i] # xy wh = (wh * 2) ** 2 * self.anchor_grid[i] # wh - y = torch.cat((xy, wh, etc), 4) - z.append(y.view(bs, -1, self.no)) + y = torch.cat((xy, wh, conf), 4) + z.append(y.view(bs, self.na * nx * ny, self.no)) return x if self.training else (torch.cat(z, 1),) if self.export else (torch.cat(z, 1), x) From afb9860522e5023d64f4fd36fb78b6f26011f760 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 18 Sep 2022 00:17:31 +0200 Subject: [PATCH 1437/1976] Add Paddle exports to benchmarks (#9459) * Add Paddle exports to benchmarks Signed-off-by: Glenn Jocher * Update plots.py Signed-off-by: Glenn Jocher * Update common.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- benchmarks.py | 2 +- models/common.py | 10 ++++------ utils/segment/plots.py | 4 ++-- 3 files changed, 7 
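In the rewritten `Detect.forward` above, each anchor's output vector has length `no = 5 + nc` for detection plus `nm` mask coefficients for `Segment`, and sigmoid is applied to boxes and confidences but deliberately not to the mask coefficients. The split arithmetic, worked with COCO-style numbers (nc=80 and the nm=32 default that appears in the tf.py diff later in this log):

    import torch

    nc, nm = 80, 32               # classes, mask coefficients
    no = 5 + nc + nm              # 2 xy + 2 wh + (1 obj + nc cls) + nm masks = 117
    x = torch.randn(1, 3, 20, 20, no)  # dummy head output (bs, na, ny, nx, no)

    xy, wh, conf, mask = x.split((2, 2, nc + 1, no - nc - 5), dim=4)
    print([t.shape[-1] for t in (xy, wh, conf, mask)])  # [2, 2, 81, 32]
    y = torch.cat((xy.sigmoid(), wh.sigmoid(), conf.sigmoid(), mask), 4)  # masks stay raw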
insertions(+), 9 deletions(-) diff --git a/benchmarks.py b/benchmarks.py index 58e083c95d55..161af73c1eda 100644 --- a/benchmarks.py +++ b/benchmarks.py @@ -65,7 +65,7 @@ def run( model_type = type(attempt_load(weights, fuse=False)) # DetectionModel, SegmentationModel, etc. for i, (name, f, suffix, cpu, gpu) in export.export_formats().iterrows(): # index, (name, file, suffix, CPU, GPU) try: - assert i not in (9, 10, 11), 'inference not supported' # Edge TPU, TF.js and Paddle are unsupported + assert i not in (9, 10), 'inference not supported' # Edge TPU and TF.js are unsupported assert i != 5 or platform.system() == 'Darwin', 'inference only supported on macOS>=10.13' # CoreML if 'cpu' in device.type: assert cpu, 'inference not supported on CPU' diff --git a/models/common.py b/models/common.py index 9c08120fe7f6..2b61307ad46b 100644 --- a/models/common.py +++ b/models/common.py @@ -460,8 +460,8 @@ def wrap_frozen_graph(gd, inputs, outputs): if cuda: config.enable_use_gpu(memory_pool_init_size_mb=2048, device_id=0) predictor = pdi.create_predictor(config) - input_names = predictor.get_input_names() - input_handle = predictor.get_input_handle(input_names[0]) + input_handle = predictor.get_input_handle(predictor.get_input_names()[0]) + output_names = predictor.get_output_names() else: raise NotImplementedError(f'ERROR: {w} is not a supported format') @@ -517,12 +517,10 @@ def forward(self, im, augment=False, visualize=False): k = 'var_' + str(sorted(int(k.replace('var_', '')) for k in y)[-1]) # output key y = y[k] # output elif self.paddle: # PaddlePaddle - im = im.cpu().numpy().astype("float32") + im = im.cpu().numpy().astype(np.float32) self.input_handle.copy_from_cpu(im) self.predictor.run() - output_names = self.predictor.get_output_names() - output_handle = self.predictor.get_output_handle(output_names[0]) - y = output_handle.copy_to_cpu() + y = [self.predictor.get_output_handle(x).copy_to_cpu() for x in self.output_names] else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU) im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3) if self.saved_model: # SavedModel diff --git a/utils/segment/plots.py b/utils/segment/plots.py index e882c14390f0..9b90900b3772 100644 --- a/utils/segment/plots.py +++ b/utils/segment/plots.py @@ -99,9 +99,9 @@ def plot_images_and_masks(images, targets, masks, paths=None, fname='images.jpg' if mh != h or mw != w: mask = image_masks[j].astype(np.uint8) mask = cv2.resize(mask, (w, h)) - mask = mask.astype(np.bool) + mask = mask.astype(bool) else: - mask = image_masks[j].astype(np.bool) + mask = image_masks[j].astype(bool) with contextlib.suppress(Exception): im[y:y + h, x:x + w, :][mask] = im[y:y + h, x:x + w, :][mask] * 0.4 + np.array(color) * 0.6 annotator.fromarray(im) From e8a9c5ae41b53f756e46de1190831b14b53c3b24 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 18 Sep 2022 00:57:48 +0200 Subject: [PATCH 1438/1976] Add `macos-latest` runner for CoreML benchmarks (#9453) * Add `macos-latest` runner for CoreML benchmarks Signed-off-by: Glenn Jocher * Update common.py Signed-off-by: Glenn Jocher * Update ci-testing.yml Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- models/common.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/models/common.py b/models/common.py index 2b61307ad46b..825a4c4e2633 100644 --- a/models/common.py +++ b/models/common.py @@ -514,8 +514,7 @@ def forward(self, im, augment=False, visualize=False): conf, cls = y['confidence'].max(1), 
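The `astype(np.bool)` to `astype(bool)` edits in utils/segment/plots.py above track a NumPy deprecation: `np.bool` was only an alias for the builtin `bool`, deprecated in NumPy 1.20 and removed in 1.24, so the builtin is the portable spelling:

    import numpy as np

    m = np.zeros((4, 4), dtype=np.uint8)
    mask = m.astype(bool)    # portable on all NumPy versions
    # m.astype(np.bool)      # AttributeError on NumPy >= 1.24
    print(mask.dtype)        # bool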
y['confidence'].argmax(1).astype(np.float) y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1) else: - k = 'var_' + str(sorted(int(k.replace('var_', '')) for k in y)[-1]) # output key - y = y[k] # output + y = list(reversed(y.values())) # reversed for segmentation models (pred, proto) elif self.paddle: # PaddlePaddle im = im.cpu().numpy().astype(np.float32) self.input_handle.copy_from_cpu(im) From 8ae81a6c87ebbf6a25c4dc2c77ef443b1d84098a Mon Sep 17 00:00:00 2001 From: Junjie Zhang <46258221+Oswells@users.noreply.github.com> Date: Sun, 18 Sep 2022 18:27:43 +0800 Subject: [PATCH 1439/1976] Fix cutout bug (#9452) * fix cutout bug Signed-off-by: Junjie Zhang <46258221+Oswells@users.noreply.github.com> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Junjie Zhang <46258221+Oswells@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- utils/augmentations.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/augmentations.py b/utils/augmentations.py index a5587351f75b..f49110f43c6a 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -12,7 +12,7 @@ import torchvision.transforms as T import torchvision.transforms.functional as TF -from utils.general import LOGGER, check_version, colorstr, resample_segments, segment2box +from utils.general import LOGGER, check_version, colorstr, resample_segments, segment2box, xywhn2xyxy from utils.metrics import bbox_ioa IMAGENET_MEAN = 0.485, 0.456, 0.406 # RGB mean @@ -281,7 +281,7 @@ def cutout(im, labels, p=0.5): # return unobscured labels if len(labels) and s > 0.03: box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32) - ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area + ioa = bbox_ioa(box, xywhn2xyxy(labels[:, 1:5], w, h)) # intersection over area labels = labels[ioa < 0.60] # remove >60% obscured labels return labels From 95cef1ae6b3bdf4ced616a2b6f3c9655803e9ea7 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 18 Sep 2022 12:42:23 +0200 Subject: [PATCH 1440/1976] Optimize imports (#9464) * Optimize imports * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Reformat * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- segment/train.py | 2 -- utils/loggers/clearml/clearml_utils.py | 1 + utils/loggers/comet/hpo.py | 2 +- 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/segment/train.py b/segment/train.py index bda379176151..8abd0944551d 100644 --- a/segment/train.py +++ b/segment/train.py @@ -39,8 +39,6 @@ sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative -import torch.nn.functional as F - import segment.val as validate # for end-of-epoch mAP from models.experimental import attempt_load from models.yolo import SegmentationModel diff --git a/utils/loggers/clearml/clearml_utils.py b/utils/loggers/clearml/clearml_utils.py index 1e136907367d..eb1c12ce6cac 100644 --- a/utils/loggers/clearml/clearml_utils.py +++ b/utils/loggers/clearml/clearml_utils.py @@ -11,6 +11,7 @@ try: import clearml from clearml import Dataset, Task + assert hasattr(clearml, '__version__') # verify package import not local dir except (ImportError, AssertionError): clearml = None diff --git 
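The cutout fix above (#9452) is a coordinate-convention bug: `bbox_ioa()` expects pixel xyxy boxes, while YOLO labels carry normalized xywh, so the obscuration test compared mismatched units until the labels are converted with `xywhn2xyxy`. The conversion itself is the standard formula, sketched here without the real function's padding arguments:

    import numpy as np

    def xywhn2xyxy_sketch(x, w, h):
        # normalized (xc, yc, bw, bh) -> pixel (x1, y1, x2, y2)
        y = np.empty_like(x, dtype=np.float32)
        y[:, 0] = w * (x[:, 0] - x[:, 2] / 2)  # x1
        y[:, 1] = h * (x[:, 1] - x[:, 3] / 2)  # y1
        y[:, 2] = w * (x[:, 0] + x[:, 2] / 2)  # x2
        y[:, 3] = h * (x[:, 1] + x[:, 3] / 2)  # y2
        return y

    labels = np.array([[0.5, 0.5, 0.2, 0.4]])       # one centered box
    print(xywhn2xyxy_sketch(labels, w=640, h=480))  # [[256. 144. 384. 336.]]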
a/utils/loggers/comet/hpo.py b/utils/loggers/comet/hpo.py index eab4df9978cf..7dd5c92e8de1 100644 --- a/utils/loggers/comet/hpo.py +++ b/utils/loggers/comet/hpo.py @@ -14,7 +14,7 @@ if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH -from train import parse_opt, train +from train import train from utils.callbacks import Callbacks from utils.general import increment_path from utils.torch_utils import select_device From dc42e6ef2232979e6f0f606da670f42c6d59108c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 18 Sep 2022 14:45:08 +0200 Subject: [PATCH 1441/1976] TensorRT SegmentationModel fix (#9465) * TensorRT SegmentationModel fix * TensorRT SegmentationModel fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * TensorRT SegmentationModel fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * TensorRT SegmentationModel fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * TensorRT SegmentationModel fix * TensorRT SegmentationModel fix * fix * sort output names * Update ci-testing.yml Signed-off-by: Glenn Jocher * Update ci-testing.yml Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- export.py | 23 ++++++++++++----------- models/common.py | 27 ++++++++++++++++----------- 2 files changed, 28 insertions(+), 22 deletions(-) diff --git a/export.py b/export.py index a575c73e375f..9955870e9e43 100644 --- a/export.py +++ b/export.py @@ -66,7 +66,7 @@ ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative from models.experimental import attempt_load -from models.yolo import ClassificationModel, Detect +from models.yolo import ClassificationModel, Detect, DetectionModel, SegmentationModel from utils.dataloaders import LoadImages from utils.general import (LOGGER, Profile, check_dataset, check_img_size, check_requirements, check_version, check_yaml, colorstr, file_size, get_default_args, print_args, url2file, yaml_save) @@ -134,6 +134,15 @@ def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr('ONNX LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...') f = file.with_suffix('.onnx') + output_names = ['output0', 'output1'] if isinstance(model, SegmentationModel) else ['output0'] + if dynamic: + dynamic = {'images': {0: 'batch', 2: 'height', 3: 'width'}} # shape(1,3,640,640) + if isinstance(model, SegmentationModel): + dynamic['output0'] = {0: 'batch', 1: 'anchors'} # shape(1,25200,85) + dynamic['output1'] = {0: 'batch', 2: 'mask_height', 3: 'mask_width'} # shape(1,32,160,160) + elif isinstance(model, DetectionModel): + dynamic['output0'] = {0: 'batch', 1: 'anchors'} # shape(1,25200,85) + torch.onnx.export( model.cpu() if dynamic else model, # --dynamic only compatible with cpu im.cpu() if dynamic else im, @@ -142,16 +151,8 @@ def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr('ONNX opset_version=opset, do_constant_folding=True, input_names=['images'], - output_names=['output'], - dynamic_axes={ - 'images': { - 0: 'batch', - 2: 'height', - 3: 'width'}, # shape(1,3,640,640) - 'output': { - 0: 'batch', - 1: 'anchors'} # shape(1,25200,85) - } if dynamic else None) + output_names=output_names, + dynamic_axes=dynamic or None) # Checks model_onnx = onnx.load(f) # load onnx model diff --git a/models/common.py b/models/common.py index 
825a4c4e2633..d0bc65e02f91 100644 --- a/models/common.py +++ b/models/common.py @@ -390,18 +390,21 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, model = runtime.deserialize_cuda_engine(f.read()) context = model.create_execution_context() bindings = OrderedDict() + output_names = [] fp16 = False # default updated below dynamic = False - for index in range(model.num_bindings): - name = model.get_binding_name(index) - dtype = trt.nptype(model.get_binding_dtype(index)) - if model.binding_is_input(index): - if -1 in tuple(model.get_binding_shape(index)): # dynamic + for i in range(model.num_bindings): + name = model.get_binding_name(i) + dtype = trt.nptype(model.get_binding_dtype(i)) + if model.binding_is_input(i): + if -1 in tuple(model.get_binding_shape(i)): # dynamic dynamic = True - context.set_binding_shape(index, tuple(model.get_profile_shape(0, index)[2])) + context.set_binding_shape(i, tuple(model.get_profile_shape(0, i)[2])) if dtype == np.float16: fp16 = True - shape = tuple(context.get_binding_shape(index)) + else: # output + output_names.append(name) + shape = tuple(context.get_binding_shape(i)) im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device) bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr())) binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items()) @@ -495,15 +498,17 @@ def forward(self, im, augment=False, visualize=False): y = list(self.executable_network([im]).values()) elif self.engine: # TensorRT if self.dynamic and im.shape != self.bindings['images'].shape: - i_in, i_out = (self.model.get_binding_index(x) for x in ('images', 'output')) - self.context.set_binding_shape(i_in, im.shape) # reshape if dynamic + i = self.model.get_binding_index('images') + self.context.set_binding_shape(i, im.shape) # reshape if dynamic self.bindings['images'] = self.bindings['images']._replace(shape=im.shape) - self.bindings['output'].data.resize_(tuple(self.context.get_binding_shape(i_out))) + for name in self.output_names: + i = self.model.get_binding_index(name) + self.bindings[name].data.resize_(tuple(self.context.get_binding_shape(i))) s = self.bindings['images'].shape assert im.shape == s, f"input size {im.shape} {'>' if self.dynamic else 'not equal to'} max model size {s}" self.binding_addrs['images'] = int(im.data_ptr()) self.context.execute_v2(list(self.binding_addrs.values())) - y = self.bindings['output'].data + y = [self.bindings[x].data for x in sorted(self.output_names)] elif self.coreml: # CoreML im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3) im = Image.fromarray((im[0] * 255).astype('uint8')) From 4d50cd3469d75b18e99ce1e831ca024e3d25a2d7 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 18 Sep 2022 15:02:04 +0200 Subject: [PATCH 1442/1976] `Conv()` dilation argument fix (#9466) Resolves https://github.com/ultralytics/yolov5/issues/9384 Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- models/common.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/models/common.py b/models/common.py index d0bc65e02f91..33db74dcd9ae 100644 --- a/models/common.py +++ b/models/common.py @@ -232,7 +232,7 @@ class Focus(nn.Module): # Focus wh information into c-space def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups super().__init__() - self.conv = Conv(c1 * 4, c2, k, s, p, g, act) + self.conv = Conv(c1 * 4, c2, k, s, p, g, act=act) # self.contract = Contract(gain=2) def forward(self, 
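The TensorRT rework above replaces the single hard-coded `'output'` binding with an enumerated `output_names` list, which is what allows an engine exported from a SegmentationModel to return both detections and mask prototypes. A condensed sketch of the enumeration, assuming the pre-8.5 TensorRT binding API the diff itself uses (`num_bindings`, `get_binding_name`, `binding_is_input`):

    def collect_bindings(engine):
        # Split engine bindings into input and output tensor names
        inputs, outputs = [], []
        for i in range(engine.num_bindings):
            name = engine.get_binding_name(i)
            (inputs if engine.binding_is_input(i) else outputs).append(name)
        return inputs, outputs

    # With dynamic shapes, outputs are re-read after the input shape is set:
    #   context.set_binding_shape(engine.get_binding_index('images'), im.shape)
    #   for name in outputs:
    #       shape = tuple(context.get_binding_shape(engine.get_binding_index(name)))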
x): # x(b,c,w,h) -> y(b,4c,w/2,h/2) @@ -245,8 +245,8 @@ class GhostConv(nn.Module): def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups super().__init__() c_ = c2 // 2 # hidden channels - self.cv1 = Conv(c1, c_, k, s, None, g, act) - self.cv2 = Conv(c_, c_, 5, 1, None, c_, act) + self.cv1 = Conv(c1, c_, k, s, None, g, act=act) + self.cv2 = Conv(c_, c_, 5, 1, None, c_, act=act) def forward(self, x): y = self.cv1(x) From 295c5e9d3ce70f5dbdb897c2da6a58e58f7c1125 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 18 Sep 2022 16:13:22 +0200 Subject: [PATCH 1443/1976] Update ClassificationModel default training `imgsz=224` (#9469) Update ClassificationModel default training imgsz=224 To match classify/val.py and classify/predict.py Helps https://github.com/ultralytics/yolov5/issues/9462 Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- classify/train.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/classify/train.py b/classify/train.py index 223367260bad..23c90e0a5274 100644 --- a/classify/train.py +++ b/classify/train.py @@ -3,7 +3,7 @@ Train a YOLOv5 classifier model on a classification dataset Usage - Single-GPU training: - $ python classify/train.py --model yolov5s-cls.pt --data imagenette160 --epochs 5 --img 128 + $ python classify/train.py --model yolov5s-cls.pt --data imagenette160 --epochs 5 --img 224 Usage - Multi-GPU DDP training: $ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3 @@ -272,7 +272,7 @@ def parse_opt(known=False): parser.add_argument('--data', type=str, default='imagenette160', help='cifar10, cifar100, mnist, imagenet, ...') parser.add_argument('--epochs', type=int, default=10, help='total training epochs') parser.add_argument('--batch-size', type=int, default=64, help='total batch size for all GPUs') - parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=128, help='train, val image size (pixels)') + parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=224, help='train, val image size (pixels)') parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"') parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') From ca9c993d6c3c9f59c44d28b22d8968709cd11693 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 18 Sep 2022 16:15:25 +0200 Subject: [PATCH 1444/1976] Standardize warnings with `WARNING ⚠️ ...` (#9467) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Standardize warnings * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- benchmarks.py | 2 +- classify/train.py | 2 +- export.py | 2 +- hubconf.py | 2 +- segment/train.py | 2 +- segment/val.py | 6 +++--- train.py | 2 +- utils/__init__.py | 10 ++++++++-- utils/autoanchor.py | 4 ++-- utils/autobatch.py | 2 +- utils/dataloaders.py | 18 +++++++++--------- utils/general.py | 21 ++++++++------------- utils/loggers/__init__.py | 4 ++-- utils/metrics.py | 2 +- utils/segment/dataloaders.py | 2 +- utils/torch_utils.py | 2 +- val.py | 6 +++--- 17 files changed, 45 insertions(+), 44 deletions(-) diff --git a/benchmarks.py b/benchmarks.py index 161af73c1eda..b3b58eb3257c 100644 --- a/benchmarks.py +++ b/benchmarks.py @@ -91,7 +91,7 @@ def run( except Exception as e: if hard_fail: assert type(e) is AssertionError, f'Benchmark --hard-fail for {name}: {e}' - LOGGER.warning(f'WARNING: Benchmark failure for {name}: {e}') + LOGGER.warning(f'WARNING ⚠️ Benchmark failure for {name}: {e}') y.append([name, None, None, None]) # mAP, t_inference if pt_only and i == 0: break # break after PyTorch diff --git a/classify/train.py b/classify/train.py index 23c90e0a5274..178ebcdfff53 100644 --- a/classify/train.py +++ b/classify/train.py @@ -114,7 +114,7 @@ def train(opt, device): m = hub.list('ultralytics/yolov5') # + hub.list('pytorch/vision') # models raise ModuleNotFoundError(f'--model {opt.model} not found. Available models are: \n' + '\n'.join(m)) if isinstance(model, DetectionModel): - LOGGER.warning("WARNING: pass YOLOv5 classifier model with '-cls' suffix, i.e. '--model yolov5s-cls.pt'") + LOGGER.warning("WARNING ⚠️ pass YOLOv5 classifier model with '-cls' suffix, i.e. '--model yolov5s-cls.pt'") model = ClassificationModel(model=model, nc=nc, cutoff=opt.cutoff or 10) # convert to classification model reshape_classifier_output(model, nc) # update class count for m in model.modules(): diff --git a/export.py b/export.py index 9955870e9e43..ac9b13db8ec0 100644 --- a/export.py +++ b/export.py @@ -282,7 +282,7 @@ if dynamic: if im.shape[0] <= 1: - LOGGER.warning(f"{prefix}WARNING: --dynamic model requires maximum --batch-size argument") + LOGGER.warning(f"{prefix}WARNING ⚠️ --dynamic model requires maximum --batch-size argument") profile = builder.create_optimization_profile() for inp in inputs: diff --git a/hubconf.py b/hubconf.py index 2f05565629a5..4224760a4732 100644 --- a/hubconf.py +++ b/hubconf.py @@ -47,7 +47,7 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo model = DetectMultiBackend(path, device=device, fuse=autoshape) # detection model if autoshape: if model.pt and isinstance(model.model, ClassificationModel): - LOGGER.warning('WARNING: ⚠️ YOLOv5 v6.2 ClassificationModel is not yet AutoShape compatible. 
' + LOGGER.warning('WARNING ⚠️ YOLOv5 v6.2 ClassificationModel is not yet AutoShape compatible. ' 'You must pass torch tensors in BCHW to this model, i.e. shape(1,3,224,224).') else: model = AutoShape(model) # for file/URI/PIL/cv2/np inputs and NMS diff --git a/segment/train.py b/segment/train.py index 8abd0944551d..5121c5fa784a 100644 --- a/segment/train.py +++ b/segment/train.py @@ -176,7 +176,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio # DP mode if cuda and RANK == -1 and torch.cuda.device_count() > 1: - LOGGER.warning('WARNING: DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n' + LOGGER.warning('WARNING ⚠️ DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n' 'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.') model = torch.nn.DataParallel(model) diff --git a/segment/val.py b/segment/val.py index 138aa00aaed3..59ab76672a30 100644 --- a/segment/val.py +++ b/segment/val.py @@ -345,7 +345,7 @@ def run( pf = '%22s' + '%11i' * 2 + '%11.3g' * 8 # print format LOGGER.info(pf % ("all", seen, nt.sum(), *metrics.mean_results())) if nt.sum() == 0: - LOGGER.warning(f'WARNING: no labels found in {task} set, can not compute metrics without labels ⚠️') + LOGGER.warning(f'WARNING ⚠️ no labels found in {task} set, can not compute metrics without labels') # Print results per class if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats): @@ -438,9 +438,9 @@ def main(opt): if opt.task in ('train', 'val', 'test'): # run normally if opt.conf_thres > 0.001: # https://github.com/ultralytics/yolov5/issues/1466 - LOGGER.info(f'WARNING: confidence threshold {opt.conf_thres} > 0.001 produces invalid results ⚠️') + LOGGER.warning(f'WARNING ⚠️ confidence threshold {opt.conf_thres} > 0.001 produces invalid results') if opt.save_hybrid: - LOGGER.info('WARNING: --save-hybrid will return high mAP from hybrid labels, not from predictions alone ⚠️') + LOGGER.warning('WARNING ⚠️ --save-hybrid returns high mAP from hybrid labels, not from predictions alone') run(**vars(opt)) else: diff --git a/train.py b/train.py index 4eff6e5d645a..9efece250581 100644 --- a/train.py +++ b/train.py @@ -173,7 +173,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio # DP mode if cuda and RANK == -1 and torch.cuda.device_count() > 1: - LOGGER.warning('WARNING: DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n' + LOGGER.warning('WARNING ⚠️ DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n' 'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.') model = torch.nn.DataParallel(model) diff --git a/utils/__init__.py b/utils/__init__.py index 46225c2208ce..8403a6149827 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -4,9 +4,15 @@ """ import contextlib +import platform import threading +def emojis(str=''): + # Return platform-dependent emoji-safe version of string + return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str + + class TryExcept(contextlib.ContextDecorator): # YOLOv5 TryExcept class. 
Usage: @TryExcept() decorator or 'with TryExcept():' context manager def __init__(self, msg=''): @@ -17,7 +23,7 @@ def __enter__(self): def __exit__(self, exc_type, value, traceback): if value: - print(f'{self.msg}{value}') + print(emojis(f'{self.msg}{value}')) return True @@ -38,7 +44,7 @@ def notebook_init(verbose=True): import os import shutil - from utils.general import check_font, check_requirements, emojis, is_colab + from utils.general import check_font, check_requirements, is_colab from utils.torch_utils import select_device # imports check_requirements(('psutil', 'IPython')) diff --git a/utils/autoanchor.py b/utils/autoanchor.py index 0b49ab3319c0..7e7e9985d68a 100644 --- a/utils/autoanchor.py +++ b/utils/autoanchor.py @@ -122,7 +122,7 @@ def print_results(k, verbose=True): # Filter i = (wh0 < 3.0).any(1).sum() if i: - LOGGER.info(f'{PREFIX}WARNING: Extremely small objects found: {i} of {len(wh0)} labels are < 3 pixels in size') + LOGGER.info(f'{PREFIX}WARNING ⚠️ Extremely small objects found: {i} of {len(wh0)} labels are <3 pixels in size') wh = wh0[(wh0 >= 2.0).any(1)].astype(np.float32) # filter > 2 pixels # wh = wh * (npr.rand(wh.shape[0], 1) * 0.9 + 0.1) # multiply by random scale 0-1 @@ -134,7 +134,7 @@ def print_results(k, verbose=True): k = kmeans(wh / s, n, iter=30)[0] * s # points assert n == len(k) # kmeans may return fewer points than requested if wh is insufficient or too similar except Exception: - LOGGER.warning(f'{PREFIX}WARNING: switching strategies from kmeans to random init') + LOGGER.warning(f'{PREFIX}WARNING ⚠️ switching strategies from kmeans to random init') k = np.sort(npr.rand(n * 2)).reshape(n, 2) * img_size # random init wh, wh0 = (torch.tensor(x, dtype=torch.float32) for x in (wh, wh0)) k = print_results(k, verbose=False) diff --git a/utils/autobatch.py b/utils/autobatch.py index 3204fd26fc41..49435f51a244 100644 --- a/utils/autobatch.py +++ b/utils/autobatch.py @@ -65,7 +65,7 @@ def autobatch(model, imgsz=640, fraction=0.8, batch_size=16): b = batch_sizes[max(i - 1, 0)] # select prior safe point if b < 1 or b > 1024: # b outside of safe range b = batch_size - LOGGER.warning(f'{prefix}WARNING: ⚠️ CUDA anomaly detected, recommend restart environment and retry command.') + LOGGER.warning(f'{prefix}WARNING ⚠️ CUDA anomaly detected, recommend restart environment and retry command.') fraction = np.polyval(p, b) / t # actual fraction predicted LOGGER.info(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%) ✅') diff --git a/utils/dataloaders.py b/utils/dataloaders.py index c04be853c580..5c3460eb0d6e 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -116,7 +116,7 @@ def create_dataloader(path, prefix='', shuffle=False): if rect and shuffle: - LOGGER.warning('WARNING: --rect is incompatible with DataLoader shuffle, setting shuffle=False') + LOGGER.warning('WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False') shuffle = False with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP dataset = LoadImagesAndLabels( @@ -328,7 +328,7 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True, tr self.auto = auto and self.rect self.transforms = transforms # optional if not self.rect: - LOGGER.warning('WARNING: Stream shapes differ. For optimal performance supply similarly-shaped streams.') + LOGGER.warning('WARNING ⚠️ Stream shapes differ. 
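`TryExcept` above doubles as a decorator and a context manager because it subclasses `contextlib.ContextDecorator`, and its `__exit__` returning True tells Python to suppress the exception after the message is printed. Restating the class from this diff with a small usage example:

    import contextlib

    class TryExcept(contextlib.ContextDecorator):
        def __init__(self, msg=''):
            self.msg = msg

        def __enter__(self):
            pass

        def __exit__(self, exc_type, value, traceback):
            if value:
                print(f'{self.msg}{value}')
            return True  # returning True suppresses the exception

    @TryExcept('plot failure: ')
    def plot():
        raise ValueError('bad data')

    plot()  # prints 'plot failure: bad data' instead of raising

    with TryExcept('download failure: '):
        raise OSError('timeout')  # likewise swallowed after printing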
For optimal performance supply similarly-shaped streams.') def update(self, i, cap, stream): # Read stream `i` frames in daemon thread @@ -341,7 +341,7 @@ def update(self, i, cap, stream): if success: self.imgs[i] = im else: - LOGGER.warning('WARNING: Video stream unresponsive, please check your IP camera connection.') + LOGGER.warning('WARNING ⚠️ Video stream unresponsive, please check your IP camera connection.') self.imgs[i] = np.zeros_like(self.imgs[i]) cap.open(stream) # re-open stream if signal was lost time.sleep(0.0) # wait time @@ -543,7 +543,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): if msgs: LOGGER.info('\n'.join(msgs)) if nf == 0: - LOGGER.warning(f'{prefix}WARNING: No labels found in {path}. {HELP_URL}') + LOGGER.warning(f'{prefix}WARNING ⚠️ No labels found in {path}. {HELP_URL}') x['hash'] = get_hash(self.label_files + self.im_files) x['results'] = nf, nm, ne, nc, len(self.im_files) x['msgs'] = msgs # warnings @@ -553,7 +553,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): path.with_suffix('.cache.npy').rename(path) # remove .npy suffix LOGGER.info(f'{prefix}New cache created: {path}') except Exception as e: - LOGGER.warning(f'{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}') # not writeable + LOGGER.warning(f'{prefix}WARNING ⚠️ Cache directory {path.parent} is not writeable: {e}') # not writeable return x def __len__(self): @@ -917,7 +917,7 @@ def verify_image_label(args): f.seek(-2, 2) if f.read() != b'\xff\xd9': # corrupt JPEG ImageOps.exif_transpose(Image.open(im_file)).save(im_file, 'JPEG', subsampling=0, quality=100) - msg = f'{prefix}WARNING: {im_file}: corrupt JPEG restored and saved' + msg = f'{prefix}WARNING ⚠️ {im_file}: corrupt JPEG restored and saved' # verify labels if os.path.isfile(lb_file): @@ -939,7 +939,7 @@ def verify_image_label(args): lb = lb[i] # remove duplicates if segments: segments = [segments[x] for x in i] - msg = f'{prefix}WARNING: {im_file}: {nl - len(i)} duplicate labels removed' + msg = f'{prefix}WARNING ⚠️ {im_file}: {nl - len(i)} duplicate labels removed' else: ne = 1 # label empty lb = np.zeros((0, 5), dtype=np.float32) @@ -949,7 +949,7 @@ def verify_image_label(args): return im_file, lb, shape, segments, nm, nf, ne, nc, msg except Exception as e: nc = 1 - msg = f'{prefix}WARNING: {im_file}: ignoring corrupt image/label: {e}' + msg = f'{prefix}WARNING ⚠️ {im_file}: ignoring corrupt image/label: {e}' return [None, None, None, None, nm, nf, ne, nc, msg] @@ -1012,7 +1012,7 @@ def _hub_ops(self, f, max_dim=1920): im = im.resize((int(im.width * r), int(im.height * r))) im.save(f_new, 'JPEG', quality=50, optimize=True) # save except Exception as e: # use OpenCV - print(f'WARNING: HUB ops PIL failure {f}: {e}') + LOGGER.info(f'WARNING ⚠️ HUB ops PIL failure {f}: {e}') im = cv2.imread(f) im_height, im_width = im.shape[:2] r = max_dim / max(im_height, im_width) # ratio diff --git a/utils/general.py b/utils/general.py index 38856b6bfa1c..fd0b4090a0fa 100644 --- a/utils/general.py +++ b/utils/general.py @@ -34,7 +34,7 @@ import torchvision import yaml -from utils import TryExcept +from utils import TryExcept, emojis from utils.downloads import gsutil_getsize from utils.metrics import box_iou, fitness @@ -248,11 +248,6 @@ def get_latest_run(search_dir='.'): return max(last_list, key=os.path.getctime) if last_list else '' -def emojis(str=''): - # Return platform-dependent emoji-safe version of string - return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else 
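The corrupt-JPEG test in `verify_image_label` above leans on the JPEG container format: a well-formed file ends with the two-byte end-of-image marker FF D9, so seeking two bytes before EOF and comparing is a cheap truncation check. Standalone:

    def jpeg_truncated(path):
        # A complete JPEG ends with the EOI marker b'\xff\xd9'
        with open(path, 'rb') as f:
            f.seek(-2, 2)  # 2 bytes before end of file (whence=2 is SEEK_END)
            return f.read() != b'\xff\xd9'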
str - - def file_age(path=__file__): # Return days since last file update dt = (datetime.now() - datetime.fromtimestamp(Path(path).stat().st_mtime)) # delta @@ -333,7 +328,7 @@ def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=Fals # Check version vs. required version current, minimum = (pkg.parse_version(x) for x in (current, minimum)) result = (current == minimum) if pinned else (current >= minimum) # bool - s = f'WARNING: ⚠️ {name}{minimum} is required by YOLOv5, but {name}{current} is currently installed' # string + s = f'WARNING ⚠️ {name}{minimum} is required by YOLOv5, but {name}{current} is currently installed' # string if hard: assert result, emojis(s) # assert min requirements met if verbose and not result: @@ -373,7 +368,7 @@ def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), insta f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n" LOGGER.info(s) except Exception as e: - LOGGER.warning(f'{prefix} {e}') + LOGGER.warning(f'{prefix} ❌ {e}') def check_img_size(imgsz, s=32, floor=0): @@ -384,7 +379,7 @@ def check_img_size(imgsz, s=32, floor=0): imgsz = list(imgsz) # convert to list if tuple new_size = [max(make_divisible(x, int(s)), floor) for x in imgsz] if new_size != imgsz: - LOGGER.warning(f'WARNING: --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}') + LOGGER.warning(f'WARNING ⚠️ --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}') return new_size @@ -399,7 +394,7 @@ def check_imshow(): cv2.waitKey(1) return True except Exception as e: - LOGGER.warning(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}') + LOGGER.warning(f'WARNING ⚠️ Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}') return False @@ -589,9 +584,9 @@ def download_one(url, dir): if success: break elif i < retry: - LOGGER.warning(f'Download failure, retrying {i + 1}/{retry} {url}...') + LOGGER.warning(f'⚠️ Download failure, retrying {i + 1}/{retry} {url}...') else: - LOGGER.warning(f'Failed to download {url}...') + LOGGER.warning(f'❌ Failed to download {url}...') if unzip and success and f.suffix in ('.zip', '.tar', '.gz'): LOGGER.info(f'Unzipping {f}...') @@ -908,7 +903,7 @@ def non_max_suppression( output[xi] = x[i] if (time.time() - t) > time_limit: - LOGGER.warning(f'WARNING: NMS time limit {time_limit:.3f}s exceeded') + LOGGER.warning(f'WARNING ⚠️ NMS time limit {time_limit:.3f}s exceeded') break # time limit exceeded return output diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index f29debb76907..941d09e19e2d 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -11,7 +11,7 @@ import torch from torch.utils.tensorboard import SummaryWriter -from utils.general import colorstr, cv2 +from utils.general import LOGGER, colorstr, cv2 from utils.loggers.clearml.clearml_utils import ClearmlLogger from utils.loggers.wandb.wandb_utils import WandbLogger from utils.plots import plot_images, plot_labels, plot_results @@ -393,7 +393,7 @@ def log_tensorboard_graph(tb, model, imgsz=(640, 640)): warnings.simplefilter('ignore') # suppress jit trace warning tb.add_graph(torch.jit.trace(de_parallel(model), im, strict=False), []) except Exception as e: - print(f'WARNING: TensorBoard graph visualization failure {e}') + LOGGER.warning(f'WARNING ⚠️ TensorBoard graph visualization failure {e}') def web_project_name(project): diff --git a/utils/metrics.py 
b/utils/metrics.py index 021a46ce5d37..ed611d7d38fa 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -186,7 +186,7 @@ def tp_fp(self): # fn = self.matrix.sum(0) - tp # false negatives (missed detections) return tp[:-1], fp[:-1] # remove background class - @TryExcept('WARNING: ConfusionMatrix plot failure: ') + @TryExcept('WARNING ⚠️ ConfusionMatrix plot failure: ') def plot(self, normalize=True, save_dir='', names=()): import seaborn as sn diff --git a/utils/segment/dataloaders.py b/utils/segment/dataloaders.py index f6fe642d077f..d137caa5ab27 100644 --- a/utils/segment/dataloaders.py +++ b/utils/segment/dataloaders.py @@ -37,7 +37,7 @@ def create_dataloader(path, mask_downsample_ratio=1, overlap_mask=False): if rect and shuffle: - LOGGER.warning('WARNING: --rect is incompatible with DataLoader shuffle, setting shuffle=False') + LOGGER.warning('WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False') shuffle = False with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP dataset = LoadImagesAndLabelsAndMasks( diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 8a3366ca3e27..9f257d06ac60 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -47,7 +47,7 @@ def smartCrossEntropyLoss(label_smoothing=0.0): if check_version(torch.__version__, '1.10.0'): return nn.CrossEntropyLoss(label_smoothing=label_smoothing) if label_smoothing > 0: - LOGGER.warning(f'WARNING: label smoothing {label_smoothing} requires torch>=1.10.0') + LOGGER.warning(f'WARNING ⚠️ label smoothing {label_smoothing} requires torch>=1.10.0') return nn.CrossEntropyLoss() diff --git a/val.py b/val.py index e003d2144b7f..3ab4bc3fdb58 100644 --- a/val.py +++ b/val.py @@ -282,7 +282,7 @@ def run( pf = '%22s' + '%11i' * 2 + '%11.3g' * 4 # print format LOGGER.info(pf % ('all', seen, nt.sum(), mp, mr, map50, map)) if nt.sum() == 0: - LOGGER.warning(f'WARNING: no labels found in {task} set, can not compute metrics without labels ⚠️') + LOGGER.warning(f'WARNING ⚠️ no labels found in {task} set, can not compute metrics without labels') # Print results per class if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats): @@ -374,9 +374,9 @@ def main(opt): if opt.task in ('train', 'val', 'test'): # run normally if opt.conf_thres > 0.001: # https://github.com/ultralytics/yolov5/issues/1466 - LOGGER.info(f'WARNING: confidence threshold {opt.conf_thres} > 0.001 produces invalid results ⚠️') + LOGGER.info(f'WARNING ⚠️ confidence threshold {opt.conf_thres} > 0.001 produces invalid results') if opt.save_hybrid: - LOGGER.info('WARNING: --save-hybrid will return high mAP from hybrid labels, not from predictions alone ⚠️') + LOGGER.info('WARNING ⚠️ --save-hybrid will return high mAP from hybrid labels, not from predictions alone') run(**vars(opt)) else: From 92b52424d468feb48c51c3dde173d5d2c606a44b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 18 Sep 2022 17:34:34 +0200 Subject: [PATCH 1445/1976] TensorFlow macOS AutoUpdate (#9471) * TensorFlow macOS AutoUpdate * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- export.py | 11 ++++++++--- requirements.txt | 2 +- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/export.py b/export.py index ac9b13db8ec0..ae292afe06f6 100644 --- a/export.py +++ b/export.py @@ -72,6 +72,8 @@ check_yaml, colorstr, file_size, get_default_args, print_args, url2file, 
yaml_save) from utils.torch_utils import select_device, smart_inference_mode +MACOS = platform.system() == 'Darwin' # macOS environment + def export_formats(): # YOLOv5 export formats @@ -224,7 +226,7 @@ def export_coreml(model, im, file, int8, half, prefix=colorstr('CoreML:')): ct_model = ct.convert(ts, inputs=[ct.ImageType('image', shape=im.shape, scale=1 / 255, bias=[0, 0, 0])]) bits, mode = (8, 'kmeans_lut') if int8 else (16, 'linear') if half else (32, None) if bits < 32: - if platform.system() == 'Darwin': # quantization only supported on macOS + if MACOS: # quantization only supported on macOS with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=DeprecationWarning) # suppress numpy==1.20 float warning ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode) @@ -310,8 +312,11 @@ def export_saved_model(model, keras=False, prefix=colorstr('TensorFlow SavedModel:')): # YOLOv5 TensorFlow SavedModel export - check_requirements('tensorflow' if torch.cuda.is_available() else 'tensorflow-cpu') - import tensorflow as tf + try: + import tensorflow as tf + except Exception: + check_requirements(f"tensorflow{'' if torch.cuda.is_available() else '-macos' if MACOS else '-cpu'}") + import tensorflow as tf from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 from models.tf import TFModel diff --git a/requirements.txt b/requirements.txt index 44fe1ce697b7..835346f218a4 100644 --- a/requirements.txt +++ b/requirements.txt @@ -30,7 +30,7 @@ seaborn>=0.11.0 # nvidia-pyindex # TensorRT export # nvidia-tensorrt # TensorRT export # scikit-learn==0.19.2 # CoreML quantization -# tensorflow>=2.4.1 # TFLite export (or tensorflow-cpu, tensorflow-aarch64) +# tensorflow>=2.4.1 # TF exports (-cpu, -aarch64, -macos) # tensorflowjs>=3.9.0 # TF.js export # openvino-dev # OpenVINO export From 120e27e38efd4351b5e5bb5d735635f4cbf1bc86 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 18 Sep 2022 19:34:10 +0200 Subject: [PATCH 1446/1976] `classify/predict --save-txt` fix (#9478) Classify --save-txt Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- classify/predict.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/classify/predict.py b/classify/predict.py index 878cf48b6fef..4857c69766e7 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -119,13 +119,15 @@ def run( for i, prob in enumerate(pred): # per image seen += 1 if webcam: # batch_size >= 1 - p, im0 = path[i], im0s[i].copy() + p, im0, frame = path[i], im0s[i].copy(), dataset.count s += f'{i}: ' else: - p, im0 = path, im0s.copy() + p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0) p = Path(p) # to Path save_path = str(save_dir / p.name) # im.jpg + txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt + s += '%gx%g ' % im.shape[2:] # print string annotator = Annotator(im0, example=str(names), pil=True) @@ -134,9 +136,12 @@ def run( s += f"{', '.join(f'{names[j]} {prob[j]:.2f}' for j in top5i)}, " # Write results + text = '\n'.join(f'{prob[j]:.2f} {names[j]}' for j in top5i) if save_img or view_img: # Add bbox to image - text = '\n'.join(f'{prob[j]:.2f} {names[j]}' for j in top5i) annotator.text((32, 32), text, txt_color=(255, 255, 255)) + if save_txt: # Write to file + with open(f'{txt_path}.txt', 'a') as f: + f.write(text + '\n') # Stream results im0 = annotator.result() @@ -188,7 +193,7 @@ def parse_opt(): parser.add_argument('--imgsz', '--img', 
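The macOS AutoUpdate above wraps the TensorFlow import in try/except and installs a platform-appropriate wheel on failure (`tensorflow` with CUDA, `tensorflow-macos` on Darwin, `tensorflow-cpu` otherwise). `check_requirements` ultimately shells out to pip; a simplified stdlib-only sketch of that auto-install pattern, as a hypothetical helper assuming a network-enabled environment:

    import importlib
    import subprocess
    import sys

    def lazy_import(module, pip_name=None):
        # Import a module, pip-installing it on first failure (sketch)
        try:
            return importlib.import_module(module)
        except ImportError:
            subprocess.check_call([sys.executable, '-m', 'pip', 'install', pip_name or module])
            return importlib.import_module(module)

    # tf = lazy_import('tensorflow', 'tensorflow-cpu')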
'--img-size', nargs='+', type=int, default=[224], help='inference size h,w') parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') parser.add_argument('--view-img', action='store_true', help='show results') - parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') + parser.add_argument('--save-txt', action='store_false', help='save results to *.txt') parser.add_argument('--nosave', action='store_true', help='do not save images/videos') parser.add_argument('--augment', action='store_true', help='augmented inference') parser.add_argument('--visualize', action='store_true', help='visualize features') From fda8aa551d0b732153c2e0848dd6abd887a41cd1 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 18 Sep 2022 19:52:46 +0200 Subject: [PATCH 1447/1976] TensorFlow SegmentationModel support (#9472) * TensorFlow SegmentationModel support * TensorFlow SegmentationModel support * TensorFlow SegmentationModel support * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * TFLite fixes * GraphDef fixes * Update ci-testing.yml Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/workflows/ci-testing.yml | 2 +- export.py | 2 +- models/common.py | 29 ++++++++++++++++++++--------- models/tf.py | 15 ++++++++------- 4 files changed, 30 insertions(+), 18 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 537ba96e7225..fffc92d1b72f 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -43,7 +43,7 @@ jobs: python benchmarks.py --data coco128.yaml --weights ${{ matrix.model }}.pt --img 320 --hard-fail 0.29 - name: Benchmark SegmentationModel run: | - python benchmarks.py --data coco128-seg.yaml --weights ${{ matrix.model }}-seg.pt --img 320 + python benchmarks.py --data coco128-seg.yaml --weights ${{ matrix.model }}-seg.pt --img 320 --hard-fail 0.22 Tests: timeout-minutes: 60 diff --git a/export.py b/export.py index ae292afe06f6..fe4e53d06cc3 100644 --- a/export.py +++ b/export.py @@ -341,7 +341,7 @@ def export_saved_model(model, m = m.get_concrete_function(spec) frozen_func = convert_variables_to_constants_v2(m) tfm = tf.Module() - tfm.__call__ = tf.function(lambda x: frozen_func(x)[:4] if tf_nms else frozen_func(x)[0], [spec]) + tfm.__call__ = tf.function(lambda x: frozen_func(x)[:4] if tf_nms else frozen_func(x), [spec]) tfm.__call__(im) tf.saved_model.save(tfm, f, diff --git a/models/common.py b/models/common.py index 33db74dcd9ae..fac95a82fdb9 100644 --- a/models/common.py +++ b/models/common.py @@ -427,10 +427,17 @@ def wrap_frozen_graph(gd, inputs, outputs): ge = x.graph.as_graph_element return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs)) + def gd_outputs(gd): + name_list, input_list = [], [] + for node in gd.node: # tensorflow.core.framework.node_def_pb2.NodeDef + name_list.append(node.name) + input_list.extend(node.input) + return sorted(f'{x}:0' for x in list(set(name_list) - set(input_list)) if not x.startswith('NoOp')) + gd = tf.Graph().as_graph_def() # TF GraphDef with open(w, 'rb') as f: gd.ParseFromString(f.read()) - frozen_func = wrap_frozen_graph(gd, inputs="x:0", outputs="Identity:0") + frozen_func = wrap_frozen_graph(gd, inputs="x:0", outputs=gd_outputs(gd)) elif tflite or edgetpu: # 
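Note that the `--save-txt` hunk above flips the action to `store_false`, which inverts the usual CLI convention: the destination now defaults to True and passing the flag turns it off, so classification predictions write label files unless suppressed. Argparse semantics in brief:

    import argparse

    p = argparse.ArgumentParser()
    p.add_argument('--save-txt', action='store_false')  # default True, flag disables
    print(p.parse_args([]).save_txt)                    # True
    print(p.parse_args(['--save-txt']).save_txt)        # False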
https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu from tflite_runtime.interpreter import Interpreter, load_delegate @@ -528,22 +535,26 @@ def forward(self, im, augment=False, visualize=False): else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU) im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3) if self.saved_model: # SavedModel - y = (self.model(im, training=False) if self.keras else self.model(im)).numpy() + y = self.model(im, training=False) if self.keras else self.model(im) elif self.pb: # GraphDef - y = self.frozen_func(x=self.tf.constant(im)).numpy() + y = self.frozen_func(x=self.tf.constant(im)) else: # Lite or Edge TPU - input, output = self.input_details[0], self.output_details[0] + input = self.input_details[0] int8 = input['dtype'] == np.uint8 # is TFLite quantized uint8 model if int8: scale, zero_point = input['quantization'] im = (im / scale + zero_point).astype(np.uint8) # de-scale self.interpreter.set_tensor(input['index'], im) self.interpreter.invoke() - y = self.interpreter.get_tensor(output['index']) - if int8: - scale, zero_point = output['quantization'] - y = (y.astype(np.float32) - zero_point) * scale # re-scale - y[..., :4] *= [w, h, w, h] # xywh normalized to pixels + y = [] + for output in self.output_details: + x = self.interpreter.get_tensor(output['index']) + if int8: + scale, zero_point = output['quantization'] + x = (x.astype(np.float32) - zero_point) * scale # re-scale + y.append(x) + y = [x if isinstance(x, np.ndarray) else x.numpy() for x in y] + y[0][..., :4] *= [w, h, w, h] # xywh normalized to pixels if isinstance(y, (list, tuple)): return self.from_numpy(y[0]) if len(y) == 1 else [self.from_numpy(x) for x in y] diff --git a/models/tf.py b/models/tf.py index 8cce147059d3..ae58ca738e2e 100644 --- a/models/tf.py +++ b/models/tf.py @@ -299,15 +299,15 @@ def call(self, inputs): x[i] = tf.reshape(x[i], [-1, ny * nx, self.na, self.no]) if not self.training: # inference - y = tf.sigmoid(x[i]) + y = x[i] grid = tf.transpose(self.grid[i], [0, 2, 1, 3]) - 0.5 anchor_grid = tf.transpose(self.anchor_grid[i], [0, 2, 1, 3]) * 4 - xy = (y[..., 0:2] * 2 + grid) * self.stride[i] # xy - wh = y[..., 2:4] ** 2 * anchor_grid + xy = (tf.sigmoid(y[..., 0:2]) * 2 + grid) * self.stride[i] # xy + wh = tf.sigmoid(y[..., 2:4]) ** 2 * anchor_grid # Normalize xywh to 0-1 to reduce calibration error xy /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32) wh /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32) - y = tf.concat([xy, wh, y[..., 4:]], -1) + y = tf.concat([xy, wh, tf.sigmoid(y[..., 4:5 + self.nc]), y[..., 5 + self.nc:]], -1) z.append(tf.reshape(y, [-1, self.na * ny * nx, self.no])) return tf.transpose(x, [0, 2, 1, 3]) if self.training else (tf.concat(z, 1), x) @@ -333,8 +333,9 @@ def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), imgsz=(640, 640), w def call(self, x): p = self.proto(x[0]) + p = tf.transpose(p, [0, 3, 1, 2]) # from shape(1,160,160,32) to shape(1,32,160,160) x = self.detect(self, x) - return (x, p) if self.training else ((x[0], p),) + return (x, p) if self.training else (x[0], p) class TFProto(keras.layers.Layer): @@ -485,8 +486,8 @@ def predict(self, conf_thres, clip_boxes=False) return nms, x[1] - return x[0] # output only first tensor [1,6300,85] = [xywh, conf, class0, class1, ...] - # x = x[0][0] # [x(1,6300,85), ...] 
to x(6300,85) + return x # output [1,6300,85] = [xywh, conf, class0, class1, ...] + # x = x[0] # [x(1,6300,85), ...] to x(6300,85) # xywh = x[..., :4] # x(6300,4) boxes # conf = x[..., 4:5] # x(6300,1) confidences # cls = tf.reshape(tf.cast(tf.argmax(x[..., 5:], axis=1), tf.float32), (-1, 1)) # x(6300,1) classes From f038ad71729960facad54407e1b353b0e81242e2 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 19 Sep 2022 12:18:55 +0200 Subject: [PATCH 1448/1976] AutoBatch report include reserved+allocated (#9491) May resolve https://github.com/ultralytics/yolov5/issues/9287#issuecomment-1250767031 Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/autobatch.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/autobatch.py b/utils/autobatch.py index 49435f51a244..bdeb91c3d2bd 100644 --- a/utils/autobatch.py +++ b/utils/autobatch.py @@ -19,7 +19,7 @@ def check_train_batch_size(model, imgsz=640, amp=True): def autobatch(model, imgsz=640, fraction=0.8, batch_size=16): - # Automatically estimate best batch size to use `fraction` of available CUDA memory + # Automatically estimate best YOLOv5 batch size to use `fraction` of available CUDA memory # Usage: # import torch # from utils.autobatch import autobatch @@ -67,6 +67,6 @@ def autobatch(model, imgsz=640, fraction=0.8, batch_size=16): b = batch_size LOGGER.warning(f'{prefix}WARNING ⚠️ CUDA anomaly detected, recommend restart environment and retry command.') - fraction = np.polyval(p, b) / t # actual fraction predicted + fraction = (np.polyval(p, b) + r + a) / t # actual fraction predicted LOGGER.info(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%) ✅') return b From 868c0e9bbb45b031e7bfd73c6d3983bcce07b9c1 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 19 Sep 2022 13:31:24 +0200 Subject: [PATCH 1449/1976] Update Detect() grid init `for` loop (#9494) May resolve threaded inference issue in https://github.com/ultralytics/yolov5/pull/9425#issuecomment-1250802928 by avoiding memory sharing on init. Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- models/yolo.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/models/yolo.py b/models/yolo.py index 0dca6353a356..1d0da2a6e010 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -47,8 +47,8 @@ def __init__(self, nc=80, anchors=(), ch=(), inplace=True): # detection layer self.no = nc + 5 # number of outputs per anchor self.nl = len(anchors) # number of detection layers self.na = len(anchors[0]) // 2 # number of anchors - self.grid = [torch.empty(1)] * self.nl # init grid - self.anchor_grid = [torch.empty(1)] * self.nl # init anchor grid + self.grid = [torch.empty(0) for _ in range(self.nl)] # init grid + self.anchor_grid = [torch.empty(0) for _ in range(self.nl)] # init anchor grid self.register_buffer('anchors', torch.tensor(anchors).float().view(self.nl, -1, 2)) # shape(nl,na,2) self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv self.inplace = inplace # use inplace ops (e.g. slice assignment) From 11640698977724daf7982c9da398c2ee2f2b6e91 Mon Sep 17 00:00:00 2001 From: mucunwuxian Date: Mon, 19 Sep 2022 21:01:46 +0900 Subject: [PATCH 1450/1976] Accelerate video inference (#9487) * The following code is slow, "self.cap.set(cv2.CAP_PROP_POS_FRAMES, self.vid_stride * (self.frame + 1)) # read at vid_stride". * adjust... 
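A minimal standalone sketch of the grab/retrieve pattern the patch below adopts (OpenCV only; 'video.mp4' and the stride value are illustrative, not from the patch). grab() advances the decoder cheaply without converting a frame for return, whereas a CAP_PROP_POS_FRAMES seek may force the codec to rewind to a keyframe on every read:

import cv2

cap = cv2.VideoCapture('video.mp4')  # illustrative source path
vid_stride = 3  # process every 3rd frame
while True:
    for _ in range(vid_stride):
        cap.grab()  # advance the decoder; no frame conversion yet
    ret_val, im0 = cap.retrieve()  # decode only the last grabbed frame
    if not ret_val:  # end of stream
        break
    # ... run inference on im0 here ...
cap.release()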
* Update dataloaders.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- utils/dataloaders.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 5c3460eb0d6e..5b03b4eb9759 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -232,8 +232,9 @@ def __next__(self): if self.video_flag[self.count]: # Read video self.mode = 'video' - ret_val, im0 = self.cap.read() - self.cap.set(cv2.CAP_PROP_POS_FRAMES, self.vid_stride * (self.frame + 1)) # read at vid_stride + for _ in range(self.vid_stride): + self.cap.grab() + ret_val, im0 = self.cap.retrieve() while not ret_val: self.count += 1 self.cap.release() From 0b724c5b851b32bb3a8fbfab3cc2d68f93b4661e Mon Sep 17 00:00:00 2001 From: Dhruv Nair Date: Mon, 19 Sep 2022 11:26:19 -0400 Subject: [PATCH 1451/1976] Comet Image Logging Fix (#9498) fix issues with image logging --- utils/loggers/comet/__init__.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/utils/loggers/comet/__init__.py b/utils/loggers/comet/__init__.py index 4ee86dd70d6e..3b3142b002c5 100644 --- a/utils/loggers/comet/__init__.py +++ b/utils/loggers/comet/__init__.py @@ -22,6 +22,7 @@ comet_ml = None COMET_PROJECT_NAME = None +import PIL import torch import torchvision.transforms as T import yaml @@ -131,6 +132,8 @@ def __init__(self, opt, hyp, run_id=None, job_type="Training", **experiment_kwar else: self.iou_thres = IOU_THRES + self.log_parameters({"val_iou_threshold": self.iou_thres, "val_conf_threshold": self.conf_thres}) + self.comet_log_predictions = COMET_LOG_PREDICTIONS if self.opt.bbox_interval == -1: self.comet_log_prediction_interval = 1 if self.opt.epochs < 10 else self.opt.epochs // 10 @@ -139,6 +142,7 @@ def __init__(self, opt, hyp, run_id=None, job_type="Training", **experiment_kwar if self.comet_log_predictions: self.metadata_dict = {} + self.logged_image_names = [] self.comet_log_per_class_metrics = COMET_LOG_PER_CLASS_METRICS @@ -249,11 +253,12 @@ def log_predictions(self, image, labelsn, path, shape, predn): filtered_detections = detections[mask] filtered_labels = labelsn[mask] - processed_image = (image * 255).to(torch.uint8) - image_id = path.split("/")[-1].split(".")[0] image_name = f"{image_id}_curr_epoch_{self.experiment.curr_epoch}" - self.log_image(to_pil(processed_image), name=image_name) + if image_name not in self.logged_image_names: + native_scale_image = PIL.Image.open(path) + self.log_image(native_scale_image, name=image_name) + self.logged_image_names.append(image_name) metadata = [] for cls, *xyxy in filtered_labels.tolist(): From 0171198f38f36c55090c91c49a7b5abacd571324 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 19 Sep 2022 20:38:11 +0200 Subject: [PATCH 1452/1976] Fix visualization title bug (#9500) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/plots.py | 1 - 1 file changed, 1 deletion(-) diff --git a/utils/plots.py b/utils/plots.py index d8d5b225a774..51bb7d6c20af 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -204,7 +204,6 @@ def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detec ax[i].axis('off') LOGGER.info(f'Saving {f}... 
({n}/{channels})') - plt.title('Features') plt.savefig(f, dpi=300, bbox_inches='tight') plt.close() np.save(str(f.with_suffix('.npy')), x[0].cpu().numpy()) # npy save From 63368e71d23e453ded1d94094a2b43b75c1a54fa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9B=BE=E9=80=B8=E5=A4=AB=EF=BC=88Zeng=20Yifu=EF=BC=89?= <41098760+Zengyf-CVer@users.noreply.github.com> Date: Tue, 20 Sep 2022 07:11:29 +0800 Subject: [PATCH 1453/1976] Add paddle tips (#9502) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Update export.py Signed-off-by: 曾逸夫(Zeng Yifu) <41098760+Zengyf-CVer@users.noreply.github.com> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: 曾逸夫(Zeng Yifu) <41098760+Zengyf-CVer@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- export.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/export.py b/export.py index fe4e53d06cc3..04c2ed9c802d 100644 --- a/export.py +++ b/export.py @@ -596,10 +596,11 @@ def parse_opt(): parser.add_argument('--topk-all', type=int, default=100, help='TF.js NMS: topk for all classes to keep') parser.add_argument('--iou-thres', type=float, default=0.45, help='TF.js NMS: IoU threshold') parser.add_argument('--conf-thres', type=float, default=0.25, help='TF.js NMS: confidence threshold') - parser.add_argument('--include', - nargs='+', - default=['torchscript'], - help='torchscript, onnx, openvino, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs') + parser.add_argument( + '--include', + nargs='+', + default=['torchscript'], + help='torchscript, onnx, openvino, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle') opt = parser.parse_args() print_args(vars(opt)) return opt From 095f601d9d32ea0f0afd47554c068659939ecf4e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 20 Sep 2022 12:22:02 +0200 Subject: [PATCH 1454/1976] Segmentation `polygons2masks_overlap()` in `np.int32` (#9493) * Segmentation `polygons2masks_overlap()` in `np.int32` May resolve https://github.com/ultralytics/yolov5/issues/9461 WARNING: Masks should be uint8 for fastest speed, change needs profiling results to determine impact. 
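The uint8 risk is easy to reproduce in isolation: the overlap mask stores per-pixel instance indices, so any index above 255 silently wraps around in uint8. A small numpy sketch (the instance count and mask shape below are illustrative):

import numpy as np

n = 300  # hypothetical instance count above the uint8 limit
print(np.array([n]).astype(np.uint8)[0])  # 44: index 300 wraps at 256

dtype = np.int32 if n > 255 else np.uint8  # the guard this patch adds below
masks = np.zeros((160, 160), dtype=dtype)  # illustrative mask shape
masks[0, 0] = n  # index stored exactly with int32
print(masks.dtype)  # int32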
@AyushExel @Laughing-q Signed-off-by: Glenn Jocher * Update dataloaders.py Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/segment/dataloaders.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/segment/dataloaders.py b/utils/segment/dataloaders.py index d137caa5ab27..49575f065752 100644 --- a/utils/segment/dataloaders.py +++ b/utils/segment/dataloaders.py @@ -308,7 +308,8 @@ def polygons2masks(img_size, polygons, color, downsample_ratio=1): def polygons2masks_overlap(img_size, segments, downsample_ratio=1): """Return a (640, 640) overlap mask.""" - masks = np.zeros((img_size[0] // downsample_ratio, img_size[1] // downsample_ratio), dtype=np.uint8) + masks = np.zeros((img_size[0] // downsample_ratio, img_size[1] // downsample_ratio), + dtype=np.int32 if len(segments) > 255 else np.uint8) areas = [] ms = [] for si in range(len(segments)): From f8b74631e50bcac1bef8a52283102a5feb7217a6 Mon Sep 17 00:00:00 2001 From: FeiGeChuanShu <774074168@qq.com> Date: Tue, 20 Sep 2022 19:04:45 +0800 Subject: [PATCH 1455/1976] Fix `random_perspective` param bug in segment (#9512) * fix random_perspective param bug when mosaic=False Signed-off-by: FeiGeChuanShu <774074168@qq.com> * Update dataloaders.py * Update dataloaders.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: FeiGeChuanShu <774074168@qq.com> Co-authored-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/segment/dataloaders.py | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/utils/segment/dataloaders.py b/utils/segment/dataloaders.py index 49575f065752..97ef8556068e 100644 --- a/utils/segment/dataloaders.py +++ b/utils/segment/dataloaders.py @@ -140,17 +140,14 @@ def __getitem__(self, index): labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1]) if self.augment: - img, labels, segments = random_perspective( - img, - labels, - segments=segments, - degrees=hyp["degrees"], - translate=hyp["translate"], - scale=hyp["scale"], - shear=hyp["shear"], - perspective=hyp["perspective"], - return_seg=True, - ) + img, labels, segments = random_perspective(img, + labels, + segments=segments, + degrees=hyp["degrees"], + translate=hyp["translate"], + scale=hyp["scale"], + shear=hyp["shear"], + perspective=hyp["perspective"]) nl = len(labels) # number of labels if nl: From e233c038ed63780843446dd7bf00d5cc6a2711fe Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 20 Sep 2022 16:38:04 +0200 Subject: [PATCH 1456/1976] Remove `check_requirements('flatbuffers==1.12')` (#9514) * Remove `check_requirements('flatbuffers==1.12')` Signed-off-by: Glenn Jocher * Update ci-testing.yml Signed-off-by: Glenn Jocher * Update ci-testing.yml Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- export.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/export.py b/export.py index 04c2ed9c802d..a2aa5e830c33 100644 --- a/export.py +++ b/export.py @@ -534,8 +534,6 @@ def run( if coreml: # CoreML f[4], _ = export_coreml(model, im, file, int8, half) if any((saved_model, pb, tflite, edgetpu, tfjs)): # TensorFlow formats - if int8 or edgetpu: # TFLite --int8 bug https://github.com/ultralytics/yolov5/issues/5707 - 
check_requirements('flatbuffers==1.12') # required before `import tensorflow` assert not tflite or not tfjs, 'TFLite and TF.js models must be exported separately, please pass only one type.' assert not isinstance(model, ClassificationModel), 'ClassificationModel export to TF formats not yet supported.' f[5], s_model = export_saved_model(model.cpu(), From bd35191033d52a9e48e6c8faaeaaa009243b988f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 20 Sep 2022 18:47:14 +0200 Subject: [PATCH 1457/1976] Fix TF Lite exports (#9517) * Update tf.py Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> From c0d97138456f2257f608c4120c8fd65abcf69326 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 20 Sep 2022 19:01:03 +0200 Subject: [PATCH 1458/1976] TFLite fix 2 (#9518) * TFLite fix 2 Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- models/tf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/tf.py b/models/tf.py index ae58ca738e2e..0520c30a96df 100644 --- a/models/tf.py +++ b/models/tf.py @@ -310,7 +310,7 @@ def call(self, inputs): y = tf.concat([xy, wh, tf.sigmoid(y[..., 4:5 + self.nc]), y[..., 5 + self.nc:]], -1) z.append(tf.reshape(y, [-1, self.na * ny * nx, self.no])) - return tf.transpose(x, [0, 2, 1, 3]) if self.training else (tf.concat(z, 1), x) + return tf.transpose(x, [0, 2, 1, 3]) if self.training else (tf.concat(z, 1),) @staticmethod def _make_grid(nx=20, ny=20): From 77dcf55168d59131f75b8187c6be27172eec00ec Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 20 Sep 2022 22:57:42 +0200 Subject: [PATCH 1459/1976] FROM nvcr.io/nvidia/pytorch:22.08-py3 (#9520) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index 4b9367cc27db..764ee278c22e 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -3,7 +3,7 @@ # Image is CUDA-optimized for YOLOv5 single/multi-GPU training and inference # Start FROM NVIDIA PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch -FROM nvcr.io/nvidia/pytorch:22.07-py3 +FROM nvcr.io/nvidia/pytorch:22.08-py3 RUN rm -rf /opt/pytorch # remove 1.2GB dir # Downloads to user config dir From 6ebef288944ea3a8152f8e0c98a2aee0bd922144 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 21 Sep 2022 15:12:12 +0200 Subject: [PATCH 1460/1976] Remove scikit-learn constraint on coremltools 6.0 (#9530) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 835346f218a4..75e7cc9e94d3 100644 --- a/requirements.txt +++ b/requirements.txt @@ -29,7 +29,7 @@ seaborn>=0.11.0 # onnx-simplifier>=0.4.1 # ONNX simplifier # nvidia-pyindex # TensorRT export # nvidia-tensorrt # TensorRT export -# scikit-learn==0.19.2 # CoreML quantization +# scikit-learn # CoreML quantization # tensorflow>=2.4.1 # TF exports (-cpu, -aarch64, -macos) # tensorflowjs>=3.9.0 # TF.js export # openvino-dev # OpenVINO export From 499a6bf5736a1b78341dfd142bd7c82f71ebf459 Mon Sep 17 
00:00:00 2001 From: Glenn Jocher Date: Wed, 21 Sep 2022 15:14:54 +0200 Subject: [PATCH 1461/1976] Update scikit-learn constraint per coremltools 6.0 (#9531) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 75e7cc9e94d3..17db73678fc1 100644 --- a/requirements.txt +++ b/requirements.txt @@ -29,7 +29,7 @@ seaborn>=0.11.0 # onnx-simplifier>=0.4.1 # ONNX simplifier # nvidia-pyindex # TensorRT export # nvidia-tensorrt # TensorRT export -# scikit-learn # CoreML quantization +# scikit-learn<=1.1.2 # CoreML quantization # tensorflow>=2.4.1 # TF exports (-cpu, -aarch64, -macos) # tensorflowjs>=3.9.0 # TF.js export # openvino-dev # OpenVINO export From db6847431b489a6b8d36c14f05e08970025d01a2 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 21 Sep 2022 17:55:25 +0200 Subject: [PATCH 1462/1976] Update `coremltools>=6.0` (#9532) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 17db73678fc1..55c1f2428e3f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -24,7 +24,7 @@ pandas>=1.1.4 seaborn>=0.11.0 # Export -------------------------------------- -# coremltools>=5.2 # CoreML export +# coremltools>=6.0 # CoreML export # onnx>=1.9.0 # ONNX export # onnx-simplifier>=0.4.1 # ONNX simplifier # nvidia-pyindex # TensorRT export From 6f0284763b0f66467dc04e5a5d87e5a68d1d49cd Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 21 Sep 2022 19:53:26 +0200 Subject: [PATCH 1463/1976] Update albumentations (#9503) * Add `RandomResizedCrop(ratio)` * Update ratio * Update ratio * Update ratio * Update ratio * Update ratio * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Create augmentations.py Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update augmentations.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/augmentations.py | 27 +++++++++++++++------------ utils/dataloaders.py | 2 +- 2 files changed, 16 insertions(+), 13 deletions(-) diff --git a/utils/augmentations.py b/utils/augmentations.py index f49110f43c6a..7c8e0bcdede6 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -21,7 +21,7 @@ class Albumentations: # YOLOv5 Albumentations class (optional, only used if package is installed) - def __init__(self): + def __init__(self, size=640): self.transform = None prefix = colorstr('albumentations: ') try: @@ -29,6 +29,7 @@ def __init__(self): check_version(A.__version__, '1.0.3', hard=True) # version requirement T = [ + A.RandomResizedCrop(height=size, width=size, scale=(0.8, 1.0), ratio=(0.9, 1.11), p=0.0), A.Blur(p=0.01), A.MedianBlur(p=0.01), A.ToGray(p=0.01), @@ -303,15 +304,17 @@ def box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16): return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates -def classify_albumentations(augment=True, - size=224, - scale=(0.08, 1.0), - hflip=0.5, - vflip=0.0, - jitter=0.4, - mean=IMAGENET_MEAN, - std=IMAGENET_STD, - auto_aug=False): +def classify_albumentations( + augment=True, + size=224, + scale=(0.08, 1.0), + ratio=(0.75, 1.0 / 0.75), # 0.75, 1.33 + hflip=0.5, + 
vflip=0.0, + jitter=0.4, + mean=IMAGENET_MEAN, + std=IMAGENET_STD, + auto_aug=False): # YOLOv5 classification Albumentations (optional, only used if package is installed) prefix = colorstr('albumentations: ') try: @@ -319,7 +322,7 @@ def classify_albumentations(augment=True, from albumentations.pytorch import ToTensorV2 check_version(A.__version__, '1.0.3', hard=True) # version requirement if augment: # Resize and crop - T = [A.RandomResizedCrop(height=size, width=size, scale=scale)] + T = [A.RandomResizedCrop(height=size, width=size, scale=scale, ratio=ratio)] if auto_aug: # TODO: implement AugMix, AutoAug & RandAug in albumentation LOGGER.info(f'{prefix}auto augmentations are currently not supported') @@ -338,7 +341,7 @@ def classify_albumentations(augment=True, return A.Compose(T) except ImportError: # package not installed, skip - pass + LOGGER.warning(f'{prefix}⚠️ not found, install with `pip install albumentations` (recommended)') except Exception as e: LOGGER.info(f'{prefix}{e}') diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 5b03b4eb9759..ee79bd0bc5a5 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -404,7 +404,7 @@ def __init__(self, self.mosaic_border = [-img_size // 2, -img_size // 2] self.stride = stride self.path = path - self.albumentations = Albumentations() if augment else None + self.albumentations = Albumentations(size=img_size) if augment else None try: f = [] # image files From 999482b45163c1b808a187b02183f324a9c782cb Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 21 Sep 2022 23:08:52 +0200 Subject: [PATCH 1464/1976] import re (#9535) * import re Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- export.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/export.py b/export.py index a2aa5e830c33..e3cf392b0101 100644 --- a/export.py +++ b/export.py @@ -48,6 +48,7 @@ import json import os import platform +import re import subprocess import sys import time @@ -427,8 +428,6 @@ def export_edgetpu(file, prefix=colorstr('Edge TPU:')): def export_tfjs(file, prefix=colorstr('TensorFlow.js:')): # YOLOv5 TensorFlow.js export check_requirements('tensorflowjs') - import re - import tensorflowjs as tfjs LOGGER.info(f'\n{prefix} starting export with tensorflowjs {tfjs.__version__}...') From 489920ab30b217fed14d3ddd31c23e9afc5be238 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 22 Sep 2022 00:34:35 +0200 Subject: [PATCH 1465/1976] TF.js fix (#9536) * TF.js fix May resolve https://github.com/ultralytics/yolov5/issues/9534 Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update ci-testing.yml Signed-off-by: Glenn Jocher * Update ci-testing.yml Signed-off-by: Glenn Jocher * Update ci-testing.yml Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- models/tf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/tf.py b/models/tf.py index 0520c30a96df..1446d8841646 100644 --- a/models/tf.py +++ b/models/tf.py @@ -485,7 +485,7 @@ def predict(self, iou_thres, conf_thres, clip_boxes=False) - return nms, x[1] + return (nms,) return x # output [1,6300,85] = [xywh, conf, class0, class1, ...] # x = x[0] # [x(1,6300,85), ...] 
to x(6300,85) # xywh = x[..., :4] # x(6300,4) boxes From b25d5a75f2c89aace5cae342f3fe29dfdd46e401 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 22 Sep 2022 23:23:40 +0200 Subject: [PATCH 1466/1976] Refactor dataset batch-size (#9551) --- classify/predict.py | 3 +-- detect.py | 3 +-- segment/predict.py | 3 +-- 3 files changed, 3 insertions(+), 6 deletions(-) diff --git a/classify/predict.py b/classify/predict.py index 4857c69766e7..ef59ff6f550a 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -91,10 +91,9 @@ def run( if webcam: view_img = check_imshow() dataset = LoadStreams(source, img_size=imgsz, transforms=classify_transforms(imgsz[0]), vid_stride=vid_stride) - bs = len(dataset) # batch_size else: dataset = LoadImages(source, img_size=imgsz, transforms=classify_transforms(imgsz[0]), vid_stride=vid_stride) - bs = 1 # batch_size + bs = len(dataset) # batch_size vid_path, vid_writer = [None] * bs, [None] * bs # Run inference diff --git a/detect.py b/detect.py index 310d169281bf..4015b9ae0d7f 100644 --- a/detect.py +++ b/detect.py @@ -99,10 +99,9 @@ def run( if webcam: view_img = check_imshow() dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) - bs = len(dataset) # batch_size else: dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) - bs = 1 # batch_size + bs = len(dataset) # batch_size vid_path, vid_writer = [None] * bs, [None] * bs # Run inference diff --git a/segment/predict.py b/segment/predict.py index ba4cf2905255..2ea6bd9327e0 100644 --- a/segment/predict.py +++ b/segment/predict.py @@ -101,10 +101,9 @@ def run( if webcam: view_img = check_imshow() dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) - bs = len(dataset) # batch_size else: dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) - bs = 1 # batch_size + bs = len(dataset) # batch_size vid_path, vid_writer = [None] * bs, [None] * bs # Run inference From 30fa9b610a3a6d9dc6a9e5961388710e5af0b704 Mon Sep 17 00:00:00 2001 From: zombob <2613669+zombob@users.noreply.github.com> Date: Fri, 23 Sep 2022 05:58:14 +0800 Subject: [PATCH 1467/1976] Add `--source screen` for screenshot inference (#9542) * add screenshot as source * fix: screen number support * Fix: mutiple screen specific area * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * parse screen args in LoadScreenshots * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * sequence+ '_' as file name for save-txt save-crop * screenshot as stream * Update requirements.txt Signed-off-by: Glenn Jocher * Update dataloaders.py Signed-off-by: Glenn Jocher * Update dataloaders.py Signed-off-by: Glenn Jocher * Update detect.py Signed-off-by: Glenn Jocher * Update detect.py Signed-off-by: Glenn Jocher * Update detect.py Signed-off-by: Glenn Jocher * Update dataloaders.py Signed-off-by: Glenn Jocher * Update detect.py Signed-off-by: Glenn Jocher * Update detect.py Signed-off-by: Glenn Jocher * Update predict.py Signed-off-by: Glenn Jocher * Update detect.py Signed-off-by: Glenn Jocher * Update predict.py Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: xin Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- README.md 
| 1 + classify/predict.py | 9 +++++--- detect.py | 9 +++++--- requirements.txt | 1 + segment/predict.py | 9 +++++--- tutorial.ipynb | 1 + utils/dataloaders.py | 49 ++++++++++++++++++++++++++++++++++++++++++++ 7 files changed, 70 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index da8bf1dad862..1d43111d56e7 100644 --- a/README.md +++ b/README.md @@ -107,6 +107,7 @@ the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) and python detect.py --source 0 # webcam img.jpg # image vid.mp4 # video + screen # screenshot path/ # directory 'path/*.jpg' # glob 'https://youtu.be/Zgi9g1ksQHc' # YouTube diff --git a/classify/predict.py b/classify/predict.py index ef59ff6f550a..011e7b83f09b 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -42,7 +42,7 @@ from models.common import DetectMultiBackend from utils.augmentations import classify_transforms -from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams +from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, increment_path, print_args, strip_optimizer) from utils.plots import Annotator @@ -52,7 +52,7 @@ @smart_inference_mode() def run( weights=ROOT / 'yolov5s-cls.pt', # model.pt path(s) - source=ROOT / 'data/images', # file/dir/URL/glob, 0 for webcam + source=ROOT / 'data/images', # file/dir/URL/glob/screen/0(webcam) data=ROOT / 'data/coco128.yaml', # dataset.yaml path imgsz=(224, 224), # inference size (height, width) device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu @@ -74,6 +74,7 @@ def run( is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file) + screenshot = source.lower().startswith('screen') if is_url and is_file: source = check_file(source) # download @@ -91,6 +92,8 @@ def run( if webcam: view_img = check_imshow() dataset = LoadStreams(source, img_size=imgsz, transforms=classify_transforms(imgsz[0]), vid_stride=vid_stride) + elif screenshot: + dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt) else: dataset = LoadImages(source, img_size=imgsz, transforms=classify_transforms(imgsz[0]), vid_stride=vid_stride) bs = len(dataset) # batch_size @@ -187,7 +190,7 @@ def run( def parse_opt(): parser = argparse.ArgumentParser() parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-cls.pt', help='model path(s)') - parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob, 0 for webcam') + parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob/screen/0(webcam)') parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path') parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[224], help='inference size h,w') parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') diff --git a/detect.py b/detect.py index 4015b9ae0d7f..9036b26263e5 100644 --- a/detect.py +++ b/detect.py @@ -40,7 +40,7 @@ ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative from models.common import DetectMultiBackend -from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams +from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, increment_path, non_max_suppression, print_args, scale_coords, strip_optimizer, xyxy2xywh) from utils.plots import Annotator, colors, save_one_box @@ -50,7 +50,7 @@ @smart_inference_mode() def run( weights=ROOT / 'yolov5s.pt', # model.pt path(s) - source=ROOT / 'data/images', # file/dir/URL/glob, 0 for webcam + source=ROOT / 'data/images', # file/dir/URL/glob/screen/0(webcam) data=ROOT / 'data/coco128.yaml', # dataset.yaml path imgsz=(640, 640), # inference size (height, width) conf_thres=0.25, # confidence threshold @@ -82,6 +82,7 @@ def run( is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file) + screenshot = source.lower().startswith('screen') if is_url and is_file: source = check_file(source) # download @@ -99,6 +100,8 @@ def run( if webcam: view_img = check_imshow() dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) + elif screenshot: + dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt) else: dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) bs = len(dataset) # batch_size @@ -212,7 +215,7 @@ def run( def parse_opt(): parser = argparse.ArgumentParser() parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path(s)') - parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob, 0 for webcam') + parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob/screen/0(webcam)') parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path') parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w') parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold') diff --git a/requirements.txt b/requirements.txt index 55c1f2428e3f..914da54e73fc 100644 --- a/requirements.txt +++ b/requirements.txt @@ -38,6 +38,7 @@ seaborn>=0.11.0 ipython # interactive notebook psutil # system utilization thop>=0.1.1 # FLOPs computation +# mss # screenshots # albumentations>=1.0.3 # pycocotools>=2.0 # COCO mAP # roboflow diff --git a/segment/predict.py b/segment/predict.py index 2ea6bd9327e0..43cebc706371 100644 --- a/segment/predict.py +++ b/segment/predict.py @@ -40,7 +40,7 @@ ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative from models.common import DetectMultiBackend -from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams +from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, increment_path, non_max_suppression, print_args, scale_coords, strip_optimizer, 
xyxy2xywh) from utils.plots import Annotator, colors, save_one_box @@ -51,7 +51,7 @@ @smart_inference_mode() def run( weights=ROOT / 'yolov5s-seg.pt', # model.pt path(s) - source=ROOT / 'data/images', # file/dir/URL/glob, 0 for webcam + source=ROOT / 'data/images', # file/dir/URL/glob/screen/0(webcam) data=ROOT / 'data/coco128.yaml', # dataset.yaml path imgsz=(640, 640), # inference size (height, width) conf_thres=0.25, # confidence threshold @@ -84,6 +84,7 @@ def run( is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file) + screenshot = source.lower().startswith('screen') if is_url and is_file: source = check_file(source) # download @@ -101,6 +102,8 @@ def run( if webcam: view_img = check_imshow() dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) + elif screenshot: + dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt) else: dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) bs = len(dataset) # batch_size @@ -222,7 +225,7 @@ def run( def parse_opt(): parser = argparse.ArgumentParser() parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-seg.pt', help='model path(s)') - parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob, 0 for webcam') + parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob/screen/0(webcam)') parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path') parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w') parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold') diff --git a/tutorial.ipynb b/tutorial.ipynb index 957437b2be6d..f87cccd99df8 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -445,6 +445,7 @@ "python detect.py --source 0 # webcam\n", " img.jpg # image \n", " vid.mp4 # video\n", + " screen # screenshot\n", " path/ # directory\n", " 'path/*.jpg' # glob\n", " 'https://youtu.be/Zgi9g1ksQHc' # YouTube\n", diff --git a/utils/dataloaders.py b/utils/dataloaders.py index ee79bd0bc5a5..7aee0b891161 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -185,6 +185,55 @@ def __iter__(self): yield from iter(self.sampler) +class LoadScreenshots: + # YOLOv5 screenshot dataloader, i.e. 
`python detect.py --source "screen 0 100 100 512 256"` + def __init__(self, source, img_size=640, stride=32, auto=True, transforms=None): + # source = [screen_number left top width height] (pixels) + check_requirements('mss') + import mss + + source, *params = source.split() + self.screen, left, top, width, height = 0, None, None, None, None # default to full screen 0 + if len(params) == 1: + self.screen = int(params[0]) + elif len(params) == 4: + left, top, width, height = (int(x) for x in params) + elif len(params) == 5: + self.screen, left, top, width, height = (int(x) for x in params) + self.img_size = img_size + self.stride = stride + self.transforms = transforms + self.auto = auto + self.mode = 'stream' + self.frame = 0 + self.sct = mss.mss() + + # Parse monitor shape + monitor = self.sct.monitors[self.screen] + self.top = monitor["top"] if top is None else (monitor["top"] + top) + self.left = monitor["left"] if left is None else (monitor["left"] + left) + self.width = width or monitor["width"] + self.height = height or monitor["height"] + self.monitor = {"left": self.left, "top": self.top, "width": self.width, "height": self.height} + + def __iter__(self): + return self + + def __next__(self): + # mss screen capture: get raw pixels from the screen as np array + im0 = np.array(self.sct.grab(self.monitor))[:, :, :3] # [:, :, :3] BGRA to BGR + s = f"screen {self.screen} (LTWH): {self.left},{self.top},{self.width},{self.height}: " + + if self.transforms: + im = self.transforms(im0) # transforms + else: + im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize + im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB + im = np.ascontiguousarray(im) # contiguous + self.frame += 1 + return str(self.screen), im, im0, None, s # screen, img, original img, im0s, s + + class LoadImages: # YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4` def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None, vid_stride=1): From 1320ce183e3997c4e3a7bf23c22b9edb222519a4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 23 Sep 2022 23:20:19 +0200 Subject: [PATCH 1468/1976] Update `is_url()` (#9566) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/downloads.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/utils/downloads.py b/utils/downloads.py index dd2698f995a4..bd495068522d 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -16,13 +16,13 @@ import torch -def is_url(url, check_online=True): - # Check if online file exists +def is_url(url, check_exists=True): + # Check if string is URL and check if URL exists try: url = str(url) result = urllib.parse.urlparse(url) assert all([result.scheme, result.netloc, result.path]) # check if is url - return (urllib.request.urlopen(url).getcode() == 200) if check_online else True # check if exists online + return (urllib.request.urlopen(url).getcode() == 200) if check_exists else True # check if exists online except (AssertionError, urllib.request.HTTPError): return False From d669a74623f273f74213a88b5233964d1ab3ea08 Mon Sep 17 00:00:00 2001 From: Gaz Iqbal Date: Fri, 23 Sep 2022 15:56:42 -0700 Subject: [PATCH 1469/1976] Detect.py supports running against a Triton container (#9228) * update coco128-seg comments * Enables detect.py to use Triton for inference Triton Inference Server is an open source inference serving software that streamlines AI inferencing. 
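A rough usage sketch of the wrapper this patch adds, assuming tritonclient is installed and a Triton server (project linked just below) is already serving a single model on localhost; the URL and input shape here are illustrative:

import torch
from utils.triton import TritonRemoteModel

model = TritonRemoteModel(url='http://localhost:8000')  # or grpc://localhost:8001
im = torch.zeros(1, 3, 640, 640)  # dummy BCHW image batch
y = model(im)  # tensors are serialized to the server and returned as torch Tensors
print(model.runtime)  # backend/platform reported by the server's model metadata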
https://github.com/triton-inference-server/server The user can now provide a "--triton-url" argument to detect.py to use a local or remote Triton server for inference. For e.g., http://localhost:8000 will use http over port 8000 and grpc://localhost:8001 will use grpc over port 8001. Note, it is not necessary to specify a weights file to use Triton. A Triton container can be created by first exporting the Yolov5 model to a Triton supported runtime. Onnx, Torchscript, TensorRT are supported by both Triton and the export.py script. The exported model can then be containerized via the OctoML CLI. See https://github.com/octoml/octo-cli#getting-started for a guide. * added triton client to requirements * fixed support for TFSavedModels in Triton * reverted change * Test CoreML update Signed-off-by: Glenn Jocher * Update ci-testing.yml Signed-off-by: Glenn Jocher * Use pathlib Signed-off-by: Glenn Jocher * Refacto DetectMultiBackend to directly accept triton url as --weights http://... Signed-off-by: Glenn Jocher * Deploy category Signed-off-by: Glenn Jocher * Update detect.py Signed-off-by: Glenn Jocher * Update common.py Signed-off-by: Glenn Jocher * Update common.py Signed-off-by: Glenn Jocher * Update predict.py Signed-off-by: Glenn Jocher * Update predict.py Signed-off-by: Glenn Jocher * Update predict.py Signed-off-by: Glenn Jocher * Update triton.py Signed-off-by: Glenn Jocher * Update triton.py Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add printout and requirements check * Cleanup Signed-off-by: Glenn Jocher * triton fixes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fixed triton model query over grpc * Update check_requirements('tritonclient[all]') * group imports * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix likely remote URL bug * update comment * Update is_url() * Fix 2x download attempt on http://path/to/model.pt Signed-off-by: Glenn Jocher Co-authored-by: glennjocher Co-authored-by: Gaz Iqbal Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- classify/predict.py | 2 +- detect.py | 8 ++--- models/common.py | 44 +++++++++++++++-------- requirements.txt | 3 ++ segment/predict.py | 2 +- utils/downloads.py | 4 +-- utils/triton.py | 85 +++++++++++++++++++++++++++++++++++++++++++++ 7 files changed, 126 insertions(+), 22 deletions(-) create mode 100644 utils/triton.py diff --git a/classify/predict.py b/classify/predict.py index 011e7b83f09b..d3bec8eea7ba 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -104,7 +104,7 @@ def run( seen, windows, dt = 0, [], (Profile(), Profile(), Profile()) for path, im, im0s, vid_cap, s in dataset: with dt[0]: - im = torch.Tensor(im).to(device) + im = torch.Tensor(im).to(model.device) im = im.half() if model.fp16 else im.float() # uint8 to fp16/32 if len(im.shape) == 3: im = im[None] # expand for batch dim diff --git a/detect.py b/detect.py index 9036b26263e5..e442ed75f4c7 100644 --- a/detect.py +++ b/detect.py @@ -49,7 +49,7 @@ @smart_inference_mode() def run( - weights=ROOT / 'yolov5s.pt', # model.pt path(s) + weights=ROOT / 'yolov5s.pt', # model path or triton URL source=ROOT / 'data/images', # file/dir/URL/glob/screen/0(webcam) data=ROOT / 'data/coco128.yaml', # dataset.yaml path imgsz=(640, 640), # inference size (height, width) @@ -108,11 +108,11 @@ def run( vid_path, vid_writer = [None] 
* bs, [None] * bs # Run inference - model.warmup(imgsz=(1 if pt else bs, 3, *imgsz)) # warmup + model.warmup(imgsz=(1 if pt or model.triton else bs, 3, *imgsz)) # warmup seen, windows, dt = 0, [], (Profile(), Profile(), Profile()) for path, im, im0s, vid_cap, s in dataset: with dt[0]: - im = torch.from_numpy(im).to(device) + im = torch.from_numpy(im).to(model.device) im = im.half() if model.fp16 else im.float() # uint8 to fp16/32 im /= 255 # 0 - 255 to 0.0 - 1.0 if len(im.shape) == 3: @@ -214,7 +214,7 @@ def run( def parse_opt(): parser = argparse.ArgumentParser() - parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path(s)') + parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path or triton URL') parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob/screen/0(webcam)') parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path') parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w') diff --git a/models/common.py b/models/common.py index fac95a82fdb9..177704849d3d 100644 --- a/models/common.py +++ b/models/common.py @@ -10,6 +10,7 @@ from collections import OrderedDict, namedtuple from copy import copy from pathlib import Path +from urllib.parse import urlparse import cv2 import numpy as np @@ -327,11 +328,13 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, super().__init__() w = str(weights[0] if isinstance(weights, list) else weights) - pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle = self._model_type(w) # type - w = attempt_download(w) # download if not local + pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, triton = self._model_type(w) fp16 &= pt or jit or onnx or engine # FP16 + nhwc = coreml or saved_model or pb or tflite or edgetpu # BHWC formats (vs torch BCWH) stride = 32 # default stride cuda = torch.cuda.is_available() and device.type != 'cpu' # use CUDA + if not (pt or triton): + w = attempt_download(w) # download if not local if pt: # PyTorch model = attempt_load(weights if isinstance(weights, list) else w, device=device, inplace=True, fuse=fuse) @@ -342,7 +345,7 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, elif jit: # TorchScript LOGGER.info(f'Loading {w} for TorchScript inference...') extra_files = {'config.txt': ''} # model metadata - model = torch.jit.load(w, _extra_files=extra_files) + model = torch.jit.load(w, _extra_files=extra_files, map_location=device) model.half() if fp16 else model.float() if extra_files['config.txt']: # load metadata dict d = json.loads(extra_files['config.txt'], @@ -472,6 +475,12 @@ def gd_outputs(gd): predictor = pdi.create_predictor(config) input_handle = predictor.get_input_handle(predictor.get_input_names()[0]) output_names = predictor.get_output_names() + elif triton: # NVIDIA Triton Inference Server + LOGGER.info(f'Using {w} as Triton Inference Server...') + check_requirements('tritonclient[all]') + from utils.triton import TritonRemoteModel + model = TritonRemoteModel(url=w) + nhwc = model.runtime.startswith("tensorflow") else: raise NotImplementedError(f'ERROR: {w} is not a supported format') @@ -488,6 +497,8 @@ def forward(self, im, augment=False, visualize=False): b, ch, h, w = im.shape # batch, channel, height, width if self.fp16 and im.dtype != 
torch.float16: im = im.half() # to FP16 + if self.nhwc: + im = im.permute(0, 2, 3, 1) # torch BCHW to numpy BHWC shape(1,320,192,3) if self.pt: # PyTorch y = self.model(im, augment=augment, visualize=visualize) if augment or visualize else self.model(im) @@ -517,7 +528,7 @@ def forward(self, im, augment=False, visualize=False): self.context.execute_v2(list(self.binding_addrs.values())) y = [self.bindings[x].data for x in sorted(self.output_names)] elif self.coreml: # CoreML - im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3) + im = im.cpu().numpy() im = Image.fromarray((im[0] * 255).astype('uint8')) # im = im.resize((192, 320), Image.ANTIALIAS) y = self.model.predict({'image': im}) # coordinates are xywh normalized @@ -532,8 +543,10 @@ def forward(self, im, augment=False, visualize=False): self.input_handle.copy_from_cpu(im) self.predictor.run() y = [self.predictor.get_output_handle(x).copy_to_cpu() for x in self.output_names] + elif self.triton: # NVIDIA Triton Inference Server + y = self.model(im) else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU) - im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3) + im = im.cpu().numpy() if self.saved_model: # SavedModel y = self.model(im, training=False) if self.keras else self.model(im) elif self.pb: # GraphDef @@ -566,8 +579,8 @@ def from_numpy(self, x): def warmup(self, imgsz=(1, 3, 640, 640)): # Warmup model by running inference once - warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb - if any(warmup_types) and self.device.type != 'cpu': + warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb, self.triton + if any(warmup_types) and (self.device.type != 'cpu' or self.triton): im = torch.empty(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device) # input for _ in range(2 if self.jit else 1): # self.forward(im) # warmup @@ -575,14 +588,17 @@ def warmup(self, imgsz=(1, 3, 640, 640)): @staticmethod def _model_type(p='path/to/model.pt'): # Return model type from model path, i.e. 
path='path/to/model.onnx' -> type=onnx + # types = [pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle] from export import export_formats - sf = list(export_formats().Suffix) + ['.xml'] # export suffixes - check_suffix(p, sf) # checks - p = Path(p).name # eliminate trailing separators - pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, xml2 = (s in p for s in sf) - xml |= xml2 # *_openvino_model or *.xml - tflite &= not edgetpu # *.tflite - return pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle + from utils.downloads import is_url + sf = list(export_formats().Suffix) # export suffixes + if not is_url(p, check=False): + check_suffix(p, sf) # checks + url = urlparse(p) # if url may be Triton inference server + types = [s in Path(p).name for s in sf] + types[8] &= not types[9] # tflite &= not edgetpu + triton = not any(types) and all([any(s in url.scheme for s in ["http", "grpc"]), url.netloc]) + return types + [triton] @staticmethod def _load_metadata(f=Path('path/to/meta.yaml')): diff --git a/requirements.txt b/requirements.txt index 914da54e73fc..4d6ec3509efa 100644 --- a/requirements.txt +++ b/requirements.txt @@ -34,6 +34,9 @@ seaborn>=0.11.0 # tensorflowjs>=3.9.0 # TF.js export # openvino-dev # OpenVINO export +# Deploy -------------------------------------- +# tritonclient[all]~=2.24.0 + # Extras -------------------------------------- ipython # interactive notebook psutil # system utilization diff --git a/segment/predict.py b/segment/predict.py index 43cebc706371..2e794c342de1 100644 --- a/segment/predict.py +++ b/segment/predict.py @@ -114,7 +114,7 @@ def run( seen, windows, dt = 0, [], (Profile(), Profile(), Profile()) for path, im, im0s, vid_cap, s in dataset: with dt[0]: - im = torch.from_numpy(im).to(device) + im = torch.from_numpy(im).to(model.device) im = im.half() if model.fp16 else im.float() # uint8 to fp16/32 im /= 255 # 0 - 255 to 0.0 - 1.0 if len(im.shape) == 3: diff --git a/utils/downloads.py b/utils/downloads.py index bd495068522d..433de84b51ca 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -16,13 +16,13 @@ import torch -def is_url(url, check_exists=True): +def is_url(url, check=True): # Check if string is URL and check if URL exists try: url = str(url) result = urllib.parse.urlparse(url) assert all([result.scheme, result.netloc, result.path]) # check if is url - return (urllib.request.urlopen(url).getcode() == 200) if check_exists else True # check if exists online + return (urllib.request.urlopen(url).getcode() == 200) if check else True # check if exists online except (AssertionError, urllib.request.HTTPError): return False diff --git a/utils/triton.py b/utils/triton.py new file mode 100644 index 000000000000..a94ef0ad197d --- /dev/null +++ b/utils/triton.py @@ -0,0 +1,85 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" Utils to interact with the Triton Inference Server +""" + +import typing +from urllib.parse import urlparse + +import torch + + +class TritonRemoteModel: + """ A wrapper over a model served by the Triton Inference Server. It can + be configured to communicate over GRPC or HTTP. It accepts Torch Tensors + as input and returns them as outputs. + """ + + def __init__(self, url: str): + """ + Keyword arguments: + url: Fully qualified address of the Triton server - for e.g. 
grpc://localhost:8000 + """ + + parsed_url = urlparse(url) + if parsed_url.scheme == "grpc": + from tritonclient.grpc import InferenceServerClient, InferInput + + self.client = InferenceServerClient(parsed_url.netloc) # Triton GRPC client + model_repository = self.client.get_model_repository_index() + self.model_name = model_repository.models[0].name + self.metadata = self.client.get_model_metadata(self.model_name, as_json=True) + + def create_input_placeholders() -> typing.List[InferInput]: + return [ + InferInput(i['name'], [int(s) for s in i["shape"]], i['datatype']) for i in self.metadata['inputs']] + + else: + from tritonclient.http import InferenceServerClient, InferInput + + self.client = InferenceServerClient(parsed_url.netloc) # Triton HTTP client + model_repository = self.client.get_model_repository_index() + self.model_name = model_repository[0]['name'] + self.metadata = self.client.get_model_metadata(self.model_name) + + def create_input_placeholders() -> typing.List[InferInput]: + return [ + InferInput(i['name'], [int(s) for s in i["shape"]], i['datatype']) for i in self.metadata['inputs']] + + self._create_input_placeholders_fn = create_input_placeholders + + @property + def runtime(self): + """Returns the model runtime""" + return self.metadata.get("backend", self.metadata.get("platform")) + + def __call__(self, *args, **kwargs) -> typing.Union[torch.Tensor, typing.Tuple[torch.Tensor, ...]]: + """ Invokes the model. Parameters can be provided via args or kwargs. + args, if provided, are assumed to match the order of inputs of the model. + kwargs are matched with the model input names. + """ + inputs = self._create_inputs(*args, **kwargs) + response = self.client.infer(model_name=self.model_name, inputs=inputs) + result = [] + for output in self.metadata['outputs']: + tensor = torch.as_tensor(response.as_numpy(output['name'])) + result.append(tensor) + return result[0] if len(result) == 1 else result + + def _create_inputs(self, *args, **kwargs): + args_len, kwargs_len = len(args), len(kwargs) + if not args_len and not kwargs_len: + raise RuntimeError("No inputs provided.") + if args_len and kwargs_len: + raise RuntimeError("Cannot specify args and kwargs at the same time") + + placeholders = self._create_input_placeholders_fn() + if args_len: + if args_len != len(placeholders): + raise RuntimeError(f"Expected {len(placeholders)} inputs, got {args_len}.") + for input, value in zip(placeholders, args): + input.set_data_from_numpy(value.cpu().numpy()) + else: + for input in placeholders: + value = kwargs[input.name] + input.set_data_from_numpy(value.cpu().numpy()) + return placeholders From c8e52304cf5c34653570c5c3953ba061bc33c1af Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 24 Sep 2022 16:02:41 +0200 Subject: [PATCH 1470/1976] New `scale_segments()` function (#9570) * Rename scale_coords to scale_boxes * add scale_segments --- detect.py | 4 +-- models/common.py | 4 +-- segment/predict.py | 4 +-- segment/val.py | 6 ++--- utils/general.py | 46 ++++++++++++++++++++++++++------- utils/loggers/comet/__init__.py | 8 +++--- utils/plots.py | 4 +-- val.py | 6 ++--- 8 files changed, 54 insertions(+), 28 deletions(-) diff --git a/detect.py b/detect.py index e442ed75f4c7..4971033b35fb 100644 --- a/detect.py +++ b/detect.py @@ -42,7 +42,7 @@ from models.common import DetectMultiBackend from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, 
From c8e52304cf5c34653570c5c3953ba061bc33c1af Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Sat, 24 Sep 2022 16:02:41 +0200
Subject: [PATCH 1470/1976] New `scale_segments()` function (#9570)

* Rename scale_coords to scale_boxes

* add scale_segments
---
 detect.py                       |  4 +--
 models/common.py                |  4 +--
 segment/predict.py              |  4 +--
 segment/val.py                  |  6 ++---
 utils/general.py                | 46 ++++++++++++++++++++++++++-------
 utils/loggers/comet/__init__.py |  8 +++---
 utils/plots.py                  |  4 +--
 val.py                          |  6 ++---
 8 files changed, 54 insertions(+), 28 deletions(-)

diff --git a/detect.py b/detect.py
index e442ed75f4c7..4971033b35fb 100644
--- a/detect.py
+++ b/detect.py
@@ -42,7 +42,7 @@
 from models.common import DetectMultiBackend
 from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
 from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
-                           increment_path, non_max_suppression, print_args, scale_coords, strip_optimizer, xyxy2xywh)
+                           increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer, xyxy2xywh)
 from utils.plots import Annotator, colors, save_one_box
 from utils.torch_utils import select_device, smart_inference_mode
 
@@ -148,7 +148,7 @@ def run(
             annotator = Annotator(im0, line_width=line_thickness, example=str(names))
             if len(det):
                 # Rescale boxes from img_size to im0 size
-                det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round()
+                det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round()
 
                 # Print results
                 for c in det[:, 5].unique():
diff --git a/models/common.py b/models/common.py
index 177704849d3d..273e73d9e729 100644
--- a/models/common.py
+++ b/models/common.py
@@ -23,7 +23,7 @@
 from utils.dataloaders import exif_transpose, letterbox
 from utils.general import (LOGGER, ROOT, Profile, check_requirements, check_suffix, check_version, colorstr,
-                           increment_path, make_divisible, non_max_suppression, scale_coords, xywh2xyxy, xyxy2xywh,
+                           increment_path, make_divisible, non_max_suppression, scale_boxes, xywh2xyxy, xyxy2xywh,
                            yaml_load)
 from utils.plots import Annotator, colors, save_one_box
 from utils.torch_utils import copy_attr, smart_inference_mode
@@ -703,7 +703,7 @@ def forward(self, ims, size=640, augment=False, profile=False):
                                                 self.multi_label,
                                                 max_det=self.max_det)  # NMS
             for i in range(n):
-                scale_coords(shape1, y[i][:, :4], shape0[i])
+                scale_boxes(shape1, y[i][:, :4], shape0[i])
 
         return Detections(ims, y, files, dt, self.names, x.shape)
 
diff --git a/segment/predict.py b/segment/predict.py
index 2e794c342de1..2241204715b5 100644
--- a/segment/predict.py
+++ b/segment/predict.py
@@ -42,7 +42,7 @@
 from models.common import DetectMultiBackend
 from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
 from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
-                           increment_path, non_max_suppression, print_args, scale_coords, strip_optimizer, xyxy2xywh)
+                           increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer, xyxy2xywh)
 from utils.plots import Annotator, colors, save_one_box
 from utils.segment.general import process_mask
 from utils.torch_utils import select_device, smart_inference_mode
@@ -152,7 +152,7 @@ def run(
                 masks = process_mask(proto[i], det[:, 6:], det[:, :4], im.shape[2:], upsample=True)  # HWC
 
                 # Rescale boxes from img_size to im0 size
-                det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round()
+                det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round()
 
                 # Print results
                 for c in det[:, 5].unique():
diff --git a/segment/val.py b/segment/val.py
index 59ab76672a30..0a37998c1771 100644
--- a/segment/val.py
+++ b/segment/val.py
@@ -44,7 +44,7 @@
 from utils.callbacks import Callbacks
 from utils.general import (LOGGER, NUM_THREADS, Profile, check_dataset, check_img_size, check_requirements,
                            check_yaml, coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, print_args,
-                           scale_coords, xywh2xyxy, xyxy2xywh)
+                           scale_boxes, xywh2xyxy, xyxy2xywh)
 from utils.metrics import ConfusionMatrix, box_iou
 from utils.plots import output_to_target, plot_val_study
 from utils.segment.dataloaders import create_dataloader
@@ -298,12 +298,12 @@ def run(
             if single_cls:
                 pred[:, 5] = 0
             predn = pred.clone()
-            scale_coords(im[si].shape[1:], predn[:, :4], shape, shapes[si][1])  # native-space pred
+            scale_boxes(im[si].shape[1:], predn[:, :4], shape, shapes[si][1])  # native-space pred
 
             # Evaluate
             if nl:
                 tbox = xywh2xyxy(labels[:, 1:5])  # target boxes
-                scale_coords(im[si].shape[1:], tbox, shape, shapes[si][1])  # native-space labels
+                scale_boxes(im[si].shape[1:], tbox, shape, shapes[si][1])  # native-space labels
                 labelsn = torch.cat((labels[:, 0:1], tbox), 1)  # native-space labels
                 correct_bboxes = process_batch(predn, labelsn, iouv)
                 correct_masks = process_batch(predn, labelsn, iouv, pred_masks, gt_masks, overlap=overlap, masks=True)
diff --git a/utils/general.py b/utils/general.py
index fd0b4090a0fa..87e7e20df1ab 100644
--- a/utils/general.py
+++ b/utils/general.py
@@ -725,7 +725,7 @@ def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):
 def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0):
     # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right
     if clip:
-        clip_coords(x, (h - eps, w - eps))  # warning: inplace clip
+        clip_boxes(x, (h - eps, w - eps))  # warning: inplace clip
     y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
     y[:, 0] = ((x[:, 0] + x[:, 2]) / 2) / w  # x center
     y[:, 1] = ((x[:, 1] + x[:, 3]) / 2) / h  # y center
@@ -769,7 +769,23 @@ def resample_segments(segments, n=1000):
     return segments
 
 
-def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
+def scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None):
+    # Rescale boxes (xyxy) from img1_shape to img0_shape
+    if ratio_pad is None:  # calculate from img0_shape
+        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = old / new
+        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
+    else:
+        gain = ratio_pad[0][0]
+        pad = ratio_pad[1]
+
+    boxes[:, [0, 2]] -= pad[0]  # x padding
+    boxes[:, [1, 3]] -= pad[1]  # y padding
+    boxes[:, :4] /= gain
+    clip_boxes(boxes, img0_shape)
+    return boxes
+
+
+def scale_segments(img1_shape, segments, img0_shape, ratio_pad=None):
     # Rescale coords (xyxy) from img1_shape to img0_shape
     if ratio_pad is None:  # calculate from img0_shape
         gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = old / new
@@ -778,15 +794,15 @@ def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
         gain = ratio_pad[0][0]
         pad = ratio_pad[1]
 
-    coords[:, [0, 2]] -= pad[0]  # x padding
-    coords[:, [1, 3]] -= pad[1]  # y padding
-    coords[:, :4] /= gain
-    clip_coords(coords, img0_shape)
-    return coords
+    segments[:, 0] -= pad[0]  # x padding
+    segments[:, 1] -= pad[1]  # y padding
+    segments /= gain
+    clip_segments(segments, img0_shape)
+    return segments
 
 
-def clip_coords(boxes, shape):
-    # Clip bounding xyxy bounding boxes to image shape (height, width)
+def clip_boxes(boxes, shape):
+    # Clip boxes (xyxy) to image shape (height, width)
     if isinstance(boxes, torch.Tensor):  # faster individually
         boxes[:, 0].clamp_(0, shape[1])  # x1
         boxes[:, 1].clamp_(0, shape[0])  # y1
@@ -797,6 +813,16 @@ def clip_coords(boxes, shape):
         boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, shape[0])  # y1, y2
 
 
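
As a quick numeric check of the letterbox math in scale_boxes() above, just before the new clip_segments() addition below (shapes chosen only for illustration): a 480x640 (h, w) original letterboxed to 640x640 gives gain 1.0 and 80 px of top/bottom padding, so only the y-coordinates shift:

import numpy as np

img1_shape, img0_shape = (640, 640), (480, 640)  # letterboxed (h, w), original (h, w)
gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # 1.0, width-limited
pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # (0.0, 80.0)

boxes = np.array([[100., 180., 200., 280.]])  # xyxy in letterboxed space
boxes[:, [0, 2]] -= pad[0]  # remove x padding
boxes[:, [1, 3]] -= pad[1]  # remove y padding
boxes[:, :4] /= gain
print(boxes)  # [[100. 100. 200. 200.]], back in 480x640 space
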
+def clip_segments(boxes, shape):
+    # Clip segments (xy1,xy2,...) to image shape (height, width)
+    if isinstance(boxes, torch.Tensor):  # faster individually
+        boxes[:, 0].clamp_(0, shape[1])  # x
+        boxes[:, 1].clamp_(0, shape[0])  # y
+    else:  # np.array (faster grouped)
+        boxes[:, 0] = boxes[:, 0].clip(0, shape[1])  # x
+        boxes[:, 1] = boxes[:, 1].clip(0, shape[0])  # y
+
+
 def non_max_suppression(
         prediction,
         conf_thres=0.25,
@@ -980,7 +1006,7 @@ def apply_classifier(x, model, img, im0):
             d[:, :4] = xywh2xyxy(b).long()
 
             # Rescale boxes from img_size to im0 size
-            scale_coords(img.shape[2:], d[:, :4], im0[i].shape)
+            scale_boxes(img.shape[2:], d[:, :4], im0[i].shape)
 
             # Classes
             pred_cls1 = d[:, 5].long()
diff --git a/utils/loggers/comet/__init__.py b/utils/loggers/comet/__init__.py
index 3b3142b002c5..ba5cecc8e096 100644
--- a/utils/loggers/comet/__init__.py
+++ b/utils/loggers/comet/__init__.py
@@ -28,7 +28,7 @@
 import yaml
 
 from utils.dataloaders import img2label_paths
-from utils.general import check_dataset, scale_coords, xywh2xyxy
+from utils.general import check_dataset, scale_boxes, xywh2xyxy
 from utils.metrics import box_iou
 
 COMET_PREFIX = "comet://"
@@ -293,14 +293,14 @@ def preprocess_prediction(self, image, labels, shape, pred):
             pred[:, 5] = 0
 
         predn = pred.clone()
-        scale_coords(image.shape[1:], predn[:, :4], shape[0], shape[1])
+        scale_boxes(image.shape[1:], predn[:, :4], shape[0], shape[1])
 
         labelsn = None
         if nl:
             tbox = xywh2xyxy(labels[:, 1:5])  # target boxes
-            scale_coords(image.shape[1:], tbox, shape[0], shape[1])  # native-space labels
+            scale_boxes(image.shape[1:], tbox, shape[0], shape[1])  # native-space labels
             labelsn = torch.cat((labels[:, 0:1], tbox), 1)  # native-space labels
-            scale_coords(image.shape[1:], predn[:, :4], shape[0], shape[1])  # native-space pred
+            scale_boxes(image.shape[1:], predn[:, :4], shape[0], shape[1])  # native-space pred
 
         return predn, labelsn
 
diff --git a/utils/plots.py b/utils/plots.py
index 51bb7d6c20af..36df271c60e1 100644
--- a/utils/plots.py
+++ b/utils/plots.py
@@ -20,7 +20,7 @@
 from PIL import Image, ImageDraw, ImageFont
 
 from utils import TryExcept, threaded
-from utils.general import (CONFIG_DIR, FONT, LOGGER, check_font, check_requirements, clip_coords, increment_path,
+from utils.general import (CONFIG_DIR, FONT, LOGGER, check_font, check_requirements, clip_boxes, increment_path,
                            is_ascii, xywh2xyxy, xyxy2xywh)
 from utils.metrics import fitness
 from utils.segment.general import scale_image
@@ -565,7 +565,7 @@ def save_one_box(xyxy, im, file=Path('im.jpg'), gain=1.02, pad=10, square=False,
     b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1)  # attempt rectangle to square
     b[:, 2:] = b[:, 2:] * gain + pad  # box wh * gain + pad
     xyxy = xywh2xyxy(b).long()
-    clip_coords(xyxy, im.shape)
+    clip_boxes(xyxy, im.shape)
     crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2]), ::(1 if BGR else -1)]
     if save:
         file.parent.mkdir(parents=True, exist_ok=True)  # make directory
diff --git a/val.py b/val.py
index 3ab4bc3fdb58..c0954498d2fb 100644
--- a/val.py
+++ b/val.py
@@ -40,7 +40,7 @@
 from utils.dataloaders import create_dataloader
 from utils.general import (LOGGER, Profile, check_dataset, check_img_size, check_requirements, check_yaml,
                            coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, print_args,
-                           scale_coords, xywh2xyxy, xyxy2xywh)
+                           scale_boxes, xywh2xyxy, xyxy2xywh)
 from utils.metrics import ConfusionMatrix, ap_per_class, box_iou
 from utils.plots import output_to_target, plot_images, plot_val_study
 from utils.torch_utils import select_device, smart_inference_mode
@@ -244,12 +244,12 @@ def
run( if single_cls: pred[:, 5] = 0 predn = pred.clone() - scale_coords(im[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred + scale_boxes(im[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred # Evaluate if nl: tbox = xywh2xyxy(labels[:, 1:5]) # target boxes - scale_coords(im[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels + scale_boxes(im[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels correct = process_batch(predn, labelsn, iouv) if plots: From f11a8a62d27c2740af5df940973d231fd5fcb038 Mon Sep 17 00:00:00 2001 From: Forever518 <1423429527@qq.com> Date: Sun, 25 Sep 2022 01:35:07 +0800 Subject: [PATCH 1471/1976] generator seed fix for DDP mAP drop (#9545) * Try to fix DDP mAP drop by setting generator's seed to RANK * Fix default activation bug * Update dataloaders.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dataloaders.py Co-authored-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- models/common.py | 4 ++-- models/yolo.py | 2 +- utils/dataloaders.py | 5 +++-- utils/segment/dataloaders.py | 8 +++++--- 4 files changed, 11 insertions(+), 8 deletions(-) diff --git a/models/common.py b/models/common.py index 273e73d9e729..2fe99be8972b 100644 --- a/models/common.py +++ b/models/common.py @@ -40,13 +40,13 @@ def autopad(k, p=None, d=1): # kernel, padding, dilation class Conv(nn.Module): # Standard convolution with args(ch_in, ch_out, kernel, stride, padding, groups, dilation, activation) - act = nn.SiLU() # default activation + default_act = nn.SiLU() # default activation def __init__(self, c1, c2, k=1, s=1, p=None, g=1, d=1, act=True): super().__init__() self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p, d), groups=g, dilation=d, bias=False) self.bn = nn.BatchNorm2d(c2) - self.act = self.act if act is True else act if isinstance(act, nn.Module) else nn.Identity() + self.act = self.default_act if act is True else act if isinstance(act, nn.Module) else nn.Identity() def forward(self, x): return self.act(self.bn(self.conv(x))) diff --git a/models/yolo.py b/models/yolo.py index 1d0da2a6e010..ed21c067ee93 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -301,7 +301,7 @@ def parse_model(d, ch): # model_dict, input_channels(3) LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}") anchors, nc, gd, gw, act = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'], d.get('activation') if act: - Conv.act = eval(act) # redefine default activation, i.e. Conv.act = nn.SiLU() + Conv.default_act = eval(act) # redefine default activation, i.e. 
Conv.default_act = nn.SiLU() LOGGER.info(f"{colorstr('activation:')} {act}") # print na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors no = na * (nc + 5) # number of outputs = anchors * (classes + 5) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 7aee0b891161..6cd1da6b9cf9 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -40,6 +40,7 @@ VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes BAR_FORMAT = '{l_bar}{bar:10}{r_bar}{bar:-10b}' # tqdm bar format LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html +RANK = int(os.getenv('RANK', -1)) PIN_MEMORY = str(os.getenv('PIN_MEMORY', True)).lower() == 'true' # global pin_memory for dataloaders # Get orientation exif tag @@ -139,7 +140,7 @@ def create_dataloader(path, sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates generator = torch.Generator() - generator.manual_seed(0) + generator.manual_seed(6148914691236517205 + RANK) return loader(dataset, batch_size=batch_size, shuffle=shuffle and sampler is None, @@ -1169,7 +1170,7 @@ def create_classification_dataloader(path, nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) generator = torch.Generator() - generator.manual_seed(0) + generator.manual_seed(6148914691236517205 + RANK) return InfiniteDataLoader(dataset, batch_size=batch_size, shuffle=shuffle and sampler is None, diff --git a/utils/segment/dataloaders.py b/utils/segment/dataloaders.py index 97ef8556068e..a63d6ec013fd 100644 --- a/utils/segment/dataloaders.py +++ b/utils/segment/dataloaders.py @@ -17,6 +17,8 @@ from ..torch_utils import torch_distributed_zero_first from .augmentations import mixup, random_perspective +RANK = int(os.getenv('RANK', -1)) + def create_dataloader(path, imgsz, @@ -61,8 +63,8 @@ def create_dataloader(path, nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) # number of workers sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates - # generator = torch.Generator() - # generator.manual_seed(0) + generator = torch.Generator() + generator.manual_seed(6148914691236517205 + RANK) return loader( dataset, batch_size=batch_size, @@ -72,7 +74,7 @@ def create_dataloader(path, pin_memory=True, collate_fn=LoadImagesAndLabelsAndMasks.collate_fn4 if quad else LoadImagesAndLabelsAndMasks.collate_fn, worker_init_fn=seed_worker, - # generator=generator, + generator=generator, ), dataset From 55fbac933bc25b3151082021fa3f10790b3b936a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 25 Sep 2022 02:59:25 +0200 Subject: [PATCH 1472/1976] Update default GitHub assets (#9573) * Update default GitHub assets Signed-off-by: Glenn Jocher * Update downloads.py Signed-off-by: Glenn Jocher * Update downloads.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/downloads.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/utils/downloads.py b/utils/downloads.py index 433de84b51ca..73b8334cb94a 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -87,9 +87,7 @@ def 
github_assets(repository, version='latest'):
         return file
 
     # GitHub assets
-    assets = [
-        'yolov5n.pt', 'yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt', 'yolov5n6.pt', 'yolov5s6.pt',
-        'yolov5m6.pt', 'yolov5l6.pt', 'yolov5x6.pt']
+    assets = [f'yolov5{size}{suffix}.pt' for size in 'nsmlx' for suffix in ('', '6', '-cls', '-seg')]  # default
     try:
         tag, assets = github_assets(repo, release)
     except Exception:
@@ -107,7 +105,6 @@ def github_assets(repository, version='latest'):
         safe_download(
             file,
             url=f'https://github.com/{repo}/releases/download/{tag}/{name}',
-            url2=f'https://storage.googleapis.com/{repo}/{tag}/{name}',  # backup url (optional)
             min_bytes=1E5,
             error_msg=f'{file} missing, try downloading from https://github.com/{repo}/releases/{tag} or {url3}')

From ee91dc9bb32d2dddc46c633b711a778a6c603143 Mon Sep 17 00:00:00 2001
From: "David A. Macey"
Date: Sun, 25 Sep 2022 08:47:16 -0400
Subject: [PATCH 1473/1976] Update requirements.txt comment
 https://pytorch.org/get-started/locally/ (#9576)

* Update Requirements with PyTorch CUDA

Added --extra-index-url https://download.pytorch.org/whl/cu116 URL to requirements file for ease of creating a venv with CUDA-enabled PyTorch. Otherwise CPU PyTorch is installed and unable to use local GPUs.

Signed-off-by: David A. Macey

* Update requirements.txt

Signed-off-by: Glenn Jocher

* Update requirements.txt

Signed-off-by: Glenn Jocher

Signed-off-by: David A. Macey
Signed-off-by: Glenn Jocher
Co-authored-by: Glenn Jocher
---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 4d6ec3509efa..0436f415c642 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -9,7 +9,7 @@ Pillow>=7.1.2
 PyYAML>=5.3.1
 requests>=2.23.0
 scipy>=1.4.1
-torch>=1.7.0
+torch>=1.7.0  # see https://pytorch.org/get-started/locally/ (recommended)
 torchvision>=0.8.1
 tqdm>=4.64.0
 # protobuf<=3.20.1  # https://github.com/ultralytics/yolov5/issues/8012

From 2787ad701fbb308cfb494ae8fb68b0fcea0e4077 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Sun, 25 Sep 2022 14:52:49 +0200
Subject: [PATCH 1474/1976] Add segment line predictions (#9571)

* Add segment line predictions

Signed-off-by: Glenn Jocher

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* Update

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* Update

Signed-off-by: Glenn Jocher
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 segment/predict.py       | 20 ++++++++++++--------
 utils/segment/general.py | 14 ++++++++++++++
 2 files changed, 26 insertions(+), 8 deletions(-)

diff --git a/segment/predict.py b/segment/predict.py
index 2241204715b5..607a8697d731 100644
--- a/segment/predict.py
+++ b/segment/predict.py
@@ -42,9 +42,10 @@
 from models.common import DetectMultiBackend
 from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
 from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
-                           increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer, xyxy2xywh)
+                           increment_path, non_max_suppression, print_args, scale_boxes, scale_segments,
+                           strip_optimizer, xyxy2xywh)
 from utils.plots import Annotator, colors, save_one_box
-from utils.segment.general import process_mask
+from utils.segment.general import masks2segments, process_mask
 from utils.torch_utils import select_device, smart_inference_mode

@@
-145,14 +146,16 @@ def run( save_path = str(save_dir / p.name) # im.jpg txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt s += '%gx%g ' % im.shape[2:] # print string - gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh imc = im0.copy() if save_crop else im0 # for save_crop annotator = Annotator(im0, line_width=line_thickness, example=str(names)) if len(det): masks = process_mask(proto[i], det[:, 6:], det[:, :4], im.shape[2:], upsample=True) # HWC + det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() # rescale boxes to im0 size - # Rescale boxes from img_size to im0 size - det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() + # Segments + if save_txt: + segments = reversed(masks2segments(masks)) + segments = [scale_segments(im.shape[2:], x, im0.shape).round() for x in segments] # Print results for c in det[:, 5].unique(): @@ -165,10 +168,10 @@ def run( im_gpu=None if retina_masks else im[i]) # Write results - for *xyxy, conf, cls in reversed(det[:, :6]): + for j, (*xyxy, conf, cls) in enumerate(reversed(det[:, :6])): if save_txt: # Write to file - xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh - line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format + segj = segments[j].reshape(-1) # (n,2) to (n*2) + line = (cls, *segj, conf) if save_conf else (cls, *segj) # label format with open(f'{txt_path}.txt', 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') @@ -176,6 +179,7 @@ def run( c = int(cls) # integer class label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') annotator.box_label(xyxy, label, color=colors(c, True)) + annotator.draw.polygon(segments[j], outline=colors(c, True), width=3) if save_crop: save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) diff --git a/utils/segment/general.py b/utils/segment/general.py index 36547ed0889c..655123bdcfeb 100644 --- a/utils/segment/general.py +++ b/utils/segment/general.py @@ -1,4 +1,5 @@ import cv2 +import numpy as np import torch import torch.nn.functional as F @@ -118,3 +119,16 @@ def masks_iou(mask1, mask2, eps=1e-7): intersection = (mask1 * mask2).sum(1).clamp(0) # (N, ) union = (mask1.sum(1) + mask2.sum(1))[None] - intersection # (area1 + area2) - intersection return intersection / (union + eps) + + +def masks2segments(masks, strategy='largest'): + # Convert masks(n,160,160) into segments(n,xy) + segments = [] + for x in masks.int().numpy().astype('uint8'): + c = cv2.findContours(x, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0] + if strategy == 'concat': # concatenate all segments + c = np.concatenate([x.reshape(-1, 2) for x in c]) + elif strategy == 'largest': # select largest segment + c = np.array(c[np.array([len(x) for x in c]).argmax()]).reshape(-1, 2) + segments.append(c.astype('float32')) + return segments From 966b0e09f0a5261e555c2a137af2ef9d58cc9779 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 25 Sep 2022 16:21:26 +0200 Subject: [PATCH 1475/1976] TensorRT detect.py inference fix (#9581) * Update * Update ci-testing.yml Signed-off-by: Glenn Jocher * Update ci-testing.yml Signed-off-by: Glenn Jocher * Segment fix * Segment fix Signed-off-by: Glenn Jocher --- .github/workflows/ci-testing.yml | 6 ++++++ classify/predict.py | 3 ++- detect.py | 3 ++- segment/predict.py | 5 +++-- 4 files changed, 13 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci-testing.yml 
b/.github/workflows/ci-testing.yml index fffc92d1b72f..1ec68e8412f9 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -44,6 +44,12 @@ jobs: - name: Benchmark SegmentationModel run: | python benchmarks.py --data coco128-seg.yaml --weights ${{ matrix.model }}-seg.pt --img 320 --hard-fail 0.22 + - name: Test predictions + run: | + python export.py --weights ${{ matrix.model }}-cls.pt --include onnx --img 224 + python detect.py --weights ${{ matrix.model }}.onnx --img 320 + python segment/predict.py --weights ${{ matrix.model }}-seg.onnx --img 320 + python classify/predict.py --weights ${{ matrix.model }}-cls.onnx --img 224 Tests: timeout-minutes: 60 diff --git a/classify/predict.py b/classify/predict.py index d3bec8eea7ba..9114aab1d703 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -89,14 +89,15 @@ def run( imgsz = check_img_size(imgsz, s=stride) # check image size # Dataloader + bs = 1 # batch_size if webcam: view_img = check_imshow() dataset = LoadStreams(source, img_size=imgsz, transforms=classify_transforms(imgsz[0]), vid_stride=vid_stride) + bs = len(dataset) elif screenshot: dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt) else: dataset = LoadImages(source, img_size=imgsz, transforms=classify_transforms(imgsz[0]), vid_stride=vid_stride) - bs = len(dataset) # batch_size vid_path, vid_writer = [None] * bs, [None] * bs # Run inference diff --git a/detect.py b/detect.py index 4971033b35fb..8f48d8d28000 100644 --- a/detect.py +++ b/detect.py @@ -97,14 +97,15 @@ def run( imgsz = check_img_size(imgsz, s=stride) # check image size # Dataloader + bs = 1 # batch_size if webcam: view_img = check_imshow() dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) + bs = len(dataset) elif screenshot: dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt) else: dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) - bs = len(dataset) # batch_size vid_path, vid_writer = [None] * bs, [None] * bs # Run inference diff --git a/segment/predict.py b/segment/predict.py index 607a8697d731..94117cd78633 100644 --- a/segment/predict.py +++ b/segment/predict.py @@ -100,14 +100,15 @@ def run( imgsz = check_img_size(imgsz, s=stride) # check image size # Dataloader + bs = 1 # batch_size if webcam: view_img = check_imshow() dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) + bs = len(dataset) elif screenshot: dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt) else: dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) - bs = len(dataset) # batch_size vid_path, vid_writer = [None] * bs, [None] * bs # Run inference @@ -179,7 +180,7 @@ def run( c = int(cls) # integer class label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') annotator.box_label(xyxy, label, color=colors(c, True)) - annotator.draw.polygon(segments[j], outline=colors(c, True), width=3) + # annotator.draw.polygon(segments[j], outline=colors(c, True), width=3) if save_crop: save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) From 639d82fbabed66f347a17fd39cd058bcd26a4142 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 25 Sep 2022 20:12:57 +0200 Subject: [PATCH 1476/1976] Update Comet links (#9587) * Update Comet links Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update 
README.md Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- README.md | 4 ++-- tutorial.ipynb | 4 ++-- utils/loggers/comet/README.md | 6 +++--- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 1d43111d56e7..1c5e123d61e7 100644 --- a/README.md +++ b/README.md @@ -168,7 +168,7 @@ python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 12
- + @@ -186,7 +186,7 @@ python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 12 |Comet ⭐ NEW|Deci ⭐ NEW|ClearML ⭐ NEW|Roboflow|Weights & Biases |:-:|:-:|:-:|:-:|:-:| -|Visualize model metrics and predictions and upload models and datasets in realtime with [Comet](https://www.comet.com/site/?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration)|Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) |Automatically track and visualize all your YOLOv5 training runs in the cloud with [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme) +|Visualize model metrics and predictions and upload models and datasets in realtime with [Comet](https://bit.ly/yolov5-readme-comet)|Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) |Automatically track and visualize all your YOLOv5 training runs in the cloud with [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme) ##
Why YOLOv5
diff --git a/tutorial.ipynb b/tutorial.ipynb index f87cccd99df8..8c78af2b84cd 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -865,7 +865,7 @@ "cell_type": "markdown", "source": [ "## Comet Logging and Visualization 🌟 NEW\n", - "[Comet](https://www.comet.com/site/?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://www.comet.com/docs/v2/guides/comet-dashboard/code-panels/about-panels/?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", + "[Comet](https://bit.ly/yolov5-readme-comet) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", "\n", "Getting started is easy:\n", "```shell\n", @@ -874,7 +874,7 @@ "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n", "```\n", "\n", - "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://www.comet.com/docs/v2/?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration). Get started by trying out the Comet Colab Notebook:\n", + "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://bit.ly/yolov5-colab-comet-docs). Get started by trying out the Comet Colab Notebook:\n", "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", "\n", "\"yolo-ui\"" diff --git a/utils/loggers/comet/README.md b/utils/loggers/comet/README.md index 7b0b8e0e2f09..3a51cb9b5a25 100644 --- a/utils/loggers/comet/README.md +++ b/utils/loggers/comet/README.md @@ -2,13 +2,13 @@ # YOLOv5 with Comet -This guide will cover how to use YOLOv5 with [Comet](https://www.comet.com/site/?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration) +This guide will cover how to use YOLOv5 with [Comet](https://bit.ly/yolov5-readme-comet) # About Comet Comet builds tools that help data scientists, engineers, and team leaders accelerate and optimize machine learning and deep learning models. -Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://www.comet.com/examples/comet-example-yolov5?shareable=YcwMiJaZSXfcEXpGOHDD12vA1&ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration)! 
+Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! # Getting Started @@ -253,4 +253,4 @@ comet optimizer -j utils/loggers/comet/hpo.py \ Comet provides a number of ways to visualize the results of your sweep. Take a look at a [project with a completed sweep here](https://www.comet.com/examples/comet-example-yolov5/view/PrlArHGuuhDTKC1UuBmTtOSXD/panels?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration) -hyperparameter-yolo \ No newline at end of file +hyperparameter-yolo From 9006b41498a3bc512e293061e017a518f11e9902 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 25 Sep 2022 21:40:28 +0200 Subject: [PATCH 1477/1976] Add global YOLOv5_DATASETS_DIR (#9586) Update general.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index 87e7e20df1ab..de7871cb23f9 100644 --- a/utils/general.py +++ b/utils/general.py @@ -43,8 +43,8 @@ RANK = int(os.getenv('RANK', -1)) # Settings -DATASETS_DIR = ROOT.parent / 'datasets' # YOLOv5 datasets directory NUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads +DATASETS_DIR = Path(os.getenv('YOLOv5_DATASETS_DIR', ROOT.parent / 'datasets')) # global datasets directory AUTOINSTALL = str(os.getenv('YOLOv5_AUTOINSTALL', True)).lower() == 'true' # global auto-install mode VERBOSE = str(os.getenv('YOLOv5_VERBOSE', True)).lower() == 'true' # global verbose mode FONT = 'Arial.ttf' # https://ultralytics.com/assets/Arial.ttf From 9f1cf8dd1ca79b8128d73ac144e8899f51bc5816 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 25 Sep 2022 23:03:14 +0200 Subject: [PATCH 1478/1976] Add Paperspace Gradient badges (#9588) * Add Paperspace Gradient badges Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update greetings.yml Signed-off-by: Glenn Jocher * Update greetings.yml Signed-off-by: Glenn Jocher * Update README_cn.md Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- .github/README_cn.md | 4 ++-- .github/workflows/greetings.yml | 8 ++++---- README.md | 7 +++++-- tutorial.ipynb | 5 +++-- 4 files changed, 14 insertions(+), 10 deletions(-) diff --git a/.github/README_cn.md b/.github/README_cn.md index bb62714f003f..7e8aa6f7f087 100644 --- a/.github/README_cn.md +++ b/.github/README_cn.md @@ -12,13 +12,13 @@ [English](../README.md) | 简体中文
- CI CPU testing + YOLOv5 CI YOLOv5 Citation Docker Pulls
+ Run on Gradient Open In Colab Open In Kaggle - Join Forum

diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index 91bf190eb727..5e1589c340ed 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -44,14 +44,14 @@ jobs: YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled): - - **Google Colab and Kaggle** notebooks with free GPU: Open In Colab Open In Kaggle + - **Notebooks** with free GPU: Run on Gradient Open In Colab Open In Kaggle - **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart) - **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart) - **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) Docker Pulls - ## Status - CI CPU testing + YOLOv5 CI + + If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on MacOS, Windows, and Ubuntu every 24 hours and on every commit. - If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), validation ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit. diff --git a/README.md b/README.md index 1c5e123d61e7..227735b52fac 100644 --- a/README.md +++ b/README.md @@ -12,13 +12,13 @@ English | [简体中文](.github/README_cn.md)
- CI CPU testing + YOLOv5 CI YOLOv5 Citation Docker Pulls
+ Run on Gradient Open In Colab Open In Kaggle - Join Forum

@@ -315,6 +315,9 @@ python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --inclu Get started in seconds with our verified environments. Click each icon below for details.
+ + + diff --git a/tutorial.ipynb b/tutorial.ipynb index 8c78af2b84cd..5d867fb36c93 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -375,6 +375,7 @@ "\n", "\n", "
\n", + " \"Run\n", " \"Open\n", " \"Open\n", "
\n", @@ -945,7 +946,7 @@ "\n", "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n", "\n", - "- **Google Colab and Kaggle** notebooks with free GPU: \"Open \"Open\n", + "- **Notebooks** with free GPU: \"Run \"Open \"Open\n", "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)\n", "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)\n", "- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) \"Docker\n" @@ -959,7 +960,7 @@ "source": [ "# Status\n", "\n", - "![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg)\n", + "![YOLOv5 CI](https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg)\n", "\n", "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.\n" ] From 959a4665f820362c95f7435dc05175deeff19671 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 25 Sep 2022 23:26:15 +0200 Subject: [PATCH 1479/1976] #YOLOVISION22 announcement (#9590) * #YOLOVISION22 announcement Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- README.md | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/README.md b/README.md index 227735b52fac..56349867e4b6 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,19 @@
+ + Hi, I'm [Glenn Jocher](https://www.linkedin.com/in/glenn-jocher/), author of [YOLOv5](https://github.com/ultralytics/yolov5) 🚀. + + I'd like to invite you to attend the world's first-ever YOLO conference: [#YOLOVISION22](https://ultralytics.com/yolo-vision)! + + This virtual event takes place on **September 27th, 2022** with talks from the world's leading vision AI experts from Google, SenseTime's MMLabs, Baidu's PaddlePaddle, Meituan's YOLOv6, Weight & Biases, Roboflow, Neural Magic, OctoML and of course Ultralytics YOLOv5 and many others. + + Save your spot at https://ultralytics.com/yolo-vision! + + + + +##
+
+

@@ -191,6 +206,8 @@ python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 12 ##

Why YOLOv5
+YOLOv5 has been designed to be super easy to get started and simple to learn. We prioritize real-world results. +

YOLOv5-P5 640 Figure (click to expand) From bfe052b8e1ab398e834a62b607e7d544e1a9876f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 26 Sep 2022 12:39:08 +0200 Subject: [PATCH 1480/1976] Bump actions/stale from 5 to 6 (#9595) Bumps [actions/stale](https://github.com/actions/stale) from 5 to 6. - [Release notes](https://github.com/actions/stale/releases) - [Changelog](https://github.com/actions/stale/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/stale/compare/v5...v6) --- updated-dependencies: - dependency-name: actions/stale dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/stale.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 03d99790a4a7..9067c343608b 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -9,7 +9,7 @@ jobs: stale: runs-on: ubuntu-latest steps: - - uses: actions/stale@v5 + - uses: actions/stale@v6 with: repo-token: ${{ secrets.GITHUB_TOKEN }} stale-issue-message: | From bd9c0c42aee090b373db51c7393c972c26ed9913 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 26 Sep 2022 13:27:34 +0200 Subject: [PATCH 1481/1976] #YOLOVISION22 update (#9598) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 56349867e4b6..514270973137 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ I'd like to invite you to attend the world's first-ever YOLO conference: [#YOLOVISION22](https://ultralytics.com/yolo-vision)! - This virtual event takes place on **September 27th, 2022** with talks from the world's leading vision AI experts from Google, SenseTime's MMLabs, Baidu's PaddlePaddle, Meituan's YOLOv6, Weight & Biases, Roboflow, Neural Magic, OctoML and of course Ultralytics YOLOv5 and many others. + This virtual event takes place on **September 27th, 2022** with talks from the world's leading vision AI experts from Google, OpenMMLab's MMDetection, Baidu's PaddlePaddle, Meituan's YOLOv6, Weight & Biases, Roboflow, Neural Magic, OctoML and of course Ultralytics YOLOv5 and many others. Save your spot at https://ultralytics.com/yolo-vision! From c4c0ee8fc35937cfa940fdaaaf6b9660f5b355f5 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 26 Sep 2022 14:13:03 +0200 Subject: [PATCH 1482/1976] Apple MPS -> CPU NMS fallback strategy (#9600) Until more ops are fully supported this update will allow for seamless MPS inference (but slower MPS to CPU transfer before NMS, so slower NMS times). 
Partially resolves https://github.com/ultralytics/yolov5/issues/9596

Signed-off-by: Glenn Jocher

Signed-off-by: Glenn Jocher
---
 utils/general.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/utils/general.py b/utils/general.py
index de7871cb23f9..a855691d3a1f 100644
--- a/utils/general.py
+++ b/utils/general.py
@@ -843,6 +843,8 @@ def non_max_suppression(
     if isinstance(prediction, (list, tuple)):  # YOLOv5 model in validation model, output = (inference_out, loss_out)
         prediction = prediction[0]  # select only inference output
 
+    if 'mps' in prediction.device.type:  # MPS not fully supported yet, convert tensors to CPU before NMS
+        prediction = prediction.cpu()
     bs = prediction.shape[0]  # batch size
     nc = prediction.shape[2] - nm - 5  # number of classes
     xc = prediction[..., 4] > conf_thres  # candidates
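
In isolation, the same hop-to-CPU pattern looks like this (function and argument names are illustrative, not part of the patch):

import torch
import torchvision

def nms_mps_safe(boxes, scores, iou_thres=0.45):
    # Run NMS on CPU when the tensors live on Apple MPS, then restore the original device
    device = boxes.device
    if device.type == 'mps':  # MPS ops not fully supported yet
        boxes, scores = boxes.cpu(), scores.cpu()
    return torchvision.ops.nms(boxes, scores, iou_thres).to(device)
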
From a5748e4b93ae6944ea813b26de6540e80141070b Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Mon, 26 Sep 2022 20:10:24 +0200
Subject: [PATCH 1483/1976] Updated Segmentation and Classification usage (#9607)

* Updated Segmentation and Classification usage

Signed-off-by: Glenn Jocher

* Update export.py

Signed-off-by: Glenn Jocher

* Update export.py

Signed-off-by: Glenn Jocher

* Update export.py

Signed-off-by: Glenn Jocher

Signed-off-by: Glenn Jocher
---
 export.py | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/export.py b/export.py
index e3cf392b0101..20c1fbc5c7b8 100644
--- a/export.py
+++ b/export.py
@@ -560,13 +560,20 @@ def run(
     # Finish
     f = [str(x) for x in f if x]  # filter out '' and None
     if any(f):
+        tp = type(model)
+        dir = Path('segment' if tp is SegmentationModel else 'classify' if tp is ClassificationModel else '')
+        predict = 'detect.py' if tp is DetectionModel else 'predict.py'
         h = '--half' if half else ''  # --half FP16 inference arg
         LOGGER.info(f'\nExport complete ({time.time() - t:.1f}s)'
                     f"\nResults saved to {colorstr('bold', file.parent.resolve())}"
-                    f"\nDetect: python detect.py --weights {f[-1]} {h}"
-                    f"\nValidate: python val.py --weights {f[-1]} {h}"
+                    f"\nDetect: python {dir / predict} --weights {f[-1]} {h}"
+                    f"\nValidate: python {dir / 'val.py'} --weights {f[-1]} {h}"
                     f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{f[-1]}')"
                     f"\nVisualize: https://netron.app")
+        if tp is ClassificationModel:
+            LOGGER.warning("WARNING ⚠️ ClassificationModel not yet supported for PyTorch Hub AutoShape inference")
+        if tp is SegmentationModel:
+            LOGGER.warning("WARNING ⚠️ SegmentationModel not yet supported for PyTorch Hub AutoShape inference")
     return f  # return list of exported files/dirs

From 6b2c9d1d0f5f9acad86ff9e7043f094a071aa6fe Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Mon, 26 Sep 2022 20:46:50 +0200
Subject: [PATCH 1484/1976] Update export.py Usage examples (#9609)

* Update export.py Usage examples

Signed-off-by: Glenn Jocher

* Update export.py

Signed-off-by: Glenn Jocher

* Update export.py

Signed-off-by: Glenn Jocher

Signed-off-by: Glenn Jocher
---
 export.py | 15 ++++++---------
 1 file changed, 6 insertions(+), 9 deletions(-)

diff --git a/export.py b/export.py
index 20c1fbc5c7b8..cf37965cea6b 100644
--- a/export.py
+++ b/export.py
@@ -560,20 +560,17 @@ def run(
     # Finish
     f = [str(x) for x in f if x]  # filter out '' and None
     if any(f):
-        tp = type(model)
-        dir = Path('segment' if tp is SegmentationModel else 'classify' if tp is ClassificationModel else '')
-        predict = 'detect.py' if tp is DetectionModel else 'predict.py'
+        cls, det, seg = (isinstance(model, x) for x in (ClassificationModel, DetectionModel, SegmentationModel))  # type
+        dir = Path('segment' if seg else 'classify' if cls else '')
         h = '--half' if half else ''  # --half FP16 inference arg
+        s = "# WARNING ⚠️ ClassificationModel not yet supported for PyTorch Hub AutoShape inference" if cls else \
+            "# WARNING ⚠️ SegmentationModel not yet supported for PyTorch Hub AutoShape inference" if seg else ''
         LOGGER.info(f'\nExport complete ({time.time() - t:.1f}s)'
                     f"\nResults saved to {colorstr('bold', file.parent.resolve())}"
-                    f"\nDetect: python {dir / predict} --weights {f[-1]} {h}"
-                    f"\nValidate: python {dir / 'val.py'} --weights {f[-1]} {h}"
+                    f"\nDetect: python {dir / ('detect.py' if det else 'predict.py')} --weights {f[-1]} {h}"
+                    f"\nValidate: python {dir / 'val.py'} --weights {f[-1]} {h}"
                     f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{f[-1]}') {s}"
                     f"\nVisualize: https://netron.app")
     return f  # return list of exported files/dirs

From 1460e5715700cdb130472e1314074ff648f811d8 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Tue, 27 Sep 2022 00:29:23 +0200
Subject: [PATCH 1485/1976] Fix `is_url('https://ultralytics.com')` (#9610)

Failing on missing path, i.e. no 'www.'

Signed-off-by: Glenn Jocher

Signed-off-by: Glenn Jocher
---
 utils/downloads.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/utils/downloads.py b/utils/downloads.py
index 73b8334cb94a..60417c1f8835 100644
--- a/utils/downloads.py
+++ b/utils/downloads.py
@@ -21,7 +21,7 @@ def is_url(url, check=True):
     try:
         url = str(url)
         result = urllib.parse.urlparse(url)
-        assert all([result.scheme, result.netloc, result.path])  # check if is url
+        assert all([result.scheme, result.netloc])  # check if is url
         return (urllib.request.urlopen(url).getcode() == 200) if check else True  # check if exists online
     except (AssertionError, urllib.request.HTTPError):
         return False

From 7314363f26e23fc831a9a739b4031f9f0217084a Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Tue, 27 Sep 2022 16:58:14 +0200
Subject: [PATCH 1486/1976] Add `results.save(save_dir='path', exist_ok=False)` (#9617)

Signed-off-by: Glenn Jocher

Signed-off-by: Glenn Jocher
---
 models/common.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/models/common.py b/models/common.py
index 2fe99be8972b..d889d0292c61 100644
--- a/models/common.py
+++ b/models/common.py
@@ -775,12 +775,12 @@ def _run(self, pprint=False, show=False, save=False, crop=False, render=False, l
     def show(self, labels=True):
         self._run(show=True, labels=labels)  # show results
 
-    def save(self, labels=True, save_dir='runs/detect/exp'):
-        save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True)  # increment save_dir
+    def save(self, labels=True, save_dir='runs/detect/exp', exist_ok=False):
+        save_dir = increment_path(save_dir, exist_ok, mkdir=True)  # increment save_dir
         self._run(save=True, labels=labels, save_dir=save_dir)  # save results
 
-    def crop(self, save=True, save_dir='runs/detect/exp'):
-        save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) if save else None
+    def crop(self, save=True, save_dir='runs/detect/exp', exist_ok=False):
+        save_dir = increment_path(save_dir, exist_ok, mkdir=True) if save else None
         return self._run(crop=True,
save=save, save_dir=save_dir) # crop results def render(self, labels=True): From 2373d5470e386a0c63c6ab77fbee6d699665e27b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 27 Sep 2022 18:02:48 +0200 Subject: [PATCH 1487/1976] NMS MPS device wrapper (#9620) * NMS MPS device wrapper May resolve https://github.com/ultralytics/yolov5/issues/9613 Signed-off-by: Glenn Jocher * Update general.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/general.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index a855691d3a1f..d31b043a113e 100644 --- a/utils/general.py +++ b/utils/general.py @@ -843,7 +843,9 @@ def non_max_suppression( if isinstance(prediction, (list, tuple)): # YOLOv5 model in validation model, output = (inference_out, loss_out) prediction = prediction[0] # select only inference output - if 'mps' in prediction.device.type: # MPS not fully supported yet, convert tensors to CPU before NMS + device = prediction.device + mps = 'mps' in device.type # Apple MPS + if mps: # MPS not fully supported yet, convert tensors to CPU before NMS prediction = prediction.cpu() bs = prediction.shape[0] # batch size nc = prediction.shape[2] - nm - 5 # number of classes @@ -930,6 +932,8 @@ def non_max_suppression( i = i[iou.sum(1) > 1] # require redundancy output[xi] = x[i] + if mps: + output[xi] = output[xi].to(device) if (time.time() - t) > time_limit: LOGGER.warning(f'WARNING ⚠️ NMS time limit {time_limit:.3f}s exceeded') break # time limit exceeded From 799e3d0cc92a9f431d97931641e7d0b46720699a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 28 Sep 2022 16:43:11 +0200 Subject: [PATCH 1488/1976] Add SegmentationModel unsupported warning (#9632) * Add SegmentationModel unsupported warning Signed-off-by: Glenn Jocher * Update hubconf.py Signed-off-by: Glenn Jocher * Update hubconf.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- hubconf.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/hubconf.py b/hubconf.py index 4224760a4732..95b95a5c30cc 100644 --- a/hubconf.py +++ b/hubconf.py @@ -30,7 +30,7 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo from models.common import AutoShape, DetectMultiBackend from models.experimental import attempt_load - from models.yolo import ClassificationModel, DetectionModel + from models.yolo import ClassificationModel, DetectionModel, SegmentationModel from utils.downloads import attempt_download from utils.general import LOGGER, check_requirements, intersect_dicts, logging from utils.torch_utils import select_device @@ -47,8 +47,11 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo model = DetectMultiBackend(path, device=device, fuse=autoshape) # detection model if autoshape: if model.pt and isinstance(model.model, ClassificationModel): - LOGGER.warning('WARNING ⚠️ YOLOv5 v6.2 ClassificationModel is not yet AutoShape compatible. ' + LOGGER.warning('WARNING ⚠️ YOLOv5 ClassificationModel is not yet AutoShape compatible. ' 'You must pass torch tensors in BCHW to this model, i.e. shape(1,3,224,224).') + elif model.pt and isinstance(model.model, SegmentationModel): + LOGGER.warning('WARNING ⚠️ YOLOv5 SegmentationModel is not yet AutoShape compatible. 
'
+                           'You will not be able to run inference with this model.')
         else:
             model = AutoShape(model)  # for file/URI/PIL/cv2/np inputs and NMS
     except Exception:

From 0860e58557f26a0136dd8afbc82f408f31d15ecd Mon Sep 17 00:00:00 2001
From: Soumik Rakshit <19soumik.rakshit96@gmail.com>
Date: Fri, 30 Sep 2022 02:31:45 +0530
Subject: [PATCH 1489/1976] Disabled upload_dataset flag temporarily due to an
 artifact related bug (#9652)

* disabled upload_dataset flag temporarily due to an artifact related bug

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 utils/loggers/wandb/wandb_utils.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py
index e850d2ac8a7c..d2dd0fa7c6cd 100644
--- a/utils/loggers/wandb/wandb_utils.py
+++ b/utils/loggers/wandb/wandb_utils.py
@@ -132,6 +132,11 @@ def __init__(self, opt, run_id=None, job_type='Training'):
         job_type (str) -- To set the job_type for this run
 
        """
+        # Temporary-fix
+        if opt.upload_dataset:
+            opt.upload_dataset = False
+            LOGGER.info("Uploading Dataset functionality is not being supported temporarily due to a bug.")
+
         # Pre-training routine --
         self.job_type = job_type
         self.wandb, self.wandb_run = wandb, None if not wandb else wandb.run

From 82bec4c8785e123bbea01f6f2d4215c2077ac81f Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Thu, 29 Sep 2022 23:35:39 +0200
Subject: [PATCH 1490/1976] Add NVIDIA Jetson Nano Deployment tutorial (#9656)

Signed-off-by: Glenn Jocher

Signed-off-by: Glenn Jocher
---
 README.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/README.md b/README.md
index 514270973137..8b1c98b34e8f 100644
--- a/README.md
+++ b/README.md
@@ -163,6 +163,7 @@ python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 12
 - [Multi-GPU Training](https://github.com/ultralytics/yolov5/issues/475)
 - [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36) 🌟 NEW
 - [TFLite, ONNX, CoreML, TensorRT Export](https://github.com/ultralytics/yolov5/issues/251) 🚀
+- [NVIDIA Jetson Nano Deployment](https://github.com/ultralytics/yolov5/issues/9627) 🌟 NEW
 - [Test-Time Augmentation (TTA)](https://github.com/ultralytics/yolov5/issues/303)
 - [Model Ensembling](https://github.com/ultralytics/yolov5/issues/318)
 - [Model Pruning/Sparsity](https://github.com/ultralytics/yolov5/issues/304)

From 8a19437690548a158b78ab27b7f5b463a268fa19 Mon Sep 17 00:00:00 2001
From: Anant Sakhare <70131870+senhorinfinito@users.noreply.github.com>
Date: Sat, 1 Oct 2022 20:12:31 +0530
Subject: [PATCH 1491/1976] Added cutout import from utils/augmentations.py to
 use Cutout Aug in … (#9668)

* Added cutout import from utils/augmentations.py to use Cutout Aug in data loader by un-commenting line 679, 680, 681

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 utils/dataloaders.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/utils/dataloaders.py b/utils/dataloaders.py
index 6cd1da6b9cf9..d849d5150f4b 100644
--- a/utils/dataloaders.py
+++ b/utils/dataloaders.py
@@ -29,7 +29,7 @@
 from tqdm import tqdm
 
 from utils.augmentations import
(Albumentations, augment_hsv, classify_albumentations, classify_transforms, copy_paste, - letterbox, mixup, random_perspective) + cutout, letterbox, mixup, random_perspective) from utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, check_dataset, check_requirements, check_yaml, clean_str, cv2, is_colab, is_kaggle, segments2boxes, xyn2xy, xywh2xyxy, xywhn2xyxy, xyxy2xywhn) from utils.torch_utils import torch_distributed_zero_first From 1158a50abd78808049327fdf60724b2b32726d88 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 2 Oct 2022 13:37:54 +0200 Subject: [PATCH 1492/1976] Simplify val.py benchmark mode with speed mode (#9674) Update --- benchmarks.py | 4 ++-- segment/val.py | 3 +-- val.py | 3 +-- 3 files changed, 4 insertions(+), 6 deletions(-) diff --git a/benchmarks.py b/benchmarks.py index b3b58eb3257c..ef5c882973f0 100644 --- a/benchmarks.py +++ b/benchmarks.py @@ -81,10 +81,10 @@ def run( # Validate if model_type == SegmentationModel: - result = val_seg(data, w, batch_size, imgsz, plots=False, device=device, task='benchmark', half=half) + result = val_seg(data, w, batch_size, imgsz, plots=False, device=device, task='speed', half=half) metric = result[0][7] # (box(p, r, map50, map), mask(p, r, map50, map), *loss(box, obj, cls)) else: # DetectionModel: - result = val_det(data, w, batch_size, imgsz, plots=False, device=device, task='benchmark', half=half) + result = val_det(data, w, batch_size, imgsz, plots=False, device=device, task='speed', half=half) metric = result[0][3] # (p, r, map50, map, *loss(box, obj, cls)) speed = result[2][1] # times (preprocess, inference, postprocess) y.append([name, round(file_size(w), 1), round(metric, 4), round(speed, 2)]) # MB, mAP, t_inference diff --git a/segment/val.py b/segment/val.py index 0a37998c1771..f1ec54638d61 100644 --- a/segment/val.py +++ b/segment/val.py @@ -210,8 +210,7 @@ def run( assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \ f'classes). Pass correct combination of --weights and --data that are trained together.' model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz)) # warmup - pad = 0.0 if task in ('speed', 'benchmark') else 0.5 - rect = False if task == 'benchmark' else pt # square inference for benchmarks + pad, rect = (0.0, False) if task == 'speed' else (0.5, pt) # square inference for benchmarks task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images dataloader = create_dataloader(data[task], imgsz, diff --git a/val.py b/val.py index c0954498d2fb..ca838c0beb2f 100644 --- a/val.py +++ b/val.py @@ -169,8 +169,7 @@ def run( assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \ f'classes). Pass correct combination of --weights and --data that are trained together.' 
model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz)) # warmup - pad = 0.0 if task in ('speed', 'benchmark') else 0.5 - rect = False if task == 'benchmark' else pt # square inference for benchmarks + pad, rect = (0.0, False) if task == 'speed' else (0.5, pt) # square inference for benchmarks task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images dataloader = create_dataloader(data[task], imgsz, From c98128fe71a8676037a0605ab389c7473c743d07 Mon Sep 17 00:00:00 2001 From: KristenKehrer <34010022+KristenKehrer@users.noreply.github.com> Date: Sun, 2 Oct 2022 18:25:10 -0400 Subject: [PATCH 1493/1976] Allow list for Comet artifact class 'names' field (#9654) * Update __init__.py In the Comet logger, when I run train.py, it wants to download the data artifact. It was requiring me to format the 'names' field in the data artifact metadata as a dictionary, so I've changed this so that it also accepts a list. Signed-off-by: KristenKehrer <34010022+KristenKehrer@users.noreply.github.com> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update utils/loggers/comet/__init__.py Co-authored-by: Dhruv Nair Signed-off-by: KristenKehrer <34010022+KristenKehrer@users.noreply.github.com> Signed-off-by: KristenKehrer <34010022+KristenKehrer@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Dhruv Nair Co-authored-by: Glenn Jocher --- utils/loggers/comet/__init__.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/utils/loggers/comet/__init__.py b/utils/loggers/comet/__init__.py index ba5cecc8e096..b0318f88d6a6 100644 --- a/utils/loggers/comet/__init__.py +++ b/utils/loggers/comet/__init__.py @@ -353,7 +353,14 @@ def download_dataset_artifact(self, artifact_path): metadata = logged_artifact.metadata data_dict = metadata.copy() data_dict["path"] = artifact_save_dir - data_dict["names"] = {int(k): v for k, v in metadata.get("names").items()} + + metadata_names = metadata.get("names") + if type(metadata_names) == dict: + data_dict["names"] = {int(k): v for k, v in metadata.get("names").items()} + elif type(metadata_names) == list: + data_dict["names"] = {int(k): v for k, v in zip(range(len(metadata_names)), metadata_names)} + else: + raise "Invalid 'names' field in dataset yaml file. 
Please use a list or dictionary" data_dict = self.update_data_paths(data_dict) return data_dict From 68d654d8c4d473aa81be91ac42f320009736992b Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 4 Oct 2022 16:31:51 +0200 Subject: [PATCH 1494/1976] [pre-commit.ci] pre-commit suggestions (#9685) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/asottile/pyupgrade: v2.37.3 → v2.38.2](https://github.com/asottile/pyupgrade/compare/v2.37.3...v2.38.2) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index ba8005535397..1cd102c26b41 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -24,7 +24,7 @@ repos: - id: check-docstring-first - repo: https://github.com/asottile/pyupgrade - rev: v2.37.3 + rev: v2.38.2 hooks: - id: pyupgrade name: Upgrade code From e4398cf179601d47207e9f526cf0760b82058930 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 4 Oct 2022 16:32:19 +0200 Subject: [PATCH 1495/1976] TensorRT `--dynamic` fix (#9691) * Update export.py Signed-off-by: Glenn Jocher * Update export.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- export.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/export.py b/export.py index cf37965cea6b..66d4d636133a 100644 --- a/export.py +++ b/export.py @@ -251,11 +251,11 @@ def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose if trt.__version__[0] == '7': # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012 grid = model.model[-1].anchor_grid model.model[-1].anchor_grid = [a[..., :1, :1, :] for a in grid] - export_onnx(model, im, file, 12, False, dynamic, simplify) # opset 12 + export_onnx(model, im, file, 12, dynamic, simplify) # opset 12 model.model[-1].anchor_grid = grid else: # TensorRT >= 8 check_version(trt.__version__, '8.0.0', hard=True) # require tensorrt>=8.0.0 - export_onnx(model, im, file, 12, False, dynamic, simplify) # opset 12 + export_onnx(model, im, file, 12, dynamic, simplify) # opset 12 onnx = file.with_suffix('.onnx') LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...') @@ -285,7 +285,7 @@ def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose if dynamic: if im.shape[0] <= 1: - LOGGER.warning(f"{prefix}WARNING ⚠️ --dynamic model requires maximum --batch-size argument") + LOGGER.warning(f"{prefix} WARNING ⚠️ --dynamic model requires maximum --batch-size argument") profile = builder.create_optimization_profile() for inp in inputs: profile.set_shape(inp.name, (1, *im.shape[1:]), (max(1, im.shape[0] // 2), *im.shape[1:]), im.shape) From 7f097ddb6c9921d64fa504a8db79cf24fa0a913c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 5 Oct 2022 22:29:46 +0200 Subject: [PATCH 1496/1976] FROM nvcr.io/nvidia/pytorch:22.09-py3 (#9711) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index 764ee278c22e..9b93fad7b203 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -3,7 +3,7 @@ # Image is CUDA-optimized for YOLOv5 single/multi-GPU training and inference # Start FROM NVIDIA PyTorch image 
https://ngc.nvidia.com/catalog/containers/nvidia:pytorch -FROM nvcr.io/nvidia/pytorch:22.08-py3 +FROM nvcr.io/nvidia/pytorch:22.09-py3 RUN rm -rf /opt/pytorch # remove 1.2GB dir # Downloads to user config dir From 5ef69ef3e6180709bc292370ed314b6029ecabfc Mon Sep 17 00:00:00 2001 From: Paul Guerrie <97041392+paulguerrie@users.noreply.github.com> Date: Thu, 6 Oct 2022 14:55:15 -0600 Subject: [PATCH 1497/1976] Error in utils/segment/general `masks2segments()` (#9724) When running segmentation predict on gpu, the conversion from tensor to numpy fails. Calling `.cpu()` solves this problem. Signed-off-by: Paul Guerrie <97041392+paulguerrie@users.noreply.github.com> Signed-off-by: Paul Guerrie <97041392+paulguerrie@users.noreply.github.com> --- utils/segment/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/segment/general.py b/utils/segment/general.py index 655123bdcfeb..43bdc460f928 100644 --- a/utils/segment/general.py +++ b/utils/segment/general.py @@ -124,7 +124,7 @@ def masks_iou(mask1, mask2, eps=1e-7): def masks2segments(masks, strategy='largest'): # Convert masks(n,160,160) into segments(n,xy) segments = [] - for x in masks.int().numpy().astype('uint8'): + for x in masks.int().cpu().numpy().astype('uint8'): c = cv2.findContours(x, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0] if strategy == 'concat': # concatenate all segments c = np.concatenate([x.reshape(-1, 2) for x in c]) From 209be932dec9e89b902f0ac2975fa599e9bc676f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 9 Oct 2022 23:51:29 +0200 Subject: [PATCH 1498/1976] Fix segment evolution keys (#9742) * Update * Cleanup --- segment/train.py | 2 +- train.py | 4 +++- utils/general.py | 5 ++--- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/segment/train.py b/segment/train.py index 5121c5fa784a..26f0d0c13c78 100644 --- a/segment/train.py +++ b/segment/train.py @@ -651,7 +651,7 @@ def main(opt, callbacks=Callbacks()): results = train(hyp.copy(), opt, device, callbacks) callbacks = Callbacks() # Write mutation results - print_mutation(results, hyp.copy(), save_dir, opt.bucket) + print_mutation(KEYS, results, hyp.copy(), save_dir, opt.bucket) # Plot results plot_evolve(evolve_csv) diff --git a/train.py b/train.py index 9efece250581..177e081c8c37 100644 --- a/train.py +++ b/train.py @@ -607,7 +607,9 @@ def main(opt, callbacks=Callbacks()): results = train(hyp.copy(), opt, device, callbacks) callbacks = Callbacks() # Write mutation results - print_mutation(results, hyp.copy(), save_dir, opt.bucket) + keys = ('metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', 'val/box_loss', + 'val/obj_loss', 'val/cls_loss') + print_mutation(keys, results, hyp.copy(), save_dir, opt.bucket) # Plot results plot_evolve(evolve_csv) diff --git a/utils/general.py b/utils/general.py index d31b043a113e..e2faca9dbf2a 100644 --- a/utils/general.py +++ b/utils/general.py @@ -957,11 +957,10 @@ def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_op LOGGER.info(f"Optimizer stripped from {f},{f' saved as {s},' if s else ''} {mb:.1f}MB") -def print_mutation(results, hyp, save_dir, bucket, prefix=colorstr('evolve: ')): +def print_mutation(keys, results, hyp, save_dir, bucket, prefix=colorstr('evolve: ')): evolve_csv = save_dir / 'evolve.csv' evolve_yaml = save_dir / 'hyp_evolve.yaml' - keys = ('metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', 'val/box_loss', - 'val/obj_loss', 'val/cls_loss') + tuple(hyp.keys()) # [results + hyps] + 
keys = tuple(keys) + tuple(hyp.keys()) # [results + hyps] keys = tuple(x.strip() for x in keys) vals = results + tuple(hyp.values()) n = len(keys) From 2f1eb21ad6c0f715f38200c31e6e01a92c5acb25 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 10 Oct 2022 14:54:21 +0200 Subject: [PATCH 1499/1976] Remove #YOLOVISION22 notice (#9751) Update README.md Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- README.md | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/README.md b/README.md index 8b1c98b34e8f..8c19e52c45d7 100644 --- a/README.md +++ b/README.md @@ -1,19 +1,4 @@
- - Hi, I'm [Glenn Jocher](https://www.linkedin.com/in/glenn-jocher/), author of [YOLOv5](https://github.com/ultralytics/yolov5) 🚀. - - I'd like to invite you to attend the world's first-ever YOLO conference: [#YOLOVISION22](https://ultralytics.com/yolo-vision)! - - This virtual event takes place on **September 27th, 2022** with talks from the world's leading vision AI experts from Google, OpenMMLab's MMDetection, Baidu's PaddlePaddle, Meituan's YOLOv6, Weight & Biases, Roboflow, Neural Magic, OctoML and of course Ultralytics YOLOv5 and many others. - - Save your spot at https://ultralytics.com/yolo-vision! - - - - -##
-
-

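A side note on the Comet `names` handling in [PATCH 1493] above: as committed, `raise "Invalid 'names' field in dataset yaml file. Please use a list or dictionary"` raises a bare string, which Python rejects at runtime (`TypeError: exceptions must derive from BaseException`), and the `type(x) == dict` comparisons are more idiomatically written with `isinstance`. A minimal standalone sketch of the intended list/dict normalization with those two points adjusted — `normalize_names` is an illustrative name, not a function in the repo:

    def normalize_names(metadata_names):
        # Accept {index: name} dicts (keys may arrive as strings) or plain
        # [name, ...] lists, and normalize both to {int: str} as the logger expects.
        if isinstance(metadata_names, dict):
            return {int(k): v for k, v in metadata_names.items()}
        if isinstance(metadata_names, list):
            return dict(enumerate(metadata_names))
        raise ValueError("Invalid 'names' field in dataset yaml file. Please use a list or dictionary")
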
From 7a69035eb8a15f44a1dc8f1e07ee71b674e98271 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 12 Oct 2022 12:53:12 +0200 Subject: [PATCH 1500/1976] Update Loggers (#9760) * Update * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update requirements.txt Signed-off-by: Glenn Jocher * Update * Update README.md Signed-off-by: Glenn Jocher * Update Signed-off-by: Glenn Jocher --- README.md | 16 ++++++---------- requirements.txt | 2 +- tutorial.ipynb | 25 +++---------------------- utils/docker/Dockerfile | 2 +- utils/loggers/__init__.py | 14 +++++++------- utils/loggers/wandb/wandb_utils.py | 2 +- 6 files changed, 19 insertions(+), 42 deletions(-) diff --git a/README.md b/README.md index 8c19e52c45d7..8f45ccd229b5 100644 --- a/README.md +++ b/README.md @@ -155,7 +155,6 @@ python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 12 - [Hyperparameter Evolution](https://github.com/ultralytics/yolov5/issues/607) - [Transfer Learning with Frozen Layers](https://github.com/ultralytics/yolov5/issues/1314) - [Architecture Summary](https://github.com/ultralytics/yolov5/issues/6998) 🌟 NEW -- [Weights & Biases Logging](https://github.com/ultralytics/yolov5/issues/1289) - [Roboflow for Datasets, Labeling, and Active Learning](https://github.com/ultralytics/yolov5/issues/4975)  🌟 NEW - [ClearML Logging](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) 🌟 NEW - [Deci Platform](https://github.com/ultralytics/yolov5/wiki/Deci-Platform) 🌟 NEW @@ -171,23 +170,20 @@ python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 12

-|Comet ⭐ NEW|Deci ⭐ NEW|ClearML ⭐ NEW|Roboflow|Weights & Biases -|:-:|:-:|:-:|:-:|:-:| -|Visualize model metrics and predictions and upload models and datasets in realtime with [Comet](https://bit.ly/yolov5-readme-comet)|Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) |Automatically track and visualize all your YOLOv5 training runs in the cloud with [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme) +|Comet ⭐ NEW|Deci ⭐ NEW|ClearML ⭐ NEW|Roboflow| +|:-:|:-:|:-:|:-:| +|Visualize model metrics and predictions and upload models and datasets in realtime with [Comet](https://bit.ly/yolov5-readme-comet)|Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics)| ##
Why YOLOv5
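With the Weights & Biases row dropped from the integrations table above, the requirements, tutorial and Dockerfile diffs below steer users toward Comet and ClearML instead. For orientation, the opt-in workflow the updated tutorial cell encodes looks roughly like this (a sketch only; `comet_ml.init()` is Comet's documented interactive setup path):

    # Comet: install once, then initialize before running train.py
    #   pip install comet_ml
    import comet_ml
    comet_ml.init()  # interactively stores an API key for subsequent runs

    # ClearML: install, then configure credentials once from a shell
    #   pip install clearml
    #   clearml-init
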
diff --git a/requirements.txt b/requirements.txt index 0436f415c642..52f7b9ea57d2 100644 --- a/requirements.txt +++ b/requirements.txt @@ -16,8 +16,8 @@ tqdm>=4.64.0 # Logging ------------------------------------- tensorboard>=2.4.1 -# wandb # clearml +# comet # Plotting ------------------------------------ pandas>=1.1.4 diff --git a/tutorial.ipynb b/tutorial.ipynb index 5d867fb36c93..63abebc5b37f 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -655,7 +655,7 @@ "cell_type": "code", "source": [ "#@title Select YOLOv5 🚀 logger {run: 'auto'}\n", - "logger = 'TensorBoard' #@param ['TensorBoard', 'Comet', 'ClearML', 'W&B']\n", + "logger = 'TensorBoard' #@param ['TensorBoard', 'Comet', 'ClearML']\n", "\n", "if logger == 'TensorBoard':\n", " %load_ext tensorboard\n", @@ -664,10 +664,7 @@ " %pip install -q comet_ml\n", " import comet_ml; comet_ml.init()\n", "elif logger == 'ClearML':\n", - " %pip install -q clearml && clearml-init\n", - "elif logger == 'W&B':\n", - " %pip install -q wandb\n", - " import wandb; wandb.login()" + " %pip install -q clearml && clearml-init" ], "metadata": { "id": "i3oKtE4g-aNn" @@ -699,7 +696,7 @@ "YOLOv5 🚀 v6.2-56-g30e674b Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", "\n", "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n", - "\u001b[34m\u001b[1mWeights & Biases: \u001b[0mrun 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs in Weights & Biases\n", + "\u001b[34m\u001b[1mComet: \u001b[0mrun 'pip install comet' to automatically track and visualize YOLOv5 🚀 runs with Comet\n", "\u001b[34m\u001b[1mClearML: \u001b[0mrun 'pip install clearml' to automatically track, visualize and remotely train YOLOv5 🚀 in ClearML\n", "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n", "\n", @@ -905,22 +902,6 @@ "id": "Lay2WsTjNJzP" } }, - { - "cell_type": "markdown", - "metadata": { - "id": "DLI1JmHU7B0l" - }, - "source": [ - "## Weights & Biases Logging\n", - "\n", - "[Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_notebook) (W&B) is integrated with YOLOv5 for real-time visualization and cloud logging of training runs. This allows for better run comparison and introspection, as well improved visibility and collaboration for teams. To enable W&B `pip install wandb`, and then train normally (you will be guided through setup on first use). \n", - "\n", - "During training you will see live updates at [https://wandb.ai/home](https://wandb.ai/home?utm_campaign=repo_yolo_notebook), and you can create and share detailed [Reports](https://wandb.ai/glenn-jocher/yolov5_tutorial/reports/YOLOv5-COCO128-Tutorial-Results--VmlldzozMDI5OTY) of your results. For more information see the [YOLOv5 Weights & Biases Tutorial](https://github.com/ultralytics/yolov5/issues/1289). 
\n", - "\n", - "\n", - "\"Weights" - ] - }, { "cell_type": "markdown", "metadata": { diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index 9b93fad7b203..be5c2fb71517 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -16,7 +16,7 @@ RUN apt update && apt install --no-install-recommends -y zip htop screen libgl1- COPY requirements.txt . RUN python -m pip install --upgrade pip wheel RUN pip uninstall -y Pillow torchtext torch torchvision -RUN pip install --no-cache -r requirements.txt albumentations wandb gsutil notebook Pillow>=9.1.0 \ +RUN pip install --no-cache -r requirements.txt albumentations comet clearml gsutil notebook Pillow>=9.1.0 \ 'opencv-python<4.6.0.66' \ --extra-index-url https://download.pytorch.org/whl/cu113 diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 941d09e19e2d..bc8dd7621579 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -84,10 +84,10 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, self.csv = True # always log to csv # Messages - if not wandb: - prefix = colorstr('Weights & Biases: ') - s = f"{prefix}run 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs in Weights & Biases" - self.logger.info(s) + # if not wandb: + # prefix = colorstr('Weights & Biases: ') + # s = f"{prefix}run 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs in Weights & Biases" + # self.logger.info(s) if not clearml: prefix = colorstr('ClearML: ') s = f"{prefix}run 'pip install clearml' to automatically track, visualize and remotely train YOLOv5 🚀 in ClearML" @@ -110,9 +110,9 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, self.opt.hyp = self.hyp # add hyperparameters self.wandb = WandbLogger(self.opt, run_id) # temp warn. because nested artifacts not supported after 0.12.10 - if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.11'): - s = "YOLOv5 temporarily requires wandb version 0.12.10 or below. Some features may not work as expected." - self.logger.warning(s) + # if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.11'): + # s = "YOLOv5 temporarily requires wandb version 0.12.10 or below. Some features may not work as expected." 
+ # self.logger.warning(s) else: self.wandb = None diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index d2dd0fa7c6cd..238f4edbf2a0 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -135,7 +135,7 @@ def __init__(self, opt, run_id=None, job_type='Training'): # Temporary-fix if opt.upload_dataset: opt.upload_dataset = False - LOGGER.info("Uploading Dataset functionality is not being supported temporarily due to a bug.") + # LOGGER.info("Uploading Dataset functionality is not being supported temporarily due to a bug.") # Pre-training routine -- self.job_type = job_type From 85ae985b6a232f3a3e2f7400243cec2ca0b5f8d1 Mon Sep 17 00:00:00 2001 From: Vladislav Veklenko <71467601+vladoossss@users.noreply.github.com> Date: Thu, 13 Oct 2022 01:44:01 +0200 Subject: [PATCH 1501/1976] update mask2segments and saving results (#9785) * update mask2segments and saving results * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update general.py Signed-off-by: Glenn Jocher * Update predict.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- utils/segment/general.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/utils/segment/general.py b/utils/segment/general.py index 43bdc460f928..b526333dc5a1 100644 --- a/utils/segment/general.py +++ b/utils/segment/general.py @@ -126,9 +126,12 @@ def masks2segments(masks, strategy='largest'): segments = [] for x in masks.int().cpu().numpy().astype('uint8'): c = cv2.findContours(x, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0] - if strategy == 'concat': # concatenate all segments - c = np.concatenate([x.reshape(-1, 2) for x in c]) - elif strategy == 'largest': # select largest segment - c = np.array(c[np.array([len(x) for x in c]).argmax()]).reshape(-1, 2) + if c: + if strategy == 'concat': # concatenate all segments + c = np.concatenate([x.reshape(-1, 2) for x in c]) + elif strategy == 'largest': # select largest segment + c = np.array(c[np.array([len(x) for x in c]).argmax()]).reshape(-1, 2) + else: + c = np.zeros((0, 2)) # no segments found segments.append(c.astype('float32')) return segments From 16f87bb38e76a5aa14ee93252042063b678ece86 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 13 Oct 2022 02:32:06 +0200 Subject: [PATCH 1502/1976] HUB VOC fix (#9792) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/general.py | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/general.py b/utils/general.py index e2faca9dbf2a..d9d54d9e4f71 100644 --- a/utils/general.py +++ b/utils/general.py @@ -477,6 +477,7 @@ def check_dataset(data, autodownload=True): path = Path(extract_dir or data.get('path') or '') # optional 'path' default to '.' 
if not path.is_absolute(): path = (ROOT / path).resolve() + data['path'] = path # download scripts for k in 'train', 'val', 'test': if data.get(k): # prepend path if isinstance(data[k], str): From 15b75659ddc2552bd9239db8a3c940322da49b80 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 13 Oct 2022 15:27:16 +0200 Subject: [PATCH 1503/1976] Update hubconf.py local repo Usage example (#9803) * Update hubconf.py Signed-off-by: Glenn Jocher * Update hubconf.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- hubconf.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/hubconf.py b/hubconf.py index 95b95a5c30cc..2c6ec13f815c 100644 --- a/hubconf.py +++ b/hubconf.py @@ -4,8 +4,10 @@ Usage: import torch - model = torch.hub.load('ultralytics/yolov5', 'yolov5s') - model = torch.hub.load('ultralytics/yolov5:master', 'custom', 'path/to/yolov5s.onnx') # custom model from branch + model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # official model + model = torch.hub.load('ultralytics/yolov5:master', 'yolov5s') # from branch + model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s.pt') # custom/local model + model = torch.hub.load('.', 'custom', 'yolov5s.pt', source='local') # local repo """ import torch From 2a19d070d8a92bbf44dca8a40c503ec7406228d9 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 14 Oct 2022 12:28:52 +0200 Subject: [PATCH 1504/1976] Fix xView dataloaders import (#9807) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- data/xView.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/xView.yaml b/data/xView.yaml index b134ceac8164..770ab7870449 100644 --- a/data/xView.yaml +++ b/data/xView.yaml @@ -87,7 +87,7 @@ download: | from PIL import Image from tqdm import tqdm - from utils.datasets import autosplit + from utils.dataloaders import autosplit from utils.general import download, xyxy2xywhn From df80e7c723b5722fe5b8d935ace73b8b28572ed4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 14 Oct 2022 18:18:58 +0200 Subject: [PATCH 1505/1976] Argoverse HUB fix (#9809) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- data/Argoverse.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/Argoverse.yaml b/data/Argoverse.yaml index e3e9ba161ed0..558151dc849e 100644 --- a/data/Argoverse.yaml +++ b/data/Argoverse.yaml @@ -63,7 +63,7 @@ download: | # Download - dir = Path('../datasets/Argoverse') # dataset root dir + dir = Path(yaml['path']) # dataset root dir urls = ['https://argoverse-hd.s3.us-east-2.amazonaws.com/Argoverse-HD-Full.zip'] download(urls, dir=dir, delete=False) From e42c89d4efc99bfbd8c5c208ffe67c11632da84a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 16 Oct 2022 20:51:32 +0200 Subject: [PATCH 1506/1976] `smart_optimizer()` revert to weight with decay (#9817) If a parameter does not fall into any other category Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/torch_utils.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 9f257d06ac60..04a3873854ee 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -319,12 +319,13 @@ def smart_optimizer(model, name='Adam', lr=0.001, momentum=0.9, decay=1e-5): g = [], [], [] # optimizer parameter groups bn = tuple(v for k, v in nn.__dict__.items() if 'Norm' in k) # normalization layers, i.e. 
BatchNorm2d() for v in model.modules(): - if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter): # bias (no decay) - g[2].append(v.bias) - if isinstance(v, bn): # weight (no decay) - g[1].append(v.weight) - elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter): # weight (with decay) - g[0].append(v.weight) + for p_name, p in v.named_parameters(recurse=0): + if p_name == 'bias': # bias (no decay) + g[2].append(p) + elif p_name == 'weight' and isinstance(v, bn): # weight (no decay) + g[1].append(p) + else: + g[0].append(p) # weight (with decay) if name == 'Adam': optimizer = torch.optim.Adam(g[2], lr=lr, betas=(momentum, 0.999)) # adjust beta1 to momentum From e3ff7806769444de864060494d1be8e18ce046a1 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 17 Oct 2022 14:34:33 +0200 Subject: [PATCH 1507/1976] Allow PyTorch Hub results to display in notebooks (#9825) * Allow PyTorch Hub results to display in notebooks * fix CI * fix CI * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix CI * fix CI * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix CI * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix CI * fix CI * fix CI * fix CI * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix CI * fix CI * fix CI Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- classify/predict.py | 2 +- detect.py | 2 +- models/common.py | 13 +++++++++---- segment/predict.py | 2 +- utils/__init__.py | 2 +- utils/autoanchor.py | 2 +- utils/general.py | 17 +++++++++++++---- utils/metrics.py | 2 +- 8 files changed, 28 insertions(+), 14 deletions(-) diff --git a/classify/predict.py b/classify/predict.py index 9114aab1d703..9373649bf27d 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -91,7 +91,7 @@ def run( # Dataloader bs = 1 # batch_size if webcam: - view_img = check_imshow() + view_img = check_imshow(warn=True) dataset = LoadStreams(source, img_size=imgsz, transforms=classify_transforms(imgsz[0]), vid_stride=vid_stride) bs = len(dataset) elif screenshot: diff --git a/detect.py b/detect.py index 8f48d8d28000..98af7235ea69 100644 --- a/detect.py +++ b/detect.py @@ -99,7 +99,7 @@ def run( # Dataloader bs = 1 # batch_size if webcam: - view_img = check_imshow() + view_img = check_imshow(warn=True) dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) bs = len(dataset) elif screenshot: diff --git a/models/common.py b/models/common.py index d889d0292c61..e6da429de3e5 100644 --- a/models/common.py +++ b/models/common.py @@ -18,16 +18,20 @@ import requests import torch import torch.nn as nn +from IPython.display import display from PIL import Image from torch.cuda import amp +from utils import TryExcept from utils.dataloaders import exif_transpose, letterbox -from utils.general import (LOGGER, ROOT, Profile, check_requirements, check_suffix, check_version, colorstr, - increment_path, make_divisible, non_max_suppression, scale_boxes, xywh2xyxy, xyxy2xywh, - yaml_load) +from utils.general import (LOGGER, ROOT, Profile, check_imshow, check_requirements, check_suffix, check_version, + colorstr, increment_path, make_divisible, non_max_suppression, scale_boxes, xywh2xyxy, + xyxy2xywh, yaml_load) from utils.plots import Annotator, colors, save_one_box from utils.torch_utils import copy_attr, 
smart_inference_mode +CHECK_IMSHOW = check_imshow() + def autopad(k, p=None, d=1): # kernel, padding, dilation # Pad to 'same' shape outputs @@ -756,7 +760,7 @@ def _run(self, pprint=False, show=False, save=False, crop=False, render=False, l im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np if show: - im.show(self.files[i]) # show + im.show(self.files[i]) if CHECK_IMSHOW else display(im) if save: f = self.files[i] im.save(save_dir / f) # save @@ -772,6 +776,7 @@ def _run(self, pprint=False, show=False, save=False, crop=False, render=False, l LOGGER.info(f'Saved results to {save_dir}\n') return crops + @TryExcept('Showing images is not supported in this environment') def show(self, labels=True): self._run(show=True, labels=labels) # show results diff --git a/segment/predict.py b/segment/predict.py index 94117cd78633..44d6d3904c19 100644 --- a/segment/predict.py +++ b/segment/predict.py @@ -102,7 +102,7 @@ def run( # Dataloader bs = 1 # batch_size if webcam: - view_img = check_imshow() + view_img = check_imshow(warn=True) dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) bs = len(dataset) elif screenshot: diff --git a/utils/__init__.py b/utils/__init__.py index 8403a6149827..0afe6f475625 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -23,7 +23,7 @@ def __enter__(self): def __exit__(self, exc_type, value, traceback): if value: - print(emojis(f'{self.msg}{value}')) + print(emojis(f"{self.msg}{': ' if self.msg else ''}{value}")) return True diff --git a/utils/autoanchor.py b/utils/autoanchor.py index 7e7e9985d68a..cfc4c276e3aa 100644 --- a/utils/autoanchor.py +++ b/utils/autoanchor.py @@ -26,7 +26,7 @@ def check_anchor_order(m): m.anchors[:] = m.anchors.flip(0) -@TryExcept(f'{PREFIX}ERROR: ') +@TryExcept(f'{PREFIX}ERROR') def check_anchors(dataset, model, thr=4.0, imgsz=640): # Check anchor fit to data, recompute if necessary m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect() diff --git a/utils/general.py b/utils/general.py index d9d54d9e4f71..76bc0b1d7a79 100644 --- a/utils/general.py +++ b/utils/general.py @@ -27,6 +27,7 @@ from zipfile import ZipFile import cv2 +import IPython import numpy as np import pandas as pd import pkg_resources as pkg @@ -73,6 +74,12 @@ def is_colab(): return 'COLAB_GPU' in os.environ +def is_notebook(): + # Is environment a Jupyter notebook? Verified on Colab, Jupyterlab, Kaggle, Paperspace + ipython_type = str(type(IPython.get_ipython())) + return 'colab' in ipython_type or 'zmqshell' in ipython_type + + def is_kaggle(): # Is environment a Kaggle Notebook? 
return os.environ.get('PWD') == '/kaggle/working' and os.environ.get('KAGGLE_URL_BASE') == 'https://www.kaggle.com' @@ -383,18 +390,20 @@ def check_img_size(imgsz, s=32, floor=0): return new_size -def check_imshow(): +def check_imshow(warn=False): # Check if environment supports image displays try: - assert not is_docker(), 'cv2.imshow() is disabled in Docker environments' - assert not is_colab(), 'cv2.imshow() is disabled in Google Colab environments' + assert not is_notebook() + assert not is_docker() + assert 'NoneType' not in str(type(IPython.get_ipython())) # SSH terminals, GitHub CI cv2.imshow('test', np.zeros((1, 1, 3))) cv2.waitKey(1) cv2.destroyAllWindows() cv2.waitKey(1) return True except Exception as e: - LOGGER.warning(f'WARNING ⚠️ Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}') + if warn: + LOGGER.warning(f'WARNING ⚠️ Environment does not support cv2.imshow() or PIL Image.show()\n{e}') return False diff --git a/utils/metrics.py b/utils/metrics.py index ed611d7d38fa..f0bc787e1518 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -186,7 +186,7 @@ def tp_fp(self): # fn = self.matrix.sum(0) - tp # false negatives (missed detections) return tp[:-1], fp[:-1] # remove background class - @TryExcept('WARNING ⚠️ ConfusionMatrix plot failure: ') + @TryExcept('WARNING ⚠️ ConfusionMatrix plot failure') def plot(self, normalize=True, save_dir='', names=()): import seaborn as sn From acff977af3a6e23e9c25e932208efed73f9b7810 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 17 Oct 2022 15:30:42 +0200 Subject: [PATCH 1508/1976] Logger Cleanup (#9828) --- segment/train.py | 12 ------------ train.py | 4 +--- utils/general.py | 2 +- 3 files changed, 2 insertions(+), 16 deletions(-) diff --git a/segment/train.py b/segment/train.py index 26f0d0c13c78..5a5f15f10d84 100644 --- a/segment/train.py +++ b/segment/train.py @@ -91,17 +91,6 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio data_dict = None if RANK in {-1, 0}: logger = GenericLogger(opt=opt, console_logger=LOGGER) - # loggers = Loggers(save_dir, weights, opt, hyp, LOGGER) # loggers instance - # if loggers.clearml: - # data_dict = loggers.clearml.data_dict # None if no ClearML dataset or filled in by ClearML - # if loggers.wandb: - # data_dict = loggers.wandb.data_dict - # if resume: - # weights, epochs, hyp, batch_size = opt.weights, opt.epochs, opt.hyp, opt.batch_size - # - # # Register actions - # for k in methods(loggers): - # callbacks.register_action(k, callback=getattr(loggers, k)) # Config plots = not evolve and not opt.noplots # create plots @@ -400,7 +389,6 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio 'ema': deepcopy(ema.ema).half(), 'updates': ema.updates, 'optimizer': optimizer.state_dict(), - # 'wandb_id': loggers.wandb.wandb_run.id if loggers.wandb else None, 'opt': vars(opt), 'date': datetime.now().isoformat()} diff --git a/train.py b/train.py index 177e081c8c37..c24a8e81531d 100644 --- a/train.py +++ b/train.py @@ -53,7 +53,6 @@ one_cycle, print_args, print_mutation, strip_optimizer, yaml_save) from utils.loggers import Loggers from utils.loggers.comet.comet_utils import check_comet_resume -from utils.loggers.wandb.wandb_utils import check_wandb_resume from utils.loss import ComputeLoss from utils.metrics import fitness from utils.plots import plot_evolve @@ -375,7 +374,6 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio 'ema': deepcopy(ema.ema).half(), 'updates': ema.updates, 
'optimizer': optimizer.state_dict(), - 'wandb_id': loggers.wandb.wandb_run.id if loggers.wandb else None, 'opt': vars(opt), 'date': datetime.now().isoformat()} @@ -483,7 +481,7 @@ def main(opt, callbacks=Callbacks()): check_requirements() # Resume (from specified or most recent last.pt) - if opt.resume and not check_wandb_resume(opt) and not check_comet_resume(opt) and not opt.evolve: + if opt.resume and not check_comet_resume(opt) and not opt.evolve: last = Path(check_file(opt.resume) if isinstance(opt.resume, str) else get_latest_run()) opt_yaml = last.parent.parent / 'opt.yaml' # train options yaml opt_data = opt.data # original dataset diff --git a/utils/general.py b/utils/general.py index 76bc0b1d7a79..8ea0ad07ed13 100644 --- a/utils/general.py +++ b/utils/general.py @@ -956,7 +956,7 @@ def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_op x = torch.load(f, map_location=torch.device('cpu')) if x.get('ema'): x['model'] = x['ema'] # replace model with ema - for k in 'optimizer', 'best_fitness', 'wandb_id', 'ema', 'updates': # keys + for k in 'optimizer', 'best_fitness', 'ema', 'updates': # keys x[k] = None x['epoch'] = -1 x['model'].half() # to FP16 From f1482b0667a7cb116fde43132c1e140a9f3cee20 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 18 Oct 2022 13:54:33 +0200 Subject: [PATCH 1509/1976] Remove ipython from `check_requirements` exclude list (#9841) May resolve https://github.com/ultralytics/yolov5/commit/e3ff7806769444de864060494d1be8e18ce046a1#commitcomment-87136818 Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- hubconf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hubconf.py b/hubconf.py index 2c6ec13f815c..41af8e39d14d 100644 --- a/hubconf.py +++ b/hubconf.py @@ -39,7 +39,7 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo if not verbose: LOGGER.setLevel(logging.WARNING) - check_requirements(exclude=('ipython', 'opencv-python', 'tensorboard', 'thop')) + check_requirements(exclude=('opencv-python', 'tensorboard', 'thop')) name = Path(name) path = name.with_suffix('.pt') if name.suffix == '' and not name.is_dir() else name # checkpoint path try: From 010cd0db7d491484caae3c31754b2cf13156baa7 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 18 Oct 2022 15:25:21 +0200 Subject: [PATCH 1510/1976] Update HUBDatasetStats() usage examples (#9842) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/dataloaders.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index d849d5150f4b..5074d25ee268 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -1005,13 +1005,18 @@ def verify_image_label(args): class HUBDatasetStats(): - """ Return dataset statistics dictionary with images and instances counts per split per class - To run in parent directory: export PYTHONPATH="$PWD/yolov5" - Usage1: from utils.dataloaders import *; HUBDatasetStats('coco128.yaml', autodownload=True) - Usage2: from utils.dataloaders import *; HUBDatasetStats('path/to/coco128_with_yaml.zip') + """ Class for generating HUB dataset JSON and `-hub` dataset directory + Arguments path: Path to data.yaml or data.zip (with data.yaml inside data.zip) autodownload: Attempt to download dataset if not found locally + + Usage + from utils.dataloaders import HUBDatasetStats + stats = HUBDatasetStats('coco128.yaml', autodownload=True) # usage 1 + stats = HUBDatasetStats('path/to/coco128.zip') # usage 2 + stats.get_json(save=False) 
+ stats.process_images() """ def __init__(self, path='coco128.yaml', autodownload=False): From d0df6c840372b77a7c075f2231914f53112e79eb Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 18 Oct 2022 16:35:04 +0200 Subject: [PATCH 1511/1976] Update ZipFile to context manager (#9843) * Update zipFile to context manager * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Cleanup * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Cleanup Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/dataloaders.py | 6 ++-- utils/downloads.py | 81 -------------------------------------------- utils/general.py | 14 ++++++-- 3 files changed, 15 insertions(+), 86 deletions(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 5074d25ee268..37b3ffb2728b 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -17,7 +17,6 @@ from pathlib import Path from threading import Thread from urllib.parse import urlparse -from zipfile import ZipFile import numpy as np import torch @@ -31,7 +30,8 @@ from utils.augmentations import (Albumentations, augment_hsv, classify_albumentations, classify_transforms, copy_paste, cutout, letterbox, mixup, random_perspective) from utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, check_dataset, check_requirements, check_yaml, clean_str, - cv2, is_colab, is_kaggle, segments2boxes, xyn2xy, xywh2xyxy, xywhn2xyxy, xyxy2xywhn) + cv2, is_colab, is_kaggle, segments2boxes, unzip_file, xyn2xy, xywh2xyxy, xywhn2xyxy, + xyxy2xywhn) from utils.torch_utils import torch_distributed_zero_first # Parameters @@ -1053,7 +1053,7 @@ def _unzip(self, path): if not str(path).endswith('.zip'): # path is data.yaml return False, None, path assert Path(path).is_file(), f'Error unzipping {path}, file not found' - ZipFile(path).extractall(path=path.parent) # unzip + unzip_file(path, path=path.parent) dir = path.with_suffix('') # dataset directory == zip name assert dir.is_dir(), f'Error unzipping {path}, {dir} not found. path/to/abc.zip MUST unzip to path/to/abc/' return True, str(dir), self._find_yaml(dir) # zipped, data_dir, yaml_path diff --git a/utils/downloads.py b/utils/downloads.py index 60417c1f8835..21bb6608d5ba 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -5,12 +5,9 @@ import logging import os -import platform import subprocess -import time import urllib from pathlib import Path -from zipfile import ZipFile import requests import torch @@ -109,81 +106,3 @@ def github_assets(repository, version='latest'): error_msg=f'{file} missing, try downloading from https://github.com/{repo}/releases/{tag} or {url3}') return str(file) - - -def gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', file='tmp.zip'): - # Downloads a file from Google Drive. from yolov5.utils.downloads import *; gdrive_download() - t = time.time() - file = Path(file) - cookie = Path('cookie') # gdrive cookie - print(f'Downloading https://drive.google.com/uc?export=download&id={id} as {file}... 
', end='') - if file.exists(): - file.unlink() # remove existing file - if cookie.exists(): - cookie.unlink() # remove existing cookie - - # Attempt file download - out = "NUL" if platform.system() == "Windows" else "/dev/null" - os.system(f'curl -c ./cookie -s -L "drive.google.com/uc?export=download&id={id}" > {out}') - if os.path.exists('cookie'): # large file - s = f'curl -Lb ./cookie "drive.google.com/uc?export=download&confirm={get_token()}&id={id}" -o {file}' - else: # small file - s = f'curl -s -L -o {file} "drive.google.com/uc?export=download&id={id}"' - r = os.system(s) # execute, capture return - if cookie.exists(): - cookie.unlink() # remove existing cookie - - # Error check - if r != 0: - if file.exists(): - file.unlink() # remove partial - print('Download error ') # raise Exception('Download error') - return r - - # Unzip if archive - if file.suffix == '.zip': - print('unzipping... ', end='') - ZipFile(file).extractall(path=file.parent) # unzip - file.unlink() # remove zip - - print(f'Done ({time.time() - t:.1f}s)') - return r - - -def get_token(cookie="./cookie"): - with open(cookie) as f: - for line in f: - if "download" in line: - return line.split()[-1] - return "" - - -# Google utils: https://cloud.google.com/storage/docs/reference/libraries ---------------------------------------------- -# -# -# def upload_blob(bucket_name, source_file_name, destination_blob_name): -# # Uploads a file to a bucket -# # https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python -# -# storage_client = storage.Client() -# bucket = storage_client.get_bucket(bucket_name) -# blob = bucket.blob(destination_blob_name) -# -# blob.upload_from_filename(source_file_name) -# -# print('File {} uploaded to {}.'.format( -# source_file_name, -# destination_blob_name)) -# -# -# def download_blob(bucket_name, source_blob_name, destination_file_name): -# # Uploads a blob from a bucket -# storage_client = storage.Client() -# bucket = storage_client.get_bucket(bucket_name) -# blob = bucket.blob(source_blob_name) -# -# blob.download_to_filename(destination_file_name) -# -# print('Blob {} downloaded to {}.'.format( -# source_blob_name, -# destination_file_name)) diff --git a/utils/general.py b/utils/general.py index 8ea0ad07ed13..fb8484ce434e 100644 --- a/utils/general.py +++ b/utils/general.py @@ -511,7 +511,7 @@ def check_dataset(data, autodownload=True): LOGGER.info(f'Downloading {s} to {f}...') torch.hub.download_url_to_file(s, f) Path(DATASETS_DIR).mkdir(parents=True, exist_ok=True) # create root - ZipFile(f).extractall(path=DATASETS_DIR) # unzip + unzip_file(f, path=DATASETS_DIR) # unzip Path(f).unlink() # remove zip r = None # success elif s.startswith('bash '): # bash script @@ -566,6 +566,16 @@ def yaml_save(file='data.yaml', data={}): yaml.safe_dump({k: str(v) if isinstance(v, Path) else v for k, v in data.items()}, f, sort_keys=False) +def unzip_file(file, path=None, exclude=('.DS_Store', '__MACOSX')): + # Unzip a *.zip file to path/, excluding files containing strings in exclude list + if path is None: + path = Path(file).parent # default path + with ZipFile(file) as zipObj: + for f in zipObj.namelist(): # list all archived filenames in the zip + if all(x not in f for x in exclude): + zipObj.extract(f, path=path) + + def url2file(url): # Convert URL to filename, i.e. 
https://url.com/file.txt?auth -> file.txt url = str(Path(url)).replace(':/', '://') # Pathlib turns :// -> :/ @@ -601,7 +611,7 @@ def download_one(url, dir): if unzip and success and f.suffix in ('.zip', '.tar', '.gz'): LOGGER.info(f'Unzipping {f}...') if f.suffix == '.zip': - ZipFile(f).extractall(path=dir) # unzip + unzip_file(f, dir) # unzip elif f.suffix == '.tar': os.system(f'tar xf {f} --directory {f.parent}') # unzip elif f.suffix == '.gz': From c4710012d83ec46f1759b38555c989e3c23ea727 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 18 Oct 2022 21:22:37 +0200 Subject: [PATCH 1512/1976] Update README.md (#9846) @pderrenger Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- README.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 8f45ccd229b5..52f2854dd601 100644 --- a/README.md +++ b/README.md @@ -168,22 +168,22 @@ python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 12
-|Comet ⭐ NEW|Deci ⭐ NEW|ClearML ⭐ NEW|Roboflow| +|Roboflow|ClearML ⭐ NEW|Comet ⭐ NEW|Deci ⭐ NEW| |:-:|:-:|:-:|:-:| -|Visualize model metrics and predictions and upload models and datasets in realtime with [Comet](https://bit.ly/yolov5-readme-comet)|Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics)| +|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Visualize model metrics and predictions and upload models and datasets in realtime with [Comet](https://bit.ly/yolov5-readme-comet)|Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)| ##
Why YOLOv5
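Looking back at the `smart_optimizer()` revert in [PATCH 1506] above, the point of iterating `named_parameters(recurse=0)` per module is that any parameter which is neither a bias nor a normalization-layer weight now falls through to the weight-decay group rather than being silently skipped. Restated as a standalone sketch (`param_groups` is an illustrative wrapper around the grouping rule, not a repo function):

    import torch.nn as nn

    def param_groups(model):
        g = [], [], []  # 0: weights with decay, 1: norm-layer weights (no decay), 2: biases (no decay)
        bn = tuple(v for k, v in nn.__dict__.items() if 'Norm' in k)  # BatchNorm2d, LayerNorm, etc.
        for m in model.modules():
            for p_name, p in m.named_parameters(recurse=0):
                if p_name == 'bias':
                    g[2].append(p)
                elif p_name == 'weight' and isinstance(m, bn):
                    g[1].append(p)
                else:
                    g[0].append(p)  # everything else now gets weight decay
        return g
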
From 6371de8879e7ad7ec5283e8b95cc6dd85d6a5e72 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 18 Oct 2022 22:26:53 +0200 Subject: [PATCH 1513/1976] Webcam show fix (#9847) * Webcam show fix Signed-off-by: Glenn Jocher * Update common.py Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- models/common.py | 8 +++----- utils/general.py | 1 - 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/models/common.py b/models/common.py index e6da429de3e5..ba18cbce7429 100644 --- a/models/common.py +++ b/models/common.py @@ -24,14 +24,12 @@ from utils import TryExcept from utils.dataloaders import exif_transpose, letterbox -from utils.general import (LOGGER, ROOT, Profile, check_imshow, check_requirements, check_suffix, check_version, - colorstr, increment_path, make_divisible, non_max_suppression, scale_boxes, xywh2xyxy, +from utils.general import (LOGGER, ROOT, Profile, check_requirements, check_suffix, check_version, colorstr, + increment_path, is_notebook, make_divisible, non_max_suppression, scale_boxes, xywh2xyxy, xyxy2xywh, yaml_load) from utils.plots import Annotator, colors, save_one_box from utils.torch_utils import copy_attr, smart_inference_mode -CHECK_IMSHOW = check_imshow() - def autopad(k, p=None, d=1): # kernel, padding, dilation # Pad to 'same' shape outputs @@ -760,7 +758,7 @@ def _run(self, pprint=False, show=False, save=False, crop=False, render=False, l im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np if show: - im.show(self.files[i]) if CHECK_IMSHOW else display(im) + display(im) if is_notebook() else im.show(self.files[i]) if save: f = self.files[i] im.save(save_dir / f) # save diff --git a/utils/general.py b/utils/general.py index fb8484ce434e..e1823b50ac56 100644 --- a/utils/general.py +++ b/utils/general.py @@ -395,7 +395,6 @@ def check_imshow(warn=False): try: assert not is_notebook() assert not is_docker() - assert 'NoneType' not in str(type(IPython.get_ipython())) # SSH terminals, GitHub CI cv2.imshow('test', np.zeros((1, 1, 3))) cv2.waitKey(1) cv2.destroyAllWindows() From 3b1a9d22a45f1e16e21c8e8ebec9ccd17068cd08 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 20 Oct 2022 19:54:07 +0200 Subject: [PATCH 1514/1976] Fix OpenVINO Usage example (#9874) * Fix OpenVINO Usage example * Fix OpenVINO Usage example --- classify/predict.py | 2 +- classify/val.py | 2 +- detect.py | 2 +- export.py | 2 +- models/common.py | 4 ++-- segment/predict.py | 2 +- segment/val.py | 4 ++-- val.py | 2 +- 8 files changed, 10 insertions(+), 10 deletions(-) diff --git a/classify/predict.py b/classify/predict.py index 9373649bf27d..96508d633da8 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -15,7 +15,7 @@ $ python classify/predict.py --weights yolov5s-cls.pt # PyTorch yolov5s-cls.torchscript # TorchScript yolov5s-cls.onnx # ONNX Runtime or OpenCV DNN with --dnn - yolov5s-cls.xml # OpenVINO + yolov5s-cls_openvino_model # OpenVINO yolov5s-cls.engine # TensorRT yolov5s-cls.mlmodel # CoreML (macOS-only) yolov5s-cls_saved_model # TensorFlow SavedModel diff --git a/classify/val.py b/classify/val.py index 3c16ec8092d8..c0b507785fb0 100644 --- a/classify/val.py +++ b/classify/val.py @@ -10,7 +10,7 @@ $ python classify/val.py --weights yolov5s-cls.pt # PyTorch yolov5s-cls.torchscript # TorchScript yolov5s-cls.onnx # ONNX Runtime or 
OpenCV DNN with --dnn - yolov5s-cls.xml # OpenVINO + yolov5s-cls_openvino_model # OpenVINO yolov5s-cls.engine # TensorRT yolov5s-cls.mlmodel # CoreML (macOS-only) yolov5s-cls_saved_model # TensorFlow SavedModel diff --git a/detect.py b/detect.py index 98af7235ea69..8e42fbe159d0 100644 --- a/detect.py +++ b/detect.py @@ -15,7 +15,7 @@ $ python detect.py --weights yolov5s.pt # PyTorch yolov5s.torchscript # TorchScript yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn - yolov5s.xml # OpenVINO + yolov5s_openvino_model # OpenVINO yolov5s.engine # TensorRT yolov5s.mlmodel # CoreML (macOS-only) yolov5s_saved_model # TensorFlow SavedModel diff --git a/export.py b/export.py index 66d4d636133a..93845a0c14fa 100644 --- a/export.py +++ b/export.py @@ -28,7 +28,7 @@ $ python detect.py --weights yolov5s.pt # PyTorch yolov5s.torchscript # TorchScript yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn - yolov5s.xml # OpenVINO + yolov5s_openvino_model # OpenVINO yolov5s.engine # TensorRT yolov5s.mlmodel # CoreML (macOS-only) yolov5s_saved_model # TensorFlow SavedModel diff --git a/models/common.py b/models/common.py index ba18cbce7429..af8132fffb7a 100644 --- a/models/common.py +++ b/models/common.py @@ -318,7 +318,7 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, # TorchScript: *.torchscript # ONNX Runtime: *.onnx # ONNX OpenCV DNN: *.onnx --dnn - # OpenVINO: *.xml + # OpenVINO: *_openvino_model # CoreML: *.mlmodel # TensorRT: *.engine # TensorFlow SavedModel: *_saved_model @@ -469,7 +469,7 @@ def gd_outputs(gd): check_requirements('paddlepaddle-gpu' if cuda else 'paddlepaddle') import paddle.inference as pdi if not Path(w).is_file(): # if not *.pdmodel - w = next(Path(w).rglob('*.pdmodel')) # get *.xml file from *_openvino_model dir + w = next(Path(w).rglob('*.pdmodel')) # get *.pdmodel file from *_paddle_model dir weights = Path(w).with_suffix('.pdiparams') config = pdi.Config(str(w), str(weights)) if cuda: diff --git a/segment/predict.py b/segment/predict.py index 44d6d3904c19..3ae68240726a 100644 --- a/segment/predict.py +++ b/segment/predict.py @@ -15,7 +15,7 @@ $ python segment/predict.py --weights yolov5s-seg.pt # PyTorch yolov5s-seg.torchscript # TorchScript yolov5s-seg.onnx # ONNX Runtime or OpenCV DNN with --dnn - yolov5s-seg.xml # OpenVINO + yolov5s-seg_openvino_model # OpenVINO yolov5s-seg.engine # TensorRT yolov5s-seg.mlmodel # CoreML (macOS-only) yolov5s-seg_saved_model # TensorFlow SavedModel diff --git a/segment/val.py b/segment/val.py index f1ec54638d61..a875b3b79907 100644 --- a/segment/val.py +++ b/segment/val.py @@ -4,13 +4,13 @@ Usage: $ bash data/scripts/get_coco.sh --val --segments # download COCO-segments val split (1G, 5000 images) - $ python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640- # validate COCO-segments + $ python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # validate COCO-segments Usage - formats: $ python segment/val.py --weights yolov5s-seg.pt # PyTorch yolov5s-seg.torchscript # TorchScript yolov5s-seg.onnx # ONNX Runtime or OpenCV DNN with --dnn - yolov5s-seg.xml # OpenVINO + yolov5s-seg_openvino_label # OpenVINO yolov5s-seg.engine # TensorRT yolov5s-seg.mlmodel # CoreML (macOS-only) yolov5s-seg_saved_model # TensorFlow SavedModel diff --git a/val.py b/val.py index ca838c0beb2f..127acf810029 100644 --- a/val.py +++ b/val.py @@ -9,7 +9,7 @@ $ python val.py --weights yolov5s.pt # PyTorch yolov5s.torchscript # TorchScript yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn - yolov5s.xml 
# OpenVINO + yolov5s_openvino_model # OpenVINO yolov5s.engine # TensorRT yolov5s.mlmodel # CoreML (macOS-only) yolov5s_saved_model # TensorFlow SavedModel From eef90572bf11602b17816a1721980cdb07a95eb2 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 20 Oct 2022 20:16:58 +0200 Subject: [PATCH 1515/1976] ClearML Dockerfile fix (#9876) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index be5c2fb71517..05776510e160 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -16,7 +16,7 @@ RUN apt update && apt install --no-install-recommends -y zip htop screen libgl1- COPY requirements.txt . RUN python -m pip install --upgrade pip wheel RUN pip uninstall -y Pillow torchtext torch torchvision -RUN pip install --no-cache -r requirements.txt albumentations comet clearml gsutil notebook Pillow>=9.1.0 \ +RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook Pillow>=9.1.0 \ 'opencv-python<4.6.0.66' \ --extra-index-url https://download.pytorch.org/whl/cu113 From fba61e55836273847947498c01314499d8e5e7dc Mon Sep 17 00:00:00 2001 From: SSTato <1210546396@qq.com> Date: Mon, 24 Oct 2022 22:20:47 +0800 Subject: [PATCH 1516/1976] Windows Python 3.7 .isfile() fix (#9879) * Update dataloaders.py Signed-off-by: SSTato <1210546396@qq.com> * Update general.py Signed-off-by: SSTato <1210546396@qq.com> * Update general.py Signed-off-by: Glenn Jocher * Update dataloaders.py Signed-off-by: Glenn Jocher * Update dataloaders.py Signed-off-by: Glenn Jocher * Update general.py Signed-off-by: SSTato <1210546396@qq.com> * Update ci-testing.yml Signed-off-by: SSTato <1210546396@qq.com> * Update ci-testing.yml Signed-off-by: SSTato <1210546396@qq.com> * Update ci-testing.yml Signed-off-by: SSTato <1210546396@qq.com> * Update general.py Signed-off-by: SSTato <1210546396@qq.com> * Update general.py Signed-off-by: SSTato <1210546396@qq.com> * Update dataloaders.py Signed-off-by: SSTato <1210546396@qq.com> Signed-off-by: SSTato <1210546396@qq.com> Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- utils/dataloaders.py | 2 +- utils/general.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 37b3ffb2728b..403252ff6227 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -344,7 +344,7 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True, tr self.img_size = img_size self.stride = stride self.vid_stride = vid_stride # video frame-rate stride - sources = Path(sources).read_text().rsplit() if Path(sources).is_file() else [sources] + sources = Path(sources).read_text().rsplit() if os.path.isfile(sources) else [sources] n = len(sources) self.sources = [clean_str(x) for x in sources] # clean source names for later self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n diff --git a/utils/general.py b/utils/general.py index e1823b50ac56..46978f1b8d7b 100644 --- a/utils/general.py +++ b/utils/general.py @@ -426,12 +426,12 @@ def check_file(file, suffix=''): # Search/download file (if necessary) and return path check_suffix(file, suffix) # optional file = str(file) # convert to str() - if Path(file).is_file() or not file: # exists + if os.path.isfile(file) or not file: # exists return file elif file.startswith(('http:/', 'https:/')): # download url = file # warning: Pathlib turns :// -> :/ 
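# Note on the os.path.isfile() switch above (a sketch, not part of the patch):
# on Windows under Python 3.7, Path(s).is_file() can raise OSError for strings
# containing characters that are illegal in Windows paths (e.g. URLs such as
# 'https://url.com/file.txt?auth'), whereas os.path.isfile(s) swallows the
# error and simply returns False:
#   import os
#   from pathlib import Path
#   s = 'https://ultralytics.com/images/zidane.jpg'  # a URL, not a path
#   os.path.isfile(s)  # False on all platforms
#   Path(s).is_file()  # may raise OSError on Windows Python 3.7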
file = Path(urllib.parse.unquote(file).split('?')[0]).name # '%2F' to '/', split https://url.com/file.txt?auth - if Path(file).is_file(): + if os.path.isfile(file): LOGGER.info(f'Found {url} locally at {file}') # file already exists else: LOGGER.info(f'Downloading {url} to {file}...') @@ -586,7 +586,7 @@ def download(url, dir='.', unzip=True, delete=True, curl=False, threads=1, retry def download_one(url, dir): # Download 1 file success = True - if Path(url).is_file(): + if os.path.isfile(url): f = Path(url) # filename else: # does not exist f = dir / Path(url).name From 54f49fa581aac1d9909636bfc13f94001b08b55b Mon Sep 17 00:00:00 2001 From: paradigm Date: Tue, 25 Oct 2022 17:53:22 +0200 Subject: [PATCH 1517/1976] Add TFLite Metadata to TFLite and Edge TPU models (#9903) * added embedded metadata to tflite models * added try block for inference * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * refactored tflite metadata into separate function * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Create tmp file in /tmp * Update export.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update export.py * Update export.py * Update export.py * Update export.py * Update common.py * Update export.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update common.py Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- export.py | 39 +++++++++++++++++++++++++++++++++++++-- models/common.py | 9 +++++++++ 2 files changed, 46 insertions(+), 2 deletions(-) diff --git a/export.py b/export.py index 93845a0c14fa..e43d9b730fc6 100644 --- a/export.py +++ b/export.py @@ -45,6 +45,7 @@ """ import argparse +import contextlib import json import os import platform @@ -453,6 +454,39 @@ def export_tfjs(file, prefix=colorstr('TensorFlow.js:')): return f, None +def add_tflite_metadata(file, metadata, num_outputs): + # Add metadata to *.tflite models per https://www.tensorflow.org/lite/models/convert/metadata + with contextlib.suppress(ImportError): + # check_requirements('tflite_support') + from tflite_support import flatbuffers + from tflite_support import metadata as _metadata + from tflite_support import metadata_schema_py_generated as _metadata_fb + + tmp_file = Path('/tmp/meta.txt') + with open(tmp_file, 'w') as meta_f: + meta_f.write(str(metadata)) + + model_meta = _metadata_fb.ModelMetadataT() + label_file = _metadata_fb.AssociatedFileT() + label_file.name = tmp_file.name + model_meta.associatedFiles = [label_file] + + subgraph = _metadata_fb.SubGraphMetadataT() + subgraph.inputTensorMetadata = [_metadata_fb.TensorMetadataT()] + subgraph.outputTensorMetadata = [_metadata_fb.TensorMetadataT()] * num_outputs + model_meta.subgraphMetadata = [subgraph] + + b = flatbuffers.Builder(0) + b.Finish(model_meta.Pack(b), _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER) + metadata_buf = b.Output() + + populator = _metadata.MetadataPopulator.with_model_file(file) + populator.load_metadata_buffer(metadata_buf) + populator.load_associated_files([str(tmp_file)]) + populator.populate() + tmp_file.unlink() + + @smart_inference_mode() def run( data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' @@ -550,8 +584,9 @@ def run( f[6], _ = export_pb(s_model, file) if tflite or edgetpu: f[7], _ = export_tflite(s_model, im, file, int8 or
edgetpu, data=data, nms=nms, agnostic_nms=agnostic_nms) - if edgetpu: - f[8], _ = export_edgetpu(file) + if edgetpu: + f[8], _ = export_edgetpu(file) + add_tflite_metadata(f[8] or f[7], metadata, num_outputs=len(s_model.outputs)) if tfjs: f[9], _ = export_tfjs(file) if paddle: # PaddlePaddle diff --git a/models/common.py b/models/common.py index af8132fffb7a..6347e51cdf0b 100644 --- a/models/common.py +++ b/models/common.py @@ -3,10 +3,13 @@ Common modules """ +import ast +import contextlib import json import math import platform import warnings +import zipfile from collections import OrderedDict, namedtuple from copy import copy from pathlib import Path @@ -462,6 +465,12 @@ def gd_outputs(gd): interpreter.allocate_tensors() # allocate input_details = interpreter.get_input_details() # inputs output_details = interpreter.get_output_details() # outputs + # load metadata + with contextlib.suppress(zipfile.BadZipFile): + with zipfile.ZipFile(w, "r") as model: + meta_file = model.namelist()[0] + meta = ast.literal_eval(model.read(meta_file).decode("utf-8")) + stride, names = int(meta['stride']), meta['names'] elif tfjs: # TF.js raise NotImplementedError('ERROR: YOLOv5 TF.js inference is not supported') elif paddle: # PaddlePaddle From 8236d8818bca21c692d5c4508fee2af835ec1dbe Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 25 Oct 2022 18:13:48 +0200 Subject: [PATCH 1518/1976] Created using Colaboratory --- tutorial.ipynb | 141 +++---------------------------------------------- 1 file changed, 6 insertions(+), 135 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 63abebc5b37f..10e14b9b1208 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -954,7 +954,7 @@ "source": [ "# Appendix\n", "\n", - "Additional content below for PyTorch Hub, CI, reproducing results, profiling speeds, VOC training, classification training and TensorRT example." + "Additional content below." ] }, { @@ -963,145 +963,16 @@ "id": "GMusP4OAxFu6" }, "source": [ + "# YOLOv5 PyTorch HUB Inference (DetectionModels only)\n", "import torch\n", "\n", - "# PyTorch Hub Model\n", - "model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # or yolov5n - yolov5x6, custom\n", - "\n", - "# Images\n", - "img = 'https://ultralytics.com/images/zidane.jpg' # or file, Path, PIL, OpenCV, numpy, list\n", - "\n", - "# Inference\n", - "results = model(img)\n", - "\n", - "# Results\n", + "model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # yolov5n - yolov5x6 or custom\n", + "im = 'https://ultralytics.com/images/zidane.jpg' # file, Path, PIL.Image, OpenCV, nparray, list\n", + "results = model(im) # inference\n", "results.print() # or .show(), .save(), .crop(), .pandas(), etc." 
], "execution_count": null, "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "FGH0ZjkGjejy" - }, - "source": [ - "# YOLOv5 CI\n", - "%%shell\n", - "rm -rf runs # remove runs/\n", - "m=yolov5n # official weights\n", - "b=runs/train/exp/weights/best # best.pt checkpoint\n", - "python train.py --imgsz 64 --batch 32 --weights $m.pt --cfg $m.yaml --epochs 1 --device 0 # train\n", - "for d in 0 cpu; do # devices\n", - " for w in $m $b; do # weights\n", - " python val.py --imgsz 64 --batch 32 --weights $w.pt --device $d # val\n", - " python detect.py --imgsz 64 --weights $w.pt --device $d # detect\n", - " done\n", - "done\n", - "python hubconf.py --model $m # hub\n", - "python models/tf.py --weights $m.pt # build TF model\n", - "python models/yolo.py --cfg $m.yaml # build PyTorch model\n", - "python export.py --weights $m.pt --img 64 --include torchscript # export" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "mcKoSIK2WSzj" - }, - "source": [ - "# Reproduce\n", - "for x in (f'yolov5{x}' for x in 'nsmlx'):\n", - " !python val.py --weights {x}.pt --data coco.yaml --img 640 --task speed # speed\n", - " !python val.py --weights {x}.pt --data coco.yaml --img 640 --conf 0.001 --iou 0.65 # mAP" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "gogI-kwi3Tye" - }, - "source": [ - "# Profile\n", - "from utils.torch_utils import profile\n", - "\n", - "m1 = lambda x: x * torch.sigmoid(x)\n", - "m2 = torch.nn.SiLU()\n", - "results = profile(input=torch.randn(16, 3, 640, 640), ops=[m1, m2], n=100)" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "BSgFCAcMbk1R" - }, - "source": [ - "# VOC\n", - "for b, m in zip([64, 64, 64, 32, 16], [f'yolov5{x}' for x in 'nsmlx']): # batch, model\n", - " !python train.py --batch {b} --weights {m}.pt --data VOC.yaml --epochs 50 --img 512 --hyp hyp.VOC.yaml --project VOC --name {m} --cache" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "# Classification train\n", - "for m in [*(f'yolov5{x}-cls.pt' for x in 'nsmlx'), 'resnet50.pt', 'resnet101.pt', 'efficientnet_b0.pt', 'efficientnet_b1.pt']:\n", - " for d in 'mnist', 'fashion-mnist', 'cifar10', 'cifar100', 'imagenette160', 'imagenette320', 'imagenette', 'imagewoof160', 'imagewoof320', 'imagewoof':\n", - " !python classify/train.py --model {m} --data {d} --epochs 10 --project YOLOv5-cls --name {m}-{d}" - ], - "metadata": { - "id": "UWGH7H6yakVl" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "# Classification val\n", - "!bash data/scripts/get_imagenet.sh --val # download ImageNet val split (6.3G - 50000 images)\n", - "!python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224 # validate" - ], - "metadata": { - "id": "yYgOiFNHZx-1" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "source": [ - "# Validate on COCO test. 
Zip results.json and submit to eval server at https://competitions.codalab.org/competitions/20794\n", - "!bash data/scripts/get_coco.sh --test # download COCO test-dev2017 (7G - 40000 images, test 20000)\n", - "!python val.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65 --half --task test" - ], - "metadata": { - "id": "aq4DPWGu0Bl1" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "VTRwsvA9u7ln" - }, - "source": [ - "# TensorRT \n", - "!pip install -U nvidia-tensorrt --index-url https://pypi.ngc.nvidia.com # install\n", - "!python export.py --weights yolov5s.pt --include engine --imgsz 640 --device 0 # export\n", - "!python detect.py --weights yolov5s.engine --imgsz 640 --device 0 # inference" - ], - "execution_count": null, - "outputs": [] } ] -} +} \ No newline at end of file From a5d875adcac05f8f68329c2cb742aba742d1953d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 26 Oct 2022 13:42:52 +0200 Subject: [PATCH 1519/1976] Add `gnupg` to Dockerfile-cpu (#9932) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile-cpu | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/docker/Dockerfile-cpu b/utils/docker/Dockerfile-cpu index d6fac645dba1..f3f81ec02c23 100644 --- a/utils/docker/Dockerfile-cpu +++ b/utils/docker/Dockerfile-cpu @@ -11,7 +11,7 @@ ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Aria # Install linux packages RUN apt update RUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt install -y tzdata -RUN apt install --no-install-recommends -y python3-pip git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev +RUN apt install --no-install-recommends -y python3-pip git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg # RUN alias python=python3 # Install pip packages From f9bb984e817a71a90490ed3a4655fb7ad408d8fb Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 26 Oct 2022 14:06:05 +0200 Subject: [PATCH 1520/1976] Add ClearML minimum version requirement (#9933) * Add ClearML minimum version requirement Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- requirements.txt | 2 +- utils/loggers/clearml/README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index 52f7b9ea57d2..8cb1bd4c6fe1 100644 --- a/requirements.txt +++ b/requirements.txt @@ -16,7 +16,7 @@ tqdm>=4.64.0 # Logging ------------------------------------- tensorboard>=2.4.1 -# clearml +# clearml>=1.2.0 # comet # Plotting ------------------------------------ diff --git a/utils/loggers/clearml/README.md b/utils/loggers/clearml/README.md index 64eef6befc93..e0c5824bc2a2 100644 --- a/utils/loggers/clearml/README.md +++ b/utils/loggers/clearml/README.md @@ -54,7 +54,7 @@ That's it! You're done 😎 To enable ClearML experiment tracking, simply install the ClearML pip package. ```bash -pip install clearml +pip install clearml>=1.2.0 ``` This will enable integration with the YOLOv5 training script. Every training run from now on, will be captured and stored by the ClearML experiment manager. 
If you want to change the `project_name` or `task_name`, head over to our custom logger, where you can change it: `utils/loggers/clearml/clearml_utils.py` From 32a92185738c93e5f0b0f6971de0812cd6fd5f34 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 26 Oct 2022 23:51:40 +0200 Subject: [PATCH 1521/1976] Update Comet Integrations table text (#9937) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 52f2854dd601..dc21ad8d6639 100644 --- a/README.md +++ b/README.md @@ -183,7 +183,7 @@ python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 12 |Roboflow|ClearML ⭐ NEW|Comet ⭐ NEW|Deci ⭐ NEW| |:-:|:-:|:-:|:-:| -|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Visualize model metrics and predictions and upload models and datasets in realtime with [Comet](https://bit.ly/yolov5-readme-comet)|Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)| +|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Free forever, [Comet](https://bit.ly/yolov5-readme-comet) lets you save YOLOv5 models, resume training, and interactively visualise and debug predictions|Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)| ##
Why YOLOv5
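For reference, the metadata embedded by `add_tflite_metadata()` in #9903 above can be read back with the standard library alone; a minimal sketch mirroring the `models/common.py` change from that commit (the model path is a placeholder):

```python
import ast
import zipfile

w = 'yolov5s-fp16.tflite'  # placeholder path to an exported, metadata-populated model
with zipfile.ZipFile(w, 'r') as model:  # a populated .tflite file is also a valid zip archive
    meta_file = model.namelist()[0]     # the 'meta.txt' written by add_tflite_metadata()
    meta = ast.literal_eval(model.read(meta_file).decode('utf-8'))  # dict literal -> dict
print(int(meta['stride']), meta['names'])  # stride and class names stored at export time
```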
From 38e5aae9a20522b69e21629f1558ab8902b351f6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 28 Oct 2022 17:37:25 +0200 Subject: [PATCH 1522/1976] Update README.md (#9957) * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README_cn.md Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- .github/README_cn.md | 7 +------ README.md | 12 ++++-------- 2 files changed, 5 insertions(+), 14 deletions(-) diff --git a/.github/README_cn.md b/.github/README_cn.md index 7e8aa6f7f087..981fd8a5b820 100644 --- a/.github/README_cn.md +++ b/.github/README_cn.md @@ -1,12 +1,7 @@

- -

- -   - - +

[English](../README.md) | 简体中文 diff --git a/README.md b/README.md index dc21ad8d6639..98cad8de4294 100644 --- a/README.md +++ b/README.md @@ -1,12 +1,7 @@

- -

- -   - - +

English | [简体中文](.github/README_cn.md) @@ -23,8 +18,9 @@

- YOLOv5 🚀 is a family of object detection architectures and models pretrained on the COCO dataset, and represents Ultralytics - open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development. + YOLOv5 🚀 is the world's most loved vision AI, representing Ultralytics + open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development. To request a commercial license please complete the form at Ultralytics Licensing. +
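Relatedly, the ClearML experiment tracking covered by #9876 and #9933 above is enabled simply by having the package installed; a minimal sketch of the kind of call the YOLOv5 ClearML logger makes, assuming `clearml>=1.2.0` is installed and `clearml-init` has been run (project and task names are illustrative):

```python
from clearml import Task

# Once a Task exists, ClearML captures metrics, checkpoints, uncommitted changes
# and installed packages for every subsequent YOLOv5 training run.
task = Task.init(project_name='YOLOv5', task_name='training')  # illustrative names
```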

From be9ef3871e85d6e06b736f08a1c9f1d01998afe6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 28 Oct 2022 17:48:41 +0200 Subject: [PATCH 1523/1976] Update README.md (#9958) * Update README.md Signed-off-by: Glenn Jocher * Update README_cn.md Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update tutorial.ipynb Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/README_cn.md | 2 +- README.md | 2 +- tutorial.ipynb | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/README_cn.md b/.github/README_cn.md index 981fd8a5b820..d0cf6b9ff3bd 100644 --- a/.github/README_cn.md +++ b/.github/README_cn.md @@ -1,7 +1,7 @@

- +

[English](../README.md) | 简体中文 diff --git a/README.md b/README.md index 98cad8de4294..64a2e9001538 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@

- +

English | [简体中文](.github/README_cn.md) diff --git a/tutorial.ipynb b/tutorial.ipynb index 10e14b9b1208..b40f08ef20b3 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -371,7 +371,7 @@ "
\n", "\n", " \n", - " \n", + " \n", "\n", "\n", "
\n", @@ -975,4 +975,4 @@ "outputs": [] } ] -} \ No newline at end of file +} From 6db0fac66cfb78697af21dc12d434774e4ccbcab Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 28 Oct 2022 18:25:33 +0200 Subject: [PATCH 1524/1976] Update README.md (#9961) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/README.md b/README.md index 64a2e9001538..8dbf527c2dbd 100644 --- a/README.md +++ b/README.md @@ -339,8 +339,7 @@ We love your input! We want to make contributing to YOLOv5 as easy and transpare ##
Contact
-For YOLOv5 bugs and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues). For business inquiries or -professional support requests please visit [https://ultralytics.com/contact](https://ultralytics.com/contact). +For YOLOv5 bugs and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues). For professional support please [Contact Us](https://ultralytics.com/contact). To request a commercial license please complete the form at [Ultralytics Licensing](https://ultralytics.com/license).
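The commit just below (#9963) replaces suffix checks with content-based archive detection; a minimal sketch of that idea using only the standard library (the filename is a placeholder):

```python
from tarfile import is_tarfile
from zipfile import is_zipfile

f = 'coco128.zip'  # placeholder; detection works even when the suffix is misleading
if is_zipfile(f):       # inspects the file's magic bytes, not its extension
    print('zip archive')
elif is_tarfile(f):
    print('tar archive')
else:
    print('not an archive')
```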
From 575055ce7028ee99618ff1c5c0e8919e8e2cd849 Mon Sep 17 00:00:00 2001 From: Kalen Michael Date: Fri, 28 Oct 2022 21:16:03 +0200 Subject: [PATCH 1525/1976] Switch from suffix checks to archive checks (#9963) * fix: switched from suffix checks to archive checks * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Cleanup seems like both functions accept Path type input so removing str() * import always Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- utils/general.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/utils/general.py b/utils/general.py index 46978f1b8d7b..88cefb7bb662 100644 --- a/utils/general.py +++ b/utils/general.py @@ -23,8 +23,9 @@ from multiprocessing.pool import ThreadPool from pathlib import Path from subprocess import check_output +from tarfile import is_tarfile from typing import Optional -from zipfile import ZipFile +from zipfile import ZipFile, is_zipfile import cv2 import IPython @@ -465,7 +466,7 @@ def check_dataset(data, autodownload=True): # Download (optional) extract_dir = '' - if isinstance(data, (str, Path)) and str(data).endswith('.zip'): # i.e. gs://bucket/dir/coco128.zip + if isinstance(data, (str, Path)) and (is_zipfile(data) or is_tarfile(data)): download(data, dir=f'{DATASETS_DIR}/{Path(data).stem}', unzip=True, delete=False, curl=False, threads=1) data = next((DATASETS_DIR / Path(data).stem).rglob('*.yaml')) extract_dir, autodownload = data.parent, False @@ -607,11 +608,11 @@ def download_one(url, dir): else: LOGGER.warning(f'❌ Failed to download {url}...') - if unzip and success and f.suffix in ('.zip', '.tar', '.gz'): + if unzip and success and (f.suffix == '.gz' or is_zipfile(f) or is_tarfile(f)): LOGGER.info(f'Unzipping {f}...') - if f.suffix == '.zip': + if is_zipfile(f): unzip_file(f, dir) # unzip - elif f.suffix == '.tar': + elif is_tarfile(f): os.system(f'tar xf {f} --directory {f.parent}') # unzip elif f.suffix == '.gz': os.system(f'tar xfz {f} --directory {f.parent}') # unzip From 6e544d5f7c0b699c7c6002074b822a03308bbe3c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 29 Oct 2022 13:31:01 +0200 Subject: [PATCH 1526/1976] FROM nvcr.io/nvidia/pytorch:22.10-py3 (#9966) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index 05776510e160..87605456a5d9 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -3,7 +3,7 @@ # Image is CUDA-optimized for YOLOv5 single/multi-GPU training and inference # Start FROM NVIDIA PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch -FROM nvcr.io/nvidia/pytorch:22.09-py3 +FROM nvcr.io/nvidia/pytorch:22.10-py3 RUN rm -rf /opt/pytorch # remove 1.2GB dir # Downloads to user config dir From a625f29967d09beeee1f010313a05ad7d5997c32 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 30 Oct 2022 22:09:36 +0100 Subject: [PATCH 1527/1976] Full-size proto code (optional) (#9980) * Update tf.py Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update tf.py Signed-off-by: Glenn Jocher * Update tf.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- models/tf.py | 5 +++-- 1 file changed, 3 
insertions(+), 2 deletions(-) diff --git a/models/tf.py b/models/tf.py index 1446d8841646..3f3dc8dbe7e7 100644 --- a/models/tf.py +++ b/models/tf.py @@ -333,6 +333,7 @@ def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), imgsz=(640, 640), w def call(self, x): p = self.proto(x[0]) + # p = TFUpsample(None, scale_factor=4, mode='nearest')(self.proto(x[0])) # (optional) full-size protos p = tf.transpose(p, [0, 3, 1, 2]) # from shape(1,160,160,32) to shape(1,32,160,160) x = self.detect(self, x) return (x, p) if self.training else (x[0], p) @@ -355,8 +356,8 @@ class TFUpsample(keras.layers.Layer): # TF version of torch.nn.Upsample() def __init__(self, size, scale_factor, mode, w=None): # warning: all arguments needed including 'w' super().__init__() - assert scale_factor == 2, "scale_factor must be 2" - self.upsample = lambda x: tf.image.resize(x, (x.shape[1] * 2, x.shape[2] * 2), method=mode) + assert scale_factor % 2 == 0, "scale_factor must be multiple of 2" + self.upsample = lambda x: tf.image.resize(x, (x.shape[1] * scale_factor, x.shape[2] * scale_factor), mode) # self.upsample = keras.layers.UpSampling2D(size=scale_factor, interpolation=mode) # with default arguments: align_corners=False, half_pixel_centers=False # self.upsample = lambda x: tf.raw_ops.ResizeNearestNeighbor(images=x, From e704970f7f606d6d3e58641e9384f38b532aa846 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 31 Oct 2022 12:43:17 +0100 Subject: [PATCH 1528/1976] Update README.md (#9970) * Update README.md @taliabender updated spacing per our convo Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update README.md Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- README.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 8dbf527c2dbd..7cb4d09446ca 100644 --- a/README.md +++ b/README.md @@ -19,8 +19,10 @@

YOLOv5 🚀 is the world's most loved vision AI, representing Ultralytics - open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development. To request a commercial license please complete the form at Ultralytics Licensing. - + open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development. +

+ To request a commercial license please complete the form at Ultralytics Licensing. +
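A quick illustration of the `TFUpsample` change in #9980 above, which generalizes the layer to any even scale factor via `tf.image.resize`; a sketch assuming TensorFlow is installed (shapes are illustrative):

```python
import tensorflow as tf

x = tf.random.uniform((1, 160, 160, 32))  # NHWC feature map, e.g. 160x160 protos
scale_factor = 4                          # must be a multiple of 2 per the new assert
y = tf.image.resize(x, (x.shape[1] * scale_factor, x.shape[2] * scale_factor), 'nearest')
print(y.shape)  # (1, 640, 640, 32), i.e. full-size protos
```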

From a83d2a50132982fa89a22420155f6c9f097a92c7 Mon Sep 17 00:00:00 2001 From: Paul Guerrie <97041392+paulguerrie@users.noreply.github.com> Date: Mon, 31 Oct 2022 15:25:11 +0100 Subject: [PATCH 1529/1976] Segmentation Tutorial (#9521) * Added a tutorial notebook for segmentation. * Updated header for segmentation tutorial and included other YOLOv5 sponsor sections. * Updated segmentation tutorial to match main object detection tutorial. * Update tutorial.ipynb Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: Ayush Chaurasia Co-authored-by: Glenn Jocher --- segment/tutorial.ipynb | 956 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 956 insertions(+) create mode 100644 segment/tutorial.ipynb diff --git a/segment/tutorial.ipynb b/segment/tutorial.ipynb new file mode 100644 index 000000000000..47559978be74 --- /dev/null +++ b/segment/tutorial.ipynb @@ -0,0 +1,956 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "t6MPjfT5NrKQ" + }, + "source": [ + "
\n", + "\n", + " \n", + " \n", + "\n", + "\n", + "
\n", + " \"Run\n", + " \"Open\n", + " \"Open\n", + "
\n", + "\n", + "This YOLOv5 🚀 notebook by Ultralytics presents simple train, validate and predict examples to help start your AI adventure.
See GitHub for community support or contact us for professional support.\n", + "\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "7mGmQbAO5pQb" + }, + "source": [ + "# Setup\n", + "\n", + "Clone GitHub [repository](https://github.com/ultralytics/yolov5), install [dependencies](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) and check PyTorch and GPU." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "wbvMlHd_QwMG", + "outputId": "0f9ee467-cea4-48e8-9050-7a76ae1b6141" + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "YOLOv5 🚀 v6.2-225-gf223cb2 Python-3.7.12 torch-1.12.1+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16161MiB)\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Setup complete ✅ (4 CPUs, 14.7 GB RAM, 107.3/196.6 GB disk)\n" + ] + } + ], + "source": [ + "!git clone https://github.com/ultralytics/yolov5 # clone\n", + "%cd yolov5\n", + "%pip install -qr requirements.txt # install\n", + "\n", + "import torch\n", + "import utils\n", + "display = utils.notebook_init() # checks" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "4JnkELT0cIJg" + }, + "source": [ + "# 1. Predict\n", + "\n", + "`segment/predict.py` runs YOLOv5 instance segmentation inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/predict`. Example inference sources are:\n", + "\n", + "```shell\n", + "python segment/predict.py --source 0 # webcam\n", + " img.jpg # image \n", + " vid.mp4 # video\n", + " screen # screenshot\n", + " path/ # directory\n", + " 'path/*.jpg' # glob\n", + " 'https://youtu.be/Zgi9g1ksQHc' # YouTube\n", + " 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "zR9ZbuQCH7FX", + "outputId": "60647b99-e8d4-402c-f444-331bf6746da4" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[34m\u001b[1msegment/predict: \u001b[0mweights=['yolov5s-seg.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/predict-seg, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False, vid_stride=1, retina_masks=False\n", + "YOLOv5 🚀 v6.2-225-gf223cb2 Python-3.7.12 torch-1.12.1+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16161MiB)\n", + "\n", + "Fusing layers... 
\n", + "YOLOv5s-seg summary: 224 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\n", + "image 1/2 /home/paguerrie/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 5.6ms\n", + "image 2/2 /home/paguerrie/yolov5/data/images/zidane.jpg: 384x640 2 persons, 1 tie, 5.5ms\n", + "Speed: 0.4ms pre-process, 5.6ms inference, 1.1ms NMS per image at shape (1, 3, 640, 640)\n", + "Results saved to \u001b[1mruns/predict-seg/exp\u001b[0m\n" + ] + } + ], + "source": [ + "!python segment/predict.py --weights yolov5s-seg.pt --img 640 --conf 0.25 --source data/images\n", + "#display.Image(filename='runs/predict-seg/exp/zidane.jpg', width=600)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "hkAzDWJ7cWTr" + }, + "source": [ + "        \n", + "" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "0eq1SMWl6Sfn" + }, + "source": [ + "# 2. Validate\n", + "Validate a model's accuracy on the [COCO](https://cocodataset.org/#home) dataset's `val` or `test` splits. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 49, + "referenced_widgets": [ + "9b8caa3522fc4cbab31e13b5dfc7808d", + "574140e4c4bc48c9a171541a02cd0211", + "35e03ce5090346c9ae602891470fc555", + "c942c208e72d46568b476bb0f2d75496", + "65881db1db8a4e9c930fab9172d45143", + "60b913d755b34d638478e30705a2dde1", + "0856bea36ec148b68522ff9c9eb258d8", + "76879f6f2aa54637a7a07faeea2bd684", + "0ace3934ec6f4d36a1b3a9e086390926", + "d6b7a2243e0c4beca714d99dceec23d6", + "5966ba6e6f114d8c9d8d1d6b1bd4f4c7" + ] + }, + "id": "WQPtK1QYVaD_", + "outputId": "102dabed-bc31-42fe-9133-d9ce28a2c01e" + }, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "89f5f0a84ca642378724f1bf05f17e0d", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + " 0%| | 0.00/6.79M [00:00

\n", + "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", + "

\n", + "\n", + "Train a YOLOv5s-seg model on the [COCO128](https://www.kaggle.com/ultralytics/coco128) dataset with `--data coco128-seg.yaml`, starting from pretrained `--weights yolov5s-seg.pt`, or from randomly initialized `--weights '' --cfg yolov5s-seg.yaml`.\n", + "\n", + "- **Pretrained [Models](https://github.com/ultralytics/yolov5/tree/master/models)** are downloaded\n", + "automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\n", + "- **[Datasets](https://github.com/ultralytics/yolov5/tree/master/data)** available for autodownload include: [COCO](https://github.com/ultralytics/yolov5/blob/master/data/coco.yaml), [COCO128](https://github.com/ultralytics/yolov5/blob/master/data/coco128.yaml), [VOC](https://github.com/ultralytics/yolov5/blob/master/data/VOC.yaml), [Argoverse](https://github.com/ultralytics/yolov5/blob/master/data/Argoverse.yaml), [VisDrone](https://github.com/ultralytics/yolov5/blob/master/data/VisDrone.yaml), [GlobalWheat](https://github.com/ultralytics/yolov5/blob/master/data/GlobalWheat2020.yaml), [xView](https://github.com/ultralytics/yolov5/blob/master/data/xView.yaml), [Objects365](https://github.com/ultralytics/yolov5/blob/master/data/Objects365.yaml), [SKU-110K](https://github.com/ultralytics/yolov5/blob/master/data/SKU-110K.yaml).\n", + "- **Training Results** are saved to `runs/train-seg/` with incrementing run directories, i.e. `runs/train-seg/exp2`, `runs/train-seg/exp3` etc.\n", + "

\n", + "\n", + "A **Mosaic Dataloader** is used for training which combines 4 images into 1 mosaic.\n", + "\n", + "## Train on Custom Data with Roboflow 🌟 NEW\n", + "\n", + "[Roboflow](https://roboflow.com/?ref=ultralytics) enables you to easily **organize, label, and prepare** a high quality dataset with your own custom data. Roboflow also makes it easy to establish an active learning pipeline, collaborate with your team on dataset improvement, and integrate directly into your model building workflow with the `roboflow` pip package.\n", + "\n", + "- Custom Training Example: [https://blog.roboflow.com/train-yolov5-instance-segmentation-custom-dataset/](https://blog.roboflow.com/train-yolov5-instance-segmentation-custom-dataset/?ref=ultralytics)\n", + "- Custom Training Notebook: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1JTz7kpmHsg-5qwVz2d2IH3AaenI1tv0N?usp=sharing)\n", + "
\n", + "\n", + "

Label images lightning fast (including with model-assisted labeling)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "i3oKtE4g-aNn" + }, + "outputs": [], + "source": [ + "#@title Select YOLOv5 🚀 logger {run: 'auto'}\n", + "logger = 'TensorBoard' #@param ['TensorBoard', 'Comet', 'ClearML']\n", + "\n", + "if logger == 'TensorBoard':\n", + " %load_ext tensorboard\n", + " %tensorboard --logdir runs/train-seg\n", + "elif logger == 'Comet':\n", + " %pip install -q comet_ml\n", + " import comet_ml; comet_ml.init()\n", + "elif logger == 'ClearML':\n", + " %pip install -q clearml && clearml-init" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "1NcFxRcFdJ_O", + "outputId": "baa6d4be-3379-4aab-844a-d5a5396c0e49" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[34m\u001b[1msegment/train: \u001b[0mweights=yolov5s-seg.pt, cfg=, data=coco128-seg.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train-seg, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, mask_ratio=4, no_overlap=False\n", + "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", + "YOLOv5 🚀 v6.2-225-gf223cb2 Python-3.7.12 torch-1.12.1+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16161MiB)\n", + "\n", + "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n", + "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train-seg', view at http://localhost:6006/\n", + "\n", + " from n params module arguments \n", + " 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2] \n", + " 1 -1 1 18560 models.common.Conv [32, 64, 3, 2] \n", + " 2 -1 1 18816 models.common.C3 [64, 64, 1] \n", + " 3 -1 1 73984 models.common.Conv [64, 128, 3, 2] \n", + " 4 -1 2 115712 models.common.C3 [128, 128, 2] \n", + " 5 -1 1 295424 models.common.Conv [128, 256, 3, 2] \n", + " 6 -1 3 625152 models.common.C3 [256, 256, 3] \n", + " 7 -1 1 1180672 models.common.Conv [256, 512, 3, 2] \n", + " 8 -1 1 1182720 models.common.C3 [512, 512, 1] \n", + " 9 -1 1 656896 models.common.SPPF [512, 512, 5] \n", + " 10 -1 1 131584 models.common.Conv [512, 256, 1, 1] \n", + " 11 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", + " 12 [-1, 6] 1 0 models.common.Concat [1] \n", + " 13 -1 1 361984 models.common.C3 [512, 256, 1, False] \n", + " 14 -1 1 33024 models.common.Conv [256, 128, 1, 1] \n", + " 15 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", + " 16 [-1, 4] 1 0 models.common.Concat [1] \n", + " 17 -1 1 90880 models.common.C3 [256, 128, 1, False] \n", + " 18 -1 1 147712 models.common.Conv [128, 128, 3, 2] \n", + " 19 [-1, 14] 1 0 models.common.Concat [1] \n", + " 20 -1 1 296448 models.common.C3 [256, 256, 1, 
False] \n", + " 21 -1 1 590336 models.common.Conv [256, 256, 3, 2] \n", + " 22 [-1, 10] 1 0 models.common.Concat [1] \n", + " 23 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n", + " 24 [17, 20, 23] 1 615133 models.yolo.Segment [80, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], 32, 128, [128, 256, 512]]\n", + "Model summary: 225 layers, 7621277 parameters, 7621277 gradients, 26.6 GFLOPs\n", + "\n", + "Transferred 367/367 items from yolov5s-seg.pt\n", + "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n", + "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 60 weight(decay=0.0), 63 weight(decay=0.0005), 63 bias\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '/home/paguerrie/datasets/coco128-seg/labels/train2017.cache' im\u001b[0m\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100%|██████████| 128/128 [00:00<00:00, 544.41\u001b[0m\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning '/home/paguerrie/datasets/coco128-seg/labels/train2017.cache' imag\u001b[0m\n", + "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB ram): 100%|██████████| 128/128 [00:00<00:00, 138.66it\u001b[0m\n", + "\n", + "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.27 anchors/target, 0.994 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n", + "Plotting labels to runs/train-seg/exp/labels.jpg... \n", + "Image sizes 640 train, 640 val\n", + "Using 4 dataloader workers\n", + "Logging results to \u001b[1mruns/train-seg/exp\u001b[0m\n", + "Starting training for 3 epochs...\n", + "\n", + " Epoch GPU_mem box_loss seg_loss obj_loss cls_loss Instances Size\n", + " 0/2 4.67G 0.04464 0.05134 0.06548 0.01895 219 \n", + " Class Images Instances Box(P R mAP50 m\n", + " all 128 929 0.727 0.661 0.725 0.496 0.688 0.629 0.673 0.413\n", + "\n", + " Epoch GPU_mem box_loss seg_loss obj_loss cls_loss Instances Size\n", + " 1/2 6.36G 0.04102 0.04702 0.06873 0.01734 263 \n", + " Class Images Instances Box(P R mAP50 m\n", + " all 128 929 0.752 0.676 0.743 0.51 0.704 0.64 0.682 0.425\n", + "\n", + " Epoch GPU_mem box_loss seg_loss obj_loss cls_loss Instances Size\n", + " 2/2 6.36G 0.0421 0.04463 0.05951 0.01746 245 \n", + " Class Images Instances Box(P R mAP50 m\n", + " all 128 929 0.776 0.674 0.757 0.514 0.72 0.632 0.684 0.429\n", + "\n", + "3 epochs completed in 0.006 hours.\n", + "Optimizer stripped from runs/train-seg/exp/weights/last.pt, 15.6MB\n", + "Optimizer stripped from runs/train-seg/exp/weights/best.pt, 15.6MB\n", + "\n", + "Validating runs/train-seg/exp/weights/best.pt...\n", + "Fusing layers... 
\n", + "Model summary: 165 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\n", + " Class Images Instances Box(P R mAP50 m\n", + " all 128 929 0.775 0.673 0.758 0.515 0.72 0.632 0.684 0.427\n", + " person 128 254 0.829 0.745 0.833 0.545 0.776 0.697 0.764 0.406\n", + " bicycle 128 6 0.614 0.333 0.539 0.331 0.614 0.333 0.531 0.308\n", + " car 128 46 0.774 0.413 0.571 0.266 0.693 0.37 0.493 0.204\n", + " motorcycle 128 5 0.817 0.901 0.895 0.678 0.817 0.901 0.895 0.47\n", + " airplane 128 6 1 0.951 0.995 0.71 0.882 0.833 0.839 0.515\n", + " bus 128 7 0.695 0.714 0.757 0.661 0.695 0.714 0.757 0.627\n", + " train 128 3 1 0.935 0.995 0.566 1 0.935 0.995 0.731\n", + " truck 128 12 0.741 0.417 0.463 0.283 0.741 0.417 0.4 0.27\n", + " boat 128 6 0.653 0.32 0.452 0.17 0.653 0.32 0.328 0.149\n", + " traffic light 128 14 0.627 0.36 0.527 0.234 0.503 0.289 0.409 0.293\n", + " stop sign 128 2 0.829 1 0.995 0.747 0.829 1 0.995 0.821\n", + " bench 128 9 0.822 0.667 0.76 0.414 0.685 0.556 0.678 0.228\n", + " bird 128 16 0.967 1 0.995 0.675 0.906 0.938 0.909 0.516\n", + " cat 128 4 0.778 0.89 0.945 0.728 0.778 0.89 0.945 0.69\n", + " dog 128 9 1 0.65 0.973 0.697 1 0.65 0.939 0.615\n", + " horse 128 2 0.727 1 0.995 0.672 0.727 1 0.995 0.2\n", + " elephant 128 17 1 0.912 0.946 0.704 0.871 0.794 0.822 0.565\n", + " bear 128 1 0.626 1 0.995 0.895 0.626 1 0.995 0.895\n", + " zebra 128 4 0.865 1 0.995 0.934 0.865 1 0.995 0.822\n", + " giraffe 128 9 0.975 1 0.995 0.672 0.866 0.889 0.876 0.473\n", + " backpack 128 6 1 0.573 0.707 0.38 0.891 0.5 0.524 0.249\n", + " umbrella 128 18 0.744 0.889 0.926 0.552 0.465 0.556 0.483 0.262\n", + " handbag 128 19 0.799 0.209 0.432 0.225 0.799 0.209 0.403 0.201\n", + " tie 128 7 0.968 0.857 0.857 0.53 0.968 0.857 0.857 0.519\n", + " suitcase 128 4 0.821 1 0.995 0.696 0.821 1 0.995 0.665\n", + " frisbee 128 5 0.777 0.8 0.761 0.613 0.777 0.8 0.761 0.558\n", + " skis 128 1 0.721 1 0.995 0.497 0.721 1 0.995 0.398\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + " snowboard 128 7 0.851 0.857 0.887 0.599 0.284 0.286 0.253 0.151\n", + " sports ball 128 6 0.961 0.667 0.687 0.429 0.721 0.5 0.481 0.476\n", + " kite 128 10 0.508 0.312 0.48 0.238 0.508 0.312 0.406 0.122\n", + " baseball bat 128 4 0.331 0.5 0.526 0.249 0.331 0.5 0.376 0.102\n", + " baseball glove 128 7 0.876 0.571 0.579 0.282 0.657 0.429 0.429 0.343\n", + " skateboard 128 5 1 0.697 0.824 0.471 0.707 0.497 0.552 0.299\n", + " tennis racket 128 7 0.524 0.714 0.646 0.426 0.524 0.714 0.646 0.452\n", + " bottle 128 18 0.657 0.389 0.531 0.359 0.657 0.389 0.569 0.362\n", + " wine glass 128 16 0.752 0.938 0.924 0.435 0.451 0.562 0.568 0.341\n", + " cup 128 36 0.859 0.676 0.848 0.503 0.823 0.648 0.793 0.496\n", + " fork 128 6 0.904 0.333 0.462 0.309 0.452 0.167 0.195 0.107\n", + " knife 128 16 0.749 0.5 0.665 0.413 0.655 0.438 0.523 0.314\n", + " spoon 128 22 0.787 0.409 0.577 0.275 0.787 0.409 0.528 0.236\n", + " bowl 128 28 0.793 0.679 0.744 0.577 0.751 0.643 0.688 0.366\n", + " banana 128 1 0.931 1 0.995 0.398 0.931 1 0.995 0.497\n", + " sandwich 128 2 1 0 0.828 0.713 1 0 0.498 0.449\n", + " orange 128 4 0.588 1 0.995 0.666 0.588 1 0.995 0.672\n", + " broccoli 128 11 0.563 0.455 0.356 0.258 0.563 0.455 0.362 0.259\n", + " carrot 128 24 0.683 0.75 0.753 0.489 0.758 0.833 0.835 0.451\n", + " hot dog 128 2 0.583 1 0.995 0.995 0.583 1 0.995 0.796\n", + " pizza 128 5 0.801 0.8 0.962 0.644 0.801 0.8 0.962 0.583\n", + " donut 128 14 0.704 1 0.889 0.759 0.704 1 0.889 0.683\n", + " cake 128 4 0.904 1 0.995 
0.896 0.904 1 0.995 0.838\n", + " chair 128 35 0.672 0.543 0.629 0.333 0.708 0.571 0.583 0.284\n", + " couch 128 6 0.827 0.5 0.821 0.583 0.827 0.5 0.681 0.352\n", + " potted plant 128 14 0.809 0.908 0.884 0.584 0.809 0.908 0.884 0.474\n", + " bed 128 3 1 0.654 0.913 0.36 1 0.654 0.913 0.418\n", + " dining table 128 13 0.803 0.385 0.557 0.361 0.321 0.154 0.126 0.0487\n", + " toilet 128 2 0.802 1 0.995 0.921 0.802 1 0.995 0.698\n", + " tv 128 2 0.59 1 0.995 0.846 0.59 1 0.995 0.846\n", + " laptop 128 3 1 0 0.451 0.324 1 0 0.372 0.157\n", + " mouse 128 2 1 0 0 0 1 0 0 0\n", + " remote 128 8 0.831 0.5 0.625 0.495 0.831 0.5 0.629 0.436\n", + " cell phone 128 8 0.867 0.375 0.482 0.26 0.578 0.25 0.302 0.127\n", + " microwave 128 3 0.782 1 0.995 0.695 0.782 1 0.995 0.585\n", + " oven 128 5 0.389 0.4 0.432 0.299 0.584 0.6 0.642 0.411\n", + " sink 128 6 0.657 0.5 0.491 0.373 0.657 0.5 0.436 0.303\n", + " refrigerator 128 5 0.729 0.8 0.778 0.547 0.729 0.8 0.778 0.496\n", + " book 128 29 0.77 0.231 0.451 0.186 0.77 0.231 0.399 0.136\n", + " clock 128 9 0.798 0.889 0.956 0.747 0.798 0.889 0.926 0.68\n", + " vase 128 2 0.437 1 0.995 0.895 0.437 1 0.995 0.796\n", + " scissors 128 1 0 0 0.0226 0.0113 0 0 0 0\n", + " teddy bear 128 21 0.815 0.629 0.877 0.521 0.753 0.582 0.793 0.435\n", + " toothbrush 128 5 1 0.719 0.995 0.737 1 0.719 0.995 0.606\n", + "Results saved to \u001b[1mruns/train-seg/exp\u001b[0m\n" + ] + } + ], + "source": [ + "# Train YOLOv5s on COCO128 for 3 epochs\n", + "!python segment/train.py --img 640 --batch 16 --epochs 3 --data coco128-seg.yaml --weights yolov5s-seg.pt --cache" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "15glLzbQx5u0" + }, + "source": [ + "# 4. Visualize" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "nWOsI5wJR1o3" + }, + "source": [ + "## Comet Logging and Visualization 🌟 NEW\n", + "[Comet](https://bit.ly/yolov5-readme-comet) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", + "\n", + "Getting started is easy:\n", + "```shell\n", + "pip install comet_ml # 1. install\n", + "export COMET_API_KEY= # 2. paste API key\n", + "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n", + "```\n", + "\n", + "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://bit.ly/yolov5-colab-comet-docs). Get started by trying out the Comet Colab Notebook:\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", + "\n", + "\"yolo-ui\"" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Lay2WsTjNJzP" + }, + "source": [ + "## ClearML Logging and Automation 🌟 NEW\n", + "\n", + "[ClearML](https://cutt.ly/yolov5-notebook-clearml) is completely integrated into YOLOv5 to track your experimentation, manage dataset versions and even remotely execute training runs. 
To enable ClearML (check cells above):\n", + "\n", + "- `pip install clearml`\n", + "- run `clearml-init` to connect to a ClearML server (**deploy your own [open-source server](https://github.com/allegroai/clearml-server)**, or use our [free hosted server](https://cutt.ly/yolov5-notebook-clearml))\n", + "\n", + "You'll get all the great expected features from an experiment manager: live updates, model upload, experiment comparison, etc., but ClearML also tracks uncommitted changes and installed packages, for example. Thanks to that, ClearML Tasks (which is what we call experiments) are also reproducible on different machines! With only 1 extra line, we can schedule a YOLOv5 training task on a queue to be executed by any number of ClearML Agents (workers).\n", + "\n", + "You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) for details!\n", + "\n", + "\n", + "\"ClearML" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "-WPvRbS5Swl6" + }, + "source": [ + "## Local Logging\n", + "\n", + "Training results are automatically logged with [Tensorboard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) loggers to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc.\n", + "\n", + "This directory contains train and val statistics, mosaics, labels, predictions and augmented mosaics, as well as metrics and charts including precision-recall (PR) curves and confusion matrices. \n", + "\n", + "\"Local\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Zelyeqbyt3GD" + }, + "source": [ + "# Environments\n", + "\n", + "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n", + "\n", + "- **Notebooks** with free GPU: \"Run \"Open \"Open\n", + "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)\n", + "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)\n", + "- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) \"Docker\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "6Qu7Iesl0p54" + }, + "source": [ + "# Status\n", + "\n", + "![YOLOv5 CI](https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg)\n", + "\n", + "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing.
CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "IEijrePND_2I" + }, + "source": [ + "# Appendix\n", + "\n", + "Additional content below." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "GMusP4OAxFu6" + }, + "outputs": [], + "source": [ + "# YOLOv5 PyTorch HUB Inference (DetectionModels only)\n", + "import torch\n", + "\n", + "model = torch.hub.load('ultralytics/yolov5', 'yolov5s-seg') # yolov5n - yolov5x6 or custom\n", + "im = 'https://ultralytics.com/images/zidane.jpg' # file, Path, PIL.Image, OpenCV, nparray, list\n", + "results = model(im) # inference\n", + "results.print() # or .show(), .save(), .crop(), .pandas(), etc." + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "collapsed_sections": [], + "machine_shape": "hm", + "name": "YOLOv5 Tutorial", + "provenance": [], + "toc_visible": true + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.12" + }, + "widgets": { + "application/vnd.jupyter.widget-state+json": { + "0856bea36ec148b68522ff9c9eb258d8": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "0ace3934ec6f4d36a1b3a9e086390926": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "ProgressStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "35e03ce5090346c9ae602891470fc555": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "FloatProgressModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_76879f6f2aa54637a7a07faeea2bd684", + "max": 818322941, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_0ace3934ec6f4d36a1b3a9e086390926", + "value": 818322941 + } + }, + "574140e4c4bc48c9a171541a02cd0211": { + "model_module": "@jupyter-widgets/controls", + 
"model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_60b913d755b34d638478e30705a2dde1", + "placeholder": "​", + "style": "IPY_MODEL_0856bea36ec148b68522ff9c9eb258d8", + "value": "100%" + } + }, + "5966ba6e6f114d8c9d8d1d6b1bd4f4c7": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "60b913d755b34d638478e30705a2dde1": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "65881db1db8a4e9c930fab9172d45143": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "76879f6f2aa54637a7a07faeea2bd684": { 
+ "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "9b8caa3522fc4cbab31e13b5dfc7808d": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HBoxModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_574140e4c4bc48c9a171541a02cd0211", + "IPY_MODEL_35e03ce5090346c9ae602891470fc555", + "IPY_MODEL_c942c208e72d46568b476bb0f2d75496" + ], + "layout": "IPY_MODEL_65881db1db8a4e9c930fab9172d45143" + } + }, + "c942c208e72d46568b476bb0f2d75496": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_d6b7a2243e0c4beca714d99dceec23d6", + "placeholder": "​", + "style": "IPY_MODEL_5966ba6e6f114d8c9d8d1d6b1bd4f4c7", + "value": " 780M/780M [02:19<00:00, 6.24MB/s]" + } + }, + "d6b7a2243e0c4beca714d99dceec23d6": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": 
null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + } + } + } + }, + "nbformat": 4, + "nbformat_minor": 1 +} From 49156eb1d18b6314554333c4bdae5ee3e6102992 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 1 Nov 2022 12:02:05 +0100 Subject: [PATCH 1530/1976] Fix `is_colab()` (#9994) @AyushExel @kalenmike Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index 88cefb7bb662..5cf156dfe664 100644 --- a/utils/general.py +++ b/utils/general.py @@ -72,7 +72,7 @@ def is_chinese(s='人工智能'): def is_colab(): # Is environment a Google Colab instance? - return 'COLAB_GPU' in os.environ + return 'google.colab' in sys.modules def is_notebook(): From cf99788823dc952b9a5f11fd8be869235e172122 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 1 Nov 2022 14:27:36 +0100 Subject: [PATCH 1531/1976] Check online twice on AutoUpdate (#9999) Increased robustness to network failures Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index 5cf156dfe664..cdf4f502fc9c 100644 --- a/utils/general.py +++ b/utils/general.py @@ -369,7 +369,7 @@ def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), insta if s and install and AUTOINSTALL: # check environment variable LOGGER.info(f"{prefix} YOLOv5 requirement{'s' * (n > 1)} {s}not found, attempting AutoUpdate...") try: - assert check_online(), "AutoUpdate skipped (offline)" + assert check_online() or check_online(), "AutoUpdate skipped (offline)" LOGGER.info(check_output(f'pip install {s} {cmds}', shell=True).decode()) source = file if 'file' in locals() else requirements s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \ From c55e2cd73b472de808665f8337d6edeaebb74521 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 1 Nov 2022 14:53:14 +0100 Subject: [PATCH 1532/1976] Add `min_items` filter option (#9997) * Add `min_items` filter option @AyushExel @Laughing-q dataset filter Signed-off-by: Glenn Jocher * Update dataloaders.py Signed-off-by: Glenn Jocher * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/dataloaders.py | 17 +++++++++++++++-- utils/segment/dataloaders.py | 3 ++- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 403252ff6227..6b6e83e30456 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -444,6 +444,7 @@ def __init__(self, single_cls=False, stride=32, pad=0.0, + min_items=0, prefix=''): self.img_size = img_size self.augment = augment @@ -475,7 +476,7 @@ def __init__(self, # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS]) # pathlib assert self.im_files, f'{prefix}No images found' except Exception as e: - raise Exception(f'{prefix}Error loading data from {path}: {e}\n{HELP_URL}') + raise Exception(f'{prefix}Error loading data from {path}: {e}\n{HELP_URL}') from e # Check cache self.label_files = img2label_paths(self.im_files) # labels @@ -505,7 +506,19 @@ def __init__(self, 
self.shapes = np.array(shapes) self.im_files = list(cache.keys()) # update self.label_files = img2label_paths(cache.keys()) # update - n = len(shapes) # number of images + + # Filter images + if min_items: + include = np.array([len(x) > min_items for x in self.labels]).nonzero()[0].astype(int) + LOGGER.info(f'{prefix}{nf - len(include)}/{nf} images filtered from dataset') + self.im_files = [self.im_files[i] for i in include] + self.label_files = [self.label_files[i] for i in include] + self.labels = [self.labels[i] for i in include] + self.segments = [self.segments[i] for i in include] + self.shapes = self.shapes[include] # wh + + # Create indices + n = len(self.shapes) # number of images bi = np.floor(np.arange(n) / batch_size).astype(int) # batch index nb = bi[-1] + 1 # number of batches self.batch = bi # batch index of image diff --git a/utils/segment/dataloaders.py b/utils/segment/dataloaders.py index a63d6ec013fd..9de6f0fbf903 100644 --- a/utils/segment/dataloaders.py +++ b/utils/segment/dataloaders.py @@ -93,12 +93,13 @@ def __init__( single_cls=False, stride=32, pad=0, + min_items=0, prefix="", downsample_ratio=1, overlap=False, ): super().__init__(path, img_size, batch_size, augment, hyp, rect, image_weights, cache_images, single_cls, - stride, pad, prefix) + stride, pad, min_items, prefix) self.downsample_ratio = downsample_ratio self.overlap = overlap From 067ad9a2d1162fd33e6d47321e3f1d860b6df0e0 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 1 Nov 2022 15:55:18 +0100 Subject: [PATCH 1533/1976] Improved `check_online()` robustness (#10000) * Improved check_online() robustness YOLOv5-wide improvement, not just in check_requirements() Signed-off-by: Glenn Jocher * Update general.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/general.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/utils/general.py b/utils/general.py index cdf4f502fc9c..aae466ba5c90 100644 --- a/utils/general.py +++ b/utils/general.py @@ -283,11 +283,16 @@ def file_size(path): def check_online(): # Check internet connectivity import socket - try: - socket.create_connection(("1.1.1.1", 443), 5) # check host accessibility - return True - except OSError: - return False + + def run_once(): + # Check once + try: + socket.create_connection(("1.1.1.1", 443), 5) # check host accessibility + return True + except OSError: + return False + + return run_once() or run_once() # check twice to increase robustness to intermittent connectivity issues def git_describe(path=ROOT): # path must be a directory @@ -369,7 +374,7 @@ def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), insta if s and install and AUTOINSTALL: # check environment variable LOGGER.info(f"{prefix} YOLOv5 requirement{'s' * (n > 1)} {s}not found, attempting AutoUpdate...") try: - assert check_online() or check_online(), "AutoUpdate skipped (offline)" + assert check_online(), "AutoUpdate skipped (offline)" LOGGER.info(check_output(f'pip install {s} {cmds}', shell=True).decode()) source = file if 'file' in locals() else requirements s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \ From 82a558557a825d380178527f4b0ff175f33457fe Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 1 Nov 2022 16:41:37 +0100 Subject: [PATCH 1534/1976] Fix `min_items` (#10001) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/dataloaders.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 
6b6e83e30456..4e5b75edb5c2 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -509,8 +509,8 @@ def __init__(self, # Filter images if min_items: - include = np.array([len(x) > min_items for x in self.labels]).nonzero()[0].astype(int) - LOGGER.info(f'{prefix}{nf - len(include)}/{nf} images filtered from dataset') + include = np.array([len(x) >= min_items for x in self.labels]).nonzero()[0].astype(int) + LOGGER.info(f'{prefix}{n - len(include)}/{n} images filtered from dataset') self.im_files = [self.im_files[i] for i in include] self.label_files = [self.label_files[i] for i in include] self.labels = [self.labels[i] for i in include] From 02b8a4c21bb6d9419bbf01d4af20724743dab58b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 3 Nov 2022 17:58:15 +0100 Subject: [PATCH 1535/1976] Update default `--epochs 100` (#10024) * Update default `--epochs 100` @AyushExel @kalenmike updating default Detection and Segmentation trainings to 100 epochs Signed-off-by: Glenn Jocher * Update train.py Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README_cn.md Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- .github/README_cn.md | 10 +++++----- README.md | 10 +++++----- segment/train.py | 2 +- train.py | 2 +- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/.github/README_cn.md b/.github/README_cn.md index d0cf6b9ff3bd..4184c4c683d0 100644 --- a/.github/README_cn.md +++ b/.github/README_cn.md @@ -111,11 +111,11 @@ python detect.py --source 0 # 网络摄像头 数据集结果. [模型](https://github.com/ultralytics/yolov5/tree/master/models) 和 [数据集](https://github.com/ultralytics/yolov5/tree/master/data) 自动从最新的YOLOv5 [版本](https://github.com/ultralytics/yolov5/releases) 中下载。YOLOv5n/s/m/l/x的训练时间在V100 GPU上是 1/2/4/6/8天(多GPU倍速). 尽可能使用最大的 `--batch-size`, 或通过 `--batch-size -1` 来实现 YOLOv5 [自动批处理](https://github.com/ultralytics/yolov5/pull/5092). 批量大小显示为 V100-16GB。 ```bash -python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 128 - yolov5s 64 - yolov5m 40 - yolov5l 24 - yolov5x 16 +python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml --batch-size 128 + yolov5s 64 + yolov5m 40 + yolov5l 24 + yolov5x 16 ``` diff --git a/README.md b/README.md index 7cb4d09446ca..efe7d4b090bc 100644 --- a/README.md +++ b/README.md @@ -126,11 +126,11 @@ largest `--batch-size` possible, or pass `--batch-size -1` for YOLOv5 [AutoBatch](https://github.com/ultralytics/yolov5/pull/5092). Batch sizes shown for V100-16GB. 
```bash -python train.py --data coco.yaml --cfg yolov5n.yaml --weights '' --batch-size 128 - yolov5s 64 - yolov5m 40 - yolov5l 24 - yolov5x 16 +python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml --batch-size 128 + yolov5s 64 + yolov5m 40 + yolov5l 24 + yolov5x 16 ``` diff --git a/segment/train.py b/segment/train.py index 5a5f15f10d84..7950f95df4f2 100644 --- a/segment/train.py +++ b/segment/train.py @@ -463,7 +463,7 @@ def parse_opt(known=False): parser.add_argument('--cfg', type=str, default='', help='model.yaml path') parser.add_argument('--data', type=str, default=ROOT / 'data/coco128-seg.yaml', help='dataset.yaml path') parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path') - parser.add_argument('--epochs', type=int, default=300, help='total training epochs') + parser.add_argument('--epochs', type=int, default=100, help='total training epochs') parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch') parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)') parser.add_argument('--rect', action='store_true', help='rectangular training') diff --git a/train.py b/train.py index c24a8e81531d..e882748581bf 100644 --- a/train.py +++ b/train.py @@ -433,7 +433,7 @@ def parse_opt(known=False): parser.add_argument('--cfg', type=str, default='', help='model.yaml path') parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path') - parser.add_argument('--epochs', type=int, default=300, help='total training epochs') + parser.add_argument('--epochs', type=int, default=100, help='total training epochs') parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch') parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)') parser.add_argument('--rect', action='store_true', help='rectangular training') From fde77584687041aa62795bb2c27e895cf73686bf Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 4 Nov 2022 15:30:12 +0100 Subject: [PATCH 1536/1976] YOLOv5 AutoCache Update (#10027) * AutoCache * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * AutoCache * AutoCache * AutoCache * AutoCache Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- segment/train.py | 2 +- train.py | 2 +- utils/__init__.py | 1 - utils/dataloaders.py | 34 +++++++++++++++++++++++++++------- utils/general.py | 2 +- 5 files changed, 30 insertions(+), 11 deletions(-) diff --git a/segment/train.py b/segment/train.py index 7950f95df4f2..f067918e7c3c 100644 --- a/segment/train.py +++ b/segment/train.py @@ -474,7 +474,7 @@ def parse_opt(known=False): parser.add_argument('--noplots', action='store_true', help='save no plot files') parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations') parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') - parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"') + parser.add_argument('--cache', type=str, nargs='?', const='ram', help='image --cache ram/disk') parser.add_argument('--image-weights', 
action='store_true', help='use weighted image selection for training') parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%') diff --git a/train.py b/train.py index e882748581bf..1fe6cf4d9ebd 100644 --- a/train.py +++ b/train.py @@ -444,7 +444,7 @@ def parse_opt(known=False): parser.add_argument('--noplots', action='store_true', help='save no plot files') parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations') parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') - parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"') + parser.add_argument('--cache', type=str, nargs='?', const='ram', help='image --cache ram/disk') parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training') parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%') diff --git a/utils/__init__.py b/utils/__init__.py index 0afe6f475625..8354d91c4269 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -47,7 +47,6 @@ def notebook_init(verbose=True): from utils.general import check_font, check_requirements, is_colab from utils.torch_utils import select_device # imports - check_requirements(('psutil', 'IPython')) check_font() import psutil diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 4e5b75edb5c2..b33a24a46f9c 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -19,6 +19,7 @@ from urllib.parse import urlparse import numpy as np +import psutil import torch import torch.nn.functional as F import torchvision @@ -30,8 +31,8 @@ from utils.augmentations import (Albumentations, augment_hsv, classify_albumentations, classify_transforms, copy_paste, cutout, letterbox, mixup, random_perspective) from utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, check_dataset, check_requirements, check_yaml, clean_str, - cv2, is_colab, is_kaggle, segments2boxes, unzip_file, xyn2xy, xywh2xyxy, xywhn2xyxy, - xyxy2xywhn) + colorstr, cv2, is_colab, is_kaggle, segments2boxes, unzip_file, xyn2xy, xywh2xyxy, + xywhn2xyxy, xyxy2xywhn) from utils.torch_utils import torch_distributed_zero_first # Parameters @@ -564,24 +565,43 @@ def __init__(self, self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride - # Cache images into RAM/disk for faster training (WARNING: large datasets may exceed system resources) + # Cache images into RAM/disk for faster training + if cache_images == 'ram' and not self.check_cache_ram(prefix=prefix): + cache_images = False self.ims = [None] * n self.npy_files = [Path(f).with_suffix('.npy') for f in self.im_files] if cache_images: - gb = 0 # Gigabytes of cached images + b, gb = 0, 1 << 30 # bytes of cached images, bytes per gigabytes self.im_hw0, self.im_hw = [None] * n, [None] * n fcn = self.cache_images_to_disk if cache_images == 'disk' else self.load_image results = ThreadPool(NUM_THREADS).imap(fcn, range(n)) pbar = tqdm(enumerate(results), total=n, bar_format=BAR_FORMAT, disable=LOCAL_RANK > 0) for i, x in pbar: if cache_images == 'disk': - gb += self.npy_files[i].stat().st_size + b += self.npy_files[i].stat().st_size else: # 'ram' self.ims[i], self.im_hw0[i], self.im_hw[i] = x # im, hw_orig, hw_resized = load_image(self, i) - 
gb += self.ims[i].nbytes - pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB {cache_images})' + b += self.ims[i].nbytes + pbar.desc = f'{prefix}Caching images ({b / gb:.1f}GB {cache_images})' pbar.close() + def check_cache_ram(self, safety_margin=0.1, prefix=''): + # Check image caching requirements vs available memory + b, gb = 0, 1 << 30 # bytes of cached images, bytes per gigabytes + n = min(self.n, 30) # extrapolate from 30 random images + for _ in range(n): + im = cv2.imread(random.choice(self.im_files)) # sample image + ratio = self.img_size / max(im.shape[0], im.shape[1]) # max(h, w) # ratio + b += im.nbytes * ratio ** 2 + mem_required = b * self.n / n # GB required to cache dataset into RAM + mem = psutil.virtual_memory() + cache = mem_required * (1 + safety_margin) < mem.available # to cache or not to cache, that is the question + if not cache: + LOGGER.info(f"{prefix}{mem_required / gb:.1f}GB RAM required, " + f"{mem.available / gb:.1f}/{mem.total / gb:.1f}GB available, " + f"{'caching images ✅' if cache else 'not caching images ⚠️'}") + return cache + def cache_labels(self, path=Path('./labels.cache'), prefix=''): # Cache dataset labels, check images and read shapes x = {} # dict diff --git a/utils/general.py b/utils/general.py index aae466ba5c90..0c3b44d7f9b0 100644 --- a/utils/general.py +++ b/utils/general.py @@ -374,7 +374,7 @@ def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), insta if s and install and AUTOINSTALL: # check environment variable LOGGER.info(f"{prefix} YOLOv5 requirement{'s' * (n > 1)} {s}not found, attempting AutoUpdate...") try: - assert check_online(), "AutoUpdate skipped (offline)" + # assert check_online(), "AutoUpdate skipped (offline)" LOGGER.info(check_output(f'pip install {s} {cmds}', shell=True).decode()) source = file if 'file' in locals() else requirements s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \ From 78ed31c95a3b01c98a39a5b2edceb48ab630c95d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 6 Nov 2022 15:06:18 +0100 Subject: [PATCH 1537/1976] IoU `eps` adjustment (#10051) IoU eps adjustment Unify h1 and h2 with eps values Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/metrics.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/utils/metrics.py b/utils/metrics.py index f0bc787e1518..3b854d4f1583 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -234,8 +234,8 @@ def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7 else: # x1, y1, x2, y2 = box1 b1_x1, b1_y1, b1_x2, b1_y2 = box1.chunk(4, 1) b2_x1, b2_y1, b2_x2, b2_y2 = box2.chunk(4, 1) - w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 - w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps + w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps # Intersection area inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \ @@ -253,7 +253,7 @@ def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7 c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center dist ** 2 if CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 - v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / (h2 + eps)) - torch.atan(w1 / (h1 + eps)), 2) + v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2) with torch.no_grad(): alpha = v / (v - iou + (1 + eps)) return iou - (rho2 / c2 + v * alpha) 
# CIoU

From 6ae3dff7d48bd914a5ab5d20e277b8222cd547c7 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Sun, 6 Nov 2022 18:43:07 +0100
Subject: [PATCH 1538/1976] Update get_coco.sh (#10057)

Signed-off-by: Glenn Jocher

Signed-off-by: Glenn Jocher
---
 data/scripts/get_coco.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/data/scripts/get_coco.sh b/data/scripts/get_coco.sh
index 506d46df9fb5..0d388b0a12a8 100755
--- a/data/scripts/get_coco.sh
+++ b/data/scripts/get_coco.sh
@@ -30,7 +30,7 @@ url=https://github.com/ultralytics/yolov5/releases/download/v1.0/
 if [ "$segments" == "true" ]; then
   f='coco2017labels-segments.zip'  # 168 MB
 else
-  f='coco2017labels.zip'  # 168 MB
+  f='coco2017labels.zip'  # 46 MB
 fi
 echo 'Downloading' $url$f ' ...'
 curl -L $url$f -o $f -# && unzip -q $f -d $d && rm $f &

From 58b3d078543ed92bb960ec3f213291c5fd459e43 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Mon, 7 Nov 2022 22:56:23 +0100
Subject: [PATCH 1539/1976] [pre-commit.ci] pre-commit suggestions (#10068)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

updates:
- [github.com/asottile/pyupgrade: v2.38.2 → v3.2.0](https://github.com/asottile/pyupgrade/compare/v2.38.2...v3.2.0)

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 .pre-commit-config.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 1cd102c26b41..0106b4aab523 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -24,7 +24,7 @@ repos:
       - id: check-docstring-first

   - repo: https://github.com/asottile/pyupgrade
-    rev: v2.38.2
+    rev: v3.2.0
     hooks:
       - id: pyupgrade
         name: Upgrade code

From e00d02d78b772d7848689d8947238e4b05986a54 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Mon, 7 Nov 2022 23:07:10 +0100
Subject: [PATCH 1540/1976] Use MNIST160 (#10069)

New 160-image MNIST subset composed of the first 8 train and first 8 test images of each of the 10 classes (8 x 2 x 10 = 160). Suitable for fast CI.
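Such a subset is quick to rebuild from torchvision; a minimal sketch (the `mnist160/{train,test}/<class>/<index>.png` layout is an assumption inferred from the CI paths in the diff below, e.g. `../datasets/mnist160/test/7/60.png`, not part of this patch):

```python
# Sketch: build an MNIST160-style subset, first 8 train + first 8 test images per class
from pathlib import Path

from torchvision import datasets

for split, train in (('train', True), ('test', False)):
    ds = datasets.MNIST('mnist', train=train, download=True)  # yields (PIL.Image, int) pairs
    counts = {c: 0 for c in range(10)}  # images kept so far per class
    for i, (im, c) in enumerate(ds):
        if counts[c] < 8:  # keep only the first 8 examples of each class per split
            d = Path('mnist160') / split / str(c)
            d.mkdir(parents=True, exist_ok=True)
            im.save(d / f'{i}.png')  # file name = dataset index, e.g. test/7/60.png
            counts[c] += 1
        if all(v == 8 for v in counts.values()):
            break
```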
Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- .github/workflows/ci-testing.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 1ec68e8412f9..f31bb6e6ce3c 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -155,11 +155,11 @@ jobs: run: | m=${{ matrix.model }}-cls.pt # official weights b=runs/train-cls/exp/weights/best.pt # best.pt checkpoint - python classify/train.py --imgsz 32 --model $m --data mnist2560 --epochs 1 # train - python classify/val.py --imgsz 32 --weights $b --data ../datasets/mnist2560 # val - python classify/predict.py --imgsz 32 --weights $b --source ../datasets/mnist2560/test/7/60.png # predict + python classify/train.py --imgsz 32 --model $m --data mnist160 --epochs 1 # train + python classify/val.py --imgsz 32 --weights $b --data ../datasets/mnist160 # val + python classify/predict.py --imgsz 32 --weights $b --source ../datasets/mnist160/test/7/60.png # predict python classify/predict.py --imgsz 32 --weights $m --source data/images/bus.jpg # predict - python export.py --weights $b --img 64 --imgsz 224 --include torchscript # export + python export.py --weights $b --img 64 --include torchscript # export python - < Date: Tue, 8 Nov 2022 00:58:00 +0100 Subject: [PATCH 1541/1976] Update Dockerfile keep default torch installation (#10071) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index 87605456a5d9..7ec6efaeacba 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -15,7 +15,7 @@ RUN apt update && apt install --no-install-recommends -y zip htop screen libgl1- # Install pip packages COPY requirements.txt . 
RUN python -m pip install --upgrade pip wheel -RUN pip uninstall -y Pillow torchtext torch torchvision +RUN pip uninstall -y Pillow torchtext # torch torchvision RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook Pillow>=9.1.0 \ 'opencv-python<4.6.0.66' \ --extra-index-url https://download.pytorch.org/whl/cu113 From 86decb3c49f91547975d7b7399290eb247888f6f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 9 Nov 2022 15:05:19 +0100 Subject: [PATCH 1542/1976] Add `ultralytics` pip package (#10103) --- requirements.txt | 23 ++++++++++++----------- utils/docker/Dockerfile | 2 +- utils/docker/Dockerfile-arm64 | 2 +- utils/docker/Dockerfile-cpu | 2 +- 4 files changed, 15 insertions(+), 14 deletions(-) diff --git a/requirements.txt b/requirements.txt index 8cb1bd4c6fe1..70dd7ce53ba3 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,29 +1,32 @@ -# YOLOv5 requirements +# YOLOv5 🚀 requirements # Usage: pip install -r requirements.txt -# Base ---------------------------------------- +# Base ------------------------------------------------------------------------ +ipython # interactive notebook matplotlib>=3.2.2 numpy>=1.18.5 opencv-python>=4.1.1 Pillow>=7.1.2 +psutil # system resources PyYAML>=5.3.1 requests>=2.23.0 scipy>=1.4.1 -torch>=1.7.0 # see https://pytorch.org/get-started/locally/ (recommended) +thop>=0.1.1 # FLOPs computation +torch>=1.7.0 # see https://pytorch.org/get-started/locally (recommended) torchvision>=0.8.1 tqdm>=4.64.0 # protobuf<=3.20.1 # https://github.com/ultralytics/yolov5/issues/8012 -# Logging ------------------------------------- +# Logging --------------------------------------------------------------------- tensorboard>=2.4.1 # clearml>=1.2.0 # comet -# Plotting ------------------------------------ +# Plotting -------------------------------------------------------------------- pandas>=1.1.4 seaborn>=0.11.0 -# Export -------------------------------------- +# Export ---------------------------------------------------------------------- # coremltools>=6.0 # CoreML export # onnx>=1.9.0 # ONNX export # onnx-simplifier>=0.4.1 # ONNX simplifier @@ -34,14 +37,12 @@ seaborn>=0.11.0 # tensorflowjs>=3.9.0 # TF.js export # openvino-dev # OpenVINO export -# Deploy -------------------------------------- +# Deploy ---------------------------------------------------------------------- # tritonclient[all]~=2.24.0 -# Extras -------------------------------------- -ipython # interactive notebook -psutil # system utilization -thop>=0.1.1 # FLOPs computation +# Extras ---------------------------------------------------------------------- # mss # screenshots # albumentations>=1.0.3 # pycocotools>=2.0 # COCO mAP # roboflow +# ultralytics # HUB https://hub.ultralytics.com diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index 7ec6efaeacba..a5035c6abc33 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -16,7 +16,7 @@ RUN apt update && apt install --no-install-recommends -y zip htop screen libgl1- COPY requirements.txt . 
 RUN python -m pip install --upgrade pip wheel
 RUN pip uninstall -y Pillow torchtext  # torch torchvision
-RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook Pillow>=9.1.0 \
    'opencv-python<4.6.0.66' \
    --extra-index-url https://download.pytorch.org/whl/cu113
+RUN pip install --no-cache -r requirements.txt ultralytics albumentations comet gsutil notebook Pillow>=9.1.0 \
    'opencv-python<4.6.0.66' \
    --extra-index-url https://download.pytorch.org/whl/cu113

diff --git a/utils/docker/Dockerfile-arm64 b/utils/docker/Dockerfile-arm64
index 6e8ff77545c5..8ec71622d9b6 100644
--- a/utils/docker/Dockerfile-arm64
+++ b/utils/docker/Dockerfile-arm64
@@ -17,7 +17,7 @@ RUN apt install --no-install-recommends -y python3-pip git zip curl htop gcc lib
 # Install pip packages
 COPY requirements.txt .
 RUN python3 -m pip install --upgrade pip wheel
-RUN pip install --no-cache -r requirements.txt gsutil notebook \
+RUN pip install --no-cache -r requirements.txt ultralytics gsutil notebook \
    tensorflow-aarch64
    # tensorflowjs \
    # onnx onnx-simplifier onnxruntime \

diff --git a/utils/docker/Dockerfile-cpu b/utils/docker/Dockerfile-cpu
index f3f81ec02c23..017e2826458b 100644
--- a/utils/docker/Dockerfile-cpu
+++ b/utils/docker/Dockerfile-cpu
@@ -17,7 +17,7 @@ RUN apt install --no-install-recommends -y python3-pip git zip curl htop libgl1-
 # Install pip packages
 COPY requirements.txt .
 RUN python3 -m pip install --upgrade pip wheel
-RUN pip install --no-cache -r requirements.txt albumentations gsutil notebook \
    coremltools onnx onnx-simplifier onnxruntime tensorflow-cpu tensorflowjs \
    # openvino-dev \
    --extra-index-url https://download.pytorch.org/whl/cpu
+RUN pip install --no-cache -r requirements.txt ultralytics albumentations gsutil notebook \
    coremltools onnx onnx-simplifier onnxruntime tensorflow-cpu tensorflowjs \
    # openvino-dev \
    --extra-index-url https://download.pytorch.org/whl/cpu

From 892c4cd4a5a99d9c824ffeb49ce512ee2c9b93e5 Mon Sep 17 00:00:00 2001
From: janus-zheng <106574221+janus-zheng@users.noreply.github.com>
Date: Wed, 9 Nov 2022 22:21:43 +0800
Subject: [PATCH 1543/1976] AutoShape integer image-size fix (#10090)

Update common.py

We have a division at line 694 and then a multiplication at line 695, so `y * g` is not an integer. And since `shape1` is used at line 697 to ensure the size is divisible by the `stride`, this may lead to a different image size.

In my experiment my image is [640, 640], which is divisible by the default stride 32, but I found that the result is changed to [672, 672] after line 697. So the final detection result is slightly different from that of running the `detect.py` script directly, which does not call the AutoShape methods.
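The rounding behaviour is easy to check in isolation; a standalone sketch (`make_divisible` is copied here in simplified form for illustration, with the stride as a default argument):

```python
import math

def make_divisible(x, divisor=32):  # simplified stand-in for utils.general.make_divisible
    return math.ceil(x / divisor) * divisor  # round x up to the nearest multiple of divisor

print(make_divisible(640))         # 640 -> already a stride multiple, unchanged
print(make_divisible(640.5))       # 672 -> any fractional y * g inflates by a full stride
print(make_divisible(int(640.5)))  # 640 -> truncating to int first keeps the expected size
```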
Signed-off-by: janus-zheng <106574221+janus-zheng@users.noreply.github.com> Signed-off-by: janus-zheng <106574221+janus-zheng@users.noreply.github.com> Co-authored-by: Glenn Jocher --- models/common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/common.py b/models/common.py index 6347e51cdf0b..64f1b9354225 100644 --- a/models/common.py +++ b/models/common.py @@ -692,7 +692,7 @@ def forward(self, ims, size=640, augment=False, profile=False): s = im.shape[:2] # HWC shape0.append(s) # image shape g = max(size) / max(s) # gain - shape1.append([y * g for y in s]) + shape1.append([int(y * g) for y in s]) ims[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update shape1 = [make_divisible(x, self.stride) for x in np.array(shape1).max(0)] if self.pt else size # inf shape x = [letterbox(im, shape1, auto=False)[0] for im in ims] # pad From c1fcfe8cd9030939dd1635b158984fb066279b22 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 9 Nov 2022 17:20:26 +0100 Subject: [PATCH 1544/1976] YouTube Usage example comments (#10106) * YouTube Usage example comments Signed-off-by: Glenn Jocher * Update dataloaders.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/dataloaders.py | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index b33a24a46f9c..621c03cd2db1 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -353,6 +353,7 @@ def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True, tr # Start thread to read frames from video stream st = f'{i + 1}/{n}: {s}... ' if urlparse(s).hostname in ('www.youtube.com', 'youtube.com', 'youtu.be'): # if source is YouTube video + # YouTube format i.e. 'https://www.youtube.com/watch?v=Zgi9g1ksQHc' or 'https://youtu.be/Zgi9g1ksQHc' check_requirements(('pafy', 'youtube_dl==2020.12.2')) import pafy s = pafy.new(s).getbest(preftype="mp4").url # YouTube URL From 75728bb3ea99113f306280b734dedcc5d7d067b1 Mon Sep 17 00:00:00 2001 From: Victor Sonck Date: Wed, 9 Nov 2022 17:45:09 +0100 Subject: [PATCH 1545/1976] Mapped project and name to ClearML (#10100) * Mapped project and name to ClearML * Add project and task name docs * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- utils/loggers/clearml/README.md | 10 +++++++++- utils/loggers/clearml/clearml_utils.py | 4 ++-- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/utils/loggers/clearml/README.md b/utils/loggers/clearml/README.md index e0c5824bc2a2..3cf4c268583f 100644 --- a/utils/loggers/clearml/README.md +++ b/utils/loggers/clearml/README.md @@ -57,12 +57,20 @@ To enable ClearML experiment tracking, simply install the ClearML pip package. pip install clearml>=1.2.0 ``` -This will enable integration with the YOLOv5 training script. Every training run from now on, will be captured and stored by the ClearML experiment manager. If you want to change the `project_name` or `task_name`, head over to our custom logger, where you can change it: `utils/loggers/clearml/clearml_utils.py` +This will enable integration with the YOLOv5 training script. Every training run from now on, will be captured and stored by the ClearML experiment manager. 
+
+If you want to change the `project_name` or `task_name`, use the `--project` and `--name` arguments of the `train.py` script; by default the project will be called `YOLOv5` and the task `Training`.
+PLEASE NOTE: ClearML uses `/` as a delimiter for subprojects, so be careful when using `/` in your project name!

 ```bash
 python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache
 ```

+or with custom project and task name:
+```bash
+python train.py --project my_project --name my_training --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache
+```
+
 This will capture:
 - Source code + uncommitted changes
 - Installed packages

diff --git a/utils/loggers/clearml/clearml_utils.py b/utils/loggers/clearml/clearml_utils.py
index eb1c12ce6cac..fe5f597a87a6 100644
--- a/utils/loggers/clearml/clearml_utils.py
+++ b/utils/loggers/clearml/clearml_utils.py
@@ -85,8 +85,8 @@ def __init__(self, opt, hyp):
         self.data_dict = None
         if self.clearml:
             self.task = Task.init(
-                project_name='YOLOv5',
-                task_name='training',
+                project_name=opt.project if opt.project != 'runs/train' else 'YOLOv5',
+                task_name=opt.name if opt.name != 'exp' else 'Training',
                 tags=['YOLOv5'],
                 output_uri=True,
                 auto_connect_frameworks={'pytorch': False}

From 078059c5b3ead9579c53f68c521ed5f0e7e87afa Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Thu, 10 Nov 2022 18:32:34 +0100
Subject: [PATCH 1546/1976] Update IoU functions (#10123)

Remove box area function and support expandable bbox_iou() calls.

Signed-off-by: Glenn Jocher

Signed-off-by: Glenn Jocher
---
 utils/metrics.py | 15 +++++----------
 1 file changed, 5 insertions(+), 10 deletions(-)

diff --git a/utils/metrics.py b/utils/metrics.py
index 3b854d4f1583..65ea463c0dab 100644
--- a/utils/metrics.py
+++ b/utils/metrics.py
@@ -227,13 +227,13 @@ def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7
     # Get the coordinates of bounding boxes
     if xywh:  # transform from xywh to xyxy
-        (x1, y1, w1, h1), (x2, y2, w2, h2) = box1.chunk(4, 1), box2.chunk(4, 1)
+        (x1, y1, w1, h1), (x2, y2, w2, h2) = box1.chunk(4, -1), box2.chunk(4, -1)
         w1_, h1_, w2_, h2_ = w1 / 2, h1 / 2, w2 / 2, h2 / 2
         b1_x1, b1_x2, b1_y1, b1_y2 = x1 - w1_, x1 + w1_, y1 - h1_, y1 + h1_
         b2_x1, b2_x2, b2_y1, b2_y2 = x2 - w2_, x2 + w2_, y2 - h2_, y2 + h2_
     else:  # x1, y1, x2, y2 = box1
-        b1_x1, b1_y1, b1_x2, b1_y2 = box1.chunk(4, 1)
-        b2_x1, b2_y1, b2_x2, b2_y2 = box2.chunk(4, 1)
+        b1_x1, b1_y1, b1_x2, b1_y2 = box1.chunk(4, -1)
+        b2_x1, b2_y1, b2_x2, b2_y2 = box2.chunk(4, -1)
         w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
         w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps
@@ -263,11 +263,6 @@ def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7
     return iou  # IoU

-def box_area(box):
-    # box = xyxy(4,n)
-    return (box[2] - box[0]) * (box[3] - box[1])
-
 def box_iou(box1, box2, eps=1e-7):
     # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
     """
@@ -282,11 +277,11 @@ def box_iou(box1, box2, eps=1e-7):
     """

     # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
-    (a1, a2), (b1, b2) = box1[:, None].chunk(2, 2), box2.chunk(2, 1)
+    (a1, a2), (b1, b2) = box1.unsqueeze(1).chunk(2, 2), box2.unsqueeze(0).chunk(2, 2)
     inter = (torch.min(a2, b2) - torch.max(a1, b1)).clamp(0).prod(2)

     # IoU = inter / (area1 + area2 - inter)
-    return inter / (box_area(box1.T)[:, None] + box_area(box2.T) - inter + eps)
+    return inter / ((a2 - a1).prod(2) + (b2 - b1).prod(2) - inter + eps)

 def bbox_ioa(box1, box2, eps=1e-7):

From
55e95168465b094733e3ef1ec36e0a18f200cd94 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 11 Nov 2022 00:21:29 +0100 Subject: [PATCH 1547/1976] Add Ultralytics HUB to README (#10070) * Add Ultralytics HUB section to README @pderrenger @kalenmike @AlanDimmer @AyushExel new Ultralytics HUB section in YOLOv5 README. Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.md @AlanDimmer @kalenmike new integrations image Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update README.md Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update README_cn.md Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/README_cn.md | 57 ++++++++++++++++++------------------------ README.md | 20 +++++++++++---- segment/tutorial.ipynb | 2 +- tutorial.ipynb | 2 +- 4 files changed, 42 insertions(+), 39 deletions(-) diff --git a/.github/README_cn.md b/.github/README_cn.md index 4184c4c683d0..90d3da8298cc 100644 --- a/.github/README_cn.md +++ b/.github/README_cn.md @@ -144,47 +144,40 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml -
-##
环境
-使用经过我们验证的环境,几秒钟就可以开始。点击下面的每个图标了解详情。 +##
Integrations
- - -##
如何与第三方集成
+
+ + +
+
- - - - - - - - - + + + + + + + + +
-|Deci ⭐ NEW|ClearML ⭐ NEW|Roboflow|Weights & Biases +|Roboflow|ClearML ⭐ NEW|Comet ⭐ NEW|Deci ⭐ NEW| |:-:|:-:|:-:|:-:| -|在[Deci](https://bit.ly/yolov5-deci-platform)一键自动编译和量化YOLOv5以提高推理性能|使用[ClearML](https://cutt.ly/yolov5-readme-clearml) (开源!)自动追踪,可视化,以及远程训练YOLOv5|标记并将您的自定义数据直接导出到YOLOv5后,用[Roboflow](https://roboflow.com/?ref=ultralytics)进行训练 |通过[Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme)自动跟踪以及可视化你在云端所有的YOLOv5训练运行情况 +|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Free forever, [Comet](https://bit.ly/yolov5-readme-comet) lets you save YOLOv5 models, resume training, and interactively visualise and debug predictions|Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)| + + +##
Ultralytics HUB
+ +[Ultralytics HUB](https://bit.ly/ultralytics_hub) is our ⭐ **NEW** no-code solution to visualize datasets, train YOLOv5 🚀 models, and deploy to the real world in a seamless experience. Get started for **Free** now! + + + ##
为什么选择 YOLOv5
diff --git a/README.md b/README.md index efe7d4b090bc..5101297782d0 100644 --- a/README.md +++ b/README.md @@ -52,9 +52,7 @@ ##
Documentation
-See the [YOLOv5 Docs](https://docs.ultralytics.com) for full documentation on training, testing and deployment. - -##
Quick Start Examples
+See the [YOLOv5 Docs](https://docs.ultralytics.com) for full documentation on training, testing and deployment. See below for quickstart examples.
Install @@ -71,7 +69,7 @@ pip install -r requirements.txt # install
-
+
Inference YOLOv5 [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36) inference. [Models](https://github.com/ultralytics/yolov5/tree/master/models) download automatically from the latest @@ -163,7 +161,11 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - ##
Integrations
- +
+ + +
+
@@ -184,6 +186,14 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - |Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Free forever, [Comet](https://bit.ly/yolov5-readme-comet) lets you save YOLOv5 models, resume training, and interactively visualise and debug predictions|Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)| +##
Ultralytics HUB
+ +[Ultralytics HUB](https://bit.ly/ultralytics_hub) is our ⭐ **NEW** no-code solution to visualize datasets, train YOLOv5 🚀 models, and deploy to the real world in a seamless experience. Get started for **Free** now! + +
+ + + ##
Why YOLOv5
YOLOv5 has been designed to be super easy to get started and simple to learn. We prioritize real-world results. diff --git a/segment/tutorial.ipynb b/segment/tutorial.ipynb index 47559978be74..079bfe3057bc 100644 --- a/segment/tutorial.ipynb +++ b/segment/tutorial.ipynb @@ -232,7 +232,7 @@ "source": [ "# 3. Train\n", "\n", - "

\n", + "

\n", "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", "

\n", "\n", diff --git a/tutorial.ipynb b/tutorial.ipynb index b40f08ef20b3..96f05426b4a8 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -626,7 +626,7 @@ "source": [ "# 3. Train\n", "\n", - "

\n", + "

\n", "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", "

\n", "\n", From 7c3827a2d66ce83a4afdffebe55d1bfbd39359d3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rub=C3=A9n=20Usamentiaga?= Date: Fri, 11 Nov 2022 20:43:16 +0100 Subject: [PATCH 1548/1976] Fix benchmark.py usage comment (#10131) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Update benchmarks.py Signed-off-by: Rubén Usamentiaga Signed-off-by: Rubén Usamentiaga --- benchmarks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmarks.py b/benchmarks.py index ef5c882973f0..03d7d693a936 100644 --- a/benchmarks.py +++ b/benchmarks.py @@ -22,7 +22,7 @@ $ pip install -U nvidia-tensorrt --index-url https://pypi.ngc.nvidia.com # TensorRT Usage: - $ python utils/benchmarks.py --weights yolov5s.pt --img 640 + $ python benchmarks.py --weights yolov5s.pt --img 640 """ import argparse From f33718f36f756301b91da6207f1d02f30b3916e1 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 11 Nov 2022 21:20:48 +0100 Subject: [PATCH 1549/1976] Update HUB banner image (#10134) * Update HUB banner image Passed through tinypng for filesize reduction Signed-off-by: Glenn Jocher * Update README_cn.md Signed-off-by: Glenn Jocher * Update README_cn.md Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- .github/README_cn.md | 2 +- README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/README_cn.md b/.github/README_cn.md index 90d3da8298cc..65ecd31a3e69 100644 --- a/.github/README_cn.md +++ b/.github/README_cn.md @@ -177,7 +177,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - [Ultralytics HUB](https://bit.ly/ultralytics_hub) is our ⭐ **NEW** no-code solution to visualize datasets, train YOLOv5 🚀 models, and deploy to the real world in a seamless experience. Get started for **Free** now! - + ##
为什么选择 YOLOv5
diff --git a/README.md b/README.md index 5101297782d0..0fa95f404117 100644 --- a/README.md +++ b/README.md @@ -191,7 +191,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - [Ultralytics HUB](https://bit.ly/ultralytics_hub) is our ⭐ **NEW** no-code solution to visualize datasets, train YOLOv5 🚀 models, and deploy to the real world in a seamless experience. Get started for **Free** now! - + ##
Why YOLOv5
From abbfd695232b1bfcbd8e122e2aeb37fcc3d146d5 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 14 Nov 2022 18:54:49 +0100 Subject: [PATCH 1550/1976] Copy-Paste zero value fix (#10152) * Copy-Paste zero value fix Signed-off-by: Glenn Jocher * Update augmentations.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/augmentations.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/utils/augmentations.py b/utils/augmentations.py index 7c8e0bcdede6..1eae5db8f816 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -250,12 +250,10 @@ def copy_paste(im, labels, segments, p=0.5): if (ioa < 0.30).all(): # allow 30% obscuration of existing labels labels = np.concatenate((labels, [[l[0], *box]]), 0) segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1)) - cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED) + cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (1, 1, 1), cv2.FILLED) - result = cv2.bitwise_and(src1=im, src2=im_new) - result = cv2.flip(result, 1) # augment segments (flip left-right) - i = result > 0 # pixels to replace - # i[:, :] = result.max(2).reshape(h, w, 1) # act over ch + result = cv2.flip(im, 1) # augment segments (flip left-right) + i = cv2.flip(im_new, 1).astype(bool) im[i] = result[i] # cv2.imwrite('debug.jpg', im) # debug return im, labels, segments From ea73386e5a21f6b6d4f2bdc0ba1f9f8a7ced3f2a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 15 Nov 2022 14:19:47 +0100 Subject: [PATCH 1551/1976] Add Copy-Paste to `mosaic9()` (#10165) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/dataloaders.py | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 621c03cd2db1..54d3f7bbba00 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -869,6 +869,7 @@ def load_mosaic9(self, index): # img9, labels9 = replicate(img9, labels9) # replicate # Augment + img9, labels9, segments9 = copy_paste(img9, labels9, segments9, p=self.hyp['copy_paste']) img9, labels9 = random_perspective(img9, labels9, segments9, From 9dd40f072386134d5271a902f135e95979de1419 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 15 Nov 2022 16:27:07 +0100 Subject: [PATCH 1552/1976] Add `join_threads()` (#10086) * Update __init__.py Signed-off-by: Glenn Jocher * Update __init__.py Signed-off-by: Glenn Jocher * Update __init__.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/__init__.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/utils/__init__.py b/utils/__init__.py index 8354d91c4269..7bf3efe6b8c7 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -37,6 +37,16 @@ def wrapper(*args, **kwargs): return wrapper +def join_threads(verbose=False): + # Join all daemon threads, i.e. 
atexit.register(lambda: join_threads()) + main_thread = threading.current_thread() + for t in threading.enumerate(): + if t is not main_thread: + if verbose: + print(f'Joining thread {t.name}') + t.join() + + def notebook_init(verbose=True): # Check system software and hardware print('Checking setup...') From 5e03f5fc8cbd658e183bb3812fe1c8553cb8cf05 Mon Sep 17 00:00:00 2001 From: Amol Dumrewal Date: Tue, 15 Nov 2022 23:30:33 +0530 Subject: [PATCH 1553/1976] Fix dataloader filepath modification to perform replace only once and not for all occurences of string (#10163) * Fix dataloader filepath modification to perform only once and not for all occurences of string * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * cleanup Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- utils/dataloaders.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 54d3f7bbba00..0418293a6e21 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -470,8 +470,8 @@ def __init__(self, with open(p) as t: t = t.read().strip().splitlines() parent = str(p.parent) + os.sep - f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path - # f += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib) + f += [x.replace('./', parent, 1) if x.startswith('./') else x for x in t] # to global path + # f += [p.parent / x.lstrip(os.sep) for x in t] # to global path (pathlib) else: raise FileNotFoundError(f'{prefix}{p} does not exist') self.im_files = sorted(x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS) From 166b9f2fa79a67788a2a372dc52c9e8e0f7a7cc1 Mon Sep 17 00:00:00 2001 From: Ryan Echols Date: Wed, 16 Nov 2022 08:09:30 -0700 Subject: [PATCH 1554/1976] fix: prevent logging config clobbering (#10133) Previous behavior: loading this repository with `torch.hub.load` clobbers the existing logging configuration by modifying the root logger's configuration. New behavior: loading this repository with `torch.hub.load` only clobbers the logging configuration for logger `yolov5` and its descendants. Signed-off-by: Ryan Echols Signed-off-by: Ryan Echols --- utils/general.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/utils/general.py b/utils/general.py index 0c3b44d7f9b0..76dd2a40b51b 100644 --- a/utils/general.py +++ b/utils/general.py @@ -126,8 +126,9 @@ def set_logging(name=None, verbose=VERBOSE): log.addHandler(handler) -set_logging() # run before defining LOGGER -LOGGER = logging.getLogger("yolov5") # define globally (used in train.py, val.py, detect.py, etc.) +logger_name = "yolov5" +set_logging(logger_name) # run before defining LOGGER +LOGGER = logging.getLogger(logger_name) # define globally (used in train.py, val.py, detect.py, etc.) 
if platform.system() == 'Windows': for fn in LOGGER.info, LOGGER.warning: setattr(LOGGER, fn.__name__, lambda x: fn(emojis(x))) # emoji safe logging From 90575107e7b06d48ef91ffa46a41a55439ebdab1 Mon Sep 17 00:00:00 2001 From: tripleMu Date: Wed, 16 Nov 2022 23:10:15 +0800 Subject: [PATCH 1555/1976] Filter PyTorch 1.13 UserWarnings (#10166) FilterWarning for torch.distributed._all_gather_base Co-authored-by: Glenn Jocher --- utils/torch_utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 04a3873854ee..fe934abf118c 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -32,6 +32,7 @@ # Suppress PyTorch warnings warnings.filterwarnings('ignore', message='User provided device_type of \'cuda\', but CUDA is not available. Disabling') +warnings.filterwarnings('ignore', category=UserWarning) def smart_inference_mode(torch_1_9=check_version(torch.__version__, '1.9.0')): From e40662ffdd80a6f108a62cf0d53d06085d943223 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 16 Nov 2022 19:06:19 +0100 Subject: [PATCH 1556/1976] Revert "fix: prevent logging config clobbering" (#10177) Revert "fix: prevent logging config clobbering (#10133)" This reverts commit 166b9f2fa79a67788a2a372dc52c9e8e0f7a7cc1. --- utils/general.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/utils/general.py b/utils/general.py index 76dd2a40b51b..0c3b44d7f9b0 100644 --- a/utils/general.py +++ b/utils/general.py @@ -126,9 +126,8 @@ def set_logging(name=None, verbose=VERBOSE): log.addHandler(handler) -logger_name = "yolov5" -set_logging(logger_name) # run before defining LOGGER -LOGGER = logging.getLogger(logger_name) # define globally (used in train.py, val.py, detect.py, etc.) +set_logging() # run before defining LOGGER +LOGGER = logging.getLogger("yolov5") # define globally (used in train.py, val.py, detect.py, etc.) if platform.system() == 'Windows': for fn in LOGGER.info, LOGGER.warning: setattr(LOGGER, fn.__name__, lambda x: fn(emojis(x))) # emoji safe logging From a9f895d304aea5920e694606927fa9208aa7f0ed Mon Sep 17 00:00:00 2001 From: janus-zheng <106574221+janus-zheng@users.noreply.github.com> Date: Thu, 17 Nov 2022 20:42:26 +0800 Subject: [PATCH 1557/1976] Apply make_divisible for ONNX models in Autoshape (#10172) * Apply make_divisible for onnx models in Autoshape At line 697 we have this `make_divisible` function for pytorch models. * Context: we want to run inference on varied input sizes instead of fixed image size. * When I test an image of size [720, 720] for a pytorch model (e.g., yolov5n.pt), we can see that it will be reshaped to [736, 736] by the function. This is as expected. * When I test the same image for the onnx model (e.g., yolov5n.onnx, exported with `--dynamic`), I got an error and it's due to the indivisible problem ``` onnxruntime.capi.onnxruntime_pybind11_state.Fail: [ONNXRuntimeError] : 1 : FAIL : Non-zero status code returned while running Concat node. Name:'Concat_143' Status Message: concat.cc:156 PrepareForCompute Non concat axis dimensions must match: Axis 3 has mismatched dimensions of 45 and 46 ``` The simple solution is to enable the `make_divisible` function for onnx model too. 
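The mismatched "45 and 46" in that error is a stride artifact that can be reproduced with sizes alone; a sketch under the assumption of standard stride-2 downsampling (k=3, s=2, p=1, so each level computes ceil(x / 2)):

```python
import math

def down(x, n):  # spatial size after n stride-2 convolutions: ceil(x / 2) per level
    for _ in range(n):
        x = math.ceil(x / 2)
    return x

for side in (720, 736):  # 736 is what make_divisible(720, 32) returns
    p4, p5 = down(side, 4), down(side, 5)
    print(side, p4, 2 * p5)  # the neck concatenates P4 with a 2x-upsampled P5
# 720 -> 45 vs 46: the Concat axis mismatch quoted above
# 736 -> 46 vs 46: sizes agree once the input is a multiple of the 32 stride
```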
Signed-off-by: janus-zheng <106574221+janus-zheng@users.noreply.github.com>

* revise indent

Signed-off-by: janus-zheng <106574221+janus-zheng@users.noreply.github.com>

* Apply make_divisible to all formats

All formats from DetectMultiBackend should have a default stride=32.

Signed-off-by: Glenn Jocher
Signed-off-by: janus-zheng <106574221+janus-zheng@users.noreply.github.com>
Co-authored-by: Glenn Jocher
---
 models/common.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/models/common.py b/models/common.py
index 64f1b9354225..8b5ec1c786d8 100644
--- a/models/common.py
+++ b/models/common.py
@@ -694,7 +694,7 @@ def forward(self, ims, size=640, augment=False, profile=False):
                 g = max(size) / max(s)  # gain
                 shape1.append([int(y * g) for y in s])
                 ims[i] = im if im.data.contiguous else np.ascontiguousarray(im)  # update
-            shape1 = [make_divisible(x, self.stride) for x in np.array(shape1).max(0)] if self.pt else size  # inf shape
+            shape1 = [make_divisible(x, self.stride) for x in np.array(shape1).max(0)]  # inf shape
             x = [letterbox(im, shape1, auto=False)[0] for im in ims]  # pad
             x = np.ascontiguousarray(np.array(x).transpose((0, 3, 1, 2)))  # stack and BHWC to BCHW
             x = torch.from_numpy(x).to(p.device).type_as(p) / 255  # uint8 to fp16/32

From 1510111b46a24a0c0fa2d685a6f3c96693368654 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Thu, 17 Nov 2022 19:22:18 +0100
Subject: [PATCH 1558/1976] data.yaml `names.keys()` integer assert (#10190)

* data.yaml `names.keys()` integer assert

Signed-off-by: Glenn Jocher

* Update general.py

Signed-off-by: Glenn Jocher
---
 utils/general.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/utils/general.py b/utils/general.py
index 0c3b44d7f9b0..2f047b351228 100644
--- a/utils/general.py
+++ b/utils/general.py
@@ -482,9 +482,10 @@ def check_dataset(data, autodownload=True):

     # Checks
     for k in 'train', 'val', 'names':
-        assert k in data, f"data.yaml '{k}:' field missing ❌"
+        assert k in data, emojis(f"data.yaml '{k}:' field missing ❌")
     if isinstance(data['names'], (list, tuple)):  # old array format
         data['names'] = dict(enumerate(data['names']))  # convert to dict
+    assert all(isinstance(k, int) for k in data['names'].keys()), 'data.yaml names keys must be integers, i.e. 2: car'
     data['nc'] = len(data['names'])

     # Resolve paths

From ff6e6e328efe43547bc57d4e02ae8ddc3387ef58 Mon Sep 17 00:00:00 2001
From: Ryan Echols
Date: Thu, 17 Nov 2022 12:47:46 -0700
Subject: [PATCH 1559/1976] Fix: try 2 - prevent logging config clobbering (#10192)

* fix: try 2 - prevent logging config clobbering

Previous behavior: loading this repository with `torch.hub.load` clobbers the existing logging configuration by modifying the root logger's configuration.

New behavior: loading this repository with `torch.hub.load` only clobbers the logging configuration for the logger `yolov5` and its descendants.
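For illustration only (not part of the patch): a minimal sketch of the isolation mechanism, a dedicated handler plus `propagate: False`, so nothing is ever attached to the root logger:

```
import logging.config

logging.config.dictConfig({
    'version': 1,
    'disable_existing_loggers': False,  # leave any pre-existing user loggers untouched
    'formatters': {'yolov5': {'format': '%(message)s'}},
    'handlers': {'yolov5': {'class': 'logging.StreamHandler', 'formatter': 'yolov5', 'level': 'INFO'}},
    'loggers': {'yolov5': {'level': 'INFO', 'handlers': ['yolov5'], 'propagate': False}}})

logging.getLogger('yolov5').info('handled by the yolov5 handler only')  # never reaches the root logger
```

Any `logging.basicConfig` the user set up before calling `torch.hub.load` therefore keeps working unchanged.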
This is done in a way that is compatible with Google Colab.

Signed-off-by: Ryan Echols

* chore: fill in comment no-op so a pre-commit hook can auto-format files

Signed-off-by: Ryan Echols

* [pre-commit.ci] auto fixes from pre-commit.com hooks

For more information, see https://pre-commit.ci

Signed-off-by: Ryan Echols
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 utils/general.py | 41 ++++++++++++++++++++++++++---------------
 1 file changed, 26 insertions(+), 15 deletions(-)

diff --git a/utils/general.py b/utils/general.py
index 2f047b351228..8f85557a065a 100644
--- a/utils/general.py
+++ b/utils/general.py
@@ -7,6 +7,7 @@
 import glob
 import inspect
 import logging
+import logging.config
 import math
 import os
 import platform
@@ -111,23 +112,33 @@ def is_writeable(dir, test=False):
         return False


-def set_logging(name=None, verbose=VERBOSE):
-    # Sets level and returns logger
-    if is_kaggle() or is_colab():
-        for h in logging.root.handlers:
-            logging.root.removeHandler(h)  # remove all handlers associated with the root logger object
-    rank = int(os.getenv('RANK', -1))  # rank in world for Multi-GPU trainings
-    level = logging.INFO if verbose and rank in {-1, 0} else logging.ERROR
-    log = logging.getLogger(name)
-    log.setLevel(level)
-    handler = logging.StreamHandler()
-    handler.setFormatter(logging.Formatter("%(message)s"))
-    handler.setLevel(level)
-    log.addHandler(handler)
+LOGGING_NAME = "yolov5"


-set_logging()  # run before defining LOGGER
-LOGGER = logging.getLogger("yolov5")  # define globally (used in train.py, val.py, detect.py, etc.)
+def set_logging(name=LOGGING_NAME, verbose=True):
+    # sets up logging for the given name
+    rank = int(os.getenv('RANK', -1))  # rank in world for Multi-GPU trainings
+    level = logging.INFO if verbose and rank in {-1, 0} else logging.ERROR
+    logging.config.dictConfig({
+        "version": 1,
+        "disable_existing_loggers": False,
+        "formatters": {
+            name: {
+                "format": "%(message)s"}},
+        "handlers": {
+            name: {
+                "class": "logging.StreamHandler",
+                "formatter": name,
+                "level": level,}},
+        "loggers": {
+            name: {
+                "level": level,
+                "handlers": [name],
+                "propagate": False,}}})
+
+
+set_logging(LOGGING_NAME)  # run before defining LOGGER
+LOGGER = logging.getLogger(LOGGING_NAME)  # define globally (used in train.py, val.py, detect.py, etc.)
if platform.system() == 'Windows': for fn in LOGGER.info, LOGGER.warning: setattr(LOGGER, fn.__name__, lambda x: fn(emojis(x))) # emoji safe logging From 467a57f01b393989867426261d3e9a95566e3e24 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 18 Nov 2022 14:19:46 +0100 Subject: [PATCH 1560/1976] Segment prediction labels normalization fix (#10205) * normalize_segments * round remove * swap axes fix --- segment/predict.py | 2 +- utils/general.py | 17 ++++++++++------- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/segment/predict.py b/segment/predict.py index 3ae68240726a..da1097c047c1 100644 --- a/segment/predict.py +++ b/segment/predict.py @@ -156,7 +156,7 @@ def run( # Segments if save_txt: segments = reversed(masks2segments(masks)) - segments = [scale_segments(im.shape[2:], x, im0.shape).round() for x in segments] + segments = [scale_segments(im.shape[2:], x, im0.shape, normalize=True) for x in segments] # Print results for c in det[:, 5].unique(): diff --git a/utils/general.py b/utils/general.py index 8f85557a065a..c543a237d25b 100644 --- a/utils/general.py +++ b/utils/general.py @@ -822,7 +822,7 @@ def scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None): return boxes -def scale_segments(img1_shape, segments, img0_shape, ratio_pad=None): +def scale_segments(img1_shape, segments, img0_shape, ratio_pad=None, normalize=False): # Rescale coords (xyxy) from img1_shape to img0_shape if ratio_pad is None: # calculate from img0_shape gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new @@ -835,6 +835,9 @@ def scale_segments(img1_shape, segments, img0_shape, ratio_pad=None): segments[:, 1] -= pad[1] # y padding segments /= gain clip_segments(segments, img0_shape) + if normalize: + segments[:, 0] /= img0_shape[1] # width + segments[:, 1] /= img0_shape[0] # height return segments @@ -850,14 +853,14 @@ def clip_boxes(boxes, shape): boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, shape[0]) # y1, y2 -def clip_segments(boxes, shape): +def clip_segments(segments, shape): # Clip segments (xy1,xy2,...) 
to image shape (height, width) - if isinstance(boxes, torch.Tensor): # faster individually - boxes[:, 0].clamp_(0, shape[1]) # x - boxes[:, 1].clamp_(0, shape[0]) # y + if isinstance(segments, torch.Tensor): # faster individually + segments[:, 0].clamp_(0, shape[1]) # x + segments[:, 1].clamp_(0, shape[0]) # y else: # np.array (faster grouped) - boxes[:, 0] = boxes[:, 0].clip(0, shape[1]) # x - boxes[:, 1] = boxes[:, 1].clip(0, shape[0]) # y + segments[:, 0] = segments[:, 0].clip(0, shape[1]) # x + segments[:, 1] = segments[:, 1].clip(0, shape[0]) # y def non_max_suppression( From 241d798bb44a2900591786456a61fd73f3993b4f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 18 Nov 2022 15:05:25 +0100 Subject: [PATCH 1561/1976] Created using Colaboratory --- tutorial.ipynb | 320 ++++++++++++++++++++++++------------------------- 1 file changed, 159 insertions(+), 161 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 96f05426b4a8..07a6625a1491 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -5,7 +5,6 @@ "colab": { "name": "YOLOv5 Tutorial", "provenance": [], - "collapsed_sections": [], "machine_shape": "hm", "toc_visible": true }, @@ -16,7 +15,7 @@ "accelerator": "GPU", "widgets": { "application/vnd.jupyter.widget-state+json": { - "9b8caa3522fc4cbab31e13b5dfc7808d": { + "13e0e8b77bf54b25b8893f0b4164315f": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "model_module_version": "1.5.0", @@ -31,14 +30,14 @@ "_view_name": "HBoxView", "box_style": "", "children": [ - "IPY_MODEL_574140e4c4bc48c9a171541a02cd0211", - "IPY_MODEL_35e03ce5090346c9ae602891470fc555", - "IPY_MODEL_c942c208e72d46568b476bb0f2d75496" + "IPY_MODEL_48037f2f7fea4012b9b341f6aee75297", + "IPY_MODEL_3f3b925287274893baf5ed7bb0cf6635", + "IPY_MODEL_c44bdca7c9784b20ba2146250ee744d6" ], - "layout": "IPY_MODEL_65881db1db8a4e9c930fab9172d45143" + "layout": "IPY_MODEL_5b0ed23cd32c4c7d8d9467b7425684ad" } }, - "574140e4c4bc48c9a171541a02cd0211": { + "48037f2f7fea4012b9b341f6aee75297": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", @@ -53,13 +52,13 @@ "_view_name": "HTMLView", "description": "", "description_tooltip": null, - "layout": "IPY_MODEL_60b913d755b34d638478e30705a2dde1", + "layout": "IPY_MODEL_1e10b4db5d644cb78bd6e005bb34038a", "placeholder": "​", - "style": "IPY_MODEL_0856bea36ec148b68522ff9c9eb258d8", + "style": "IPY_MODEL_a58728093ecb4eafb826bee11a84c549", "value": "100%" } }, - "35e03ce5090346c9ae602891470fc555": { + "3f3b925287274893baf5ed7bb0cf6635": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "model_module_version": "1.5.0", @@ -75,15 +74,15 @@ "bar_style": "success", "description": "", "description_tooltip": null, - "layout": "IPY_MODEL_76879f6f2aa54637a7a07faeea2bd684", + "layout": "IPY_MODEL_9ce169fe4b8543c0b26d745daa230f18", "max": 818322941, "min": 0, "orientation": "horizontal", - "style": "IPY_MODEL_0ace3934ec6f4d36a1b3a9e086390926", + "style": "IPY_MODEL_d5da01aca8fb400c96e76f44c9403581", "value": 818322941 } }, - "c942c208e72d46568b476bb0f2d75496": { + "c44bdca7c9784b20ba2146250ee744d6": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", @@ -98,13 +97,13 @@ "_view_name": "HTMLView", "description": "", "description_tooltip": null, - "layout": "IPY_MODEL_d6b7a2243e0c4beca714d99dceec23d6", + "layout": "IPY_MODEL_98cbaa572fdd4c42975f52015672b3a5", "placeholder": "​", - "style": "IPY_MODEL_5966ba6e6f114d8c9d8d1d6b1bd4f4c7", 
- "value": " 780M/780M [02:19<00:00, 6.24MB/s]" + "style": "IPY_MODEL_a636aa81f5cc453099c9e552f0986e63", + "value": " 780M/780M [01:27<00:00, 6.98MB/s]" } }, - "65881db1db8a4e9c930fab9172d45143": { + "5b0ed23cd32c4c7d8d9467b7425684ad": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -156,7 +155,7 @@ "width": null } }, - "60b913d755b34d638478e30705a2dde1": { + "1e10b4db5d644cb78bd6e005bb34038a": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -208,7 +207,7 @@ "width": null } }, - "0856bea36ec148b68522ff9c9eb258d8": { + "a58728093ecb4eafb826bee11a84c549": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", @@ -223,7 +222,7 @@ "description_width": "" } }, - "76879f6f2aa54637a7a07faeea2bd684": { + "9ce169fe4b8543c0b26d745daa230f18": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -275,7 +274,7 @@ "width": null } }, - "0ace3934ec6f4d36a1b3a9e086390926": { + "d5da01aca8fb400c96e76f44c9403581": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "model_module_version": "1.5.0", @@ -291,7 +290,7 @@ "description_width": "" } }, - "d6b7a2243e0c4beca714d99dceec23d6": { + "98cbaa572fdd4c42975f52015672b3a5": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -343,7 +342,7 @@ "width": null } }, - "5966ba6e6f114d8c9d8d1d6b1bd4f4c7": { + "a636aa81f5cc453099c9e552f0986e63": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", @@ -403,7 +402,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "0f9ee467-cea4-48e8-9050-7a76ae1b6141" + "outputId": "bcb6db4a-fc21-4258-9b53-4a760a534656" }, "source": [ "!git clone https://github.com/ultralytics/yolov5 # clone\n", @@ -414,20 +413,20 @@ "import utils\n", "display = utils.notebook_init() # checks" ], - "execution_count": null, + "execution_count": 1, "outputs": [ { "output_type": "stream", "name": "stderr", "text": [ - "YOLOv5 🚀 v6.2-56-g30e674b Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n" + "YOLOv5 🚀 v6.2-250-g467a57f Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ - "Setup complete ✅ (8 CPUs, 51.0 GB RAM, 37.4/166.8 GB disk)\n" + "Setup complete ✅ (2 CPUs, 12.7 GB RAM, 22.6/78.2 GB disk)\n" ] } ] @@ -461,29 +460,29 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "60647b99-e8d4-402c-f444-331bf6746da4" + "outputId": "de684b46-7623-4836-ee44-49cdb320cbf3" }, "source": [ "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images\n", "# display.Image(filename='runs/detect/exp/zidane.jpg', width=600)" ], - "execution_count": null, + "execution_count": 2, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ - "\u001b[34m\u001b[1mdetect: \u001b[0mweights=['yolov5s.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/detect, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False\n", - "YOLOv5 🚀 v6.2-56-g30e674b 
Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", + "\u001b[34m\u001b[1mdetect: \u001b[0mweights=['yolov5s.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/detect, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False, vid_stride=1\n", + "YOLOv5 🚀 v6.2-250-g467a57f Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", "Downloading https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt to yolov5s.pt...\n", - "100% 14.1M/14.1M [00:00<00:00, 27.8MB/s]\n", + "100% 14.1M/14.1M [00:00<00:00, 162MB/s]\n", "\n", "Fusing layers... \n", "YOLOv5s summary: 213 layers, 7225885 parameters, 0 gradients\n", - "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 14.8ms\n", - "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, 20.1ms\n", - "Speed: 0.6ms pre-process, 17.4ms inference, 21.6ms NMS per image at shape (1, 3, 640, 640)\n", + "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 17.2ms\n", + "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, 13.3ms\n", + "Speed: 0.5ms pre-process, 15.2ms inference, 19.5ms NMS per image at shape (1, 3, 640, 640)\n", "Results saved to \u001b[1mruns/detect/exp\u001b[0m\n" ] } @@ -517,27 +516,27 @@ "base_uri": "https://localhost:8080/", "height": 49, "referenced_widgets": [ - "9b8caa3522fc4cbab31e13b5dfc7808d", - "574140e4c4bc48c9a171541a02cd0211", - "35e03ce5090346c9ae602891470fc555", - "c942c208e72d46568b476bb0f2d75496", - "65881db1db8a4e9c930fab9172d45143", - "60b913d755b34d638478e30705a2dde1", - "0856bea36ec148b68522ff9c9eb258d8", - "76879f6f2aa54637a7a07faeea2bd684", - "0ace3934ec6f4d36a1b3a9e086390926", - "d6b7a2243e0c4beca714d99dceec23d6", - "5966ba6e6f114d8c9d8d1d6b1bd4f4c7" + "13e0e8b77bf54b25b8893f0b4164315f", + "48037f2f7fea4012b9b341f6aee75297", + "3f3b925287274893baf5ed7bb0cf6635", + "c44bdca7c9784b20ba2146250ee744d6", + "5b0ed23cd32c4c7d8d9467b7425684ad", + "1e10b4db5d644cb78bd6e005bb34038a", + "a58728093ecb4eafb826bee11a84c549", + "9ce169fe4b8543c0b26d745daa230f18", + "d5da01aca8fb400c96e76f44c9403581", + "98cbaa572fdd4c42975f52015672b3a5", + "a636aa81f5cc453099c9e552f0986e63" ] }, - "outputId": "102dabed-bc31-42fe-9133-d9ce28a2c01e" + "outputId": "b1e02a1f-981f-4739-e75d-10d0204cc32d" }, "source": [ "# Download COCO val\n", "torch.hub.download_url_to_file('https://ultralytics.com/assets/coco2017val.zip', 'tmp.zip') # download (780M - 5000 images)\n", "!unzip -q tmp.zip -d ../datasets && rm tmp.zip # unzip" ], - "execution_count": null, + "execution_count": 3, "outputs": [ { "output_type": "display_data", @@ -548,7 +547,7 @@ "application/vnd.jupyter.widget-view+json": { "version_major": 2, "version_minor": 0, - "model_id": "9b8caa3522fc4cbab31e13b5dfc7808d" + "model_id": "13e0e8b77bf54b25b8893f0b4164315f" } }, "metadata": {} @@ -562,45 +561,43 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "daf60b1b-b098-4657-c863-584f4c9cf078" + "outputId": "9c2f755f-f383-4a9e-cd19-f73a0c763a9c" }, "source": [ "# Validate YOLOv5s on COCO val\n", "!python val.py --weights yolov5s.pt --data coco.yaml --img 640 --half" ], - "execution_count": null, + "execution_count": 4, "outputs": [ { "output_type": 
"stream", "name": "stdout", "text": [ - "\u001b[34m\u001b[1mval: \u001b[0mdata=/content/yolov5/data/coco.yaml, weights=['yolov5s.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, task=val, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=True, project=runs/val, name=exp, exist_ok=False, half=True, dnn=False\n", - "YOLOv5 🚀 v6.2-56-g30e674b Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", + "\u001b[34m\u001b[1mval: \u001b[0mdata=/content/yolov5/data/coco.yaml, weights=['yolov5s.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=val, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=True, project=runs/val, name=exp, exist_ok=False, half=True, dnn=False\n", + "YOLOv5 🚀 v6.2-250-g467a57f Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", "Fusing layers... \n", "YOLOv5s summary: 213 layers, 7225885 parameters, 0 gradients\n", - "Downloading https://ultralytics.com/assets/Arial.ttf to /root/.config/Ultralytics/Arial.ttf...\n", - "100% 755k/755k [00:00<00:00, 52.7MB/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupt: 100% 5000/5000 [00:00<00:00, 10509.20it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupt: 100% 5000/5000 [00:02<00:00, 2019.92it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: /content/datasets/coco/val2017.cache\n", - " Class Images Instances P R mAP@.5 mAP@.5:.95: 100% 157/157 [00:50<00:00, 3.10it/s]\n", + " Class Images Instances P R mAP50 mAP50-95: 100% 157/157 [01:09<00:00, 2.25it/s]\n", " all 5000 36335 0.67 0.521 0.566 0.371\n", - "Speed: 0.1ms pre-process, 1.0ms inference, 1.5ms NMS per image at shape (32, 3, 640, 640)\n", + "Speed: 0.2ms pre-process, 2.7ms inference, 2.1ms NMS per image at shape (32, 3, 640, 640)\n", "\n", "Evaluating pycocotools mAP... 
saving runs/val/exp/yolov5s_predictions.json...\n", "loading annotations into memory...\n", - "Done (t=0.81s)\n", + "Done (t=0.41s)\n", "creating index...\n", "index created!\n", "Loading and preparing results...\n", - "DONE (t=5.62s)\n", + "DONE (t=6.19s)\n", "creating index...\n", "index created!\n", "Running per image evaluation...\n", "Evaluate annotation type *bbox*\n", - "DONE (t=77.03s).\n", + "DONE (t=75.81s).\n", "Accumulating evaluation results...\n", - "DONE (t=14.63s).\n", + "DONE (t=15.26s).\n", " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.374\n", " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.572\n", " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.402\n", @@ -612,7 +609,7 @@ " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.566\n", " Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.378\n", " Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.625\n", - " Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.724\n", + " Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.723\n", "Results saved to \u001b[1mruns/val/exp\u001b[0m\n" ] } @@ -664,7 +661,8 @@ " %pip install -q comet_ml\n", " import comet_ml; comet_ml.init()\n", "elif logger == 'ClearML':\n", - " %pip install -q clearml && clearml-init" + " %pip install -q clearml\n", + " import clearml; clearml.browser_login()" ], "metadata": { "id": "i3oKtE4g-aNn" @@ -679,13 +677,13 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "baa6d4be-3379-4aab-844a-d5a5396c0e49" + "outputId": "7d03d4d2-9a6e-47de-88f4-c673b55c73c5" }, "source": [ "# Train YOLOv5s on COCO128 for 3 epochs\n", "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache" ], - "execution_count": null, + "execution_count": 5, "outputs": [ { "output_type": "stream", @@ -693,17 +691,17 @@ "text": [ "\u001b[34m\u001b[1mtrain: \u001b[0mweights=yolov5s.pt, cfg=, data=coco128.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=-1, artifact_alias=latest\n", "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 🚀 v6.2-56-g30e674b Python-3.7.13 torch-1.12.1+cu113 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n", + "YOLOv5 🚀 v6.2-250-g467a57f Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n", - "\u001b[34m\u001b[1mComet: \u001b[0mrun 'pip install comet' to automatically track and visualize YOLOv5 🚀 runs with Comet\n", "\u001b[34m\u001b[1mClearML: \u001b[0mrun 'pip install clearml' to automatically track, visualize and 
remotely train YOLOv5 🚀 in ClearML\n", + "\u001b[34m\u001b[1mComet: \u001b[0mrun 'pip install comet_ml' to automatically track and visualize YOLOv5 🚀 runs in Comet\n", "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n", "\n", "Dataset not found ⚠️, missing paths ['/content/datasets/coco128/images/train2017']\n", "Downloading https://ultralytics.com/assets/coco128.zip to coco128.zip...\n", - "100% 6.66M/6.66M [00:00<00:00, 41.1MB/s]\n", - "Dataset download success ✅ (0.8s), saved to \u001b[1m/content/datasets\u001b[0m\n", + "100% 6.66M/6.66M [00:00<00:00, 26.1MB/s]\n", + "Dataset download success ✅ (0.9s), saved to \u001b[1m/content/datasets\u001b[0m\n", "\n", " from n params module arguments \n", " 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2] \n", @@ -731,120 +729,120 @@ " 22 [-1, 10] 1 0 models.common.Concat [1] \n", " 23 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n", " 24 [17, 20, 23] 1 229245 models.yolo.Detect [80, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n", - "Model summary: 270 layers, 7235389 parameters, 7235389 gradients, 16.6 GFLOPs\n", + "Model summary: 214 layers, 7235389 parameters, 7235389 gradients, 16.6 GFLOPs\n", "\n", "Transferred 349/349 items from yolov5s.pt\n", "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n", "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n", "\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '/content/datasets/coco128/labels/train2017' images and labels...128 found, 0 missing, 2 empty, 0 corrupt: 100% 128/128 [00:00<00:00, 9659.25it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '/content/datasets/coco128/labels/train2017' images and labels...126 found, 2 missing, 0 empty, 0 corrupt: 100% 128/128 [00:00<00:00, 1989.66it/s]\n", "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /content/datasets/coco128/labels/train2017.cache\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 951.31it/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupt: 100% 128/128 [00:00 Date: Fri, 18 Nov 2022 16:32:29 +0100 Subject: [PATCH 1562/1976] Created using Colaboratory --- segment/tutorial.ipynb | 1500 +++++++++++++++------------------------- 1 file changed, 572 insertions(+), 928 deletions(-) diff --git a/segment/tutorial.ipynb b/segment/tutorial.ipynb index 079bfe3057bc..c26878fb0dbf 100644 --- a/segment/tutorial.ipynb +++ b/segment/tutorial.ipynb @@ -1,956 +1,600 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "id": "t6MPjfT5NrKQ" - }, - "source": [ - "
\n", - "\n", - " \n", - " \n", - "\n", - "\n", - "
\n", - " \"Run\n", - " \"Open\n", - " \"Open\n", - "
\n", - "\n", - "This YOLOv5 🚀 notebook by Ultralytics presents simple train, validate and predict examples to help start your AI adventure.
See GitHub for community support or contact us for professional support.\n", - "\n", - "
" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "7mGmQbAO5pQb" - }, - "source": [ - "# Setup\n", - "\n", - "Clone GitHub [repository](https://github.com/ultralytics/yolov5), install [dependencies](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) and check PyTorch and GPU." - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "t6MPjfT5NrKQ" + }, + "source": [ + "
\n", + "\n", + " \n", + " \n", + "\n", + "\n", + "
\n", + " \"Run\n", + " \"Open\n", + " \"Open\n", + "
\n", + "\n", + "This YOLOv5 🚀 notebook by Ultralytics presents simple train, validate and predict examples to help start your AI adventure.
See GitHub for community support or contact us for professional support.\n", + "\n", + "
" + ] }, - "id": "wbvMlHd_QwMG", - "outputId": "0f9ee467-cea4-48e8-9050-7a76ae1b6141" - }, - "outputs": [ { - "name": "stderr", - "output_type": "stream", - "text": [ - "YOLOv5 🚀 v6.2-225-gf223cb2 Python-3.7.12 torch-1.12.1+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16161MiB)\n" - ] + "cell_type": "markdown", + "metadata": { + "id": "7mGmQbAO5pQb" + }, + "source": [ + "# Setup\n", + "\n", + "Clone GitHub [repository](https://github.com/ultralytics/yolov5), install [dependencies](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) and check PyTorch and GPU." + ] }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "Setup complete ✅ (4 CPUs, 14.7 GB RAM, 107.3/196.6 GB disk)\n" - ] - } - ], - "source": [ - "!git clone https://github.com/ultralytics/yolov5 # clone\n", - "%cd yolov5\n", - "%pip install -qr requirements.txt # install\n", - "\n", - "import torch\n", - "import utils\n", - "display = utils.notebook_init() # checks" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "4JnkELT0cIJg" - }, - "source": [ - "# 1. Predict\n", - "\n", - "`segment/predict.py` runs YOLOv5 instance segmentation inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/predict`. Example inference sources are:\n", - "\n", - "```shell\n", - "python segment/predict.py --source 0 # webcam\n", - " img.jpg # image \n", - " vid.mp4 # video\n", - " screen # screenshot\n", - " path/ # directory\n", - " 'path/*.jpg' # glob\n", - " 'https://youtu.be/Zgi9g1ksQHc' # YouTube\n", - " 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" + "cell_type": "code", + "execution_count": 1, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "wbvMlHd_QwMG", + "outputId": "d1e33dfc-9ad4-436e-f1e5-01acee40c029" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stderr", + "text": [ + "YOLOv5 🚀 v6.2-251-g241d798 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n" + ] + }, + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Setup complete ✅ (2 CPUs, 12.7 GB RAM, 22.6/78.2 GB disk)\n" + ] + } + ], + "source": [ + "!git clone https://github.com/ultralytics/yolov5 # clone\n", + "%cd yolov5\n", + "%pip install -qr requirements.txt # install\n", + "\n", + "import torch\n", + "import utils\n", + "display = utils.notebook_init() # checks" + ] }, - "id": "zR9ZbuQCH7FX", - "outputId": "60647b99-e8d4-402c-f444-331bf6746da4" - }, - "outputs": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[34m\u001b[1msegment/predict: \u001b[0mweights=['yolov5s-seg.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/predict-seg, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False, vid_stride=1, retina_masks=False\n", - "YOLOv5 🚀 v6.2-225-gf223cb2 Python-3.7.12 torch-1.12.1+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16161MiB)\n", - "\n", - "Fusing layers... 
\n", - "YOLOv5s-seg summary: 224 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\n", - "image 1/2 /home/paguerrie/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 5.6ms\n", - "image 2/2 /home/paguerrie/yolov5/data/images/zidane.jpg: 384x640 2 persons, 1 tie, 5.5ms\n", - "Speed: 0.4ms pre-process, 5.6ms inference, 1.1ms NMS per image at shape (1, 3, 640, 640)\n", - "Results saved to \u001b[1mruns/predict-seg/exp\u001b[0m\n" - ] - } - ], - "source": [ - "!python segment/predict.py --weights yolov5s-seg.pt --img 640 --conf 0.25 --source data/images\n", - "#display.Image(filename='runs/predict-seg/exp/zidane.jpg', width=600)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "hkAzDWJ7cWTr" - }, - "source": [ - "        \n", - "" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "0eq1SMWl6Sfn" - }, - "source": [ - "# 2. Validate\n", - "Validate a model's accuracy on the [COCO](https://cocodataset.org/#home) dataset's `val` or `test` splits. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag." - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 49, - "referenced_widgets": [ - "9b8caa3522fc4cbab31e13b5dfc7808d", - "574140e4c4bc48c9a171541a02cd0211", - "35e03ce5090346c9ae602891470fc555", - "c942c208e72d46568b476bb0f2d75496", - "65881db1db8a4e9c930fab9172d45143", - "60b913d755b34d638478e30705a2dde1", - "0856bea36ec148b68522ff9c9eb258d8", - "76879f6f2aa54637a7a07faeea2bd684", - "0ace3934ec6f4d36a1b3a9e086390926", - "d6b7a2243e0c4beca714d99dceec23d6", - "5966ba6e6f114d8c9d8d1d6b1bd4f4c7" - ] + "cell_type": "markdown", + "metadata": { + "id": "4JnkELT0cIJg" + }, + "source": [ + "# 1. Predict\n", + "\n", + "`segment/predict.py` runs YOLOv5 instance segmentation inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/predict`. Example inference sources are:\n", + "\n", + "```shell\n", + "python segment/predict.py --source 0 # webcam\n", + " img.jpg # image \n", + " vid.mp4 # video\n", + " screen # screenshot\n", + " path/ # directory\n", + " 'path/*.jpg' # glob\n", + " 'https://youtu.be/Zgi9g1ksQHc' # YouTube\n", + " 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream\n", + "```" + ] }, - "id": "WQPtK1QYVaD_", - "outputId": "102dabed-bc31-42fe-9133-d9ce28a2c01e" - }, - "outputs": [ { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "89f5f0a84ca642378724f1bf05f17e0d", - "version_major": 2, - "version_minor": 0 + "cell_type": "code", + "execution_count": 2, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "zR9ZbuQCH7FX", + "outputId": "e206fcec-cf42-4754-8a42-39bc3603eba8" }, - "text/plain": [ - " 0%| | 0.00/6.79M [00:00

\n", - "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", - "

\n", - "\n", - "Train a YOLOv5s-seg model on the [COCO128](https://www.kaggle.com/ultralytics/coco128) dataset with `--data coco128-seg.yaml`, starting from pretrained `--weights yolov5s-seg.pt`, or from randomly initialized `--weights '' --cfg yolov5s-seg.yaml`.\n", - "\n", - "- **Pretrained [Models](https://github.com/ultralytics/yolov5/tree/master/models)** are downloaded\n", - "automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\n", - "- **[Datasets](https://github.com/ultralytics/yolov5/tree/master/data)** available for autodownload include: [COCO](https://github.com/ultralytics/yolov5/blob/master/data/coco.yaml), [COCO128](https://github.com/ultralytics/yolov5/blob/master/data/coco128.yaml), [VOC](https://github.com/ultralytics/yolov5/blob/master/data/VOC.yaml), [Argoverse](https://github.com/ultralytics/yolov5/blob/master/data/Argoverse.yaml), [VisDrone](https://github.com/ultralytics/yolov5/blob/master/data/VisDrone.yaml), [GlobalWheat](https://github.com/ultralytics/yolov5/blob/master/data/GlobalWheat2020.yaml), [xView](https://github.com/ultralytics/yolov5/blob/master/data/xView.yaml), [Objects365](https://github.com/ultralytics/yolov5/blob/master/data/Objects365.yaml), [SKU-110K](https://github.com/ultralytics/yolov5/blob/master/data/SKU-110K.yaml).\n", - "- **Training Results** are saved to `runs/train-seg/` with incrementing run directories, i.e. `runs/train-seg/exp2`, `runs/train-seg/exp3` etc.\n", - "

\n", - "\n", - "A **Mosaic Dataloader** is used for training which combines 4 images into 1 mosaic.\n", - "\n", - "## Train on Custom Data with Roboflow 🌟 NEW\n", - "\n", - "[Roboflow](https://roboflow.com/?ref=ultralytics) enables you to easily **organize, label, and prepare** a high quality dataset with your own custom data. Roboflow also makes it easy to establish an active learning pipeline, collaborate with your team on dataset improvement, and integrate directly into your model building workflow with the `roboflow` pip package.\n", - "\n", - "- Custom Training Example: [https://blog.roboflow.com/train-yolov5-instance-segmentation-custom-dataset/](https://blog.roboflow.com/train-yolov5-instance-segmentation-custom-dataset/?ref=ultralytics)\n", - "- Custom Training Notebook: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1JTz7kpmHsg-5qwVz2d2IH3AaenI1tv0N?usp=sharing)\n", - "
\n", - "\n", - "

Label images lightning fast (including with model-assisted labeling)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "i3oKtE4g-aNn" - }, - "outputs": [], - "source": [ - "#@title Select YOLOv5 🚀 logger {run: 'auto'}\n", - "logger = 'TensorBoard' #@param ['TensorBoard', 'Comet', 'ClearML']\n", - "\n", - "if logger == 'TensorBoard':\n", - " %load_ext tensorboard\n", - " %tensorboard --logdir runs/train-seg\n", - "elif logger == 'Comet':\n", - " %pip install -q comet_ml\n", - " import comet_ml; comet_ml.init()\n", - "elif logger == 'ClearML':\n", - " %pip install -q clearml && clearml-init" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" + "cell_type": "markdown", + "metadata": { + "id": "hkAzDWJ7cWTr" + }, + "source": [ + "        \n", + "" + ] }, - "id": "1NcFxRcFdJ_O", - "outputId": "baa6d4be-3379-4aab-844a-d5a5396c0e49" - }, - "outputs": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[34m\u001b[1msegment/train: \u001b[0mweights=yolov5s-seg.pt, cfg=, data=coco128-seg.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train-seg, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, mask_ratio=4, no_overlap=False\n", - "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 🚀 v6.2-225-gf223cb2 Python-3.7.12 torch-1.12.1+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16161MiB)\n", - "\n", - "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n", - "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train-seg', view at http://localhost:6006/\n", - "\n", - " from n params module arguments \n", - " 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2] \n", - " 1 -1 1 18560 models.common.Conv [32, 64, 3, 2] \n", - " 2 -1 1 18816 models.common.C3 [64, 64, 1] \n", - " 3 -1 1 73984 models.common.Conv [64, 128, 3, 2] \n", - " 4 -1 2 115712 models.common.C3 [128, 128, 2] \n", - " 5 -1 1 295424 models.common.Conv [128, 256, 3, 2] \n", - " 6 -1 3 625152 models.common.C3 [256, 256, 3] \n", - " 7 -1 1 1180672 models.common.Conv [256, 512, 3, 2] \n", - " 8 -1 1 1182720 models.common.C3 [512, 512, 1] \n", - " 9 -1 1 656896 models.common.SPPF [512, 512, 5] \n", - " 10 -1 1 131584 models.common.Conv [512, 256, 1, 1] \n", - " 11 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", - " 12 [-1, 6] 1 0 models.common.Concat [1] \n", - " 13 -1 1 361984 models.common.C3 [512, 256, 1, False] \n", - " 14 -1 1 33024 models.common.Conv [256, 128, 1, 1] \n", - " 15 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", - " 16 [-1, 4] 1 0 models.common.Concat [1] \n", - " 17 -1 1 90880 models.common.C3 [256, 128, 1, False] \n", - " 18 -1 1 147712 models.common.Conv [128, 128, 3, 2] 
\n", - " 19 [-1, 14] 1 0 models.common.Concat [1] \n", - " 20 -1 1 296448 models.common.C3 [256, 256, 1, False] \n", - " 21 -1 1 590336 models.common.Conv [256, 256, 3, 2] \n", - " 22 [-1, 10] 1 0 models.common.Concat [1] \n", - " 23 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n", - " 24 [17, 20, 23] 1 615133 models.yolo.Segment [80, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], 32, 128, [128, 256, 512]]\n", - "Model summary: 225 layers, 7621277 parameters, 7621277 gradients, 26.6 GFLOPs\n", - "\n", - "Transferred 367/367 items from yolov5s-seg.pt\n", - "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n", - "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 60 weight(decay=0.0), 63 weight(decay=0.0005), 63 bias\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '/home/paguerrie/datasets/coco128-seg/labels/train2017.cache' im\u001b[0m\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100%|██████████| 128/128 [00:00<00:00, 544.41\u001b[0m\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '/home/paguerrie/datasets/coco128-seg/labels/train2017.cache' imag\u001b[0m\n", - "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB ram): 100%|██████████| 128/128 [00:00<00:00, 138.66it\u001b[0m\n", - "\n", - "\u001b[34m\u001b[1mAutoAnchor: \u001b[0m4.27 anchors/target, 0.994 Best Possible Recall (BPR). Current anchors are a good fit to dataset ✅\n", - "Plotting labels to runs/train-seg/exp/labels.jpg... \n", - "Image sizes 640 train, 640 val\n", - "Using 4 dataloader workers\n", - "Logging results to \u001b[1mruns/train-seg/exp\u001b[0m\n", - "Starting training for 3 epochs...\n", - "\n", - " Epoch GPU_mem box_loss seg_loss obj_loss cls_loss Instances Size\n", - " 0/2 4.67G 0.04464 0.05134 0.06548 0.01895 219 \n", - " Class Images Instances Box(P R mAP50 m\n", - " all 128 929 0.727 0.661 0.725 0.496 0.688 0.629 0.673 0.413\n", - "\n", - " Epoch GPU_mem box_loss seg_loss obj_loss cls_loss Instances Size\n", - " 1/2 6.36G 0.04102 0.04702 0.06873 0.01734 263 \n", - " Class Images Instances Box(P R mAP50 m\n", - " all 128 929 0.752 0.676 0.743 0.51 0.704 0.64 0.682 0.425\n", - "\n", - " Epoch GPU_mem box_loss seg_loss obj_loss cls_loss Instances Size\n", - " 2/2 6.36G 0.0421 0.04463 0.05951 0.01746 245 \n", - " Class Images Instances Box(P R mAP50 m\n", - " all 128 929 0.776 0.674 0.757 0.514 0.72 0.632 0.684 0.429\n", - "\n", - "3 epochs completed in 0.006 hours.\n", - "Optimizer stripped from runs/train-seg/exp/weights/last.pt, 15.6MB\n", - "Optimizer stripped from runs/train-seg/exp/weights/best.pt, 15.6MB\n", - "\n", - "Validating runs/train-seg/exp/weights/best.pt...\n", - "Fusing layers... 
\n", - "Model summary: 165 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\n", - " Class Images Instances Box(P R mAP50 m\n", - " all 128 929 0.775 0.673 0.758 0.515 0.72 0.632 0.684 0.427\n", - " person 128 254 0.829 0.745 0.833 0.545 0.776 0.697 0.764 0.406\n", - " bicycle 128 6 0.614 0.333 0.539 0.331 0.614 0.333 0.531 0.308\n", - " car 128 46 0.774 0.413 0.571 0.266 0.693 0.37 0.493 0.204\n", - " motorcycle 128 5 0.817 0.901 0.895 0.678 0.817 0.901 0.895 0.47\n", - " airplane 128 6 1 0.951 0.995 0.71 0.882 0.833 0.839 0.515\n", - " bus 128 7 0.695 0.714 0.757 0.661 0.695 0.714 0.757 0.627\n", - " train 128 3 1 0.935 0.995 0.566 1 0.935 0.995 0.731\n", - " truck 128 12 0.741 0.417 0.463 0.283 0.741 0.417 0.4 0.27\n", - " boat 128 6 0.653 0.32 0.452 0.17 0.653 0.32 0.328 0.149\n", - " traffic light 128 14 0.627 0.36 0.527 0.234 0.503 0.289 0.409 0.293\n", - " stop sign 128 2 0.829 1 0.995 0.747 0.829 1 0.995 0.821\n", - " bench 128 9 0.822 0.667 0.76 0.414 0.685 0.556 0.678 0.228\n", - " bird 128 16 0.967 1 0.995 0.675 0.906 0.938 0.909 0.516\n", - " cat 128 4 0.778 0.89 0.945 0.728 0.778 0.89 0.945 0.69\n", - " dog 128 9 1 0.65 0.973 0.697 1 0.65 0.939 0.615\n", - " horse 128 2 0.727 1 0.995 0.672 0.727 1 0.995 0.2\n", - " elephant 128 17 1 0.912 0.946 0.704 0.871 0.794 0.822 0.565\n", - " bear 128 1 0.626 1 0.995 0.895 0.626 1 0.995 0.895\n", - " zebra 128 4 0.865 1 0.995 0.934 0.865 1 0.995 0.822\n", - " giraffe 128 9 0.975 1 0.995 0.672 0.866 0.889 0.876 0.473\n", - " backpack 128 6 1 0.573 0.707 0.38 0.891 0.5 0.524 0.249\n", - " umbrella 128 18 0.744 0.889 0.926 0.552 0.465 0.556 0.483 0.262\n", - " handbag 128 19 0.799 0.209 0.432 0.225 0.799 0.209 0.403 0.201\n", - " tie 128 7 0.968 0.857 0.857 0.53 0.968 0.857 0.857 0.519\n", - " suitcase 128 4 0.821 1 0.995 0.696 0.821 1 0.995 0.665\n", - " frisbee 128 5 0.777 0.8 0.761 0.613 0.777 0.8 0.761 0.558\n", - " skis 128 1 0.721 1 0.995 0.497 0.721 1 0.995 0.398\n" - ] + "cell_type": "markdown", + "metadata": { + "id": "0eq1SMWl6Sfn" + }, + "source": [ + "# 2. Validate\n", + "Validate a model's accuracy on the [COCO](https://cocodataset.org/#home) dataset's `val` or `test` splits. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag." 
+ ] }, { - "name": "stdout", - "output_type": "stream", - "text": [ - " snowboard 128 7 0.851 0.857 0.887 0.599 0.284 0.286 0.253 0.151\n", - " sports ball 128 6 0.961 0.667 0.687 0.429 0.721 0.5 0.481 0.476\n", - " kite 128 10 0.508 0.312 0.48 0.238 0.508 0.312 0.406 0.122\n", - " baseball bat 128 4 0.331 0.5 0.526 0.249 0.331 0.5 0.376 0.102\n", - " baseball glove 128 7 0.876 0.571 0.579 0.282 0.657 0.429 0.429 0.343\n", - " skateboard 128 5 1 0.697 0.824 0.471 0.707 0.497 0.552 0.299\n", - " tennis racket 128 7 0.524 0.714 0.646 0.426 0.524 0.714 0.646 0.452\n", - " bottle 128 18 0.657 0.389 0.531 0.359 0.657 0.389 0.569 0.362\n", - " wine glass 128 16 0.752 0.938 0.924 0.435 0.451 0.562 0.568 0.341\n", - " cup 128 36 0.859 0.676 0.848 0.503 0.823 0.648 0.793 0.496\n", - " fork 128 6 0.904 0.333 0.462 0.309 0.452 0.167 0.195 0.107\n", - " knife 128 16 0.749 0.5 0.665 0.413 0.655 0.438 0.523 0.314\n", - " spoon 128 22 0.787 0.409 0.577 0.275 0.787 0.409 0.528 0.236\n", - " bowl 128 28 0.793 0.679 0.744 0.577 0.751 0.643 0.688 0.366\n", - " banana 128 1 0.931 1 0.995 0.398 0.931 1 0.995 0.497\n", - " sandwich 128 2 1 0 0.828 0.713 1 0 0.498 0.449\n", - " orange 128 4 0.588 1 0.995 0.666 0.588 1 0.995 0.672\n", - " broccoli 128 11 0.563 0.455 0.356 0.258 0.563 0.455 0.362 0.259\n", - " carrot 128 24 0.683 0.75 0.753 0.489 0.758 0.833 0.835 0.451\n", - " hot dog 128 2 0.583 1 0.995 0.995 0.583 1 0.995 0.796\n", - " pizza 128 5 0.801 0.8 0.962 0.644 0.801 0.8 0.962 0.583\n", - " donut 128 14 0.704 1 0.889 0.759 0.704 1 0.889 0.683\n", - " cake 128 4 0.904 1 0.995 0.896 0.904 1 0.995 0.838\n", - " chair 128 35 0.672 0.543 0.629 0.333 0.708 0.571 0.583 0.284\n", - " couch 128 6 0.827 0.5 0.821 0.583 0.827 0.5 0.681 0.352\n", - " potted plant 128 14 0.809 0.908 0.884 0.584 0.809 0.908 0.884 0.474\n", - " bed 128 3 1 0.654 0.913 0.36 1 0.654 0.913 0.418\n", - " dining table 128 13 0.803 0.385 0.557 0.361 0.321 0.154 0.126 0.0487\n", - " toilet 128 2 0.802 1 0.995 0.921 0.802 1 0.995 0.698\n", - " tv 128 2 0.59 1 0.995 0.846 0.59 1 0.995 0.846\n", - " laptop 128 3 1 0 0.451 0.324 1 0 0.372 0.157\n", - " mouse 128 2 1 0 0 0 1 0 0 0\n", - " remote 128 8 0.831 0.5 0.625 0.495 0.831 0.5 0.629 0.436\n", - " cell phone 128 8 0.867 0.375 0.482 0.26 0.578 0.25 0.302 0.127\n", - " microwave 128 3 0.782 1 0.995 0.695 0.782 1 0.995 0.585\n", - " oven 128 5 0.389 0.4 0.432 0.299 0.584 0.6 0.642 0.411\n", - " sink 128 6 0.657 0.5 0.491 0.373 0.657 0.5 0.436 0.303\n", - " refrigerator 128 5 0.729 0.8 0.778 0.547 0.729 0.8 0.778 0.496\n", - " book 128 29 0.77 0.231 0.451 0.186 0.77 0.231 0.399 0.136\n", - " clock 128 9 0.798 0.889 0.956 0.747 0.798 0.889 0.926 0.68\n", - " vase 128 2 0.437 1 0.995 0.895 0.437 1 0.995 0.796\n", - " scissors 128 1 0 0 0.0226 0.0113 0 0 0 0\n", - " teddy bear 128 21 0.815 0.629 0.877 0.521 0.753 0.582 0.793 0.435\n", - " toothbrush 128 5 1 0.719 0.995 0.737 1 0.719 0.995 0.606\n", - "Results saved to \u001b[1mruns/train-seg/exp\u001b[0m\n" - ] - } - ], - "source": [ - "# Train YOLOv5s on COCO128 for 3 epochs\n", - "!python segment/train.py --img 640 --batch 16 --epochs 3 --data coco128-seg.yaml --weights yolov5s-seg.pt --cache" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "15glLzbQx5u0" - }, - "source": [ - "# 4. Visualize" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "nWOsI5wJR1o3" - }, - "source": [ - "## Comet Logging and Visualization 🌟 NEW\n", - "[Comet](https://bit.ly/yolov5-readme-comet) is now fully integrated with YOLOv5. 
Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", - "\n", - "Getting started is easy:\n", - "```shell\n", - "pip install comet_ml # 1. install\n", - "export COMET_API_KEY= # 2. paste API key\n", - "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n", - "```\n", - "\n", - "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://bit.ly/yolov5-colab-comet-docs). Get started by trying out the Comet Colab Notebook:\n", - "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", - "\n", - "\"yolo-ui\"" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "Lay2WsTjNJzP" - }, - "source": [ - "## ClearML Logging and Automation 🌟 NEW\n", - "\n", - "[ClearML](https://cutt.ly/yolov5-notebook-clearml) is completely integrated into YOLOv5 to track your experimentation, manage dataset versions and even remotely execute training runs. To enable ClearML (check cells above):\n", - "\n", - "- `pip install clearml`\n", - "- run `clearml-init` to connect to a ClearML server (**deploy your own [open-source server](https://github.com/allegroai/clearml-server)**, or use our [free hosted server](https://cutt.ly/yolov5-notebook-clearml))\n", - "\n", - "You'll get all the great expected features from an experiment manager: live updates, model upload, experiment comparison etc. but ClearML also tracks uncommitted changes and installed packages for example. Thanks to that ClearML Tasks (which is what we call experiments) are also reproducible on different machines! With only 1 extra line, we can schedule a YOLOv5 training task on a queue to be executed by any number of ClearML Agents (workers).\n", - "\n", - "You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) for details!\n", - "\n", - "\n", - "\"ClearML" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "-WPvRbS5Swl6" - }, - "source": [ - "## Local Logging\n", - "\n", - "Training results are automatically logged with [Tensorboard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) loggers to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc.\n", - "\n", - "This directory contains train and val statistics, mosaics, labels, predictions and augmentated mosaics, as well as metrics and charts including precision-recall (PR) curves and confusion matrices. 
\n", - "\n", - "\"Local\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "Zelyeqbyt3GD" - }, - "source": [ - "# Environments\n", - "\n", - "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n", - "\n", - "- **Notebooks** with free GPU: \"Run \"Open \"Open\n", - "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)\n", - "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)\n", - "- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) \"Docker\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "6Qu7Iesl0p54" - }, - "source": [ - "# Status\n", - "\n", - "![YOLOv5 CI](https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg)\n", - "\n", - "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "IEijrePND_2I" - }, - "source": [ - "# Appendix\n", - "\n", - "Additional content below." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "GMusP4OAxFu6" - }, - "outputs": [], - "source": [ - "# YOLOv5 PyTorch HUB Inference (DetectionModels only)\n", - "import torch\n", - "\n", - "model = torch.hub.load('ultralytics/yolov5', 'yolov5s-seg') # yolov5n - yolov5x6 or custom\n", - "im = 'https://ultralytics.com/images/zidane.jpg' # file, Path, PIL.Image, OpenCV, nparray, list\n", - "results = model(im) # inference\n", - "results.print() # or .show(), .save(), .crop(), .pandas(), etc." 
- ] - } - ], - "metadata": { - "accelerator": "GPU", - "colab": { - "collapsed_sections": [], - "machine_shape": "hm", - "name": "YOLOv5 Tutorial", - "provenance": [], - "toc_visible": true - }, - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.12" - }, - "widgets": { - "application/vnd.jupyter.widget-state+json": { - "0856bea36ec148b68522ff9c9eb258d8": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "DescriptionStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "description_width": "" - } + "cell_type": "code", + "execution_count": 3, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "WQPtK1QYVaD_", + "outputId": "f7eba0ae-49d1-405b-a1cf-169212fadc2c" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Downloading https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017labels-segments.zip ...\n", + "Downloading http://images.cocodataset.org/zips/val2017.zip ...\n", + "######################################################################## 100.0%\n", + "######################################################################## 100.0%\n" + ] + } + ], + "source": [ + "# Download COCO val\n", + "!bash data/scripts/get_coco.sh --val --segments # download (780M - 5000 images)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "X58w8JLpMnjH", + "outputId": "73533135-6995-4f2d-adb0-3acb5ef9b300" + }, + "outputs": [ + { + "metadata": { + "tags": null + }, + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[34m\u001b[1msegment/val: \u001b[0mdata=/content/yolov5/data/coco.yaml, weights=['yolov5s-seg.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=val, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val-seg, name=exp, exist_ok=False, half=True, dnn=False\n", + "YOLOv5 🚀 v6.2-251-g241d798 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "\n", + "Fusing layers... 
\n", + "YOLOv5s-seg summary: 224 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupt: 100% 5000/5000 [00:03<00:00, 1420.92it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: /content/datasets/coco/val2017.cache\n", + " Class Images Instances Box(P R mAP50 mAP50-95) Mask(P R mAP50 mAP50-95): 100% 157/157 [01:54<00:00, 1.37it/s]\n", + " all 5000 36335 0.673 0.517 0.566 0.373 0.672 0.49 0.532 0.319\n", + "Speed: 0.9ms pre-process, 3.9ms inference, 3.0ms NMS per image at shape (32, 3, 640, 640)\n", + "Results saved to \u001b[1mruns/val-seg/exp\u001b[0m\n" + ] + } + ], + "source": [ + "# Validate YOLOv5s-seg on COCO val\n", + "!python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 --half" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ZY2VXXXu74w5" + }, + "source": [ + "# 3. Train\n", + "\n", + "

\n", + "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", + "

\n", + "\n", + "Train a YOLOv5s-seg model on the [COCO128](https://www.kaggle.com/ultralytics/coco128) dataset with `--data coco128-seg.yaml`, starting from pretrained `--weights yolov5s-seg.pt`, or from randomly initialized `--weights '' --cfg yolov5s-seg.yaml`.\n", + "\n", + "- **Pretrained [Models](https://github.com/ultralytics/yolov5/tree/master/models)** are downloaded\n", + "automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\n", + "- **[Datasets](https://github.com/ultralytics/yolov5/tree/master/data)** available for autodownload include: [COCO](https://github.com/ultralytics/yolov5/blob/master/data/coco.yaml), [COCO128](https://github.com/ultralytics/yolov5/blob/master/data/coco128.yaml), [VOC](https://github.com/ultralytics/yolov5/blob/master/data/VOC.yaml), [Argoverse](https://github.com/ultralytics/yolov5/blob/master/data/Argoverse.yaml), [VisDrone](https://github.com/ultralytics/yolov5/blob/master/data/VisDrone.yaml), [GlobalWheat](https://github.com/ultralytics/yolov5/blob/master/data/GlobalWheat2020.yaml), [xView](https://github.com/ultralytics/yolov5/blob/master/data/xView.yaml), [Objects365](https://github.com/ultralytics/yolov5/blob/master/data/Objects365.yaml), [SKU-110K](https://github.com/ultralytics/yolov5/blob/master/data/SKU-110K.yaml).\n", + "- **Training Results** are saved to `runs/train-seg/` with incrementing run directories, i.e. `runs/train-seg/exp2`, `runs/train-seg/exp3` etc.\n", + "

\n", + "\n", + "A **Mosaic Dataloader** is used for training which combines 4 images into 1 mosaic.\n", + "\n", + "## Train on Custom Data with Roboflow 🌟 NEW\n", + "\n", + "[Roboflow](https://roboflow.com/?ref=ultralytics) enables you to easily **organize, label, and prepare** a high quality dataset with your own custom data. Roboflow also makes it easy to establish an active learning pipeline, collaborate with your team on dataset improvement, and integrate directly into your model building workflow with the `roboflow` pip package.\n", + "\n", + "- Custom Training Example: [https://blog.roboflow.com/train-yolov5-instance-segmentation-custom-dataset/](https://blog.roboflow.com/train-yolov5-instance-segmentation-custom-dataset/?ref=ultralytics)\n", + "- Custom Training Notebook: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1JTz7kpmHsg-5qwVz2d2IH3AaenI1tv0N?usp=sharing)\n", + "
\n", + "\n", + "

Label images lightning fast (including with model-assisted labeling)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "i3oKtE4g-aNn" + }, + "outputs": [], + "source": [ + "#@title Select YOLOv5 🚀 logger {run: 'auto'}\n", + "logger = 'TensorBoard' #@param ['TensorBoard', 'Comet', 'ClearML']\n", + "\n", + "if logger == 'TensorBoard':\n", + " %load_ext tensorboard\n", + " %tensorboard --logdir runs/train-seg\n", + "elif logger == 'Comet':\n", + " %pip install -q comet_ml\n", + " import comet_ml; comet_ml.init()\n", + "elif logger == 'ClearML':\n", + " import clearml; clearml.browser_login()" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "1NcFxRcFdJ_O", + "outputId": "8e349df5-9910-4a91-a845-748def15d3d7" + }, + "outputs": [ + { + "metadata": { + "tags": null + }, + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[34m\u001b[1msegment/train: \u001b[0mweights=yolov5s-seg.pt, cfg=, data=coco128-seg.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train-seg, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, mask_ratio=4, no_overlap=False\n", + "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", + "YOLOv5 🚀 v6.2-251-g241d798 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "\n", + "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n", + "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train-seg', view at http://localhost:6006/\n", + "\n", + "Dataset not found ⚠️, missing paths ['/content/datasets/coco128-seg/images/train2017']\n", + "Downloading https://ultralytics.com/assets/coco128-seg.zip to coco128-seg.zip...\n", + "100% 6.79M/6.79M [00:01<00:00, 4.42MB/s]\n", + "Dataset download success ✅ (2.8s), saved to \u001b[1m/content/datasets\u001b[0m\n", + "\n", + " from n params module arguments \n", + " 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2] \n", + " 1 -1 1 18560 models.common.Conv [32, 64, 3, 2] \n", + " 2 -1 1 18816 models.common.C3 [64, 64, 1] \n", + " 3 -1 1 73984 models.common.Conv [64, 128, 3, 2] \n", + " 4 -1 2 115712 models.common.C3 [128, 128, 2] \n", + " 5 -1 1 295424 models.common.Conv [128, 256, 3, 2] \n", + " 6 -1 3 625152 models.common.C3 [256, 256, 3] \n", + " 7 -1 1 1180672 models.common.Conv [256, 512, 3, 2] \n", + " 8 -1 1 1182720 models.common.C3 [512, 512, 1] \n", + " 9 -1 1 656896 models.common.SPPF [512, 512, 5] \n", + " 10 -1 1 131584 models.common.Conv [512, 256, 1, 1] \n", + " 11 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", + " 12 [-1, 6] 1 0 models.common.Concat [1] \n", + " 13 -1 1 361984 models.common.C3 [512, 256, 1, False] \n", + " 14 -1 1 33024 models.common.Conv [256, 128, 1, 1] 
\n", + " 15 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", + " 16 [-1, 4] 1 0 models.common.Concat [1] \n", + " 17 -1 1 90880 models.common.C3 [256, 128, 1, False] \n", + " 18 -1 1 147712 models.common.Conv [128, 128, 3, 2] \n", + " 19 [-1, 14] 1 0 models.common.Concat [1] \n", + " 20 -1 1 296448 models.common.C3 [256, 256, 1, False] \n", + " 21 -1 1 590336 models.common.Conv [256, 256, 3, 2] \n", + " 22 [-1, 10] 1 0 models.common.Concat [1] \n", + " 23 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n", + " 24 [17, 20, 23] 1 615133 models.yolo.Segment [80, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], 32, 128, [128, 256, 512]]\n", + "Model summary: 225 layers, 7621277 parameters, 7621277 gradients, 26.6 GFLOPs\n", + "\n", + "Transferred 367/367 items from yolov5s-seg.pt\n", + "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n", + "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 60 weight(decay=0.0), 63 weight(decay=0.0005), 63 bias\n", + "\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '/content/datasets/coco128-seg/labels/train2017' images and labels...126 found, 2 missing, 0 empty, 0 corrupt: 100% 128/128 [00:00<00:00, 1383.68it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /content/datasets/coco128-seg/labels/train2017.cache\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 241.77it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco128-seg/labels/train2017.cache' images and labels... 126 found, 2 missing, 0 empty, 0 corrupt: 100% 128/128 [00:00 # 2. paste API key\n", + "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n", + "```\n", + "\n", + "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://bit.ly/yolov5-colab-comet-docs). Get started by trying out the Comet Colab Notebook:\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", + "\n", + "\"yolo-ui\"" + ] }, - "574140e4c4bc48c9a171541a02cd0211": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_60b913d755b34d638478e30705a2dde1", - "placeholder": "​", - "style": "IPY_MODEL_0856bea36ec148b68522ff9c9eb258d8", - "value": "100%" - } + { + "cell_type": "markdown", + "metadata": { + "id": "Lay2WsTjNJzP" + }, + "source": [ + "## ClearML Logging and Automation 🌟 NEW\n", + "\n", + "[ClearML](https://cutt.ly/yolov5-notebook-clearml) is completely integrated into YOLOv5 to track your experimentation, manage dataset versions and even remotely execute training runs. 
To enable ClearML (check cells above):\n",
+ "\n",
+ "- `pip install clearml`\n",
+ "- run `clearml-init` to connect to a ClearML server (**deploy your own [open-source server](https://github.com/allegroai/clearml-server)**, or use our [free hosted server](https://cutt.ly/yolov5-notebook-clearml))\n",
+ "\n",
+ "You'll get all the expected features of an experiment manager: live updates, model upload, experiment comparison, etc., and ClearML also tracks extras such as uncommitted changes and installed packages. Thanks to that, ClearML Tasks (which is what we call experiments) are reproducible on different machines! With only one extra line, we can schedule a YOLOv5 training task on a queue to be executed by any number of ClearML Agents (workers).\n",
+ "\n",
+ "You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) for details!\n",
+ "\n",
+ "\n",
+ "\"ClearML"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "-WPvRbS5Swl6"
+ },
+ "source": [
+ "## Local Logging\n",
+ "\n",
+ "Training results are automatically logged with [TensorBoard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) loggers to `runs/train-seg`, with a new experiment directory created for each new training as `runs/train-seg/exp2`, `runs/train-seg/exp3`, etc.\n",
+ "\n",
+ "This directory contains train and val statistics, mosaics, labels, predictions and augmented mosaics, as well as metrics and charts including precision-recall (PR) curves and confusion matrices. 
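As a quick sketch, the CSV logger output mentioned above can be read back with pandas; this assumes the standard `results.csv` written into each experiment directory:

```python
# Sketch: inspect the CSV logger output from a finished run ('exp' is an example run directory).
import pandas as pd

df = pd.read_csv('runs/train-seg/exp/results.csv')
df.columns = [c.strip() for c in df.columns]  # column headers are space-padded in the CSV
print(df.tail())  # per-epoch losses and metrics
```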
\n", + "\n", + "\"Local\n" + ] }, - "60b913d755b34d638478e30705a2dde1": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } + { + "cell_type": "markdown", + "metadata": { + "id": "Zelyeqbyt3GD" + }, + "source": [ + "# Environments\n", + "\n", + "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n", + "\n", + "- **Notebooks** with free GPU: \"Run \"Open \"Open\n", + "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)\n", + "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)\n", + "- **Docker Image**. 
See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) \"Docker\n" + ] }, - "65881db1db8a4e9c930fab9172d45143": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } + { + "cell_type": "markdown", + "metadata": { + "id": "6Qu7Iesl0p54" + }, + "source": [ + "# Status\n", + "\n", + "![YOLOv5 CI](https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg)\n", + "\n", + "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.\n" + ] }, - "76879f6f2aa54637a7a07faeea2bd684": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } + { + "cell_type": "markdown", + "metadata": { + "id": "IEijrePND_2I" + }, + "source": [ + "# Appendix\n", + "\n", + "Additional 
content below." + ] }, - "9b8caa3522fc4cbab31e13b5dfc7808d": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "HBoxModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HBoxModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HBoxView", - "box_style": "", - "children": [ - "IPY_MODEL_574140e4c4bc48c9a171541a02cd0211", - "IPY_MODEL_35e03ce5090346c9ae602891470fc555", - "IPY_MODEL_c942c208e72d46568b476bb0f2d75496" - ], - "layout": "IPY_MODEL_65881db1db8a4e9c930fab9172d45143" - } + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "GMusP4OAxFu6" + }, + "outputs": [], + "source": [ + "# YOLOv5 PyTorch HUB Inference (DetectionModels only)\n", + "import torch\n", + "\n", + "model = torch.hub.load('ultralytics/yolov5', 'yolov5s-seg') # yolov5n - yolov5x6 or custom\n", + "im = 'https://ultralytics.com/images/zidane.jpg' # file, Path, PIL.Image, OpenCV, nparray, list\n", + "results = model(im) # inference\n", + "results.print() # or .show(), .save(), .crop(), .pandas(), etc." + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "machine_shape": "hm", + "name": "YOLOv5 Tutorial", + "provenance": [], + "toc_visible": true }, - "c942c208e72d46568b476bb0f2d75496": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_d6b7a2243e0c4beca714d99dceec23d6", - "placeholder": "​", - "style": "IPY_MODEL_5966ba6e6f114d8c9d8d1d6b1bd4f4c7", - "value": " 780M/780M [02:19<00:00, 6.24MB/s]" - } + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" }, - "d6b7a2243e0c4beca714d99dceec23d6": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", 
+ "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.12" } - } - } - }, - "nbformat": 4, - "nbformat_minor": 1 -} + }, + "nbformat": 4, + "nbformat_minor": 0 +} \ No newline at end of file From 74b3886edd55bc9b681b8a956275abb9e6e1e2cd Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 18 Nov 2022 21:18:57 +0100 Subject: [PATCH 1563/1976] Simplify dataloader tqdm descriptions (#10210) * Simplify dataloader tqdm descriptions @AyushExel this should help our tqdm dataloader messages fit better within a single line in our Colab notebooks and also help avoid confusion about missing/empty labels, now combined into 'backgrounds'. Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dataloaders.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/dataloaders.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 0418293a6e21..39db3c0dfd21 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -31,8 +31,8 @@ from utils.augmentations import (Albumentations, augment_hsv, classify_albumentations, classify_transforms, copy_paste, cutout, letterbox, mixup, random_perspective) from utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, check_dataset, check_requirements, check_yaml, clean_str, - colorstr, cv2, is_colab, is_kaggle, segments2boxes, unzip_file, xyn2xy, xywh2xyxy, - xywhn2xyxy, xyxy2xywhn) + cv2, is_colab, is_kaggle, segments2boxes, unzip_file, xyn2xy, xywh2xyxy, xywhn2xyxy, + xyxy2xywhn) from utils.torch_utils import torch_distributed_zero_first # Parameters @@ -493,7 +493,7 @@ def __init__(self, # Display cache nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupt, total if exists and LOCAL_RANK in {-1, 0}: - d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupt" + d = f"Scanning {cache_path}... {nf} images, {nm + ne} backgrounds, {nc} corrupt" tqdm(None, desc=prefix + d, total=n, initial=n, bar_format=BAR_FORMAT) # display cache results if cache['msgs']: LOGGER.info('\n'.join(cache['msgs'])) # display warnings @@ -607,7 +607,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): # Cache dataset labels, check images and read shapes x = {} # dict nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages - desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels..." + desc = f"{prefix}Scanning {path.parent / path.stem}..." 
with Pool(NUM_THREADS) as pool: pbar = tqdm(pool.imap(verify_image_label, zip(self.im_files, self.label_files, repeat(prefix))), desc=desc, @@ -622,7 +622,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): x[im_file] = [lb, shape, segments] if msg: msgs.append(msg) - pbar.desc = f"{desc}{nf} found, {nm} missing, {ne} empty, {nc} corrupt" + pbar.desc = f"{desc} {nf} images, {nm + ne} backgrounds, {nc} corrupt" pbar.close() if msgs: From 0322bb31962d68caefa0c0c5880d80d27e8ab8ec Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 18 Nov 2022 21:39:13 +0100 Subject: [PATCH 1564/1976] New global `TQDM_BAR_FORMAT` (#10211) * New global TQDM_BAR_FORMAT * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- classify/train.py | 6 +++--- classify/val.py | 5 +++-- segment/train.py | 10 +++++----- segment/val.py | 8 ++++---- train.py | 11 ++++++----- utils/autoanchor.py | 4 ++-- utils/dataloaders.py | 15 +++++++-------- utils/general.py | 1 + val.py | 8 ++++---- 9 files changed, 35 insertions(+), 33 deletions(-) diff --git a/classify/train.py b/classify/train.py index 178ebcdfff53..4422ca26b0ae 100644 --- a/classify/train.py +++ b/classify/train.py @@ -40,8 +40,8 @@ from models.experimental import attempt_load from models.yolo import ClassificationModel, DetectionModel from utils.dataloaders import create_classification_dataloader -from utils.general import (DATASETS_DIR, LOGGER, WorkingDirectory, check_git_status, check_requirements, colorstr, - download, increment_path, init_seeds, print_args, yaml_save) +from utils.general import (DATASETS_DIR, LOGGER, TQDM_BAR_FORMAT, WorkingDirectory, check_git_status, + check_requirements, colorstr, download, increment_path, init_seeds, print_args, yaml_save) from utils.loggers import GenericLogger from utils.plots import imshow_cls from utils.torch_utils import (ModelEMA, model_info, reshape_classifier_output, select_device, smart_DDP, @@ -174,7 +174,7 @@ def train(opt, device): trainloader.sampler.set_epoch(epoch) pbar = enumerate(trainloader) if RANK in {-1, 0}: - pbar = tqdm(enumerate(trainloader), total=len(trainloader), bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') + pbar = tqdm(enumerate(trainloader), total=len(trainloader), bar_format=TQDM_BAR_FORMAT) for i, (images, labels) in pbar: # progress bar images, labels = images.to(device, non_blocking=True), labels.to(device) diff --git a/classify/val.py b/classify/val.py index c0b507785fb0..8657036fb2a2 100644 --- a/classify/val.py +++ b/classify/val.py @@ -36,7 +36,8 @@ from models.common import DetectMultiBackend from utils.dataloaders import create_classification_dataloader -from utils.general import LOGGER, Profile, check_img_size, check_requirements, colorstr, increment_path, print_args +from utils.general import (LOGGER, TQDM_BAR_FORMAT, Profile, check_img_size, check_requirements, colorstr, + increment_path, print_args) from utils.torch_utils import select_device, smart_inference_mode @@ -100,7 +101,7 @@ def run( n = len(dataloader) # number of batches action = 'validating' if dataloader.dataset.root.stem == 'val' else 'testing' desc = f"{pbar.desc[:-36]}{action:>36}" if pbar else f"{action}" - bar = tqdm(dataloader, desc, n, not training, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}', position=0) + bar = tqdm(dataloader, desc, n, not training, bar_format=TQDM_BAR_FORMAT, position=0) with torch.cuda.amp.autocast(enabled=device.type != 
'cpu'): for images, labels in bar: with dt[0]: diff --git a/segment/train.py b/segment/train.py index f067918e7c3c..2a0793d1aa3e 100644 --- a/segment/train.py +++ b/segment/train.py @@ -46,10 +46,10 @@ from utils.autobatch import check_train_batch_size from utils.callbacks import Callbacks from utils.downloads import attempt_download, is_url -from utils.general import (LOGGER, check_amp, check_dataset, check_file, check_git_status, check_img_size, - check_requirements, check_suffix, check_yaml, colorstr, get_latest_run, increment_path, - init_seeds, intersect_dicts, labels_to_class_weights, labels_to_image_weights, one_cycle, - print_args, print_mutation, strip_optimizer, yaml_save) +from utils.general import (LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_status, + check_img_size, check_requirements, check_suffix, check_yaml, colorstr, get_latest_run, + increment_path, init_seeds, intersect_dicts, labels_to_class_weights, + labels_to_image_weights, one_cycle, print_args, print_mutation, strip_optimizer, yaml_save) from utils.loggers import GenericLogger from utils.plots import plot_evolve, plot_labels from utils.segment.dataloaders import create_dataloader @@ -277,7 +277,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio LOGGER.info(('\n' + '%11s' * 8) % ('Epoch', 'GPU_mem', 'box_loss', 'seg_loss', 'obj_loss', 'cls_loss', 'Instances', 'Size')) if RANK in {-1, 0}: - pbar = tqdm(pbar, total=nb, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar + pbar = tqdm(pbar, total=nb, bar_format=TQDM_BAR_FORMAT) # progress bar optimizer.zero_grad() for i, (imgs, targets, paths, _, masks) in pbar: # batch ------------------------------------------------------ # callbacks.run('on_train_batch_start') diff --git a/segment/val.py b/segment/val.py index a875b3b79907..9bb8f9e4cf54 100644 --- a/segment/val.py +++ b/segment/val.py @@ -42,9 +42,9 @@ from models.common import DetectMultiBackend from models.yolo import SegmentationModel from utils.callbacks import Callbacks -from utils.general import (LOGGER, NUM_THREADS, Profile, check_dataset, check_img_size, check_requirements, check_yaml, - coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, print_args, - scale_boxes, xywh2xyxy, xyxy2xywh) +from utils.general import (LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, Profile, check_dataset, check_img_size, + check_requirements, check_yaml, coco80_to_coco91_class, colorstr, increment_path, + non_max_suppression, print_args, scale_boxes, xywh2xyxy, xyxy2xywh) from utils.metrics import ConfusionMatrix, box_iou from utils.plots import output_to_target, plot_val_study from utils.segment.dataloaders import create_dataloader @@ -237,7 +237,7 @@ def run( loss = torch.zeros(4, device=device) jdict, stats = [], [] # callbacks.run('on_val_start') - pbar = tqdm(dataloader, desc=s, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar + pbar = tqdm(dataloader, desc=s, bar_format=TQDM_BAR_FORMAT) # progress bar for batch_i, (im, targets, paths, shapes, masks) in enumerate(pbar): # callbacks.run('on_val_batch_start') with dt[0]: diff --git a/train.py b/train.py index 1fe6cf4d9ebd..bbbd6d07db00 100644 --- a/train.py +++ b/train.py @@ -47,10 +47,11 @@ from utils.callbacks import Callbacks from utils.dataloaders import create_dataloader from utils.downloads import attempt_download, is_url -from utils.general import (LOGGER, check_amp, check_dataset, check_file, check_git_status, check_img_size, - check_requirements, check_suffix, check_yaml, 
colorstr, get_latest_run, increment_path, - init_seeds, intersect_dicts, labels_to_class_weights, labels_to_image_weights, methods, - one_cycle, print_args, print_mutation, strip_optimizer, yaml_save) +from utils.general import (LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_status, + check_img_size, check_requirements, check_suffix, check_yaml, colorstr, get_latest_run, + increment_path, init_seeds, intersect_dicts, labels_to_class_weights, + labels_to_image_weights, methods, one_cycle, print_args, print_mutation, strip_optimizer, + yaml_save) from utils.loggers import Loggers from utils.loggers.comet.comet_utils import check_comet_resume from utils.loss import ComputeLoss @@ -275,7 +276,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio pbar = enumerate(train_loader) LOGGER.info(('\n' + '%11s' * 7) % ('Epoch', 'GPU_mem', 'box_loss', 'obj_loss', 'cls_loss', 'Instances', 'Size')) if RANK in {-1, 0}: - pbar = tqdm(pbar, total=nb, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar + pbar = tqdm(pbar, total=nb, bar_format=TQDM_BAR_FORMAT) # progress bar optimizer.zero_grad() for i, (imgs, targets, paths, _) in pbar: # batch ------------------------------------------------------------- callbacks.run('on_train_batch_start') diff --git a/utils/autoanchor.py b/utils/autoanchor.py index cfc4c276e3aa..bb5cf6e6965e 100644 --- a/utils/autoanchor.py +++ b/utils/autoanchor.py @@ -11,7 +11,7 @@ from tqdm import tqdm from utils import TryExcept -from utils.general import LOGGER, colorstr +from utils.general import LOGGER, TQDM_BAR_FORMAT, colorstr PREFIX = colorstr('AutoAnchor: ') @@ -153,7 +153,7 @@ def print_results(k, verbose=True): # Evolve f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1 # fitness, generations, mutation prob, sigma - pbar = tqdm(range(gen), bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar + pbar = tqdm(range(gen), bar_format=TQDM_BAR_FORMAT) # progress bar for _ in pbar: v = np.ones(sh) while (v == 1).all(): # mutate until a change occurs (prevent duplicates) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 39db3c0dfd21..e107d1a2bccf 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -29,17 +29,16 @@ from tqdm import tqdm from utils.augmentations import (Albumentations, augment_hsv, classify_albumentations, classify_transforms, copy_paste, - cutout, letterbox, mixup, random_perspective) -from utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, check_dataset, check_requirements, check_yaml, clean_str, - cv2, is_colab, is_kaggle, segments2boxes, unzip_file, xyn2xy, xywh2xyxy, xywhn2xyxy, - xyxy2xywhn) + letterbox, mixup, random_perspective) +from utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, check_dataset, check_requirements, + check_yaml, clean_str, cv2, is_colab, is_kaggle, segments2boxes, unzip_file, xyn2xy, + xywh2xyxy, xywhn2xyxy, xyxy2xywhn) from utils.torch_utils import torch_distributed_zero_first # Parameters HELP_URL = 'See https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp', 'pfm' # include image suffixes VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes -BAR_FORMAT = '{l_bar}{bar:10}{r_bar}{bar:-10b}' # tqdm bar format LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html RANK = int(os.getenv('RANK', -1)) PIN_MEMORY = 
str(os.getenv('PIN_MEMORY', True)).lower() == 'true' # global pin_memory for dataloaders @@ -494,7 +493,7 @@ def __init__(self, nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupt, total if exists and LOCAL_RANK in {-1, 0}: d = f"Scanning {cache_path}... {nf} images, {nm + ne} backgrounds, {nc} corrupt" - tqdm(None, desc=prefix + d, total=n, initial=n, bar_format=BAR_FORMAT) # display cache results + tqdm(None, desc=prefix + d, total=n, initial=n, bar_format=TQDM_BAR_FORMAT) # display cache results if cache['msgs']: LOGGER.info('\n'.join(cache['msgs'])) # display warnings assert nf > 0 or not augment, f'{prefix}No labels found in {cache_path}, can not start training. {HELP_URL}' @@ -576,7 +575,7 @@ def __init__(self, self.im_hw0, self.im_hw = [None] * n, [None] * n fcn = self.cache_images_to_disk if cache_images == 'disk' else self.load_image results = ThreadPool(NUM_THREADS).imap(fcn, range(n)) - pbar = tqdm(enumerate(results), total=n, bar_format=BAR_FORMAT, disable=LOCAL_RANK > 0) + pbar = tqdm(enumerate(results), total=n, bar_format=TQDM_BAR_FORMAT, disable=LOCAL_RANK > 0) for i, x in pbar: if cache_images == 'disk': b += self.npy_files[i].stat().st_size @@ -612,7 +611,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): pbar = tqdm(pool.imap(verify_image_label, zip(self.im_files, self.label_files, repeat(prefix))), desc=desc, total=len(self.im_files), - bar_format=BAR_FORMAT) + bar_format=TQDM_BAR_FORMAT) for im_file, lb, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar: nm += nm_f nf += nf_f diff --git a/utils/general.py b/utils/general.py index c543a237d25b..58181f00568d 100644 --- a/utils/general.py +++ b/utils/general.py @@ -50,6 +50,7 @@ DATASETS_DIR = Path(os.getenv('YOLOv5_DATASETS_DIR', ROOT.parent / 'datasets')) # global datasets directory AUTOINSTALL = str(os.getenv('YOLOv5_AUTOINSTALL', True)).lower() == 'true' # global auto-install mode VERBOSE = str(os.getenv('YOLOv5_VERBOSE', True)).lower() == 'true' # global verbose mode +TQDM_BAR_FORMAT = '{l_bar}{bar:10}| {n_fmt}/{total_fmt} {elapsed}' # tqdm bar format FONT = 'Arial.ttf' # https://ultralytics.com/assets/Arial.ttf torch.set_printoptions(linewidth=320, precision=5, profile='long') diff --git a/val.py b/val.py index 127acf810029..ef282e37bdc1 100644 --- a/val.py +++ b/val.py @@ -38,9 +38,9 @@ from models.common import DetectMultiBackend from utils.callbacks import Callbacks from utils.dataloaders import create_dataloader -from utils.general import (LOGGER, Profile, check_dataset, check_img_size, check_requirements, check_yaml, - coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, print_args, - scale_boxes, xywh2xyxy, xyxy2xywh) +from utils.general import (LOGGER, TQDM_BAR_FORMAT, Profile, check_dataset, check_img_size, check_requirements, + check_yaml, coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, + print_args, scale_boxes, xywh2xyxy, xyxy2xywh) from utils.metrics import ConfusionMatrix, ap_per_class, box_iou from utils.plots import output_to_target, plot_images, plot_val_study from utils.torch_utils import select_device, smart_inference_mode @@ -193,7 +193,7 @@ def run( loss = torch.zeros(3, device=device) jdict, stats, ap, ap_class = [], [], [], [] callbacks.run('on_val_start') - pbar = tqdm(dataloader, desc=s, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}') # progress bar + pbar = tqdm(dataloader, desc=s, bar_format=TQDM_BAR_FORMAT) # progress bar for batch_i, (im, targets, paths, shapes) in enumerate(pbar): 
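# A sketch (not part of this patch) demonstrating the new global bar format defined in
# utils/general.py above: the right side keeps only '{n_fmt}/{total_fmt} {elapsed}', dropping
# the rate and remaining-time fields so long descriptions fit a single Colab line.
from time import sleep
from tqdm import tqdm
TQDM_BAR_FORMAT = '{l_bar}{bar:10}| {n_fmt}/{total_fmt} {elapsed}'
for _ in tqdm(range(100), desc='demo', bar_format=TQDM_BAR_FORMAT):
    sleep(0.01)  # stand-in for per-batch work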
callbacks.run('on_val_batch_start') with dt[0]: From 6f377f9d8a7f24a0766d2cfdef6d1e18873d33f9 Mon Sep 17 00:00:00 2001 From: Paul Guerrie <97041392+paulguerrie@users.noreply.github.com> Date: Fri, 18 Nov 2022 14:05:45 -0700 Subject: [PATCH 1565/1976] Feature/classification tutorial refactor (#10039) * Added a tutorial notebook for classification. * Split a cell so that there is less room for error when a user pastes their own code snippet. Also added an active learning section at the end. * Added a section to the classification tutorial notebook about the various methods of input for `classify/predict.py`. * Updated link to colab * WIP commit to show some of the errors when trying to match the main tutorial. * Refactored the classification tutorial to be closer to the main tutorial. * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: Ayush Chaurasia Co-authored-by: Glenn Jocher --- classify/tutorial.ipynb | 1843 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 1843 insertions(+) create mode 100644 classify/tutorial.ipynb diff --git a/classify/tutorial.ipynb b/classify/tutorial.ipynb new file mode 100644 index 000000000000..8ed8b5db8a35 --- /dev/null +++ b/classify/tutorial.ipynb @@ -0,0 +1,1843 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "t6MPjfT5NrKQ" + }, + "source": [ + "
\n", + "\n", + " \n", + " \n", + "\n", + "\n", + "
\n", + " \"Run\n", + " \"Open\n", + " \"Open\n", + "
\n", + "\n", + "This YOLOv5 🚀 notebook by Ultralytics presents simple train, validate and predict examples to help start your AI adventure.
See GitHub for community support or contact us for professional support.\n", + "\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "7mGmQbAO5pQb" + }, + "source": [ + "# Setup\n", + "\n", + "Clone GitHub [repository](https://github.com/ultralytics/yolov5), install [dependencies](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) and check PyTorch and GPU." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "wbvMlHd_QwMG", + "outputId": "0f9ee467-cea4-48e8-9050-7a76ae1b6141" + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "YOLOv5 🚀 v6.2-237-g34c3e1c Python-3.7.12 torch-1.12.1+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16161MiB)\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Setup complete ✅ (4 CPUs, 14.7 GB RAM, 152.0/196.6 GB disk)\n" + ] + } + ], + "source": [ + "!git clone https://github.com/ultralytics/yolov5 # clone\n", + "%cd yolov5\n", + "%pip install -qr requirements.txt # install\n", + "\n", + "import torch\n", + "import utils\n", + "display = utils.notebook_init() # checks" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "4JnkELT0cIJg" + }, + "source": [ + "# 1. Predict\n", + "\n", + "`classify/predict.py` runs YOLOv5 Classifcation inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/predict-cls`. Example inference sources are:\n", + "\n", + "```shell\n", + "python classify/predict.py --source 0 # webcam\n", + " img.jpg # image \n", + " vid.mp4 # video\n", + " screen # screenshot\n", + " path/ # directory\n", + " 'path/*.jpg' # glob\n", + " 'https://youtu.be/Zgi9g1ksQHc' # YouTube\n", + " 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "zR9ZbuQCH7FX", + "outputId": "60647b99-e8d4-402c-f444-331bf6746da4" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[34m\u001b[1mclassify/predict: \u001b[0mweights=['yolov5s-cls.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], device=, view_img=False, save_txt=True, nosave=False, augment=False, visualize=False, update=False, project=runs/predict-cls, name=exp, exist_ok=False, half=False, dnn=False, vid_stride=1\n", + "YOLOv5 🚀 v6.2-237-g34c3e1c Python-3.7.12 torch-1.12.1+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16161MiB)\n", + "\n", + "Fusing layers... 
\n", + "Model summary: 117 layers, 5447688 parameters, 0 gradients, 11.4 GFLOPs\n", + "image 1/2 /home/paguerrie/yolov5/data/images/bus.jpg: 640x640 minibus 0.01, recreational vehicle 0.01, ambulance 0.01, tram 0.01, trolleybus 0.01, 2.6ms\n", + "image 2/2 /home/paguerrie/yolov5/data/images/zidane.jpg: 640x640 suit 0.05, bow tie 0.01, ping-pong ball 0.01, microphone 0.01, bassoon 0.01, 2.8ms\n", + "Speed: 1.2ms pre-process, 2.7ms inference, 0.1ms NMS per image at shape (1, 3, 640, 640)\n", + "Results saved to \u001b[1mruns/predict-cls/exp\u001b[0m\n", + "2 labels saved to runs/predict-cls/exp/labels\n" + ] + } + ], + "source": [ + "!python classify/predict.py --weights yolov5s-cls.pt --img 640 --source data/images\n", + "# display.Image(filename='runs/predict-cls/exp/zidane.jpg', width=600)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "hkAzDWJ7cWTr" + }, + "source": [ + "        \n", + "" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "0eq1SMWl6Sfn" + }, + "source": [ + "# 2. Validate\n", + "Validate a model's accuracy on the [Imagenet](https://image-net.org/) dataset's `val` or `test` splits. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 49, + "referenced_widgets": [ + "9b8caa3522fc4cbab31e13b5dfc7808d", + "574140e4c4bc48c9a171541a02cd0211", + "35e03ce5090346c9ae602891470fc555", + "c942c208e72d46568b476bb0f2d75496", + "65881db1db8a4e9c930fab9172d45143", + "60b913d755b34d638478e30705a2dde1", + "0856bea36ec148b68522ff9c9eb258d8", + "76879f6f2aa54637a7a07faeea2bd684", + "0ace3934ec6f4d36a1b3a9e086390926", + "d6b7a2243e0c4beca714d99dceec23d6", + "5966ba6e6f114d8c9d8d1d6b1bd4f4c7" + ] + }, + "id": "WQPtK1QYVaD_", + "outputId": "102dabed-bc31-42fe-9133-d9ce28a2c01e" + }, + "outputs": [], + "source": [ + "# Download Imagenet val\n", + "!bash data/scripts/get_imagenet.sh --val" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "X58w8JLpMnjH", + "outputId": "daf60b1b-b098-4657-c863-584f4c9cf078" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[34m\u001b[1mclassify/val: \u001b[0mdata=../datasets/imagenet, weights=['yolov5s-cls.pt'], batch_size=128, imgsz=320, device=, workers=8, verbose=True, project=runs/val-cls, name=exp, exist_ok=False, half=True, dnn=False\n", + "YOLOv5 🚀 v6.2-237-g34c3e1c Python-3.7.12 torch-1.12.1+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16161MiB)\n", + "\n", + "Fusing layers... 
\n", + "Model summary: 117 layers, 5447688 parameters, 0 gradients, 11.4 GFLOPs\n", + "validating: 100%|██████████| 391/391 [02:36<00:00, 2.49it/s] \n", + " Class Images top1_acc top5_acc\n", + " all 50000 0.734 0.914\n", + " tench 50 0.92 0.98\n", + " goldfish 50 0.86 0.98\n", + " great white shark 50 0.76 0.94\n", + " tiger shark 50 0.84 0.96\n", + " hammerhead shark 50 0.88 0.98\n", + " electric ray 50 0.76 0.88\n", + " stingray 50 0.74 0.94\n", + " cock 50 0.74 0.94\n", + " hen 50 0.86 0.96\n", + " ostrich 50 0.98 1\n", + " brambling 50 0.9 0.98\n", + " goldfinch 50 0.92 1\n", + " house finch 50 0.92 1\n", + " junco 50 0.98 1\n", + " indigo bunting 50 0.86 0.94\n", + " American robin 50 0.94 1\n", + " bulbul 50 0.88 0.92\n", + " jay 50 0.92 0.98\n", + " magpie 50 0.9 0.98\n", + " chickadee 50 0.96 1\n", + " American dipper 50 0.86 0.92\n", + " kite 50 0.8 0.94\n", + " bald eagle 50 0.9 0.98\n", + " vulture 50 0.96 1\n", + " great grey owl 50 0.96 0.98\n", + " fire salamander 50 0.96 0.98\n", + " smooth newt 50 0.66 0.98\n", + " newt 50 0.74 0.84\n", + " spotted salamander 50 0.9 0.98\n", + " axolotl 50 0.9 0.98\n", + " American bullfrog 50 0.8 0.92\n", + " tree frog 50 0.8 0.94\n", + " tailed frog 50 0.5 0.82\n", + " loggerhead sea turtle 50 0.7 0.92\n", + " leatherback sea turtle 50 0.58 0.8\n", + " mud turtle 50 0.58 0.84\n", + " terrapin 50 0.52 0.98\n", + " box turtle 50 0.88 1\n", + " banded gecko 50 0.78 0.9\n", + " green iguana 50 0.78 0.92\n", + " Carolina anole 50 0.62 0.98\n", + "desert grassland whiptail lizard 50 0.88 0.96\n", + " agama 50 0.78 0.96\n", + " frilled-necked lizard 50 0.82 0.94\n", + " alligator lizard 50 0.64 0.84\n", + " Gila monster 50 0.76 0.86\n", + " European green lizard 50 0.5 0.96\n", + " chameleon 50 0.78 0.9\n", + " Komodo dragon 50 0.9 1\n", + " Nile crocodile 50 0.66 0.92\n", + " American alligator 50 0.78 0.98\n", + " triceratops 50 0.96 0.98\n", + " worm snake 50 0.76 0.9\n", + " ring-necked snake 50 0.84 0.96\n", + " eastern hog-nosed snake 50 0.62 0.86\n", + " smooth green snake 50 0.64 0.96\n", + " kingsnake 50 0.78 0.94\n", + " garter snake 50 0.86 0.98\n", + " water snake 50 0.78 0.92\n", + " vine snake 50 0.72 0.86\n", + " night snake 50 0.34 0.86\n", + " boa constrictor 50 0.8 0.96\n", + " African rock python 50 0.52 0.82\n", + " Indian cobra 50 0.8 0.94\n", + " green mamba 50 0.56 0.92\n", + " sea snake 50 0.76 0.94\n", + " Saharan horned viper 50 0.48 0.88\n", + "eastern diamondback rattlesnake 50 0.72 0.92\n", + " sidewinder 50 0.38 0.92\n", + " trilobite 50 0.98 0.98\n", + " harvestman 50 0.86 0.94\n", + " scorpion 50 0.88 0.94\n", + " yellow garden spider 50 0.88 0.96\n", + " barn spider 50 0.38 0.96\n", + " European garden spider 50 0.6 0.98\n", + " southern black widow 50 0.84 0.98\n", + " tarantula 50 0.94 0.98\n", + " wolf spider 50 0.7 0.92\n", + " tick 50 0.76 0.82\n", + " centipede 50 0.74 0.86\n", + " black grouse 50 0.88 0.98\n", + " ptarmigan 50 0.84 0.98\n", + " ruffed grouse 50 0.9 1\n", + " prairie grouse 50 0.9 0.96\n", + " peacock 50 0.9 0.9\n", + " quail 50 0.88 0.94\n", + " partridge 50 0.66 0.94\n", + " grey parrot 50 0.94 0.98\n", + " macaw 50 0.92 0.98\n", + "sulphur-crested cockatoo 50 0.94 0.98\n", + " lorikeet 50 0.98 1\n", + " coucal 50 0.9 0.92\n", + " bee eater 50 0.96 0.98\n", + " hornbill 50 0.86 0.98\n", + " hummingbird 50 0.9 0.98\n", + " jacamar 50 0.94 0.94\n", + " toucan 50 0.84 0.94\n", + " duck 50 0.78 0.94\n", + " red-breasted merganser 50 0.94 0.98\n", + " goose 50 0.76 0.98\n", + " black swan 50 
0.94 1\n", + " tusker 50 0.58 0.92\n", + " echidna 50 1 1\n", + " platypus 50 0.72 0.84\n", + " wallaby 50 0.86 0.92\n", + " koala 50 0.84 0.98\n", + " wombat 50 0.82 0.86\n", + " jellyfish 50 0.94 0.96\n", + " sea anemone 50 0.66 0.98\n", + " brain coral 50 0.9 0.96\n", + " flatworm 50 0.76 1\n", + " nematode 50 0.9 0.92\n", + " conch 50 0.74 0.92\n", + " snail 50 0.78 0.86\n", + " slug 50 0.78 0.9\n", + " sea slug 50 0.94 0.98\n", + " chiton 50 0.86 0.96\n", + " chambered nautilus 50 0.86 0.94\n", + " Dungeness crab 50 0.86 0.96\n", + " rock crab 50 0.66 0.88\n", + " fiddler crab 50 0.64 0.88\n", + " red king crab 50 0.78 0.92\n", + " American lobster 50 0.78 0.96\n", + " spiny lobster 50 0.78 0.88\n", + " crayfish 50 0.56 0.84\n", + " hermit crab 50 0.82 0.96\n", + " isopod 50 0.62 0.74\n", + " white stork 50 0.88 0.94\n", + " black stork 50 0.86 0.96\n", + " spoonbill 50 0.96 1\n", + " flamingo 50 0.94 1\n", + " little blue heron 50 0.92 0.98\n", + " great egret 50 0.9 0.98\n", + " bittern 50 0.9 0.92\n", + " crane (bird) 50 0.64 0.94\n", + " limpkin 50 0.96 0.98\n", + " common gallinule 50 0.96 0.96\n", + " American coot 50 0.94 1\n", + " bustard 50 0.96 0.98\n", + " ruddy turnstone 50 0.96 1\n", + " dunlin 50 0.86 0.94\n", + " common redshank 50 0.92 0.96\n", + " dowitcher 50 0.9 1\n", + " oystercatcher 50 0.9 0.96\n", + " pelican 50 0.96 1\n", + " king penguin 50 0.88 0.92\n", + " albatross 50 0.9 0.98\n", + " grey whale 50 0.86 0.94\n", + " killer whale 50 0.9 0.98\n", + " dugong 50 0.88 0.94\n", + " sea lion 50 0.78 0.98\n", + " Chihuahua 50 0.56 0.82\n", + " Japanese Chin 50 0.7 0.98\n", + " Maltese 50 0.86 0.94\n", + " Pekingese 50 0.84 0.94\n", + " Shih Tzu 50 0.68 0.94\n", + " King Charles Spaniel 50 0.92 0.98\n", + " Papillon 50 0.92 0.94\n", + " toy terrier 50 0.48 0.96\n", + " Rhodesian Ridgeback 50 0.76 0.94\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + " Afghan Hound 50 0.9 0.98\n", + " Basset Hound 50 0.78 0.9\n", + " Beagle 50 0.82 0.98\n", + " Bloodhound 50 0.5 0.78\n", + " Bluetick Coonhound 50 0.84 0.94\n", + " Black and Tan Coonhound 50 0.46 0.8\n", + "Treeing Walker Coonhound 50 0.58 0.98\n", + " English foxhound 50 0.24 0.8\n", + " Redbone Coonhound 50 0.66 0.92\n", + " borzoi 50 0.94 1\n", + " Irish Wolfhound 50 0.64 0.9\n", + " Italian Greyhound 50 0.8 0.98\n", + " Whippet 50 0.82 0.98\n", + " Ibizan Hound 50 0.64 0.92\n", + " Norwegian Elkhound 50 0.88 1\n", + " Otterhound 50 0.58 0.9\n", + " Saluki 50 0.72 0.92\n", + " Scottish Deerhound 50 0.86 1\n", + " Weimaraner 50 0.88 0.96\n", + "Staffordshire Bull Terrier 50 0.62 0.92\n", + "American Staffordshire Terrier 50 0.66 0.92\n", + " Bedlington Terrier 50 0.82 0.96\n", + " Border Terrier 50 0.9 0.98\n", + " Kerry Blue Terrier 50 0.82 1\n", + " Irish Terrier 50 0.74 0.94\n", + " Norfolk Terrier 50 0.74 0.92\n", + " Norwich Terrier 50 0.68 0.98\n", + " Yorkshire Terrier 50 0.66 0.88\n", + " Wire Fox Terrier 50 0.66 0.96\n", + " Lakeland Terrier 50 0.82 0.94\n", + " Sealyham Terrier 50 0.74 0.9\n", + " Airedale Terrier 50 0.82 0.9\n", + " Cairn Terrier 50 0.82 0.94\n", + " Australian Terrier 50 0.48 0.84\n", + " Dandie Dinmont Terrier 50 0.84 0.9\n", + " Boston Terrier 50 0.88 1\n", + " Miniature Schnauzer 50 0.7 0.92\n", + " Giant Schnauzer 50 0.82 1\n", + " Standard Schnauzer 50 0.72 0.98\n", + " Scottish Terrier 50 0.78 0.94\n", + " Tibetan Terrier 50 0.64 0.98\n", + "Australian Silky Terrier 50 0.72 0.96\n", + "Soft-coated Wheaten Terrier 50 0.86 0.98\n", + "West Highland White 
Terrier 50 0.94 0.98\n", + " Lhasa Apso 50 0.66 0.96\n", + " Flat-Coated Retriever 50 0.78 1\n", + " Curly-coated Retriever 50 0.84 0.96\n", + " Golden Retriever 50 0.88 0.96\n", + " Labrador Retriever 50 0.82 0.94\n", + "Chesapeake Bay Retriever 50 0.86 0.98\n", + "German Shorthaired Pointer 50 0.84 0.96\n", + " Vizsla 50 0.7 0.94\n", + " English Setter 50 0.8 1\n", + " Irish Setter 50 0.78 0.9\n", + " Gordon Setter 50 0.84 0.92\n", + " Brittany 50 0.86 0.98\n", + " Clumber Spaniel 50 0.9 0.96\n", + "English Springer Spaniel 50 0.96 1\n", + " Welsh Springer Spaniel 50 0.92 1\n", + " Cocker Spaniels 50 0.7 0.96\n", + " Sussex Spaniel 50 0.7 0.88\n", + " Irish Water Spaniel 50 0.86 0.94\n", + " Kuvasz 50 0.7 0.92\n", + " Schipperke 50 0.94 0.98\n", + " Groenendael 50 0.78 0.92\n", + " Malinois 50 0.92 0.98\n", + " Briard 50 0.6 0.84\n", + " Australian Kelpie 50 0.74 0.96\n", + " Komondor 50 0.9 0.96\n", + " Old English Sheepdog 50 0.94 0.98\n", + " Shetland Sheepdog 50 0.72 0.94\n", + " collie 50 0.6 0.96\n", + " Border Collie 50 0.82 0.96\n", + " Bouvier des Flandres 50 0.78 0.96\n", + " Rottweiler 50 0.94 0.98\n", + " German Shepherd Dog 50 0.76 0.98\n", + " Dobermann 50 0.74 1\n", + " Miniature Pinscher 50 0.76 0.96\n", + "Greater Swiss Mountain Dog 50 0.66 0.94\n", + " Bernese Mountain Dog 50 0.94 1\n", + " Appenzeller Sennenhund 50 0.3 1\n", + " Entlebucher Sennenhund 50 0.72 0.98\n", + " Boxer 50 0.7 0.92\n", + " Bullmastiff 50 0.8 0.98\n", + " Tibetan Mastiff 50 0.92 0.98\n", + " French Bulldog 50 0.86 0.98\n", + " Great Dane 50 0.6 0.92\n", + " St. Bernard 50 0.94 1\n", + " husky 50 0.5 0.94\n", + " Alaskan Malamute 50 0.76 0.96\n", + " Siberian Husky 50 0.56 0.98\n", + " Dalmatian 50 0.94 0.98\n", + " Affenpinscher 50 0.76 0.92\n", + " Basenji 50 0.9 1\n", + " pug 50 0.96 0.98\n", + " Leonberger 50 0.98 1\n", + " Newfoundland 50 0.82 0.96\n", + " Pyrenean Mountain Dog 50 0.76 0.94\n", + " Samoyed 50 0.9 0.98\n", + " Pomeranian 50 0.96 1\n", + " Chow Chow 50 0.88 0.96\n", + " Keeshond 50 0.94 1\n", + " Griffon Bruxellois 50 0.92 0.98\n", + " Pembroke Welsh Corgi 50 0.9 0.98\n", + " Cardigan Welsh Corgi 50 0.7 0.94\n", + " Toy Poodle 50 0.52 0.96\n", + " Miniature Poodle 50 0.56 0.92\n", + " Standard Poodle 50 0.78 0.96\n", + " Mexican hairless dog 50 0.86 0.98\n", + " grey wolf 50 0.74 0.92\n", + " Alaskan tundra wolf 50 0.86 0.98\n", + " red wolf 50 0.54 0.92\n", + " coyote 50 0.62 0.82\n", + " dingo 50 0.76 0.94\n", + " dhole 50 0.9 0.96\n", + " African wild dog 50 1 1\n", + " hyena 50 0.9 0.94\n", + " red fox 50 0.62 0.92\n", + " kit fox 50 0.7 0.98\n", + " Arctic fox 50 0.92 0.98\n", + " grey fox 50 0.66 0.96\n", + " tabby cat 50 0.58 0.92\n", + " tiger cat 50 0.2 0.94\n", + " Persian cat 50 0.92 1\n", + " Siamese cat 50 0.94 0.98\n", + " Egyptian Mau 50 0.52 0.84\n", + " cougar 50 0.94 0.96\n", + " lynx 50 0.74 0.9\n", + " leopard 50 0.86 1\n", + " snow leopard 50 0.9 0.98\n", + " jaguar 50 0.72 0.92\n", + " lion 50 0.9 0.98\n", + " tiger 50 0.96 0.98\n", + " cheetah 50 0.94 0.98\n", + " brown bear 50 0.9 0.98\n", + " American black bear 50 0.9 0.98\n", + " polar bear 50 0.86 0.94\n", + " sloth bear 50 0.72 0.92\n", + " mongoose 50 0.7 0.86\n", + " meerkat 50 0.82 0.98\n", + " tiger beetle 50 0.9 0.94\n", + " ladybug 50 0.78 0.98\n", + " ground beetle 50 0.62 0.94\n", + " longhorn beetle 50 0.58 0.9\n", + " leaf beetle 50 0.66 0.98\n", + " dung beetle 50 0.88 0.98\n", + " rhinoceros beetle 50 0.88 1\n", + " weevil 50 0.92 1\n", + " fly 50 0.78 0.94\n", + " bee 50 0.8 0.96\n", + 
" ant 50 0.68 0.84\n", + " grasshopper 50 0.48 0.9\n", + " cricket 50 0.66 0.94\n", + " stick insect 50 0.7 0.94\n", + " cockroach 50 0.72 0.84\n", + " mantis 50 0.72 0.9\n", + " cicada 50 0.9 0.96\n", + " leafhopper 50 0.9 0.96\n", + " lacewing 50 0.8 0.94\n", + " dragonfly 50 0.76 0.98\n", + " damselfly 50 0.82 1\n", + " red admiral 50 0.96 0.96\n", + " ringlet 50 0.88 1\n", + " monarch butterfly 50 0.9 0.96\n", + " small white 50 0.88 1\n", + " sulphur butterfly 50 0.92 1\n", + "gossamer-winged butterfly 50 0.9 1\n", + " starfish 50 0.82 0.94\n", + " sea urchin 50 0.84 0.98\n", + " sea cucumber 50 0.76 0.92\n", + " cottontail rabbit 50 0.7 0.98\n", + " hare 50 0.9 1\n", + " Angora rabbit 50 0.92 0.98\n", + " hamster 50 1 1\n", + " porcupine 50 0.9 0.98\n", + " fox squirrel 50 0.82 0.96\n", + " marmot 50 0.94 0.96\n", + " beaver 50 0.78 0.96\n", + " guinea pig 50 0.78 0.92\n", + " common sorrel 50 0.98 0.98\n", + " zebra 50 0.96 0.98\n", + " pig 50 0.54 0.82\n", + " wild boar 50 0.86 0.96\n", + " warthog 50 0.96 0.96\n", + " hippopotamus 50 0.9 1\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + " ox 50 0.52 0.94\n", + " water buffalo 50 0.86 0.94\n", + " bison 50 0.9 0.98\n", + " ram 50 0.62 0.98\n", + " bighorn sheep 50 0.72 1\n", + " Alpine ibex 50 0.96 0.98\n", + " hartebeest 50 0.94 1\n", + " impala 50 0.86 0.98\n", + " gazelle 50 0.74 0.96\n", + " dromedary 50 0.94 1\n", + " llama 50 0.86 0.94\n", + " weasel 50 0.42 0.96\n", + " mink 50 0.78 0.92\n", + " European polecat 50 0.54 0.88\n", + " black-footed ferret 50 0.74 0.96\n", + " otter 50 0.68 0.9\n", + " skunk 50 0.94 0.96\n", + " badger 50 0.88 0.92\n", + " armadillo 50 0.88 0.96\n", + " three-toed sloth 50 0.96 1\n", + " orangutan 50 0.82 0.9\n", + " gorilla 50 0.78 0.94\n", + " chimpanzee 50 0.86 0.94\n", + " gibbon 50 0.74 0.9\n", + " siamang 50 0.68 0.94\n", + " guenon 50 0.82 0.96\n", + " patas monkey 50 0.66 0.86\n", + " baboon 50 0.88 0.96\n", + " macaque 50 0.72 0.84\n", + " langur 50 0.56 0.78\n", + " black-and-white colobus 50 0.84 0.92\n", + " proboscis monkey 50 0.98 1\n", + " marmoset 50 0.7 0.92\n", + " white-headed capuchin 50 0.82 0.94\n", + " howler monkey 50 0.9 0.96\n", + " titi 50 0.54 0.9\n", + "Geoffroy's spider monkey 50 0.36 0.86\n", + " common squirrel monkey 50 0.76 0.92\n", + " ring-tailed lemur 50 0.7 0.94\n", + " indri 50 0.86 0.98\n", + " Asian elephant 50 0.54 0.96\n", + " African bush elephant 50 0.62 0.96\n", + " red panda 50 0.94 0.94\n", + " giant panda 50 0.92 0.98\n", + " snoek 50 0.76 0.9\n", + " eel 50 0.58 0.86\n", + " coho salmon 50 0.8 0.98\n", + " rock beauty 50 0.8 0.96\n", + " clownfish 50 0.8 0.98\n", + " sturgeon 50 0.76 0.96\n", + " garfish 50 0.7 0.82\n", + " lionfish 50 0.94 0.98\n", + " pufferfish 50 0.86 0.98\n", + " abacus 50 0.8 0.88\n", + " abaya 50 0.72 0.94\n", + " academic gown 50 0.44 0.94\n", + " accordion 50 0.78 0.96\n", + " acoustic guitar 50 0.54 0.78\n", + " aircraft carrier 50 0.7 0.98\n", + " airliner 50 0.92 1\n", + " airship 50 0.8 0.88\n", + " altar 50 0.6 0.94\n", + " ambulance 50 0.84 0.98\n", + " amphibious vehicle 50 0.68 0.9\n", + " analog clock 50 0.5 0.88\n", + " apiary 50 0.9 1\n", + " apron 50 0.68 0.86\n", + " waste container 50 0.6 0.86\n", + " assault rifle 50 0.36 0.9\n", + " backpack 50 0.36 0.72\n", + " bakery 50 0.38 0.64\n", + " balance beam 50 0.84 0.98\n", + " balloon 50 0.88 0.96\n", + " ballpoint pen 50 0.52 0.96\n", + " Band-Aid 50 0.68 0.96\n", + " banjo 50 0.9 1\n", + " baluster 50 0.74 0.94\n", + " barbell 
50 0.58 0.9\n", + " barber chair 50 0.72 0.9\n", + " barbershop 50 0.64 0.9\n", + " barn 50 0.96 0.96\n", + " barometer 50 0.86 0.96\n", + " barrel 50 0.64 0.86\n", + " wheelbarrow 50 0.64 0.92\n", + " baseball 50 0.76 0.96\n", + " basketball 50 0.88 0.98\n", + " bassinet 50 0.8 0.94\n", + " bassoon 50 0.84 0.98\n", + " swimming cap 50 0.7 0.88\n", + " bath towel 50 0.56 0.84\n", + " bathtub 50 0.34 0.86\n", + " station wagon 50 0.68 0.9\n", + " lighthouse 50 0.74 0.96\n", + " beaker 50 0.46 0.7\n", + " military cap 50 0.88 0.98\n", + " beer bottle 50 0.72 0.9\n", + " beer glass 50 0.72 0.9\n", + " bell-cot 50 0.6 0.96\n", + " bib 50 0.58 0.86\n", + " tandem bicycle 50 0.76 0.96\n", + " bikini 50 0.52 0.88\n", + " ring binder 50 0.7 0.86\n", + " binoculars 50 0.54 0.78\n", + " birdhouse 50 0.86 0.96\n", + " boathouse 50 0.78 0.96\n", + " bobsleigh 50 0.94 0.96\n", + " bolo tie 50 0.86 0.88\n", + " poke bonnet 50 0.68 0.88\n", + " bookcase 50 0.68 0.92\n", + " bookstore 50 0.58 0.88\n", + " bottle cap 50 0.62 0.8\n", + " bow 50 0.74 0.84\n", + " bow tie 50 0.68 0.92\n", + " brass 50 0.92 0.98\n", + " bra 50 0.52 0.76\n", + " breakwater 50 0.64 0.94\n", + " breastplate 50 0.36 0.9\n", + " broom 50 0.58 0.84\n", + " bucket 50 0.58 0.88\n", + " buckle 50 0.5 0.76\n", + " bulletproof vest 50 0.52 0.76\n", + " high-speed train 50 0.94 0.98\n", + " butcher shop 50 0.76 0.94\n", + " taxicab 50 0.7 0.92\n", + " cauldron 50 0.5 0.72\n", + " candle 50 0.5 0.76\n", + " cannon 50 0.88 0.96\n", + " canoe 50 0.94 1\n", + " can opener 50 0.72 0.88\n", + " cardigan 50 0.66 0.88\n", + " car mirror 50 0.94 0.98\n", + " carousel 50 0.96 0.96\n", + " tool kit 50 0.68 0.84\n", + " carton 50 0.44 0.78\n", + " car wheel 50 0.4 0.78\n", + "automated teller machine 50 0.82 0.94\n", + " cassette 50 0.62 0.84\n", + " cassette player 50 0.3 0.92\n", + " castle 50 0.74 0.9\n", + " catamaran 50 0.74 0.98\n", + " CD player 50 0.52 0.8\n", + " cello 50 0.84 1\n", + " mobile phone 50 0.72 0.86\n", + " chain 50 0.34 0.78\n", + " chain-link fence 50 0.7 0.86\n", + " chain mail 50 0.68 0.86\n", + " chainsaw 50 0.88 0.96\n", + " chest 50 0.7 0.88\n", + " chiffonier 50 0.32 0.64\n", + " chime 50 0.64 0.84\n", + " china cabinet 50 0.78 0.94\n", + " Christmas stocking 50 0.92 0.98\n", + " church 50 0.6 0.86\n", + " movie theater 50 0.68 0.9\n", + " cleaver 50 0.36 0.68\n", + " cliff dwelling 50 0.86 1\n", + " cloak 50 0.28 0.7\n", + " clogs 50 0.6 0.88\n", + " cocktail shaker 50 0.62 0.76\n", + " coffee mug 50 0.48 0.78\n", + " coffeemaker 50 0.62 0.92\n", + " coil 50 0.64 0.86\n", + " combination lock 50 0.62 0.92\n", + " computer keyboard 50 0.72 0.92\n", + " confectionery store 50 0.56 0.84\n", + " container ship 50 0.82 0.98\n", + " convertible 50 0.78 1\n", + " corkscrew 50 0.84 0.98\n", + " cornet 50 0.56 0.98\n", + " cowboy boot 50 0.66 0.78\n", + " cowboy hat 50 0.66 0.88\n", + " cradle 50 0.34 0.8\n", + " crane (machine) 50 0.8 0.92\n", + " crash helmet 50 0.92 0.96\n", + " crate 50 0.6 0.86\n", + " infant bed 50 0.8 0.96\n", + " Crock Pot 50 0.78 0.88\n", + " croquet ball 50 0.9 1\n", + " crutch 50 0.42 0.7\n", + " cuirass 50 0.54 0.92\n", + " dam 50 0.78 0.92\n", + " desk 50 0.68 0.88\n", + " desktop computer 50 0.54 0.9\n", + " rotary dial telephone 50 0.92 0.96\n", + " diaper 50 0.68 0.84\n", + " digital clock 50 0.6 0.8\n", + " digital watch 50 0.56 0.82\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + " dining table 50 0.78 0.88\n", + " dishcloth 50 0.98 1\n", + " dishwasher 50 0.52 
0.74\n", + " disc brake 50 0.96 1\n", + " dock 50 0.56 0.96\n", + " dog sled 50 0.9 0.98\n", + " dome 50 0.74 0.96\n", + " doormat 50 0.6 0.82\n", + " drilling rig 50 0.82 0.94\n", + " drum 50 0.4 0.72\n", + " drumstick 50 0.56 0.82\n", + " dumbbell 50 0.6 0.92\n", + " Dutch oven 50 0.66 0.88\n", + " electric fan 50 0.82 0.84\n", + " electric guitar 50 0.66 0.92\n", + " electric locomotive 50 0.92 0.98\n", + " entertainment center 50 0.92 1\n", + " envelope 50 0.58 0.88\n", + " espresso machine 50 0.72 0.94\n", + " face powder 50 0.76 0.92\n", + " feather boa 50 0.8 0.88\n", + " filing cabinet 50 0.84 0.98\n", + " fireboat 50 0.96 0.96\n", + " fire engine 50 0.82 0.92\n", + " fire screen sheet 50 0.52 0.78\n", + " flagpole 50 0.76 0.92\n", + " flute 50 0.4 0.76\n", + " folding chair 50 0.68 0.9\n", + " football helmet 50 0.9 0.96\n", + " forklift 50 0.8 0.94\n", + " fountain 50 0.88 0.92\n", + " fountain pen 50 0.76 0.92\n", + " four-poster bed 50 0.82 0.92\n", + " freight car 50 0.98 0.98\n", + " French horn 50 0.76 0.92\n", + " frying pan 50 0.48 0.82\n", + " fur coat 50 0.86 0.96\n", + " garbage truck 50 0.9 0.98\n", + " gas mask 50 0.82 0.92\n", + " gas pump 50 0.82 0.98\n", + " goblet 50 0.64 0.9\n", + " go-kart 50 0.9 1\n", + " golf ball 50 0.86 0.96\n", + " golf cart 50 0.76 0.9\n", + " gondola 50 0.94 0.98\n", + " gong 50 0.74 0.92\n", + " gown 50 0.72 0.94\n", + " grand piano 50 0.74 0.96\n", + " greenhouse 50 0.84 1\n", + " grille 50 0.72 0.88\n", + " grocery store 50 0.68 0.9\n", + " guillotine 50 0.84 0.94\n", + " barrette 50 0.48 0.68\n", + " hair spray 50 0.4 0.76\n", + " half-track 50 0.76 0.96\n", + " hammer 50 0.54 0.78\n", + " hamper 50 0.72 0.9\n", + " hair dryer 50 0.7 0.8\n", + " hand-held computer 50 0.52 0.88\n", + " handkerchief 50 0.8 0.96\n", + " hard disk drive 50 0.78 0.86\n", + " harmonica 50 0.68 0.96\n", + " harp 50 0.9 0.96\n", + " harvester 50 0.86 1\n", + " hatchet 50 0.6 0.84\n", + " holster 50 0.7 0.84\n", + " home theater 50 0.72 0.96\n", + " honeycomb 50 0.74 0.86\n", + " hook 50 0.28 0.62\n", + " hoop skirt 50 0.68 0.8\n", + " horizontal bar 50 0.76 0.98\n", + " horse-drawn vehicle 50 0.9 0.9\n", + " hourglass 50 0.92 0.98\n", + " iPod 50 0.9 0.94\n", + " clothes iron 50 0.72 0.9\n", + " jack-o'-lantern 50 0.94 0.98\n", + " jeans 50 0.7 0.82\n", + " jeep 50 0.76 0.9\n", + " T-shirt 50 0.72 0.94\n", + " jigsaw puzzle 50 0.92 0.96\n", + " pulled rickshaw 50 0.88 0.96\n", + " joystick 50 0.74 0.98\n", + " kimono 50 0.78 0.94\n", + " knee pad 50 0.7 0.86\n", + " knot 50 0.8 0.86\n", + " lab coat 50 0.82 0.98\n", + " ladle 50 0.26 0.64\n", + " lampshade 50 0.62 0.8\n", + " laptop computer 50 0.2 0.88\n", + " lawn mower 50 0.8 0.96\n", + " lens cap 50 0.5 0.8\n", + " paper knife 50 0.3 0.58\n", + " library 50 0.62 0.92\n", + " lifeboat 50 0.94 0.98\n", + " lighter 50 0.56 0.8\n", + " limousine 50 0.74 0.92\n", + " ocean liner 50 0.88 0.96\n", + " lipstick 50 0.7 0.88\n", + " slip-on shoe 50 0.82 0.94\n", + " lotion 50 0.56 0.9\n", + " speaker 50 0.58 0.64\n", + " loupe 50 0.32 0.54\n", + " sawmill 50 0.74 0.9\n", + " magnetic compass 50 0.48 0.78\n", + " mail bag 50 0.64 0.94\n", + " mailbox 50 0.82 0.92\n", + " tights 50 0.28 0.9\n", + " tank suit 50 0.3 0.88\n", + " manhole cover 50 0.94 0.98\n", + " maraca 50 0.72 0.86\n", + " marimba 50 0.84 0.94\n", + " mask 50 0.48 0.78\n", + " match 50 0.74 0.92\n", + " maypole 50 0.96 1\n", + " maze 50 0.82 1\n", + " measuring cup 50 0.66 0.82\n", + " medicine chest 50 0.6 0.9\n", + " megalith 50 0.84 0.92\n", + " 
microphone 50 0.56 0.74\n", + " microwave oven 50 0.56 0.8\n", + " military uniform 50 0.62 0.86\n", + " milk can 50 0.7 0.82\n", + " minibus 50 0.68 1\n", + " miniskirt 50 0.58 0.84\n", + " minivan 50 0.48 0.8\n", + " missile 50 0.34 0.82\n", + " mitten 50 0.76 0.88\n", + " mixing bowl 50 0.82 0.98\n", + " mobile home 50 0.58 0.8\n", + " Model T 50 0.92 0.96\n", + " modem 50 0.7 0.9\n", + " monastery 50 0.52 0.86\n", + " monitor 50 0.34 0.86\n", + " moped 50 0.56 0.94\n", + " mortar 50 0.72 0.88\n", + " square academic cap 50 0.48 0.82\n", + " mosque 50 0.98 1\n", + " mosquito net 50 0.96 0.98\n", + " scooter 50 0.88 0.98\n", + " mountain bike 50 0.74 0.96\n", + " tent 50 0.88 0.96\n", + " computer mouse 50 0.38 0.82\n", + " mousetrap 50 0.82 0.9\n", + " moving van 50 0.48 0.8\n", + " muzzle 50 0.5 0.74\n", + " nail 50 0.68 0.76\n", + " neck brace 50 0.62 0.72\n", + " necklace 50 0.92 1\n", + " nipple 50 0.8 0.92\n", + " notebook computer 50 0.34 0.88\n", + " obelisk 50 0.82 0.94\n", + " oboe 50 0.62 0.84\n", + " ocarina 50 0.82 0.88\n", + " odometer 50 0.98 1\n", + " oil filter 50 0.6 0.82\n", + " organ 50 0.84 0.94\n", + " oscilloscope 50 0.94 0.96\n", + " overskirt 50 0.2 0.62\n", + " bullock cart 50 0.76 0.94\n", + " oxygen mask 50 0.48 0.8\n", + " packet 50 0.54 0.74\n", + " paddle 50 0.7 0.94\n", + " paddle wheel 50 0.92 0.98\n", + " padlock 50 0.64 0.78\n", + " paintbrush 50 0.66 0.78\n", + " pajamas 50 0.68 0.94\n", + " palace 50 0.66 0.94\n", + " pan flute 50 0.84 0.86\n", + " paper towel 50 0.68 0.86\n", + " parachute 50 0.92 0.96\n", + " parallel bars 50 0.68 0.96\n", + " park bench 50 0.82 0.94\n", + " parking meter 50 0.86 0.98\n", + " passenger car 50 0.48 0.86\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + " patio 50 0.6 0.84\n", + " payphone 50 0.78 0.94\n", + " pedestal 50 0.66 0.86\n", + " pencil case 50 0.74 0.98\n", + " pencil sharpener 50 0.6 0.76\n", + " perfume 50 0.66 0.96\n", + " Petri dish 50 0.64 0.82\n", + " photocopier 50 0.94 1\n", + " plectrum 50 0.72 0.92\n", + " Pickelhaube 50 0.78 0.88\n", + " picket fence 50 0.86 0.94\n", + " pickup truck 50 0.72 0.94\n", + " pier 50 0.54 0.92\n", + " piggy bank 50 0.8 0.94\n", + " pill bottle 50 0.72 0.9\n", + " pillow 50 0.76 0.88\n", + " ping-pong ball 50 0.78 0.88\n", + " pinwheel 50 0.8 0.94\n", + " pirate ship 50 0.76 0.92\n", + " pitcher 50 0.48 0.86\n", + " hand plane 50 0.9 0.92\n", + " planetarium 50 0.9 0.98\n", + " plastic bag 50 0.42 0.66\n", + " plate rack 50 0.52 0.82\n", + " plow 50 0.8 0.94\n", + " plunger 50 0.42 0.72\n", + " Polaroid camera 50 0.84 0.94\n", + " pole 50 0.4 0.76\n", + " police van 50 0.84 0.94\n", + " poncho 50 0.64 0.88\n", + " billiard table 50 0.84 0.92\n", + " soda bottle 50 0.58 0.9\n", + " pot 50 0.86 0.94\n", + " potter's wheel 50 0.92 0.94\n", + " power drill 50 0.38 0.7\n", + " prayer rug 50 0.7 0.88\n", + " printer 50 0.52 0.86\n", + " prison 50 0.66 0.9\n", + " projectile 50 0.34 0.96\n", + " projector 50 0.6 0.82\n", + " hockey puck 50 0.9 0.98\n", + " punching bag 50 0.62 0.72\n", + " purse 50 0.48 0.88\n", + " quill 50 0.78 0.86\n", + " quilt 50 0.6 0.9\n", + " race car 50 0.72 0.92\n", + " racket 50 0.78 0.94\n", + " radiator 50 0.7 0.84\n", + " radio 50 0.68 0.9\n", + " radio telescope 50 0.88 0.94\n", + " rain barrel 50 0.8 0.96\n", + " recreational vehicle 50 0.84 0.96\n", + " reel 50 0.72 0.8\n", + " reflex camera 50 0.76 0.96\n", + " refrigerator 50 0.76 0.92\n", + " remote control 50 0.72 0.94\n", + " restaurant 50 0.52 0.62\n", + " 
revolver 50 0.8 0.98\n", + " rifle 50 0.46 0.76\n", + " rocking chair 50 0.72 0.9\n", + " rotisserie 50 0.88 0.96\n", + " eraser 50 0.62 0.76\n", + " rugby ball 50 0.84 0.94\n", + " ruler 50 0.72 0.86\n", + " running shoe 50 0.84 0.94\n", + " safe 50 0.9 0.94\n", + " safety pin 50 0.48 0.8\n", + " salt shaker 50 0.62 0.8\n", + " sandal 50 0.7 0.82\n", + " sarong 50 0.62 0.8\n", + " saxophone 50 0.66 0.9\n", + " scabbard 50 0.78 0.92\n", + " weighing scale 50 0.62 0.84\n", + " school bus 50 0.92 1\n", + " schooner 50 0.8 1\n", + " scoreboard 50 0.86 0.98\n", + " CRT screen 50 0.16 0.8\n", + " screw 50 0.96 0.98\n", + " screwdriver 50 0.4 0.58\n", + " seat belt 50 0.9 0.92\n", + " sewing machine 50 0.74 0.94\n", + " shield 50 0.64 0.78\n", + " shoe store 50 0.84 0.98\n", + " shoji 50 0.76 0.92\n", + " shopping basket 50 0.52 0.84\n", + " shopping cart 50 0.76 0.9\n", + " shovel 50 0.7 0.84\n", + " shower cap 50 0.74 0.88\n", + " shower curtain 50 0.72 0.9\n", + " ski 50 0.68 0.94\n", + " ski mask 50 0.66 0.9\n", + " sleeping bag 50 0.66 0.8\n", + " slide rule 50 0.7 0.86\n", + " sliding door 50 0.54 0.76\n", + " slot machine 50 0.92 0.96\n", + " snorkel 50 0.86 1\n", + " snowmobile 50 0.86 0.96\n", + " snowplow 50 0.9 1\n", + " soap dispenser 50 0.52 0.9\n", + " soccer ball 50 0.84 0.98\n", + " sock 50 0.66 0.78\n", + " solar thermal collector 50 0.72 0.9\n", + " sombrero 50 0.7 0.84\n", + " soup bowl 50 0.6 0.94\n", + " space bar 50 0.32 0.84\n", + " space heater 50 0.64 0.74\n", + " space shuttle 50 0.86 0.98\n", + " spatula 50 0.28 0.6\n", + " motorboat 50 0.94 1\n", + " spider web 50 0.76 0.96\n", + " spindle 50 0.92 1\n", + " sports car 50 0.5 0.96\n", + " spotlight 50 0.34 0.66\n", + " stage 50 0.76 0.92\n", + " steam locomotive 50 0.96 1\n", + " through arch bridge 50 0.82 0.96\n", + " steel drum 50 0.8 0.94\n", + " stethoscope 50 0.52 0.84\n", + " scarf 50 0.54 0.92\n", + " stone wall 50 0.8 0.92\n", + " stopwatch 50 0.54 0.9\n", + " stove 50 0.46 0.78\n", + " strainer 50 0.58 0.84\n", + " tram 50 0.9 0.96\n", + " stretcher 50 0.46 0.74\n", + " couch 50 0.72 0.94\n", + " stupa 50 0.84 0.9\n", + " submarine 50 0.78 0.9\n", + " suit 50 0.62 0.88\n", + " sundial 50 0.46 0.78\n", + " sunglass 50 0.18 0.6\n", + " sunglasses 50 0.32 0.64\n", + " sunscreen 50 0.32 0.7\n", + " suspension bridge 50 0.64 0.94\n", + " mop 50 0.8 0.96\n", + " sweatshirt 50 0.26 0.68\n", + " swimsuit 50 0.6 0.84\n", + " swing 50 0.78 0.88\n", + " switch 50 0.62 0.8\n", + " syringe 50 0.68 0.8\n", + " table lamp 50 0.54 0.88\n", + " tank 50 0.78 0.94\n", + " tape player 50 0.38 0.88\n", + " teapot 50 0.82 1\n", + " teddy bear 50 0.82 0.92\n", + " television 50 0.6 0.9\n", + " tennis ball 50 0.7 0.94\n", + " thatched roof 50 0.86 0.94\n", + " front curtain 50 0.76 0.94\n", + " thimble 50 0.68 0.82\n", + " threshing machine 50 0.64 0.9\n", + " throne 50 0.68 0.82\n", + " tile roof 50 0.84 0.96\n", + " toaster 50 0.64 0.82\n", + " tobacco shop 50 0.44 0.74\n", + " toilet seat 50 0.64 0.88\n", + " torch 50 0.62 0.86\n", + " totem pole 50 0.9 1\n", + " tow truck 50 0.64 0.92\n", + " toy store 50 0.64 0.9\n", + " tractor 50 0.86 0.98\n", + " semi-trailer truck 50 0.76 0.96\n", + " tray 50 0.54 0.76\n", + " trench coat 50 0.6 0.78\n", + " tricycle 50 0.78 0.96\n", + " trimaran 50 0.78 0.98\n", + " tripod 50 0.66 0.86\n", + " triumphal arch 50 0.92 0.98\n", + " trolleybus 50 0.98 1\n", + " trombone 50 0.66 0.94\n", + " tub 50 0.3 0.86\n", + " turnstile 50 0.8 0.9\n", + " typewriter keyboard 50 0.74 0.98\n", + " umbrella 
50 0.6 0.78\n", + " unicycle 50 0.78 0.96\n", + " upright piano 50 0.84 0.94\n", + " vacuum cleaner 50 0.84 0.92\n", + " vase 50 0.56 0.74\n", + " vault 50 0.78 0.9\n", + " velvet 50 0.22 0.5\n", + " vending machine 50 0.94 1\n", + " vestment 50 0.62 0.86\n", + " viaduct 50 0.78 0.88\n", + " violin 50 0.64 0.88\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + " volleyball 50 0.96 1\n", + " waffle iron 50 0.72 0.84\n", + " wall clock 50 0.58 0.86\n", + " wallet 50 0.58 0.94\n", + " wardrobe 50 0.7 0.9\n", + " military aircraft 50 0.9 0.98\n", + " sink 50 0.74 0.94\n", + " washing machine 50 0.82 0.94\n", + " water bottle 50 0.54 0.68\n", + " water jug 50 0.3 0.78\n", + " water tower 50 0.94 0.96\n", + " whiskey jug 50 0.64 0.76\n", + " whistle 50 0.7 0.82\n", + " wig 50 0.86 0.88\n", + " window screen 50 0.7 0.82\n", + " window shade 50 0.54 0.9\n", + " Windsor tie 50 0.32 0.64\n", + " wine bottle 50 0.46 0.76\n", + " wing 50 0.52 0.96\n", + " wok 50 0.54 0.92\n", + " wooden spoon 50 0.62 0.86\n", + " wool 50 0.42 0.84\n", + " split-rail fence 50 0.7 0.92\n", + " shipwreck 50 0.86 0.98\n", + " yawl 50 0.76 0.92\n", + " yurt 50 0.86 0.96\n", + " website 50 0.98 1\n", + " comic book 50 0.72 0.88\n", + " crossword 50 0.8 0.88\n", + " traffic sign 50 0.72 0.9\n", + " traffic light 50 0.8 0.96\n", + " dust jacket 50 0.78 0.94\n", + " menu 50 0.8 0.96\n", + " plate 50 0.44 0.86\n", + " guacamole 50 0.76 0.96\n", + " consomme 50 0.52 0.92\n", + " hot pot 50 0.78 1\n", + " trifle 50 0.9 1\n", + " ice cream 50 0.68 0.94\n", + " ice pop 50 0.68 0.8\n", + " baguette 50 0.62 0.88\n", + " bagel 50 0.64 0.86\n", + " pretzel 50 0.68 0.9\n", + " cheeseburger 50 0.92 0.96\n", + " hot dog 50 0.74 0.96\n", + " mashed potato 50 0.72 0.88\n", + " cabbage 50 0.88 0.98\n", + " broccoli 50 0.88 0.96\n", + " cauliflower 50 0.84 0.98\n", + " zucchini 50 0.68 0.98\n", + " spaghetti squash 50 0.82 0.96\n", + " acorn squash 50 0.8 1\n", + " butternut squash 50 0.72 0.94\n", + " cucumber 50 0.66 0.94\n", + " artichoke 50 0.86 0.96\n", + " bell pepper 50 0.86 0.94\n", + " cardoon 50 0.92 0.94\n", + " mushroom 50 0.38 0.96\n", + " Granny Smith 50 0.9 0.98\n", + " strawberry 50 0.64 0.88\n", + " orange 50 0.74 0.94\n", + " lemon 50 0.78 0.98\n", + " fig 50 0.84 0.94\n", + " pineapple 50 0.9 1\n", + " banana 50 0.88 0.98\n", + " jackfruit 50 0.96 0.98\n", + " custard apple 50 0.86 0.96\n", + " pomegranate 50 0.8 0.96\n", + " hay 50 0.84 0.96\n", + " carbonara 50 0.88 0.96\n", + " chocolate syrup 50 0.58 0.94\n", + " dough 50 0.36 0.68\n", + " meatloaf 50 0.64 0.88\n", + " pizza 50 0.78 0.9\n", + " pot pie 50 0.66 0.92\n", + " burrito 50 0.88 0.98\n", + " red wine 50 0.66 0.84\n", + " espresso 50 0.66 0.9\n", + " cup 50 0.42 0.78\n", + " eggnog 50 0.36 0.64\n", + " alp 50 0.54 0.94\n", + " bubble 50 0.86 0.96\n", + " cliff 50 0.66 1\n", + " coral reef 50 0.74 0.94\n", + " geyser 50 0.92 1\n", + " lakeshore 50 0.52 0.86\n", + " promontory 50 0.58 0.92\n", + " shoal 50 0.66 0.98\n", + " seashore 50 0.44 0.86\n", + " valley 50 0.72 0.98\n", + " volcano 50 0.72 0.94\n", + " baseball player 50 0.74 0.96\n", + " bridegroom 50 0.78 0.92\n", + " scuba diver 50 0.82 1\n", + " rapeseed 50 0.98 0.98\n", + " daisy 50 0.96 0.98\n", + " yellow lady's slipper 50 1 1\n", + " corn 50 0.42 0.86\n", + " acorn 50 0.96 0.98\n", + " rose hip 50 0.9 0.96\n", + " horse chestnut seed 50 1 1\n", + " coral fungus 50 0.98 0.98\n", + " agaric 50 0.84 0.94\n", + " gyromitra 50 0.98 0.98\n", + " stinkhorn mushroom 50 0.84 
0.92\n", + " earth star 50 1 1\n", + " hen-of-the-woods 50 0.9 0.96\n", + " bolete 50 0.8 0.94\n", + " ear 50 0.54 0.94\n", + " toilet paper 50 0.44 0.68\n", + "Speed: 0.1ms pre-process, 0.2ms inference, 0.0ms post-process per image at shape (1, 3, 320, 320)\n", + "Results saved to \u001b[1mruns/val-cls/exp\u001b[0m\n" + ] + } + ], + "source": [ + "# Validate YOLOv5s on Imagenet val\n", + "!python classify/val.py --weights yolov5s-cls.pt --data ../datasets/imagenet --img 320 --half" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ZY2VXXXu74w5" + }, + "source": [ + "# 3. Train\n", + "\n", + "

\n", + "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", + "

\n", + "\n", + "Train a YOLOv5s Classification model on the [Imagenette](https://image-net.org/) dataset with `--data imagenet`, starting from pretrained `--pretrained yolov5s-cls.pt`.\n", + "\n", + "- **Pretrained [Models](https://github.com/ultralytics/yolov5/tree/master/models)** are downloaded\n", + "automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\n", + "- **Training Results** are saved to `runs/train-cls/` with incrementing run directories, i.e. `runs/train-cls/exp2`, `runs/train-cls/exp3` etc.\n", + "

\n", + "\n", + "A **Mosaic Dataloader** is used for training which combines 4 images into 1 mosaic.\n", + "\n", + "## Train on Custom Data with Roboflow 🌟 NEW\n", + "\n", + "[Roboflow](https://roboflow.com/?ref=ultralytics) enables you to easily **organize, label, and prepare** a high quality dataset with your own custom data. Roboflow also makes it easy to establish an active learning pipeline, collaborate with your team on dataset improvement, and integrate directly into your model building workflow with the `roboflow` pip package.\n", + "\n", + "- Custom Training Example: [https://blog.roboflow.com/train-yolov5-classification-custom-data/](https://blog.roboflow.com/train-yolov5-classification-custom-data/?ref=ultralytics)\n", + "- Custom Training Notebook: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1KZiKUAjtARHAfZCXbJRv14-pOnIsBLPV?usp=sharing)\n", + "
\n", + "\n", + "

Label images lightning fast (including with model-assisted labeling)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "i3oKtE4g-aNn" + }, + "outputs": [], + "source": [ + "#@title Select YOLOv5 🚀 logger {run: 'auto'}\n", + "logger = 'TensorBoard' #@param ['TensorBoard', 'Comet', 'ClearML']\n", + "\n", + "if logger == 'TensorBoard':\n", + " %load_ext tensorboard\n", + " %tensorboard --logdir runs/train\n", + "elif logger == 'Comet':\n", + " %pip install -q comet_ml\n", + " import comet_ml; comet_ml.init()\n", + "elif logger == 'ClearML':\n", + " %pip install -q clearml && clearml-init" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "1NcFxRcFdJ_O", + "outputId": "baa6d4be-3379-4aab-844a-d5a5396c0e49" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[34m\u001b[1mclassify/train: \u001b[0mmodel=yolov5s-cls.pt, data=imagenette160, epochs=3, batch_size=16, imgsz=160, nosave=False, cache=ram, device=, workers=8, project=runs/train-cls, name=exp, exist_ok=False, pretrained=True, optimizer=Adam, lr0=0.001, decay=5e-05, label_smoothing=0.1, cutoff=None, dropout=None, verbose=False, seed=0, local_rank=-1\n", + "\u001b[34m\u001b[1mgithub: \u001b[0m⚠️ YOLOv5 is out of date by 7 commits. Use `git pull ultralytics master` or `git clone https://github.com/ultralytics/yolov5` to update.\n", + "YOLOv5 🚀 v6.2-237-g34c3e1c Python-3.7.12 torch-1.12.1+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16161MiB)\n", + "\n", + "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train-cls', view at http://localhost:6006/\n", + "\u001b[34m\u001b[1malbumentations: \u001b[0m⚠️ not found, install with `pip install albumentations` (recommended)\n", + "Model summary: 149 layers, 4185290 parameters, 4185290 gradients, 10.5 GFLOPs\n", + "\u001b[34m\u001b[1moptimizer:\u001b[0m Adam(lr=0.001) with parameter groups 32 weight(decay=0.0), 33 weight(decay=5e-05), 33 bias\n", + "Image sizes 160 train, 160 test\n", + "Using 3 dataloader workers\n", + "Logging results to \u001b[1mruns/train-cls/exp\u001b[0m\n", + "Starting yolov5s-cls.pt training on imagenette160 dataset with 10 classes for 3 epochs...\n", + "\n", + " Epoch GPU_mem train_loss val_loss top1_acc top5_acc\n", + " 1/3 0.369G 1.05 0.935 0.837 0.985: 100%|█████\n", + " 2/3 0.369G 0.767 0.873 0.859 0.982: 100%|█████\n", + " 3/3 0.369G 0.626 0.713 0.927 0.992: 100%|█████\n", + "\n", + "Training complete (0.025 hours)\n", + "Results saved to \u001b[1mruns/train-cls/exp\u001b[0m\n", + "Predict: python classify/predict.py --weights runs/train-cls/exp/weights/best.pt --source im.jpg\n", + "Validate: python classify/val.py --weights runs/train-cls/exp/weights/best.pt --data /home/paguerrie/datasets/imagenette160\n", + "Export: python export.py --weights runs/train-cls/exp/weights/best.pt --include onnx\n", + "PyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', 'runs/train-cls/exp/weights/best.pt')\n", + "Visualize: https://netron.app\n", + "\n" + ] + } + ], + "source": [ + "# Train YOLOv5s Classification on Imagenette160 for 3 epochs\n", + "!python classify/train.py --img 160 --batch 16 --epochs 3 --data imagenette160 --model yolov5s-cls.pt --cache" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "15glLzbQx5u0" + }, + "source": [ + "# 4. 
Visualize" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "nWOsI5wJR1o3" + }, + "source": [ + "## Comet Logging and Visualization 🌟 NEW\n", + "[Comet](https://bit.ly/yolov5-readme-comet) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", + "\n", + "Getting started is easy:\n", + "```shell\n", + "pip install comet_ml # 1. install\n", + "export COMET_API_KEY= # 2. paste API key\n", + "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n", + "```\n", + "\n", + "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://bit.ly/yolov5-colab-comet-docs). Get started by trying out the Comet Colab Notebook:\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", + "\n", + "\"yolo-ui\"" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Lay2WsTjNJzP" + }, + "source": [ + "## ClearML Logging and Automation 🌟 NEW\n", + "\n", + "[ClearML](https://cutt.ly/yolov5-notebook-clearml) is completely integrated into YOLOv5 to track your experimentation, manage dataset versions and even remotely execute training runs. To enable ClearML (check cells above):\n", + "\n", + "- `pip install clearml`\n", + "- run `clearml-init` to connect to a ClearML server (**deploy your own [open-source server](https://github.com/allegroai/clearml-server)**, or use our [free hosted server](https://cutt.ly/yolov5-notebook-clearml))\n", + "\n", + "You'll get all the great expected features from an experiment manager: live updates, model upload, experiment comparison etc. but ClearML also tracks uncommitted changes and installed packages for example. Thanks to that ClearML Tasks (which is what we call experiments) are also reproducible on different machines! With only 1 extra line, we can schedule a YOLOv5 training task on a queue to be executed by any number of ClearML Agents (workers).\n", + "\n", + "You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) for details!\n", + "\n", + "\n", + "\"ClearML" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "-WPvRbS5Swl6" + }, + "source": [ + "## Local Logging\n", + "\n", + "Training results are automatically logged with [Tensorboard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) loggers to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc.\n", + "\n", + "This directory contains train and val statistics, mosaics, labels, predictions and augmentated mosaics, as well as metrics and charts including precision-recall (PR) curves and confusion matrices. 
\n", + "\n", + "\"Local\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Zelyeqbyt3GD" + }, + "source": [ + "# Environments\n", + "\n", + "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n", + "\n", + "- **Notebooks** with free GPU: \"Run \"Open \"Open\n", + "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)\n", + "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)\n", + "- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) \"Docker\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "6Qu7Iesl0p54" + }, + "source": [ + "# Status\n", + "\n", + "![YOLOv5 CI](https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg)\n", + "\n", + "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "IEijrePND_2I" + }, + "source": [ + "# Appendix\n", + "\n", + "Additional content below." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "GMusP4OAxFu6" + }, + "outputs": [], + "source": [ + "# YOLOv5 PyTorch HUB Inference (DetectionModels only)\n", + "import torch\n", + "\n", + "model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # yolov5n - yolov5x6 or custom\n", + "im = 'https://ultralytics.com/images/zidane.jpg' # file, Path, PIL.Image, OpenCV, nparray, list\n", + "results = model(im) # inference\n", + "results.print() # or .show(), .save(), .crop(), .pandas(), etc." 
+ ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "collapsed_sections": [], + "machine_shape": "hm", + "name": "YOLOv5 Tutorial", + "provenance": [], + "toc_visible": true + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.12" + }, + "widgets": { + "application/vnd.jupyter.widget-state+json": { + "0856bea36ec148b68522ff9c9eb258d8": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "0ace3934ec6f4d36a1b3a9e086390926": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "ProgressStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "ProgressStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "bar_color": null, + "description_width": "" + } + }, + "35e03ce5090346c9ae602891470fc555": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "FloatProgressModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "FloatProgressModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "ProgressView", + "bar_style": "success", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_76879f6f2aa54637a7a07faeea2bd684", + "max": 818322941, + "min": 0, + "orientation": "horizontal", + "style": "IPY_MODEL_0ace3934ec6f4d36a1b3a9e086390926", + "value": 818322941 + } + }, + "574140e4c4bc48c9a171541a02cd0211": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_60b913d755b34d638478e30705a2dde1", + "placeholder": "​", + "style": "IPY_MODEL_0856bea36ec148b68522ff9c9eb258d8", + "value": "100%" + } + }, + "5966ba6e6f114d8c9d8d1d6b1bd4f4c7": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "60b913d755b34d638478e30705a2dde1": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + 
"model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "65881db1db8a4e9c930fab9172d45143": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "76879f6f2aa54637a7a07faeea2bd684": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + 
"top": null, + "visibility": null, + "width": null + } + }, + "9b8caa3522fc4cbab31e13b5dfc7808d": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HBoxModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HBoxModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HBoxView", + "box_style": "", + "children": [ + "IPY_MODEL_574140e4c4bc48c9a171541a02cd0211", + "IPY_MODEL_35e03ce5090346c9ae602891470fc555", + "IPY_MODEL_c942c208e72d46568b476bb0f2d75496" + ], + "layout": "IPY_MODEL_65881db1db8a4e9c930fab9172d45143" + } + }, + "c942c208e72d46568b476bb0f2d75496": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "HTMLModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "HTMLModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "HTMLView", + "description": "", + "description_tooltip": null, + "layout": "IPY_MODEL_d6b7a2243e0c4beca714d99dceec23d6", + "placeholder": "​", + "style": "IPY_MODEL_5966ba6e6f114d8c9d8d1d6b1bd4f4c7", + "value": " 780M/780M [02:19<00:00, 6.24MB/s]" + } + }, + "d6b7a2243e0c4beca714d99dceec23d6": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + } + } + } + }, + "nbformat": 4, + "nbformat_minor": 1 +} From 005161514f0db7203195dae99caa94a617ac09f9 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 18 Nov 2022 22:10:35 +0100 Subject: [PATCH 1566/1976] Remove Colab notebook High-Memory notices (#10212) * Remove Colab notebook High-Memory notices Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- classify/tutorial.ipynb | 5 ++--- segment/tutorial.ipynb | 7 +++---- tutorial.ipynb | 3 +-- 3 files changed, 
6 insertions(+), 9 deletions(-) diff --git a/classify/tutorial.ipynb b/classify/tutorial.ipynb index 8ed8b5db8a35..f235b754d7b4 100644 --- a/classify/tutorial.ipynb +++ b/classify/tutorial.ipynb @@ -14,7 +14,7 @@ "\n", "
\n", " \"Run\n", - " \"Open\n", + " \"Open\n", " \"Open\n", "
\n", "\n", @@ -1469,8 +1469,7 @@ "accelerator": "GPU", "colab": { "collapsed_sections": [], - "machine_shape": "hm", - "name": "YOLOv5 Tutorial", + "name": "YOLOv5 Classification Tutorial", "provenance": [], "toc_visible": true }, diff --git a/segment/tutorial.ipynb b/segment/tutorial.ipynb index c26878fb0dbf..f3f978d43d93 100644 --- a/segment/tutorial.ipynb +++ b/segment/tutorial.ipynb @@ -14,7 +14,7 @@ "\n", "
\n", " \"Run\n", - " \"Open\n", + " \"Open\n", " \"Open\n", "
\n", "\n", @@ -572,8 +572,7 @@ "metadata": { "accelerator": "GPU", "colab": { - "machine_shape": "hm", - "name": "YOLOv5 Tutorial", + "name": "YOLOv5 Segmentation Tutorial", "provenance": [], "toc_visible": true }, @@ -597,4 +596,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} \ No newline at end of file +} diff --git a/tutorial.ipynb b/tutorial.ipynb index 07a6625a1491..eb5b675db2be 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -5,7 +5,6 @@ "colab": { "name": "YOLOv5 Tutorial", "provenance": [], - "machine_shape": "hm", "toc_visible": true }, "kernelspec": { @@ -973,4 +972,4 @@ "outputs": [] } ] -} \ No newline at end of file +} From 2ecaa96c847c2b117bf1057d6caec54520fd592a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 18 Nov 2022 22:17:52 +0100 Subject: [PATCH 1567/1976] Created using Colaboratory --- tutorial.ipynb | 134 ++++++++++++++++++++++++------------------------- 1 file changed, 67 insertions(+), 67 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index eb5b675db2be..9d5aa9c85c51 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -14,7 +14,7 @@ "accelerator": "GPU", "widgets": { "application/vnd.jupyter.widget-state+json": { - "13e0e8b77bf54b25b8893f0b4164315f": { + "300b4d5355ef4967bd5246afeef6eef5": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "model_module_version": "1.5.0", @@ -29,14 +29,14 @@ "_view_name": "HBoxView", "box_style": "", "children": [ - "IPY_MODEL_48037f2f7fea4012b9b341f6aee75297", - "IPY_MODEL_3f3b925287274893baf5ed7bb0cf6635", - "IPY_MODEL_c44bdca7c9784b20ba2146250ee744d6" + "IPY_MODEL_84e6829bb88845a8a4f42700b8496925", + "IPY_MODEL_c038e52d41bf4d5b9602930c3d074087", + "IPY_MODEL_2667604641764341b0bc8c6afea438fd" ], - "layout": "IPY_MODEL_5b0ed23cd32c4c7d8d9467b7425684ad" + "layout": "IPY_MODEL_98b3a4806ed14102b0d75e6c571d6134" } }, - "48037f2f7fea4012b9b341f6aee75297": { + "84e6829bb88845a8a4f42700b8496925": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", @@ -51,13 +51,13 @@ "_view_name": "HTMLView", "description": "", "description_tooltip": null, - "layout": "IPY_MODEL_1e10b4db5d644cb78bd6e005bb34038a", + "layout": "IPY_MODEL_c66a77395e42424d904699edcbb67291", "placeholder": "​", - "style": "IPY_MODEL_a58728093ecb4eafb826bee11a84c549", + "style": "IPY_MODEL_c4bbc15bf853439399dbcf1d40a5a407", "value": "100%" } }, - "3f3b925287274893baf5ed7bb0cf6635": { + "c038e52d41bf4d5b9602930c3d074087": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "model_module_version": "1.5.0", @@ -73,15 +73,15 @@ "bar_style": "success", "description": "", "description_tooltip": null, - "layout": "IPY_MODEL_9ce169fe4b8543c0b26d745daa230f18", + "layout": "IPY_MODEL_0aaabfac395b43afbdd6d752c502bbf6", "max": 818322941, "min": 0, "orientation": "horizontal", - "style": "IPY_MODEL_d5da01aca8fb400c96e76f44c9403581", + "style": "IPY_MODEL_3786d970492b4aa38f886f2572fd958c", "value": 818322941 } }, - "c44bdca7c9784b20ba2146250ee744d6": { + "2667604641764341b0bc8c6afea438fd": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", @@ -96,13 +96,13 @@ "_view_name": "HTMLView", "description": "", "description_tooltip": null, - "layout": "IPY_MODEL_98cbaa572fdd4c42975f52015672b3a5", + "layout": "IPY_MODEL_b86d0f2d7be74cebbcaa884b53123eeb", "placeholder": "​", - "style": "IPY_MODEL_a636aa81f5cc453099c9e552f0986e63", - "value": " 780M/780M [01:27<00:00, 6.98MB/s]" + "style": 
"IPY_MODEL_fa7b1497925a457f89286a71f073f416", + "value": " 780M/780M [00:57<00:00, 10.1MB/s]" } }, - "5b0ed23cd32c4c7d8d9467b7425684ad": { + "98b3a4806ed14102b0d75e6c571d6134": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -154,7 +154,7 @@ "width": null } }, - "1e10b4db5d644cb78bd6e005bb34038a": { + "c66a77395e42424d904699edcbb67291": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -206,7 +206,7 @@ "width": null } }, - "a58728093ecb4eafb826bee11a84c549": { + "c4bbc15bf853439399dbcf1d40a5a407": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", @@ -221,7 +221,7 @@ "description_width": "" } }, - "9ce169fe4b8543c0b26d745daa230f18": { + "0aaabfac395b43afbdd6d752c502bbf6": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -273,7 +273,7 @@ "width": null } }, - "d5da01aca8fb400c96e76f44c9403581": { + "3786d970492b4aa38f886f2572fd958c": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "model_module_version": "1.5.0", @@ -289,7 +289,7 @@ "description_width": "" } }, - "98cbaa572fdd4c42975f52015672b3a5": { + "b86d0f2d7be74cebbcaa884b53123eeb": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -341,7 +341,7 @@ "width": null } }, - "a636aa81f5cc453099c9e552f0986e63": { + "fa7b1497925a457f89286a71f073f416": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", @@ -401,7 +401,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "bcb6db4a-fc21-4258-9b53-4a760a534656" + "outputId": "32e3bc15-6d02-4352-f0a3-912059d134a5" }, "source": [ "!git clone https://github.com/ultralytics/yolov5 # clone\n", @@ -418,7 +418,7 @@ "output_type": "stream", "name": "stderr", "text": [ - "YOLOv5 🚀 v6.2-250-g467a57f Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n" + "YOLOv5 🚀 v6.2-256-g0051615 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n" ] }, { @@ -459,7 +459,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "de684b46-7623-4836-ee44-49cdb320cbf3" + "outputId": "8e81d6e9-0360-4212-cd61-9a5a58d3f703" }, "source": [ "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images\n", @@ -472,16 +472,16 @@ "name": "stdout", "text": [ "\u001b[34m\u001b[1mdetect: \u001b[0mweights=['yolov5s.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/detect, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False, vid_stride=1\n", - "YOLOv5 🚀 v6.2-250-g467a57f Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "YOLOv5 🚀 v6.2-256-g0051615 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", "Downloading https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt to yolov5s.pt...\n", - "100% 14.1M/14.1M [00:00<00:00, 162MB/s]\n", + "100% 14.1M/14.1M [00:00<00:00, 19.5MB/s]\n", "\n", "Fusing layers... 
\n", "YOLOv5s summary: 213 layers, 7225885 parameters, 0 gradients\n", - "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 17.2ms\n", - "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, 13.3ms\n", - "Speed: 0.5ms pre-process, 15.2ms inference, 19.5ms NMS per image at shape (1, 3, 640, 640)\n", + "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 17.5ms\n", + "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, 18.0ms\n", + "Speed: 0.5ms pre-process, 17.8ms inference, 17.6ms NMS per image at shape (1, 3, 640, 640)\n", "Results saved to \u001b[1mruns/detect/exp\u001b[0m\n" ] } @@ -515,20 +515,20 @@ "base_uri": "https://localhost:8080/", "height": 49, "referenced_widgets": [ - "13e0e8b77bf54b25b8893f0b4164315f", - "48037f2f7fea4012b9b341f6aee75297", - "3f3b925287274893baf5ed7bb0cf6635", - "c44bdca7c9784b20ba2146250ee744d6", - "5b0ed23cd32c4c7d8d9467b7425684ad", - "1e10b4db5d644cb78bd6e005bb34038a", - "a58728093ecb4eafb826bee11a84c549", - "9ce169fe4b8543c0b26d745daa230f18", - "d5da01aca8fb400c96e76f44c9403581", - "98cbaa572fdd4c42975f52015672b3a5", - "a636aa81f5cc453099c9e552f0986e63" + "300b4d5355ef4967bd5246afeef6eef5", + "84e6829bb88845a8a4f42700b8496925", + "c038e52d41bf4d5b9602930c3d074087", + "2667604641764341b0bc8c6afea438fd", + "98b3a4806ed14102b0d75e6c571d6134", + "c66a77395e42424d904699edcbb67291", + "c4bbc15bf853439399dbcf1d40a5a407", + "0aaabfac395b43afbdd6d752c502bbf6", + "3786d970492b4aa38f886f2572fd958c", + "b86d0f2d7be74cebbcaa884b53123eeb", + "fa7b1497925a457f89286a71f073f416" ] }, - "outputId": "b1e02a1f-981f-4739-e75d-10d0204cc32d" + "outputId": "61ffec5e-90ea-44f6-c0ea-b006e6e7072f" }, "source": [ "# Download COCO val\n", @@ -546,7 +546,7 @@ "application/vnd.jupyter.widget-view+json": { "version_major": 2, "version_minor": 0, - "model_id": "13e0e8b77bf54b25b8893f0b4164315f" + "model_id": "300b4d5355ef4967bd5246afeef6eef5" } }, "metadata": {} @@ -560,7 +560,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "9c2f755f-f383-4a9e-cd19-f73a0c763a9c" + "outputId": "aa5d5cea-14c1-4a19-bfdf-95b7164962cf" }, "source": [ "# Validate YOLOv5s on COCO val\n", @@ -573,30 +573,30 @@ "name": "stdout", "text": [ "\u001b[34m\u001b[1mval: \u001b[0mdata=/content/yolov5/data/coco.yaml, weights=['yolov5s.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=val, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=True, project=runs/val, name=exp, exist_ok=False, half=True, dnn=False\n", - "YOLOv5 🚀 v6.2-250-g467a57f Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "YOLOv5 🚀 v6.2-256-g0051615 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", "Fusing layers... \n", "YOLOv5s summary: 213 layers, 7225885 parameters, 0 gradients\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupt: 100% 5000/5000 [00:02<00:00, 2019.92it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco/val2017... 
4952 images, 48 backgrounds, 0 corrupt: 100% 5000/5000 [00:02<00:00, 2066.57it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: /content/datasets/coco/val2017.cache\n", - " Class Images Instances P R mAP50 mAP50-95: 100% 157/157 [01:09<00:00, 2.25it/s]\n", + " Class Images Instances P R mAP50 mAP50-95: 100% 157/157 [01:09<00:00, 2.26it/s]\n", " all 5000 36335 0.67 0.521 0.566 0.371\n", - "Speed: 0.2ms pre-process, 2.7ms inference, 2.1ms NMS per image at shape (32, 3, 640, 640)\n", + "Speed: 0.1ms pre-process, 2.7ms inference, 1.9ms NMS per image at shape (32, 3, 640, 640)\n", "\n", "Evaluating pycocotools mAP... saving runs/val/exp/yolov5s_predictions.json...\n", "loading annotations into memory...\n", - "Done (t=0.41s)\n", + "Done (t=0.82s)\n", "creating index...\n", "index created!\n", "Loading and preparing results...\n", - "DONE (t=6.19s)\n", + "DONE (t=5.49s)\n", "creating index...\n", "index created!\n", "Running per image evaluation...\n", "Evaluate annotation type *bbox*\n", - "DONE (t=75.81s).\n", + "DONE (t=74.26s).\n", "Accumulating evaluation results...\n", - "DONE (t=15.26s).\n", + "DONE (t=13.46s).\n", " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.374\n", " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.572\n", " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.402\n", @@ -676,7 +676,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "7d03d4d2-9a6e-47de-88f4-c673b55c73c5" + "outputId": "f0fcdc77-5326-41e1-bacc-be5432eefa2a" }, "source": [ "# Train YOLOv5s on COCO128 for 3 epochs\n", @@ -690,7 +690,7 @@ "text": [ "\u001b[34m\u001b[1mtrain: \u001b[0mweights=yolov5s.pt, cfg=, data=coco128.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=-1, artifact_alias=latest\n", "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 🚀 v6.2-250-g467a57f Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "YOLOv5 🚀 v6.2-256-g0051615 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n", "\u001b[34m\u001b[1mClearML: \u001b[0mrun 'pip install clearml' to automatically track, visualize and remotely train YOLOv5 🚀 in ClearML\n", @@ -699,8 +699,8 @@ "\n", "Dataset not found ⚠️, missing paths ['/content/datasets/coco128/images/train2017']\n", "Downloading https://ultralytics.com/assets/coco128.zip to coco128.zip...\n", - "100% 6.66M/6.66M [00:00<00:00, 26.1MB/s]\n", - "Dataset download success ✅ (0.9s), saved to \u001b[1m/content/datasets\u001b[0m\n", + "100% 6.66M/6.66M [00:00<00:00, 39.8MB/s]\n", + "Dataset download success ✅ (0.8s), saved to 
\u001b[1m/content/datasets\u001b[0m\n", "\n", " from n params module arguments \n", " 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2] \n", @@ -734,11 +734,11 @@ "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n", "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n", "\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '/content/datasets/coco128/labels/train2017' images and labels...126 found, 2 missing, 0 empty, 0 corrupt: 100% 128/128 [00:00<00:00, 1989.66it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /content/datasets/coco128/labels/train2017... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<00:00, 2084.63it/s]\n", "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /content/datasets/coco128/labels/train2017.cache\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 246.25it/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco128/labels/train2017.cache' images and labels... 126 found, 2 missing, 0 empty, 0 corrupt: 100% 128/128 [00:00 Date: Fri, 18 Nov 2022 22:41:46 +0100 Subject: [PATCH 1568/1976] Created using Colaboratory --- segment/tutorial.ipynb | 70 +++++++++++++++++++----------------------- 1 file changed, 32 insertions(+), 38 deletions(-) diff --git a/segment/tutorial.ipynb b/segment/tutorial.ipynb index f3f978d43d93..4192c69da628 100644 --- a/segment/tutorial.ipynb +++ b/segment/tutorial.ipynb @@ -42,14 +42,14 @@ "base_uri": "https://localhost:8080/" }, "id": "wbvMlHd_QwMG", - "outputId": "d1e33dfc-9ad4-436e-f1e5-01acee40c029" + "outputId": "664f49fa-554a-4dca-8d0e-5c9dd60f6d28" }, "outputs": [ { "output_type": "stream", "name": "stderr", "text": [ - "YOLOv5 🚀 v6.2-251-g241d798 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n" + "YOLOv5 🚀 v6.2-257-g2ecaa96 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n" ] }, { @@ -100,7 +100,7 @@ "base_uri": "https://localhost:8080/" }, "id": "zR9ZbuQCH7FX", - "outputId": "e206fcec-cf42-4754-8a42-39bc3603eba8" + "outputId": "6392c9ff-0863-4665-faf9-b3af9881c305" }, "outputs": [ { @@ -108,16 +108,16 @@ "name": "stdout", "text": [ "\u001b[34m\u001b[1msegment/predict: \u001b[0mweights=['yolov5s-seg.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/predict-seg, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False, vid_stride=1, retina_masks=False\n", - "YOLOv5 🚀 v6.2-251-g241d798 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "YOLOv5 🚀 v6.2-257-g2ecaa96 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", "Downloading https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-seg.pt to yolov5s-seg.pt...\n", - "100% 14.9M/14.9M [00:03<00:00, 3.93MB/s]\n", + "100% 14.9M/14.9M [00:01<00:00, 9.09MB/s]\n", "\n", "Fusing layers... 
\n", "YOLOv5s-seg summary: 224 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\n", - "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 17.2ms\n", - "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 1 tie, 13.7ms\n", - "Speed: 0.4ms pre-process, 15.5ms inference, 22.2ms NMS per image at shape (1, 3, 640, 640)\n", + "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 18.0ms\n", + "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 1 tie, 13.5ms\n", + "Speed: 0.5ms pre-process, 15.7ms inference, 18.5ms NMS per image at shape (1, 3, 640, 640)\n", "Results saved to \u001b[1mruns/predict-seg/exp\u001b[0m\n" ] } @@ -155,7 +155,7 @@ "base_uri": "https://localhost:8080/" }, "id": "WQPtK1QYVaD_", - "outputId": "f7eba0ae-49d1-405b-a1cf-169212fadc2c" + "outputId": "4707734e-00c7-43da-d642-32c3c3fe3090" }, "outputs": [ { @@ -182,26 +182,23 @@ "base_uri": "https://localhost:8080/" }, "id": "X58w8JLpMnjH", - "outputId": "73533135-6995-4f2d-adb0-3acb5ef9b300" + "outputId": "f96b700d-c779-4a34-930b-e85be4e58974" }, "outputs": [ { - "metadata": { - "tags": null - }, - "name": "stdout", "output_type": "stream", + "name": "stdout", "text": [ "\u001b[34m\u001b[1msegment/val: \u001b[0mdata=/content/yolov5/data/coco.yaml, weights=['yolov5s-seg.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=val, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val-seg, name=exp, exist_ok=False, half=True, dnn=False\n", - "YOLOv5 🚀 v6.2-251-g241d798 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "YOLOv5 🚀 v6.2-257-g2ecaa96 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", "Fusing layers... \n", "YOLOv5s-seg summary: 224 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupt: 100% 5000/5000 [00:03<00:00, 1420.92it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco/val2017... 
4952 images, 48 backgrounds, 0 corrupt: 100% 5000/5000 [00:03<00:00, 1409.04it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: /content/datasets/coco/val2017.cache\n", - " Class Images Instances Box(P R mAP50 mAP50-95) Mask(P R mAP50 mAP50-95): 100% 157/157 [01:54<00:00, 1.37it/s]\n", + " Class Images Instances Box(P R mAP50 mAP50-95) Mask(P R mAP50 mAP50-95): 100% 157/157 [01:53<00:00, 1.38it/s]\n", " all 5000 36335 0.673 0.517 0.566 0.373 0.672 0.49 0.532 0.319\n", - "Speed: 0.9ms pre-process, 3.9ms inference, 3.0ms NMS per image at shape (32, 3, 640, 640)\n", + "Speed: 0.8ms pre-process, 4.0ms inference, 2.8ms NMS per image at shape (32, 3, 640, 640)\n", "Results saved to \u001b[1mruns/val-seg/exp\u001b[0m\n" ] } @@ -273,27 +270,24 @@ "base_uri": "https://localhost:8080/" }, "id": "1NcFxRcFdJ_O", - "outputId": "8e349df5-9910-4a91-a845-748def15d3d7" + "outputId": "2cdb19cc-69af-4c90-f8de-af02dfedba91" }, "outputs": [ { - "metadata": { - "tags": null - }, - "name": "stdout", "output_type": "stream", + "name": "stdout", "text": [ "\u001b[34m\u001b[1msegment/train: \u001b[0mweights=yolov5s-seg.pt, cfg=, data=coco128-seg.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train-seg, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, mask_ratio=4, no_overlap=False\n", "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 🚀 v6.2-251-g241d798 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "YOLOv5 🚀 v6.2-257-g2ecaa96 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n", "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train-seg', view at http://localhost:6006/\n", "\n", "Dataset not found ⚠️, missing paths ['/content/datasets/coco128-seg/images/train2017']\n", "Downloading https://ultralytics.com/assets/coco128-seg.zip to coco128-seg.zip...\n", - "100% 6.79M/6.79M [00:01<00:00, 4.42MB/s]\n", - "Dataset download success ✅ (2.8s), saved to \u001b[1m/content/datasets\u001b[0m\n", + "100% 6.79M/6.79M [00:01<00:00, 5.87MB/s]\n", + "Dataset download success ✅ (2.1s), saved to \u001b[1m/content/datasets\u001b[0m\n", "\n", " from n params module arguments \n", " 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2] \n", @@ -327,11 +321,11 @@ "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n", "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 60 weight(decay=0.0), 63 weight(decay=0.0005), 63 bias\n", "\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '/content/datasets/coco128-seg/labels/train2017' 
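The segment/train.py call above can also be driven from Python. Detection's train.py exposes a run() helper that forwards keyword arguments onto its argument parser; treating the segmentation trainer the same way is an assumption in this sketch, which also assumes a yolov5 checkout as the working directory:

```python
# Sketch: programmatic equivalent of the segmentation training command above.
# Assumes the current directory is a yolov5 checkout and that segment/train.py
# provides the same run(**kwargs) convenience wrapper as top-level train.py.
import segment.train as train

train.run(
    data='coco128-seg.yaml',   # dataset config, auto-downloaded when missing
    weights='yolov5s-seg.pt',  # pretrained segmentation checkpoint
    imgsz=640,
    batch_size=16,
    epochs=3,                  # same 3-epoch smoke test as the notebook
)
```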
images and labels...126 found, 2 missing, 0 empty, 0 corrupt: 100% 128/128 [00:00<00:00, 1383.68it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /content/datasets/coco128-seg/labels/train2017... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<00:00, 1439.54it/s]\n", "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /content/datasets/coco128-seg/labels/train2017.cache\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 241.77it/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning '/content/datasets/coco128-seg/labels/train2017.cache' images and labels... 126 found, 2 missing, 0 empty, 0 corrupt: 100% 128/128 [00:00 Date: Fri, 18 Nov 2022 23:12:09 +0100 Subject: [PATCH 1569/1976] Created using Colaboratory --- classify/tutorial.ipynb | 3254 +++++++++++++++++---------------------- 1 file changed, 1445 insertions(+), 1809 deletions(-) diff --git a/classify/tutorial.ipynb b/classify/tutorial.ipynb index f235b754d7b4..e035a7bda40d 100644 --- a/classify/tutorial.ipynb +++ b/classify/tutorial.ipynb @@ -1,1842 +1,1478 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "id": "t6MPjfT5NrKQ" - }, - "source": [ - "
\n", - "\n", - " \n", - " \n", - "\n", - "\n", - "
\n", - " \"Run\n", - " \"Open\n", - " \"Open\n", - "
\n", - "\n", - "This YOLOv5 🚀 notebook by Ultralytics presents simple train, validate and predict examples to help start your AI adventure.
See GitHub for community support or contact us for professional support.\n", - "\n", - "
" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "7mGmQbAO5pQb" - }, - "source": [ - "# Setup\n", - "\n", - "Clone GitHub [repository](https://github.com/ultralytics/yolov5), install [dependencies](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) and check PyTorch and GPU." - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "t6MPjfT5NrKQ" + }, + "source": [ + "
\n", + "\n", + " \n", + " \n", + "\n", + "\n", + "
\n", + " \"Run\n", + " \"Open\n", + " \"Open\n", + "
\n", + "\n", + "This YOLOv5 🚀 notebook by Ultralytics presents simple train, validate and predict examples to help start your AI adventure.
See GitHub for community support or contact us for professional support.\n", + "\n", + "
" + ] }, - "id": "wbvMlHd_QwMG", - "outputId": "0f9ee467-cea4-48e8-9050-7a76ae1b6141" - }, - "outputs": [ { - "name": "stderr", - "output_type": "stream", - "text": [ - "YOLOv5 🚀 v6.2-237-g34c3e1c Python-3.7.12 torch-1.12.1+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16161MiB)\n" - ] + "cell_type": "markdown", + "metadata": { + "id": "7mGmQbAO5pQb" + }, + "source": [ + "# Setup\n", + "\n", + "Clone GitHub [repository](https://github.com/ultralytics/yolov5), install [dependencies](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) and check PyTorch and GPU." + ] }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "Setup complete ✅ (4 CPUs, 14.7 GB RAM, 152.0/196.6 GB disk)\n" - ] - } - ], - "source": [ - "!git clone https://github.com/ultralytics/yolov5 # clone\n", - "%cd yolov5\n", - "%pip install -qr requirements.txt # install\n", - "\n", - "import torch\n", - "import utils\n", - "display = utils.notebook_init() # checks" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "4JnkELT0cIJg" - }, - "source": [ - "# 1. Predict\n", - "\n", - "`classify/predict.py` runs YOLOv5 Classifcation inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/predict-cls`. Example inference sources are:\n", - "\n", - "```shell\n", - "python classify/predict.py --source 0 # webcam\n", - " img.jpg # image \n", - " vid.mp4 # video\n", - " screen # screenshot\n", - " path/ # directory\n", - " 'path/*.jpg' # glob\n", - " 'https://youtu.be/Zgi9g1ksQHc' # YouTube\n", - " 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" + "cell_type": "code", + "execution_count": 1, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "wbvMlHd_QwMG", + "outputId": "43b2e1b5-78d9-4e1d-8530-ee9779bba160" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stderr", + "text": [ + "YOLOv5 🚀 v6.2-258-g7fc7ed7 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n" + ] + }, + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Setup complete ✅ (2 CPUs, 12.7 GB RAM, 22.6/78.2 GB disk)\n" + ] + } + ], + "source": [ + "!git clone https://github.com/ultralytics/yolov5 # clone\n", + "%cd yolov5\n", + "%pip install -qr requirements.txt # install\n", + "\n", + "import torch\n", + "import utils\n", + "display = utils.notebook_init() # checks" + ] }, - "id": "zR9ZbuQCH7FX", - "outputId": "60647b99-e8d4-402c-f444-331bf6746da4" - }, - "outputs": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[34m\u001b[1mclassify/predict: \u001b[0mweights=['yolov5s-cls.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], device=, view_img=False, save_txt=True, nosave=False, augment=False, visualize=False, update=False, project=runs/predict-cls, name=exp, exist_ok=False, half=False, dnn=False, vid_stride=1\n", - "YOLOv5 🚀 v6.2-237-g34c3e1c Python-3.7.12 torch-1.12.1+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16161MiB)\n", - "\n", - "Fusing layers... 
\n", - "Model summary: 117 layers, 5447688 parameters, 0 gradients, 11.4 GFLOPs\n", - "image 1/2 /home/paguerrie/yolov5/data/images/bus.jpg: 640x640 minibus 0.01, recreational vehicle 0.01, ambulance 0.01, tram 0.01, trolleybus 0.01, 2.6ms\n", - "image 2/2 /home/paguerrie/yolov5/data/images/zidane.jpg: 640x640 suit 0.05, bow tie 0.01, ping-pong ball 0.01, microphone 0.01, bassoon 0.01, 2.8ms\n", - "Speed: 1.2ms pre-process, 2.7ms inference, 0.1ms NMS per image at shape (1, 3, 640, 640)\n", - "Results saved to \u001b[1mruns/predict-cls/exp\u001b[0m\n", - "2 labels saved to runs/predict-cls/exp/labels\n" - ] - } - ], - "source": [ - "!python classify/predict.py --weights yolov5s-cls.pt --img 640 --source data/images\n", - "# display.Image(filename='runs/predict-cls/exp/zidane.jpg', width=600)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "hkAzDWJ7cWTr" - }, - "source": [ - "        \n", - "" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "0eq1SMWl6Sfn" - }, - "source": [ - "# 2. Validate\n", - "Validate a model's accuracy on the [Imagenet](https://image-net.org/) dataset's `val` or `test` splits. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 49, - "referenced_widgets": [ - "9b8caa3522fc4cbab31e13b5dfc7808d", - "574140e4c4bc48c9a171541a02cd0211", - "35e03ce5090346c9ae602891470fc555", - "c942c208e72d46568b476bb0f2d75496", - "65881db1db8a4e9c930fab9172d45143", - "60b913d755b34d638478e30705a2dde1", - "0856bea36ec148b68522ff9c9eb258d8", - "76879f6f2aa54637a7a07faeea2bd684", - "0ace3934ec6f4d36a1b3a9e086390926", - "d6b7a2243e0c4beca714d99dceec23d6", - "5966ba6e6f114d8c9d8d1d6b1bd4f4c7" - ] + "cell_type": "markdown", + "metadata": { + "id": "4JnkELT0cIJg" + }, + "source": [ + "# 1. Predict\n", + "\n", + "`classify/predict.py` runs YOLOv5 Classifcation inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/predict-cls`. 
Example inference sources are:\n", + "\n", + "```shell\n", + "python classify/predict.py --source 0 # webcam\n", + " img.jpg # image \n", + " vid.mp4 # video\n", + " screen # screenshot\n", + " path/ # directory\n", + " 'path/*.jpg' # glob\n", + " 'https://youtu.be/Zgi9g1ksQHc' # YouTube\n", + " 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream\n", + "```" + ] }, - "id": "WQPtK1QYVaD_", - "outputId": "102dabed-bc31-42fe-9133-d9ce28a2c01e" - }, - "outputs": [], - "source": [ - "# Download Imagenet val\n", - "!bash data/scripts/get_imagenet.sh --val" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "zR9ZbuQCH7FX", + "outputId": "1b610787-7cf7-4c33-aac2-aa50fbb84a94" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\u001b[34m\u001b[1mclassify/predict: \u001b[0mweights=['yolov5s-cls.pt'], source=data/images, data=data/coco128.yaml, imgsz=[224, 224], device=, view_img=False, save_txt=True, nosave=False, augment=False, visualize=False, update=False, project=runs/predict-cls, name=exp, exist_ok=False, half=False, dnn=False, vid_stride=1\n", + "YOLOv5 🚀 v6.2-258-g7fc7ed7 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "\n", + "Downloading https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-cls.pt to yolov5s-cls.pt...\n", + "100% 10.5M/10.5M [00:03<00:00, 2.94MB/s]\n", + "\n", + "Fusing layers... \n", + "Model summary: 117 layers, 5447688 parameters, 0 gradients, 11.4 GFLOPs\n", + "image 1/2 /content/yolov5/data/images/bus.jpg: 224x224 minibus 0.39, police van 0.24, amphibious vehicle 0.05, recreational vehicle 0.04, trolleybus 0.03, 3.9ms\n", + "image 2/2 /content/yolov5/data/images/zidane.jpg: 224x224 suit 0.38, bow tie 0.19, bridegroom 0.18, rugby ball 0.04, stage 0.02, 4.1ms\n", + "Speed: 0.3ms pre-process, 4.0ms inference, 1.5ms NMS per image at shape (1, 3, 224, 224)\n", + "Results saved to \u001b[1mruns/predict-cls/exp\u001b[0m\n", + "2 labels saved to runs/predict-cls/exp/labels\n" + ] + } + ], + "source": [ + "!python classify/predict.py --weights yolov5s-cls.pt --img 224 --source data/images\n", + "# display.Image(filename='runs/predict-cls/exp/zidane.jpg', width=600)" + ] }, - "id": "X58w8JLpMnjH", - "outputId": "daf60b1b-b098-4657-c863-584f4c9cf078" - }, - "outputs": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[34m\u001b[1mclassify/val: \u001b[0mdata=../datasets/imagenet, weights=['yolov5s-cls.pt'], batch_size=128, imgsz=320, device=, workers=8, verbose=True, project=runs/val-cls, name=exp, exist_ok=False, half=True, dnn=False\n", - "YOLOv5 🚀 v6.2-237-g34c3e1c Python-3.7.12 torch-1.12.1+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16161MiB)\n", - "\n", - "Fusing layers... 
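A *-cls.pt checkpoint can also be queried directly from Python for the same top-5 readout. Hub loading returns the bare classification model without classify/predict.py's pre/post-processing, so the input below is a placeholder tensor and the preprocessing is a simplified assumption:

```python
# Sketch: manual top-5 classification with yolov5s-cls.pt.
import torch
import torch.nn.functional as F

model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s-cls.pt')
model.eval()

im = torch.rand(1, 3, 224, 224)          # placeholder for a preprocessed 224x224 image
with torch.no_grad():
    probs = F.softmax(model(im), dim=1)  # logits -> class probabilities

top5 = probs[0].topk(5)
for p, i in zip(top5.values, top5.indices):
    print(f'class {int(i)}: {float(p):.2f}')  # cf. 'minibus 0.39, police van 0.24, ...'
```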
\n", - "Model summary: 117 layers, 5447688 parameters, 0 gradients, 11.4 GFLOPs\n", - "validating: 100%|██████████| 391/391 [02:36<00:00, 2.49it/s] \n", - " Class Images top1_acc top5_acc\n", - " all 50000 0.734 0.914\n", - " tench 50 0.92 0.98\n", - " goldfish 50 0.86 0.98\n", - " great white shark 50 0.76 0.94\n", - " tiger shark 50 0.84 0.96\n", - " hammerhead shark 50 0.88 0.98\n", - " electric ray 50 0.76 0.88\n", - " stingray 50 0.74 0.94\n", - " cock 50 0.74 0.94\n", - " hen 50 0.86 0.96\n", - " ostrich 50 0.98 1\n", - " brambling 50 0.9 0.98\n", - " goldfinch 50 0.92 1\n", - " house finch 50 0.92 1\n", - " junco 50 0.98 1\n", - " indigo bunting 50 0.86 0.94\n", - " American robin 50 0.94 1\n", - " bulbul 50 0.88 0.92\n", - " jay 50 0.92 0.98\n", - " magpie 50 0.9 0.98\n", - " chickadee 50 0.96 1\n", - " American dipper 50 0.86 0.92\n", - " kite 50 0.8 0.94\n", - " bald eagle 50 0.9 0.98\n", - " vulture 50 0.96 1\n", - " great grey owl 50 0.96 0.98\n", - " fire salamander 50 0.96 0.98\n", - " smooth newt 50 0.66 0.98\n", - " newt 50 0.74 0.84\n", - " spotted salamander 50 0.9 0.98\n", - " axolotl 50 0.9 0.98\n", - " American bullfrog 50 0.8 0.92\n", - " tree frog 50 0.8 0.94\n", - " tailed frog 50 0.5 0.82\n", - " loggerhead sea turtle 50 0.7 0.92\n", - " leatherback sea turtle 50 0.58 0.8\n", - " mud turtle 50 0.58 0.84\n", - " terrapin 50 0.52 0.98\n", - " box turtle 50 0.88 1\n", - " banded gecko 50 0.78 0.9\n", - " green iguana 50 0.78 0.92\n", - " Carolina anole 50 0.62 0.98\n", - "desert grassland whiptail lizard 50 0.88 0.96\n", - " agama 50 0.78 0.96\n", - " frilled-necked lizard 50 0.82 0.94\n", - " alligator lizard 50 0.64 0.84\n", - " Gila monster 50 0.76 0.86\n", - " European green lizard 50 0.5 0.96\n", - " chameleon 50 0.78 0.9\n", - " Komodo dragon 50 0.9 1\n", - " Nile crocodile 50 0.66 0.92\n", - " American alligator 50 0.78 0.98\n", - " triceratops 50 0.96 0.98\n", - " worm snake 50 0.76 0.9\n", - " ring-necked snake 50 0.84 0.96\n", - " eastern hog-nosed snake 50 0.62 0.86\n", - " smooth green snake 50 0.64 0.96\n", - " kingsnake 50 0.78 0.94\n", - " garter snake 50 0.86 0.98\n", - " water snake 50 0.78 0.92\n", - " vine snake 50 0.72 0.86\n", - " night snake 50 0.34 0.86\n", - " boa constrictor 50 0.8 0.96\n", - " African rock python 50 0.52 0.82\n", - " Indian cobra 50 0.8 0.94\n", - " green mamba 50 0.56 0.92\n", - " sea snake 50 0.76 0.94\n", - " Saharan horned viper 50 0.48 0.88\n", - "eastern diamondback rattlesnake 50 0.72 0.92\n", - " sidewinder 50 0.38 0.92\n", - " trilobite 50 0.98 0.98\n", - " harvestman 50 0.86 0.94\n", - " scorpion 50 0.88 0.94\n", - " yellow garden spider 50 0.88 0.96\n", - " barn spider 50 0.38 0.96\n", - " European garden spider 50 0.6 0.98\n", - " southern black widow 50 0.84 0.98\n", - " tarantula 50 0.94 0.98\n", - " wolf spider 50 0.7 0.92\n", - " tick 50 0.76 0.82\n", - " centipede 50 0.74 0.86\n", - " black grouse 50 0.88 0.98\n", - " ptarmigan 50 0.84 0.98\n", - " ruffed grouse 50 0.9 1\n", - " prairie grouse 50 0.9 0.96\n", - " peacock 50 0.9 0.9\n", - " quail 50 0.88 0.94\n", - " partridge 50 0.66 0.94\n", - " grey parrot 50 0.94 0.98\n", - " macaw 50 0.92 0.98\n", - "sulphur-crested cockatoo 50 0.94 0.98\n", - " lorikeet 50 0.98 1\n", - " coucal 50 0.9 0.92\n", - " bee eater 50 0.96 0.98\n", - " hornbill 50 0.86 0.98\n", - " hummingbird 50 0.9 0.98\n", - " jacamar 50 0.94 0.94\n", - " toucan 50 0.84 0.94\n", - " duck 50 0.78 0.94\n", - " red-breasted merganser 50 0.94 0.98\n", - " goose 50 0.76 0.98\n", - " black swan 50 
0.94 1\n", - " tusker 50 0.58 0.92\n", - " echidna 50 1 1\n", - " platypus 50 0.72 0.84\n", - " wallaby 50 0.86 0.92\n", - " koala 50 0.84 0.98\n", - " wombat 50 0.82 0.86\n", - " jellyfish 50 0.94 0.96\n", - " sea anemone 50 0.66 0.98\n", - " brain coral 50 0.9 0.96\n", - " flatworm 50 0.76 1\n", - " nematode 50 0.9 0.92\n", - " conch 50 0.74 0.92\n", - " snail 50 0.78 0.86\n", - " slug 50 0.78 0.9\n", - " sea slug 50 0.94 0.98\n", - " chiton 50 0.86 0.96\n", - " chambered nautilus 50 0.86 0.94\n", - " Dungeness crab 50 0.86 0.96\n", - " rock crab 50 0.66 0.88\n", - " fiddler crab 50 0.64 0.88\n", - " red king crab 50 0.78 0.92\n", - " American lobster 50 0.78 0.96\n", - " spiny lobster 50 0.78 0.88\n", - " crayfish 50 0.56 0.84\n", - " hermit crab 50 0.82 0.96\n", - " isopod 50 0.62 0.74\n", - " white stork 50 0.88 0.94\n", - " black stork 50 0.86 0.96\n", - " spoonbill 50 0.96 1\n", - " flamingo 50 0.94 1\n", - " little blue heron 50 0.92 0.98\n", - " great egret 50 0.9 0.98\n", - " bittern 50 0.9 0.92\n", - " crane (bird) 50 0.64 0.94\n", - " limpkin 50 0.96 0.98\n", - " common gallinule 50 0.96 0.96\n", - " American coot 50 0.94 1\n", - " bustard 50 0.96 0.98\n", - " ruddy turnstone 50 0.96 1\n", - " dunlin 50 0.86 0.94\n", - " common redshank 50 0.92 0.96\n", - " dowitcher 50 0.9 1\n", - " oystercatcher 50 0.9 0.96\n", - " pelican 50 0.96 1\n", - " king penguin 50 0.88 0.92\n", - " albatross 50 0.9 0.98\n", - " grey whale 50 0.86 0.94\n", - " killer whale 50 0.9 0.98\n", - " dugong 50 0.88 0.94\n", - " sea lion 50 0.78 0.98\n", - " Chihuahua 50 0.56 0.82\n", - " Japanese Chin 50 0.7 0.98\n", - " Maltese 50 0.86 0.94\n", - " Pekingese 50 0.84 0.94\n", - " Shih Tzu 50 0.68 0.94\n", - " King Charles Spaniel 50 0.92 0.98\n", - " Papillon 50 0.92 0.94\n", - " toy terrier 50 0.48 0.96\n", - " Rhodesian Ridgeback 50 0.76 0.94\n" - ] + "cell_type": "markdown", + "metadata": { + "id": "hkAzDWJ7cWTr" + }, + "source": [ + "        \n", + "" + ] }, { - "name": "stdout", - "output_type": "stream", - "text": [ - " Afghan Hound 50 0.9 0.98\n", - " Basset Hound 50 0.78 0.9\n", - " Beagle 50 0.82 0.98\n", - " Bloodhound 50 0.5 0.78\n", - " Bluetick Coonhound 50 0.84 0.94\n", - " Black and Tan Coonhound 50 0.46 0.8\n", - "Treeing Walker Coonhound 50 0.58 0.98\n", - " English foxhound 50 0.24 0.8\n", - " Redbone Coonhound 50 0.66 0.92\n", - " borzoi 50 0.94 1\n", - " Irish Wolfhound 50 0.64 0.9\n", - " Italian Greyhound 50 0.8 0.98\n", - " Whippet 50 0.82 0.98\n", - " Ibizan Hound 50 0.64 0.92\n", - " Norwegian Elkhound 50 0.88 1\n", - " Otterhound 50 0.58 0.9\n", - " Saluki 50 0.72 0.92\n", - " Scottish Deerhound 50 0.86 1\n", - " Weimaraner 50 0.88 0.96\n", - "Staffordshire Bull Terrier 50 0.62 0.92\n", - "American Staffordshire Terrier 50 0.66 0.92\n", - " Bedlington Terrier 50 0.82 0.96\n", - " Border Terrier 50 0.9 0.98\n", - " Kerry Blue Terrier 50 0.82 1\n", - " Irish Terrier 50 0.74 0.94\n", - " Norfolk Terrier 50 0.74 0.92\n", - " Norwich Terrier 50 0.68 0.98\n", - " Yorkshire Terrier 50 0.66 0.88\n", - " Wire Fox Terrier 50 0.66 0.96\n", - " Lakeland Terrier 50 0.82 0.94\n", - " Sealyham Terrier 50 0.74 0.9\n", - " Airedale Terrier 50 0.82 0.9\n", - " Cairn Terrier 50 0.82 0.94\n", - " Australian Terrier 50 0.48 0.84\n", - " Dandie Dinmont Terrier 50 0.84 0.9\n", - " Boston Terrier 50 0.88 1\n", - " Miniature Schnauzer 50 0.7 0.92\n", - " Giant Schnauzer 50 0.82 1\n", - " Standard Schnauzer 50 0.72 0.98\n", - " Scottish Terrier 50 0.78 0.94\n", - " Tibetan Terrier 50 0.64 0.98\n", - 
"Australian Silky Terrier 50 0.72 0.96\n", - "Soft-coated Wheaten Terrier 50 0.86 0.98\n", - "West Highland White Terrier 50 0.94 0.98\n", - " Lhasa Apso 50 0.66 0.96\n", - " Flat-Coated Retriever 50 0.78 1\n", - " Curly-coated Retriever 50 0.84 0.96\n", - " Golden Retriever 50 0.88 0.96\n", - " Labrador Retriever 50 0.82 0.94\n", - "Chesapeake Bay Retriever 50 0.86 0.98\n", - "German Shorthaired Pointer 50 0.84 0.96\n", - " Vizsla 50 0.7 0.94\n", - " English Setter 50 0.8 1\n", - " Irish Setter 50 0.78 0.9\n", - " Gordon Setter 50 0.84 0.92\n", - " Brittany 50 0.86 0.98\n", - " Clumber Spaniel 50 0.9 0.96\n", - "English Springer Spaniel 50 0.96 1\n", - " Welsh Springer Spaniel 50 0.92 1\n", - " Cocker Spaniels 50 0.7 0.96\n", - " Sussex Spaniel 50 0.7 0.88\n", - " Irish Water Spaniel 50 0.86 0.94\n", - " Kuvasz 50 0.7 0.92\n", - " Schipperke 50 0.94 0.98\n", - " Groenendael 50 0.78 0.92\n", - " Malinois 50 0.92 0.98\n", - " Briard 50 0.6 0.84\n", - " Australian Kelpie 50 0.74 0.96\n", - " Komondor 50 0.9 0.96\n", - " Old English Sheepdog 50 0.94 0.98\n", - " Shetland Sheepdog 50 0.72 0.94\n", - " collie 50 0.6 0.96\n", - " Border Collie 50 0.82 0.96\n", - " Bouvier des Flandres 50 0.78 0.96\n", - " Rottweiler 50 0.94 0.98\n", - " German Shepherd Dog 50 0.76 0.98\n", - " Dobermann 50 0.74 1\n", - " Miniature Pinscher 50 0.76 0.96\n", - "Greater Swiss Mountain Dog 50 0.66 0.94\n", - " Bernese Mountain Dog 50 0.94 1\n", - " Appenzeller Sennenhund 50 0.3 1\n", - " Entlebucher Sennenhund 50 0.72 0.98\n", - " Boxer 50 0.7 0.92\n", - " Bullmastiff 50 0.8 0.98\n", - " Tibetan Mastiff 50 0.92 0.98\n", - " French Bulldog 50 0.86 0.98\n", - " Great Dane 50 0.6 0.92\n", - " St. Bernard 50 0.94 1\n", - " husky 50 0.5 0.94\n", - " Alaskan Malamute 50 0.76 0.96\n", - " Siberian Husky 50 0.56 0.98\n", - " Dalmatian 50 0.94 0.98\n", - " Affenpinscher 50 0.76 0.92\n", - " Basenji 50 0.9 1\n", - " pug 50 0.96 0.98\n", - " Leonberger 50 0.98 1\n", - " Newfoundland 50 0.82 0.96\n", - " Pyrenean Mountain Dog 50 0.76 0.94\n", - " Samoyed 50 0.9 0.98\n", - " Pomeranian 50 0.96 1\n", - " Chow Chow 50 0.88 0.96\n", - " Keeshond 50 0.94 1\n", - " Griffon Bruxellois 50 0.92 0.98\n", - " Pembroke Welsh Corgi 50 0.9 0.98\n", - " Cardigan Welsh Corgi 50 0.7 0.94\n", - " Toy Poodle 50 0.52 0.96\n", - " Miniature Poodle 50 0.56 0.92\n", - " Standard Poodle 50 0.78 0.96\n", - " Mexican hairless dog 50 0.86 0.98\n", - " grey wolf 50 0.74 0.92\n", - " Alaskan tundra wolf 50 0.86 0.98\n", - " red wolf 50 0.54 0.92\n", - " coyote 50 0.62 0.82\n", - " dingo 50 0.76 0.94\n", - " dhole 50 0.9 0.96\n", - " African wild dog 50 1 1\n", - " hyena 50 0.9 0.94\n", - " red fox 50 0.62 0.92\n", - " kit fox 50 0.7 0.98\n", - " Arctic fox 50 0.92 0.98\n", - " grey fox 50 0.66 0.96\n", - " tabby cat 50 0.58 0.92\n", - " tiger cat 50 0.2 0.94\n", - " Persian cat 50 0.92 1\n", - " Siamese cat 50 0.94 0.98\n", - " Egyptian Mau 50 0.52 0.84\n", - " cougar 50 0.94 0.96\n", - " lynx 50 0.74 0.9\n", - " leopard 50 0.86 1\n", - " snow leopard 50 0.9 0.98\n", - " jaguar 50 0.72 0.92\n", - " lion 50 0.9 0.98\n", - " tiger 50 0.96 0.98\n", - " cheetah 50 0.94 0.98\n", - " brown bear 50 0.9 0.98\n", - " American black bear 50 0.9 0.98\n", - " polar bear 50 0.86 0.94\n", - " sloth bear 50 0.72 0.92\n", - " mongoose 50 0.7 0.86\n", - " meerkat 50 0.82 0.98\n", - " tiger beetle 50 0.9 0.94\n", - " ladybug 50 0.78 0.98\n", - " ground beetle 50 0.62 0.94\n", - " longhorn beetle 50 0.58 0.9\n", - " leaf beetle 50 0.66 0.98\n", - " dung beetle 50 0.88 
0.98\n", - " rhinoceros beetle 50 0.88 1\n", - " weevil 50 0.92 1\n", - " fly 50 0.78 0.94\n", - " bee 50 0.8 0.96\n", - " ant 50 0.68 0.84\n", - " grasshopper 50 0.48 0.9\n", - " cricket 50 0.66 0.94\n", - " stick insect 50 0.7 0.94\n", - " cockroach 50 0.72 0.84\n", - " mantis 50 0.72 0.9\n", - " cicada 50 0.9 0.96\n", - " leafhopper 50 0.9 0.96\n", - " lacewing 50 0.8 0.94\n", - " dragonfly 50 0.76 0.98\n", - " damselfly 50 0.82 1\n", - " red admiral 50 0.96 0.96\n", - " ringlet 50 0.88 1\n", - " monarch butterfly 50 0.9 0.96\n", - " small white 50 0.88 1\n", - " sulphur butterfly 50 0.92 1\n", - "gossamer-winged butterfly 50 0.9 1\n", - " starfish 50 0.82 0.94\n", - " sea urchin 50 0.84 0.98\n", - " sea cucumber 50 0.76 0.92\n", - " cottontail rabbit 50 0.7 0.98\n", - " hare 50 0.9 1\n", - " Angora rabbit 50 0.92 0.98\n", - " hamster 50 1 1\n", - " porcupine 50 0.9 0.98\n", - " fox squirrel 50 0.82 0.96\n", - " marmot 50 0.94 0.96\n", - " beaver 50 0.78 0.96\n", - " guinea pig 50 0.78 0.92\n", - " common sorrel 50 0.98 0.98\n", - " zebra 50 0.96 0.98\n", - " pig 50 0.54 0.82\n", - " wild boar 50 0.86 0.96\n", - " warthog 50 0.96 0.96\n", - " hippopotamus 50 0.9 1\n" - ] + "cell_type": "markdown", + "metadata": { + "id": "0eq1SMWl6Sfn" + }, + "source": [ + "# 2. Validate\n", + "Validate a model's accuracy on the [Imagenet](https://image-net.org/) dataset's `val` or `test` splits. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag." + ] }, { - "name": "stdout", - "output_type": "stream", - "text": [ - " ox 50 0.52 0.94\n", - " water buffalo 50 0.86 0.94\n", - " bison 50 0.9 0.98\n", - " ram 50 0.62 0.98\n", - " bighorn sheep 50 0.72 1\n", - " Alpine ibex 50 0.96 0.98\n", - " hartebeest 50 0.94 1\n", - " impala 50 0.86 0.98\n", - " gazelle 50 0.74 0.96\n", - " dromedary 50 0.94 1\n", - " llama 50 0.86 0.94\n", - " weasel 50 0.42 0.96\n", - " mink 50 0.78 0.92\n", - " European polecat 50 0.54 0.88\n", - " black-footed ferret 50 0.74 0.96\n", - " otter 50 0.68 0.9\n", - " skunk 50 0.94 0.96\n", - " badger 50 0.88 0.92\n", - " armadillo 50 0.88 0.96\n", - " three-toed sloth 50 0.96 1\n", - " orangutan 50 0.82 0.9\n", - " gorilla 50 0.78 0.94\n", - " chimpanzee 50 0.86 0.94\n", - " gibbon 50 0.74 0.9\n", - " siamang 50 0.68 0.94\n", - " guenon 50 0.82 0.96\n", - " patas monkey 50 0.66 0.86\n", - " baboon 50 0.88 0.96\n", - " macaque 50 0.72 0.84\n", - " langur 50 0.56 0.78\n", - " black-and-white colobus 50 0.84 0.92\n", - " proboscis monkey 50 0.98 1\n", - " marmoset 50 0.7 0.92\n", - " white-headed capuchin 50 0.82 0.94\n", - " howler monkey 50 0.9 0.96\n", - " titi 50 0.54 0.9\n", - "Geoffroy's spider monkey 50 0.36 0.86\n", - " common squirrel monkey 50 0.76 0.92\n", - " ring-tailed lemur 50 0.7 0.94\n", - " indri 50 0.86 0.98\n", - " Asian elephant 50 0.54 0.96\n", - " African bush elephant 50 0.62 0.96\n", - " red panda 50 0.94 0.94\n", - " giant panda 50 0.92 0.98\n", - " snoek 50 0.76 0.9\n", - " eel 50 0.58 0.86\n", - " coho salmon 50 0.8 0.98\n", - " rock beauty 50 0.8 0.96\n", - " clownfish 50 0.8 0.98\n", - " sturgeon 50 0.76 0.96\n", - " garfish 50 0.7 0.82\n", - " lionfish 50 0.94 0.98\n", - " pufferfish 50 0.86 0.98\n", - " abacus 50 0.8 0.88\n", - " abaya 50 0.72 0.94\n", - " academic gown 50 0.44 0.94\n", - " accordion 50 0.78 0.96\n", - " acoustic guitar 50 0.54 0.78\n", - " aircraft carrier 50 0.7 0.98\n", - " airliner 50 0.92 1\n", - " airship 50 0.8 
0.88\n", - " altar 50 0.6 0.94\n", - " ambulance 50 0.84 0.98\n", - " amphibious vehicle 50 0.68 0.9\n", - " analog clock 50 0.5 0.88\n", - " apiary 50 0.9 1\n", - " apron 50 0.68 0.86\n", - " waste container 50 0.6 0.86\n", - " assault rifle 50 0.36 0.9\n", - " backpack 50 0.36 0.72\n", - " bakery 50 0.38 0.64\n", - " balance beam 50 0.84 0.98\n", - " balloon 50 0.88 0.96\n", - " ballpoint pen 50 0.52 0.96\n", - " Band-Aid 50 0.68 0.96\n", - " banjo 50 0.9 1\n", - " baluster 50 0.74 0.94\n", - " barbell 50 0.58 0.9\n", - " barber chair 50 0.72 0.9\n", - " barbershop 50 0.64 0.9\n", - " barn 50 0.96 0.96\n", - " barometer 50 0.86 0.96\n", - " barrel 50 0.64 0.86\n", - " wheelbarrow 50 0.64 0.92\n", - " baseball 50 0.76 0.96\n", - " basketball 50 0.88 0.98\n", - " bassinet 50 0.8 0.94\n", - " bassoon 50 0.84 0.98\n", - " swimming cap 50 0.7 0.88\n", - " bath towel 50 0.56 0.84\n", - " bathtub 50 0.34 0.86\n", - " station wagon 50 0.68 0.9\n", - " lighthouse 50 0.74 0.96\n", - " beaker 50 0.46 0.7\n", - " military cap 50 0.88 0.98\n", - " beer bottle 50 0.72 0.9\n", - " beer glass 50 0.72 0.9\n", - " bell-cot 50 0.6 0.96\n", - " bib 50 0.58 0.86\n", - " tandem bicycle 50 0.76 0.96\n", - " bikini 50 0.52 0.88\n", - " ring binder 50 0.7 0.86\n", - " binoculars 50 0.54 0.78\n", - " birdhouse 50 0.86 0.96\n", - " boathouse 50 0.78 0.96\n", - " bobsleigh 50 0.94 0.96\n", - " bolo tie 50 0.86 0.88\n", - " poke bonnet 50 0.68 0.88\n", - " bookcase 50 0.68 0.92\n", - " bookstore 50 0.58 0.88\n", - " bottle cap 50 0.62 0.8\n", - " bow 50 0.74 0.84\n", - " bow tie 50 0.68 0.92\n", - " brass 50 0.92 0.98\n", - " bra 50 0.52 0.76\n", - " breakwater 50 0.64 0.94\n", - " breastplate 50 0.36 0.9\n", - " broom 50 0.58 0.84\n", - " bucket 50 0.58 0.88\n", - " buckle 50 0.5 0.76\n", - " bulletproof vest 50 0.52 0.76\n", - " high-speed train 50 0.94 0.98\n", - " butcher shop 50 0.76 0.94\n", - " taxicab 50 0.7 0.92\n", - " cauldron 50 0.5 0.72\n", - " candle 50 0.5 0.76\n", - " cannon 50 0.88 0.96\n", - " canoe 50 0.94 1\n", - " can opener 50 0.72 0.88\n", - " cardigan 50 0.66 0.88\n", - " car mirror 50 0.94 0.98\n", - " carousel 50 0.96 0.96\n", - " tool kit 50 0.68 0.84\n", - " carton 50 0.44 0.78\n", - " car wheel 50 0.4 0.78\n", - "automated teller machine 50 0.82 0.94\n", - " cassette 50 0.62 0.84\n", - " cassette player 50 0.3 0.92\n", - " castle 50 0.74 0.9\n", - " catamaran 50 0.74 0.98\n", - " CD player 50 0.52 0.8\n", - " cello 50 0.84 1\n", - " mobile phone 50 0.72 0.86\n", - " chain 50 0.34 0.78\n", - " chain-link fence 50 0.7 0.86\n", - " chain mail 50 0.68 0.86\n", - " chainsaw 50 0.88 0.96\n", - " chest 50 0.7 0.88\n", - " chiffonier 50 0.32 0.64\n", - " chime 50 0.64 0.84\n", - " china cabinet 50 0.78 0.94\n", - " Christmas stocking 50 0.92 0.98\n", - " church 50 0.6 0.86\n", - " movie theater 50 0.68 0.9\n", - " cleaver 50 0.36 0.68\n", - " cliff dwelling 50 0.86 1\n", - " cloak 50 0.28 0.7\n", - " clogs 50 0.6 0.88\n", - " cocktail shaker 50 0.62 0.76\n", - " coffee mug 50 0.48 0.78\n", - " coffeemaker 50 0.62 0.92\n", - " coil 50 0.64 0.86\n", - " combination lock 50 0.62 0.92\n", - " computer keyboard 50 0.72 0.92\n", - " confectionery store 50 0.56 0.84\n", - " container ship 50 0.82 0.98\n", - " convertible 50 0.78 1\n", - " corkscrew 50 0.84 0.98\n", - " cornet 50 0.56 0.98\n", - " cowboy boot 50 0.66 0.78\n", - " cowboy hat 50 0.66 0.88\n", - " cradle 50 0.34 0.8\n", - " crane (machine) 50 0.8 0.92\n", - " crash helmet 50 0.92 0.96\n", - " crate 50 0.6 0.86\n", - " infant bed 50 0.8 
0.96\n", - " Crock Pot 50 0.78 0.88\n", - " croquet ball 50 0.9 1\n", - " crutch 50 0.42 0.7\n", - " cuirass 50 0.54 0.92\n", - " dam 50 0.78 0.92\n", - " desk 50 0.68 0.88\n", - " desktop computer 50 0.54 0.9\n", - " rotary dial telephone 50 0.92 0.96\n", - " diaper 50 0.68 0.84\n", - " digital clock 50 0.6 0.8\n", - " digital watch 50 0.56 0.82\n" - ] + "cell_type": "code", + "execution_count": 3, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "WQPtK1QYVaD_", + "outputId": "92de5f34-cf41-49e7-b679-41db94e995ac" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "--2022-11-18 21:48:38-- https://image-net.org/data/ILSVRC/2012/ILSVRC2012_img_val.tar\n", + "Resolving image-net.org (image-net.org)... 171.64.68.16\n", + "Connecting to image-net.org (image-net.org)|171.64.68.16|:443... connected.\n", + "HTTP request sent, awaiting response... 200 OK\n", + "Length: 6744924160 (6.3G) [application/x-tar]\n", + "Saving to: ‘ILSVRC2012_img_val.tar’\n", + "\n", + "ILSVRC2012_img_val. 100%[===================>] 6.28G 7.15MB/s in 11m 13s \n", + "\n", + "2022-11-18 21:59:52 (9.55 MB/s) - ‘ILSVRC2012_img_val.tar’ saved [6744924160/6744924160]\n", + "\n" + ] + } + ], + "source": [ + "# Download Imagenet val (6.3G, 50000 images)\n", + "!bash data/scripts/get_imagenet.sh --val" + ] }, { - "name": "stdout", - "output_type": "stream", - "text": [ - " dining table 50 0.78 0.88\n", - " dishcloth 50 0.98 1\n", - " dishwasher 50 0.52 0.74\n", - " disc brake 50 0.96 1\n", - " dock 50 0.56 0.96\n", - " dog sled 50 0.9 0.98\n", - " dome 50 0.74 0.96\n", - " doormat 50 0.6 0.82\n", - " drilling rig 50 0.82 0.94\n", - " drum 50 0.4 0.72\n", - " drumstick 50 0.56 0.82\n", - " dumbbell 50 0.6 0.92\n", - " Dutch oven 50 0.66 0.88\n", - " electric fan 50 0.82 0.84\n", - " electric guitar 50 0.66 0.92\n", - " electric locomotive 50 0.92 0.98\n", - " entertainment center 50 0.92 1\n", - " envelope 50 0.58 0.88\n", - " espresso machine 50 0.72 0.94\n", - " face powder 50 0.76 0.92\n", - " feather boa 50 0.8 0.88\n", - " filing cabinet 50 0.84 0.98\n", - " fireboat 50 0.96 0.96\n", - " fire engine 50 0.82 0.92\n", - " fire screen sheet 50 0.52 0.78\n", - " flagpole 50 0.76 0.92\n", - " flute 50 0.4 0.76\n", - " folding chair 50 0.68 0.9\n", - " football helmet 50 0.9 0.96\n", - " forklift 50 0.8 0.94\n", - " fountain 50 0.88 0.92\n", - " fountain pen 50 0.76 0.92\n", - " four-poster bed 50 0.82 0.92\n", - " freight car 50 0.98 0.98\n", - " French horn 50 0.76 0.92\n", - " frying pan 50 0.48 0.82\n", - " fur coat 50 0.86 0.96\n", - " garbage truck 50 0.9 0.98\n", - " gas mask 50 0.82 0.92\n", - " gas pump 50 0.82 0.98\n", - " goblet 50 0.64 0.9\n", - " go-kart 50 0.9 1\n", - " golf ball 50 0.86 0.96\n", - " golf cart 50 0.76 0.9\n", - " gondola 50 0.94 0.98\n", - " gong 50 0.74 0.92\n", - " gown 50 0.72 0.94\n", - " grand piano 50 0.74 0.96\n", - " greenhouse 50 0.84 1\n", - " grille 50 0.72 0.88\n", - " grocery store 50 0.68 0.9\n", - " guillotine 50 0.84 0.94\n", - " barrette 50 0.48 0.68\n", - " hair spray 50 0.4 0.76\n", - " half-track 50 0.76 0.96\n", - " hammer 50 0.54 0.78\n", - " hamper 50 0.72 0.9\n", - " hair dryer 50 0.7 0.8\n", - " hand-held computer 50 0.52 0.88\n", - " handkerchief 50 0.8 0.96\n", - " hard disk drive 50 0.78 0.86\n", - " harmonica 50 0.68 0.96\n", - " harp 50 0.9 0.96\n", - " harvester 50 0.86 1\n", - " hatchet 50 0.6 0.84\n", - " holster 50 0.7 0.84\n", - " home theater 50 0.72 0.96\n", - " honeycomb 50 0.74 0.86\n", - " hook 
50 0.28 0.62\n", - " hoop skirt 50 0.68 0.8\n", - " horizontal bar 50 0.76 0.98\n", - " horse-drawn vehicle 50 0.9 0.9\n", - " hourglass 50 0.92 0.98\n", - " iPod 50 0.9 0.94\n", - " clothes iron 50 0.72 0.9\n", - " jack-o'-lantern 50 0.94 0.98\n", - " jeans 50 0.7 0.82\n", - " jeep 50 0.76 0.9\n", - " T-shirt 50 0.72 0.94\n", - " jigsaw puzzle 50 0.92 0.96\n", - " pulled rickshaw 50 0.88 0.96\n", - " joystick 50 0.74 0.98\n", - " kimono 50 0.78 0.94\n", - " knee pad 50 0.7 0.86\n", - " knot 50 0.8 0.86\n", - " lab coat 50 0.82 0.98\n", - " ladle 50 0.26 0.64\n", - " lampshade 50 0.62 0.8\n", - " laptop computer 50 0.2 0.88\n", - " lawn mower 50 0.8 0.96\n", - " lens cap 50 0.5 0.8\n", - " paper knife 50 0.3 0.58\n", - " library 50 0.62 0.92\n", - " lifeboat 50 0.94 0.98\n", - " lighter 50 0.56 0.8\n", - " limousine 50 0.74 0.92\n", - " ocean liner 50 0.88 0.96\n", - " lipstick 50 0.7 0.88\n", - " slip-on shoe 50 0.82 0.94\n", - " lotion 50 0.56 0.9\n", - " speaker 50 0.58 0.64\n", - " loupe 50 0.32 0.54\n", - " sawmill 50 0.74 0.9\n", - " magnetic compass 50 0.48 0.78\n", - " mail bag 50 0.64 0.94\n", - " mailbox 50 0.82 0.92\n", - " tights 50 0.28 0.9\n", - " tank suit 50 0.3 0.88\n", - " manhole cover 50 0.94 0.98\n", - " maraca 50 0.72 0.86\n", - " marimba 50 0.84 0.94\n", - " mask 50 0.48 0.78\n", - " match 50 0.74 0.92\n", - " maypole 50 0.96 1\n", - " maze 50 0.82 1\n", - " measuring cup 50 0.66 0.82\n", - " medicine chest 50 0.6 0.9\n", - " megalith 50 0.84 0.92\n", - " microphone 50 0.56 0.74\n", - " microwave oven 50 0.56 0.8\n", - " military uniform 50 0.62 0.86\n", - " milk can 50 0.7 0.82\n", - " minibus 50 0.68 1\n", - " miniskirt 50 0.58 0.84\n", - " minivan 50 0.48 0.8\n", - " missile 50 0.34 0.82\n", - " mitten 50 0.76 0.88\n", - " mixing bowl 50 0.82 0.98\n", - " mobile home 50 0.58 0.8\n", - " Model T 50 0.92 0.96\n", - " modem 50 0.7 0.9\n", - " monastery 50 0.52 0.86\n", - " monitor 50 0.34 0.86\n", - " moped 50 0.56 0.94\n", - " mortar 50 0.72 0.88\n", - " square academic cap 50 0.48 0.82\n", - " mosque 50 0.98 1\n", - " mosquito net 50 0.96 0.98\n", - " scooter 50 0.88 0.98\n", - " mountain bike 50 0.74 0.96\n", - " tent 50 0.88 0.96\n", - " computer mouse 50 0.38 0.82\n", - " mousetrap 50 0.82 0.9\n", - " moving van 50 0.48 0.8\n", - " muzzle 50 0.5 0.74\n", - " nail 50 0.68 0.76\n", - " neck brace 50 0.62 0.72\n", - " necklace 50 0.92 1\n", - " nipple 50 0.8 0.92\n", - " notebook computer 50 0.34 0.88\n", - " obelisk 50 0.82 0.94\n", - " oboe 50 0.62 0.84\n", - " ocarina 50 0.82 0.88\n", - " odometer 50 0.98 1\n", - " oil filter 50 0.6 0.82\n", - " organ 50 0.84 0.94\n", - " oscilloscope 50 0.94 0.96\n", - " overskirt 50 0.2 0.62\n", - " bullock cart 50 0.76 0.94\n", - " oxygen mask 50 0.48 0.8\n", - " packet 50 0.54 0.74\n", - " paddle 50 0.7 0.94\n", - " paddle wheel 50 0.92 0.98\n", - " padlock 50 0.64 0.78\n", - " paintbrush 50 0.66 0.78\n", - " pajamas 50 0.68 0.94\n", - " palace 50 0.66 0.94\n", - " pan flute 50 0.84 0.86\n", - " paper towel 50 0.68 0.86\n", - " parachute 50 0.92 0.96\n", - " parallel bars 50 0.68 0.96\n", - " park bench 50 0.82 0.94\n", - " parking meter 50 0.86 0.98\n", - " passenger car 50 0.48 0.86\n" - ] + "cell_type": "code", + "execution_count": 4, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "X58w8JLpMnjH", + "outputId": "9961ad87-d639-4489-b578-0a0578fefaab" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\u001b[34m\u001b[1mclassify/val: 
\u001b[0mdata=../datasets/imagenet, weights=['yolov5s-cls.pt'], batch_size=128, imgsz=224, device=, workers=8, verbose=True, project=runs/val-cls, name=exp, exist_ok=False, half=True, dnn=False\n", + "YOLOv5 🚀 v6.2-258-g7fc7ed7 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "\n", + "Fusing layers... \n", + "Model summary: 117 layers, 5447688 parameters, 0 gradients, 11.4 GFLOPs\n", + "validating: 100% 391/391 [04:48<00:00, 1.35it/s]\n", + " Class Images top1_acc top5_acc\n", + " all 50000 0.715 0.902\n", + " tench 50 0.94 0.98\n", + " goldfish 50 0.88 0.92\n", + " great white shark 50 0.78 0.96\n", + " tiger shark 50 0.68 0.96\n", + " hammerhead shark 50 0.82 0.92\n", + " electric ray 50 0.76 0.9\n", + " stingray 50 0.7 0.9\n", + " cock 50 0.78 0.92\n", + " hen 50 0.84 0.96\n", + " ostrich 50 0.98 1\n", + " brambling 50 0.9 0.96\n", + " goldfinch 50 0.92 0.98\n", + " house finch 50 0.88 0.96\n", + " junco 50 0.94 0.98\n", + " indigo bunting 50 0.86 0.88\n", + " American robin 50 0.9 0.96\n", + " bulbul 50 0.84 0.96\n", + " jay 50 0.9 0.96\n", + " magpie 50 0.84 0.96\n", + " chickadee 50 0.9 1\n", + " American dipper 50 0.82 0.92\n", + " kite 50 0.76 0.94\n", + " bald eagle 50 0.92 1\n", + " vulture 50 0.96 1\n", + " great grey owl 50 0.94 0.98\n", + " fire salamander 50 0.96 0.98\n", + " smooth newt 50 0.58 0.94\n", + " newt 50 0.74 0.9\n", + " spotted salamander 50 0.86 0.94\n", + " axolotl 50 0.86 0.96\n", + " American bullfrog 50 0.78 0.92\n", + " tree frog 50 0.84 0.96\n", + " tailed frog 50 0.48 0.8\n", + " loggerhead sea turtle 50 0.68 0.94\n", + " leatherback sea turtle 50 0.5 0.8\n", + " mud turtle 50 0.64 0.84\n", + " terrapin 50 0.52 0.98\n", + " box turtle 50 0.84 0.98\n", + " banded gecko 50 0.7 0.88\n", + " green iguana 50 0.76 0.94\n", + " Carolina anole 50 0.58 0.96\n", + "desert grassland whiptail lizard 50 0.82 0.94\n", + " agama 50 0.74 0.92\n", + " frilled-necked lizard 50 0.84 0.86\n", + " alligator lizard 50 0.58 0.78\n", + " Gila monster 50 0.72 0.8\n", + " European green lizard 50 0.42 0.9\n", + " chameleon 50 0.76 0.84\n", + " Komodo dragon 50 0.86 0.96\n", + " Nile crocodile 50 0.7 0.84\n", + " American alligator 50 0.76 0.96\n", + " triceratops 50 0.9 0.94\n", + " worm snake 50 0.76 0.88\n", + " ring-necked snake 50 0.8 0.92\n", + " eastern hog-nosed snake 50 0.58 0.88\n", + " smooth green snake 50 0.6 0.94\n", + " kingsnake 50 0.82 0.9\n", + " garter snake 50 0.88 0.94\n", + " water snake 50 0.7 0.94\n", + " vine snake 50 0.66 0.76\n", + " night snake 50 0.34 0.82\n", + " boa constrictor 50 0.8 0.96\n", + " African rock python 50 0.48 0.76\n", + " Indian cobra 50 0.82 0.94\n", + " green mamba 50 0.54 0.86\n", + " sea snake 50 0.62 0.9\n", + " Saharan horned viper 50 0.56 0.86\n", + "eastern diamondback rattlesnake 50 0.6 0.86\n", + " sidewinder 50 0.28 0.86\n", + " trilobite 50 0.98 0.98\n", + " harvestman 50 0.86 0.94\n", + " scorpion 50 0.86 0.94\n", + " yellow garden spider 50 0.92 0.96\n", + " barn spider 50 0.38 0.98\n", + " European garden spider 50 0.62 0.98\n", + " southern black widow 50 0.88 0.94\n", + " tarantula 50 0.94 1\n", + " wolf spider 50 0.82 0.92\n", + " tick 50 0.74 0.84\n", + " centipede 50 0.68 0.82\n", + " black grouse 50 0.88 0.98\n", + " ptarmigan 50 0.78 0.94\n", + " ruffed grouse 50 0.88 1\n", + " prairie grouse 50 0.92 1\n", + " peacock 50 0.88 0.9\n", + " quail 50 0.9 0.94\n", + " partridge 50 0.74 0.96\n", + " grey parrot 50 0.9 0.96\n", + " macaw 50 0.88 0.98\n", + "sulphur-crested cockatoo 50 0.86 0.92\n", + " 
lorikeet 50 0.96 1\n", + " coucal 50 0.82 0.88\n", + " bee eater 50 0.96 0.98\n", + " hornbill 50 0.9 0.96\n", + " hummingbird 50 0.88 0.96\n", + " jacamar 50 0.92 0.94\n", + " toucan 50 0.84 0.94\n", + " duck 50 0.76 0.94\n", + " red-breasted merganser 50 0.86 0.96\n", + " goose 50 0.74 0.96\n", + " black swan 50 0.94 0.98\n", + " tusker 50 0.54 0.92\n", + " echidna 50 0.98 1\n", + " platypus 50 0.72 0.84\n", + " wallaby 50 0.78 0.88\n", + " koala 50 0.84 0.92\n", + " wombat 50 0.78 0.84\n", + " jellyfish 50 0.88 0.96\n", + " sea anemone 50 0.72 0.9\n", + " brain coral 50 0.88 0.96\n", + " flatworm 50 0.8 0.98\n", + " nematode 50 0.86 0.9\n", + " conch 50 0.74 0.88\n", + " snail 50 0.78 0.88\n", + " slug 50 0.74 0.82\n", + " sea slug 50 0.88 0.98\n", + " chiton 50 0.88 0.98\n", + " chambered nautilus 50 0.88 0.92\n", + " Dungeness crab 50 0.78 0.94\n", + " rock crab 50 0.68 0.86\n", + " fiddler crab 50 0.64 0.86\n", + " red king crab 50 0.76 0.96\n", + " American lobster 50 0.78 0.96\n", + " spiny lobster 50 0.74 0.88\n", + " crayfish 50 0.56 0.86\n", + " hermit crab 50 0.78 0.96\n", + " isopod 50 0.66 0.78\n", + " white stork 50 0.88 0.96\n", + " black stork 50 0.84 0.98\n", + " spoonbill 50 0.96 1\n", + " flamingo 50 0.94 1\n", + " little blue heron 50 0.92 0.98\n", + " great egret 50 0.9 0.96\n", + " bittern 50 0.86 0.94\n", + " crane (bird) 50 0.62 0.9\n", + " limpkin 50 0.98 1\n", + " common gallinule 50 0.92 0.96\n", + " American coot 50 0.9 0.98\n", + " bustard 50 0.92 0.96\n", + " ruddy turnstone 50 0.94 1\n", + " dunlin 50 0.86 0.94\n", + " common redshank 50 0.9 0.96\n", + " dowitcher 50 0.84 0.96\n", + " oystercatcher 50 0.86 0.94\n", + " pelican 50 0.92 0.96\n", + " king penguin 50 0.88 0.96\n", + " albatross 50 0.9 1\n", + " grey whale 50 0.84 0.92\n", + " killer whale 50 0.92 1\n", + " dugong 50 0.84 0.96\n", + " sea lion 50 0.82 0.92\n", + " Chihuahua 50 0.66 0.84\n", + " Japanese Chin 50 0.72 0.98\n", + " Maltese 50 0.76 0.94\n", + " Pekingese 50 0.84 0.94\n", + " Shih Tzu 50 0.74 0.96\n", + " King Charles Spaniel 50 0.88 0.98\n", + " Papillon 50 0.86 0.94\n", + " toy terrier 50 0.48 0.94\n", + " Rhodesian Ridgeback 50 0.76 0.98\n", + " Afghan Hound 50 0.84 1\n", + " Basset Hound 50 0.8 0.92\n", + " Beagle 50 0.82 0.96\n", + " Bloodhound 50 0.48 0.72\n", + " Bluetick Coonhound 50 0.86 0.94\n", + " Black and Tan Coonhound 50 0.54 0.8\n", + "Treeing Walker Coonhound 50 0.66 0.98\n", + " English foxhound 50 0.32 0.84\n", + " Redbone Coonhound 50 0.62 0.94\n", + " borzoi 50 0.92 1\n", + " Irish Wolfhound 50 0.48 0.88\n", + " Italian Greyhound 50 0.76 0.98\n", + " Whippet 50 0.74 0.92\n", + " Ibizan Hound 50 0.6 0.86\n", + " Norwegian Elkhound 50 0.88 0.98\n", + " Otterhound 50 0.62 0.9\n", + " Saluki 50 0.72 0.92\n", + " Scottish Deerhound 50 0.86 0.98\n", + " Weimaraner 50 0.88 0.94\n", + "Staffordshire Bull Terrier 50 0.66 0.98\n", + "American Staffordshire Terrier 50 0.64 0.92\n", + " Bedlington Terrier 50 0.9 0.92\n", + " Border Terrier 50 0.86 0.92\n", + " Kerry Blue Terrier 50 0.78 0.98\n", + " Irish Terrier 50 0.7 0.96\n", + " Norfolk Terrier 50 0.68 0.9\n", + " Norwich Terrier 50 0.72 1\n", + " Yorkshire Terrier 50 0.66 0.9\n", + " Wire Fox Terrier 50 0.64 0.98\n", + " Lakeland Terrier 50 0.74 0.92\n", + " Sealyham Terrier 50 0.76 0.9\n", + " Airedale Terrier 50 0.82 0.92\n", + " Cairn Terrier 50 0.76 0.9\n", + " Australian Terrier 50 0.48 0.84\n", + " Dandie Dinmont Terrier 50 0.82 0.92\n", + " Boston Terrier 50 0.92 1\n", + " Miniature Schnauzer 50 0.68 0.9\n", + " 
Giant Schnauzer 50 0.72 0.98\n", + " Standard Schnauzer 50 0.74 1\n", + " Scottish Terrier 50 0.76 0.96\n", + " Tibetan Terrier 50 0.48 1\n", + "Australian Silky Terrier 50 0.66 0.96\n", + "Soft-coated Wheaten Terrier 50 0.74 0.96\n", + "West Highland White Terrier 50 0.88 0.96\n", + " Lhasa Apso 50 0.68 0.96\n", + " Flat-Coated Retriever 50 0.72 0.94\n", + " Curly-coated Retriever 50 0.82 0.94\n", + " Golden Retriever 50 0.86 0.94\n", + " Labrador Retriever 50 0.82 0.94\n", + "Chesapeake Bay Retriever 50 0.76 0.96\n", + "German Shorthaired Pointer 50 0.8 0.96\n", + " Vizsla 50 0.68 0.96\n", + " English Setter 50 0.7 1\n", + " Irish Setter 50 0.8 0.9\n", + " Gordon Setter 50 0.84 0.92\n", + " Brittany 50 0.84 0.96\n", + " Clumber Spaniel 50 0.92 0.96\n", + "English Springer Spaniel 50 0.88 1\n", + " Welsh Springer Spaniel 50 0.92 1\n", + " Cocker Spaniels 50 0.7 0.94\n", + " Sussex Spaniel 50 0.72 0.92\n", + " Irish Water Spaniel 50 0.88 0.98\n", + " Kuvasz 50 0.66 0.9\n", + " Schipperke 50 0.9 0.98\n", + " Groenendael 50 0.8 0.94\n", + " Malinois 50 0.86 0.98\n", + " Briard 50 0.52 0.8\n", + " Australian Kelpie 50 0.6 0.88\n", + " Komondor 50 0.88 0.94\n", + " Old English Sheepdog 50 0.94 0.98\n", + " Shetland Sheepdog 50 0.74 0.9\n", + " collie 50 0.6 0.96\n", + " Border Collie 50 0.74 0.96\n", + " Bouvier des Flandres 50 0.78 0.94\n", + " Rottweiler 50 0.88 0.96\n", + " German Shepherd Dog 50 0.8 0.98\n", + " Dobermann 50 0.68 0.96\n", + " Miniature Pinscher 50 0.76 0.88\n", + "Greater Swiss Mountain Dog 50 0.68 0.94\n", + " Bernese Mountain Dog 50 0.96 1\n", + " Appenzeller Sennenhund 50 0.22 1\n", + " Entlebucher Sennenhund 50 0.64 0.98\n", + " Boxer 50 0.7 0.92\n", + " Bullmastiff 50 0.78 0.98\n", + " Tibetan Mastiff 50 0.88 0.96\n", + " French Bulldog 50 0.84 0.94\n", + " Great Dane 50 0.54 0.9\n", + " St. 
Bernard 50 0.92 1\n", + " husky 50 0.46 0.98\n", + " Alaskan Malamute 50 0.76 0.96\n", + " Siberian Husky 50 0.46 0.98\n", + " Dalmatian 50 0.94 0.98\n", + " Affenpinscher 50 0.78 0.9\n", + " Basenji 50 0.92 0.94\n", + " pug 50 0.94 0.98\n", + " Leonberger 50 1 1\n", + " Newfoundland 50 0.78 0.96\n", + " Pyrenean Mountain Dog 50 0.78 0.96\n", + " Samoyed 50 0.96 1\n", + " Pomeranian 50 0.98 1\n", + " Chow Chow 50 0.9 0.96\n", + " Keeshond 50 0.88 0.94\n", + " Griffon Bruxellois 50 0.84 0.98\n", + " Pembroke Welsh Corgi 50 0.82 0.94\n", + " Cardigan Welsh Corgi 50 0.66 0.98\n", + " Toy Poodle 50 0.52 0.88\n", + " Miniature Poodle 50 0.52 0.92\n", + " Standard Poodle 50 0.8 1\n", + " Mexican hairless dog 50 0.88 0.98\n", + " grey wolf 50 0.82 0.92\n", + " Alaskan tundra wolf 50 0.78 0.98\n", + " red wolf 50 0.48 0.9\n", + " coyote 50 0.64 0.86\n", + " dingo 50 0.76 0.88\n", + " dhole 50 0.9 0.98\n", + " African wild dog 50 0.98 1\n", + " hyena 50 0.88 0.96\n", + " red fox 50 0.54 0.92\n", + " kit fox 50 0.72 0.98\n", + " Arctic fox 50 0.94 1\n", + " grey fox 50 0.7 0.94\n", + " tabby cat 50 0.54 0.92\n", + " tiger cat 50 0.22 0.94\n", + " Persian cat 50 0.9 0.98\n", + " Siamese cat 50 0.96 1\n", + " Egyptian Mau 50 0.54 0.8\n", + " cougar 50 0.9 1\n", + " lynx 50 0.72 0.88\n", + " leopard 50 0.78 0.98\n", + " snow leopard 50 0.9 0.98\n", + " jaguar 50 0.7 0.94\n", + " lion 50 0.9 0.98\n", + " tiger 50 0.92 0.98\n", + " cheetah 50 0.94 0.98\n", + " brown bear 50 0.94 0.98\n", + " American black bear 50 0.8 1\n", + " polar bear 50 0.84 0.96\n", + " sloth bear 50 0.72 0.92\n", + " mongoose 50 0.7 0.92\n", + " meerkat 50 0.82 0.92\n", + " tiger beetle 50 0.92 0.94\n", + " ladybug 50 0.86 0.94\n", + " ground beetle 50 0.64 0.94\n", + " longhorn beetle 50 0.62 0.88\n", + " leaf beetle 50 0.64 0.98\n", + " dung beetle 50 0.86 0.98\n", + " rhinoceros beetle 50 0.86 0.94\n", + " weevil 50 0.9 1\n", + " fly 50 0.78 0.94\n", + " bee 50 0.68 0.94\n", + " ant 50 0.68 0.78\n", + " grasshopper 50 0.5 0.92\n", + " cricket 50 0.64 0.92\n", + " stick insect 50 0.64 0.92\n", + " cockroach 50 0.72 0.8\n", + " mantis 50 0.64 0.86\n", + " cicada 50 0.9 0.96\n", + " leafhopper 50 0.88 0.94\n", + " lacewing 50 0.78 0.92\n", + " dragonfly 50 0.82 0.98\n", + " damselfly 50 0.82 1\n", + " red admiral 50 0.94 0.96\n", + " ringlet 50 0.86 0.98\n", + " monarch butterfly 50 0.9 0.92\n", + " small white 50 0.9 1\n", + " sulphur butterfly 50 0.92 1\n", + "gossamer-winged butterfly 50 0.88 1\n", + " starfish 50 0.88 0.92\n", + " sea urchin 50 0.84 0.94\n", + " sea cucumber 50 0.66 0.84\n", + " cottontail rabbit 50 0.72 0.94\n", + " hare 50 0.84 0.96\n", + " Angora rabbit 50 0.94 0.98\n", + " hamster 50 0.96 1\n", + " porcupine 50 0.88 0.98\n", + " fox squirrel 50 0.76 0.94\n", + " marmot 50 0.92 0.96\n", + " beaver 50 0.78 0.94\n", + " guinea pig 50 0.78 0.94\n", + " common sorrel 50 0.96 0.98\n", + " zebra 50 0.94 0.96\n", + " pig 50 0.5 0.76\n", + " wild boar 50 0.84 0.96\n", + " warthog 50 0.84 0.96\n", + " hippopotamus 50 0.88 0.96\n", + " ox 50 0.48 0.94\n", + " water buffalo 50 0.78 0.94\n", + " bison 50 0.88 0.96\n", + " ram 50 0.58 0.92\n", + " bighorn sheep 50 0.66 1\n", + " Alpine ibex 50 0.92 0.98\n", + " hartebeest 50 0.94 1\n", + " impala 50 0.82 0.96\n", + " gazelle 50 0.7 0.96\n", + " dromedary 50 0.9 1\n", + " llama 50 0.82 0.94\n", + " weasel 50 0.44 0.92\n", + " mink 50 0.78 0.96\n", + " European polecat 50 0.46 0.9\n", + " black-footed ferret 50 0.68 0.96\n", + " otter 50 0.66 0.88\n", + " skunk 50 0.96 
0.96\n", + " badger 50 0.86 0.92\n", + " armadillo 50 0.88 0.9\n", + " three-toed sloth 50 0.96 1\n", + " orangutan 50 0.78 0.92\n", + " gorilla 50 0.82 0.94\n", + " chimpanzee 50 0.84 0.94\n", + " gibbon 50 0.76 0.86\n", + " siamang 50 0.68 0.94\n", + " guenon 50 0.8 0.94\n", + " patas monkey 50 0.62 0.82\n", + " baboon 50 0.9 0.98\n", + " macaque 50 0.8 0.86\n", + " langur 50 0.6 0.82\n", + " black-and-white colobus 50 0.86 0.9\n", + " proboscis monkey 50 1 1\n", + " marmoset 50 0.74 0.98\n", + " white-headed capuchin 50 0.72 0.9\n", + " howler monkey 50 0.86 0.94\n", + " titi 50 0.5 0.9\n", + "Geoffroy's spider monkey 50 0.42 0.8\n", + " common squirrel monkey 50 0.76 0.92\n", + " ring-tailed lemur 50 0.72 0.94\n", + " indri 50 0.9 0.96\n", + " Asian elephant 50 0.58 0.92\n", + " African bush elephant 50 0.7 0.98\n", + " red panda 50 0.94 0.94\n", + " giant panda 50 0.94 0.98\n", + " snoek 50 0.74 0.9\n", + " eel 50 0.6 0.84\n", + " coho salmon 50 0.84 0.96\n", + " rock beauty 50 0.88 0.98\n", + " clownfish 50 0.78 0.98\n", + " sturgeon 50 0.68 0.94\n", + " garfish 50 0.62 0.8\n", + " lionfish 50 0.96 0.96\n", + " pufferfish 50 0.88 0.96\n", + " abacus 50 0.74 0.88\n", + " abaya 50 0.84 0.92\n", + " academic gown 50 0.42 0.86\n", + " accordion 50 0.8 0.9\n", + " acoustic guitar 50 0.5 0.76\n", + " aircraft carrier 50 0.8 0.96\n", + " airliner 50 0.92 1\n", + " airship 50 0.76 0.82\n", + " altar 50 0.64 0.98\n", + " ambulance 50 0.88 0.98\n", + " amphibious vehicle 50 0.64 0.94\n", + " analog clock 50 0.52 0.92\n", + " apiary 50 0.82 0.96\n", + " apron 50 0.7 0.84\n", + " waste container 50 0.4 0.8\n", + " assault rifle 50 0.42 0.84\n", + " backpack 50 0.34 0.64\n", + " bakery 50 0.4 0.68\n", + " balance beam 50 0.8 0.98\n", + " balloon 50 0.86 0.96\n", + " ballpoint pen 50 0.52 0.96\n", + " Band-Aid 50 0.7 0.9\n", + " banjo 50 0.84 1\n", + " baluster 50 0.68 0.94\n", + " barbell 50 0.56 0.9\n", + " barber chair 50 0.7 0.92\n", + " barbershop 50 0.54 0.86\n", + " barn 50 0.96 0.96\n", + " barometer 50 0.84 0.98\n", + " barrel 50 0.56 0.88\n", + " wheelbarrow 50 0.66 0.88\n", + " baseball 50 0.74 0.98\n", + " basketball 50 0.88 0.98\n", + " bassinet 50 0.66 0.92\n", + " bassoon 50 0.74 0.98\n", + " swimming cap 50 0.62 0.88\n", + " bath towel 50 0.54 0.78\n", + " bathtub 50 0.4 0.88\n", + " station wagon 50 0.66 0.84\n", + " lighthouse 50 0.78 0.94\n", + " beaker 50 0.52 0.68\n", + " military cap 50 0.84 0.96\n", + " beer bottle 50 0.66 0.88\n", + " beer glass 50 0.6 0.84\n", + " bell-cot 50 0.56 0.96\n", + " bib 50 0.58 0.82\n", + " tandem bicycle 50 0.86 0.96\n", + " bikini 50 0.56 0.88\n", + " ring binder 50 0.64 0.84\n", + " binoculars 50 0.54 0.78\n", + " birdhouse 50 0.86 0.94\n", + " boathouse 50 0.74 0.92\n", + " bobsleigh 50 0.92 0.96\n", + " bolo tie 50 0.8 0.94\n", + " poke bonnet 50 0.64 0.86\n", + " bookcase 50 0.66 0.92\n", + " bookstore 50 0.62 0.88\n", + " bottle cap 50 0.58 0.7\n", + " bow 50 0.72 0.86\n", + " bow tie 50 0.7 0.9\n", + " brass 50 0.92 0.96\n", + " bra 50 0.5 0.7\n", + " breakwater 50 0.62 0.86\n", + " breastplate 50 0.4 0.9\n", + " broom 50 0.6 0.86\n", + " bucket 50 0.66 0.8\n", + " buckle 50 0.5 0.68\n", + " bulletproof vest 50 0.5 0.78\n", + " high-speed train 50 0.94 0.96\n", + " butcher shop 50 0.74 0.94\n", + " taxicab 50 0.64 0.86\n", + " cauldron 50 0.44 0.66\n", + " candle 50 0.48 0.74\n", + " cannon 50 0.88 0.94\n", + " canoe 50 0.94 1\n", + " can opener 50 0.66 0.86\n", + " cardigan 50 0.68 0.8\n", + " car mirror 50 0.94 0.96\n", + " carousel 50 
0.94 0.98\n", + " tool kit 50 0.56 0.78\n", + " carton 50 0.42 0.7\n", + " car wheel 50 0.38 0.74\n", + "automated teller machine 50 0.76 0.94\n", + " cassette 50 0.52 0.8\n", + " cassette player 50 0.28 0.9\n", + " castle 50 0.78 0.88\n", + " catamaran 50 0.78 1\n", + " CD player 50 0.52 0.82\n", + " cello 50 0.82 1\n", + " mobile phone 50 0.68 0.86\n", + " chain 50 0.38 0.66\n", + " chain-link fence 50 0.7 0.84\n", + " chain mail 50 0.64 0.9\n", + " chainsaw 50 0.84 0.92\n", + " chest 50 0.68 0.92\n", + " chiffonier 50 0.26 0.64\n", + " chime 50 0.62 0.84\n", + " china cabinet 50 0.82 0.96\n", + " Christmas stocking 50 0.92 0.94\n", + " church 50 0.62 0.9\n", + " movie theater 50 0.58 0.88\n", + " cleaver 50 0.32 0.62\n", + " cliff dwelling 50 0.88 1\n", + " cloak 50 0.32 0.64\n", + " clogs 50 0.58 0.88\n", + " cocktail shaker 50 0.62 0.7\n", + " coffee mug 50 0.44 0.72\n", + " coffeemaker 50 0.64 0.92\n", + " coil 50 0.66 0.84\n", + " combination lock 50 0.64 0.84\n", + " computer keyboard 50 0.7 0.82\n", + " confectionery store 50 0.54 0.86\n", + " container ship 50 0.82 0.98\n", + " convertible 50 0.78 0.98\n", + " corkscrew 50 0.82 0.92\n", + " cornet 50 0.46 0.88\n", + " cowboy boot 50 0.64 0.8\n", + " cowboy hat 50 0.64 0.82\n", + " cradle 50 0.38 0.8\n", + " crane (machine) 50 0.78 0.94\n", + " crash helmet 50 0.92 0.96\n", + " crate 50 0.52 0.82\n", + " infant bed 50 0.74 1\n", + " Crock Pot 50 0.78 0.9\n", + " croquet ball 50 0.9 0.96\n", + " crutch 50 0.46 0.7\n", + " cuirass 50 0.54 0.86\n", + " dam 50 0.74 0.92\n", + " desk 50 0.6 0.86\n", + " desktop computer 50 0.54 0.94\n", + " rotary dial telephone 50 0.88 0.94\n", + " diaper 50 0.68 0.84\n", + " digital clock 50 0.54 0.76\n", + " digital watch 50 0.58 0.86\n", + " dining table 50 0.76 0.9\n", + " dishcloth 50 0.94 1\n", + " dishwasher 50 0.44 0.78\n", + " disc brake 50 0.98 1\n", + " dock 50 0.54 0.94\n", + " dog sled 50 0.84 1\n", + " dome 50 0.72 0.92\n", + " doormat 50 0.56 0.82\n", + " drilling rig 50 0.84 0.96\n", + " drum 50 0.38 0.68\n", + " drumstick 50 0.56 0.72\n", + " dumbbell 50 0.62 0.9\n", + " Dutch oven 50 0.7 0.84\n", + " electric fan 50 0.82 0.86\n", + " electric guitar 50 0.62 0.84\n", + " electric locomotive 50 0.92 0.98\n", + " entertainment center 50 0.9 0.98\n", + " envelope 50 0.44 0.86\n", + " espresso machine 50 0.72 0.94\n", + " face powder 50 0.7 0.92\n", + " feather boa 50 0.7 0.84\n", + " filing cabinet 50 0.88 0.98\n", + " fireboat 50 0.94 0.98\n", + " fire engine 50 0.84 0.9\n", + " fire screen sheet 50 0.62 0.76\n", + " flagpole 50 0.74 0.88\n", + " flute 50 0.36 0.72\n", + " folding chair 50 0.62 0.84\n", + " football helmet 50 0.86 0.94\n", + " forklift 50 0.8 0.92\n", + " fountain 50 0.84 0.94\n", + " fountain pen 50 0.76 0.92\n", + " four-poster bed 50 0.78 0.94\n", + " freight car 50 0.96 1\n", + " French horn 50 0.76 0.92\n", + " frying pan 50 0.36 0.78\n", + " fur coat 50 0.84 0.96\n", + " garbage truck 50 0.9 0.98\n", + " gas mask 50 0.84 0.92\n", + " gas pump 50 0.9 0.98\n", + " goblet 50 0.68 0.82\n", + " go-kart 50 0.9 1\n", + " golf ball 50 0.84 0.9\n", + " golf cart 50 0.78 0.86\n", + " gondola 50 0.98 0.98\n", + " gong 50 0.74 0.92\n", + " gown 50 0.62 0.96\n", + " grand piano 50 0.7 0.96\n", + " greenhouse 50 0.8 0.98\n", + " grille 50 0.72 0.9\n", + " grocery store 50 0.66 0.94\n", + " guillotine 50 0.86 0.92\n", + " barrette 50 0.52 0.66\n", + " hair spray 50 0.5 0.74\n", + " half-track 50 0.78 0.9\n", + " hammer 50 0.56 0.76\n", + " hamper 50 0.64 0.84\n", + " hair dryer 
50 0.56 0.74\n", + " hand-held computer 50 0.42 0.86\n", + " handkerchief 50 0.78 0.94\n", + " hard disk drive 50 0.76 0.84\n", + " harmonica 50 0.7 0.88\n", + " harp 50 0.88 0.96\n", + " harvester 50 0.78 1\n", + " hatchet 50 0.54 0.74\n", + " holster 50 0.66 0.84\n", + " home theater 50 0.64 0.94\n", + " honeycomb 50 0.56 0.88\n", + " hook 50 0.3 0.6\n", + " hoop skirt 50 0.64 0.86\n", + " horizontal bar 50 0.68 0.98\n", + " horse-drawn vehicle 50 0.88 0.94\n", + " hourglass 50 0.88 0.96\n", + " iPod 50 0.76 0.94\n", + " clothes iron 50 0.82 0.88\n", + " jack-o'-lantern 50 0.98 0.98\n", + " jeans 50 0.68 0.84\n", + " jeep 50 0.72 0.9\n", + " T-shirt 50 0.72 0.96\n", + " jigsaw puzzle 50 0.84 0.94\n", + " pulled rickshaw 50 0.86 0.94\n", + " joystick 50 0.8 0.9\n", + " kimono 50 0.84 0.96\n", + " knee pad 50 0.62 0.88\n", + " knot 50 0.66 0.8\n", + " lab coat 50 0.8 0.96\n", + " ladle 50 0.36 0.64\n", + " lampshade 50 0.48 0.84\n", + " laptop computer 50 0.26 0.88\n", + " lawn mower 50 0.78 0.96\n", + " lens cap 50 0.46 0.72\n", + " paper knife 50 0.26 0.5\n", + " library 50 0.54 0.9\n", + " lifeboat 50 0.92 0.98\n", + " lighter 50 0.56 0.78\n", + " limousine 50 0.76 0.92\n", + " ocean liner 50 0.88 0.94\n", + " lipstick 50 0.74 0.9\n", + " slip-on shoe 50 0.74 0.92\n", + " lotion 50 0.5 0.86\n", + " speaker 50 0.52 0.68\n", + " loupe 50 0.32 0.52\n", + " sawmill 50 0.72 0.9\n", + " magnetic compass 50 0.52 0.82\n", + " mail bag 50 0.68 0.92\n", + " mailbox 50 0.82 0.92\n", + " tights 50 0.22 0.94\n", + " tank suit 50 0.24 0.9\n", + " manhole cover 50 0.96 0.98\n", + " maraca 50 0.74 0.9\n", + " marimba 50 0.84 0.94\n", + " mask 50 0.44 0.82\n", + " match 50 0.66 0.9\n", + " maypole 50 0.96 1\n", + " maze 50 0.8 0.96\n", + " measuring cup 50 0.54 0.76\n", + " medicine chest 50 0.6 0.84\n", + " megalith 50 0.8 0.92\n", + " microphone 50 0.52 0.7\n", + " microwave oven 50 0.48 0.72\n", + " military uniform 50 0.62 0.84\n", + " milk can 50 0.68 0.82\n", + " minibus 50 0.7 1\n", + " miniskirt 50 0.46 0.76\n", + " minivan 50 0.38 0.8\n", + " missile 50 0.4 0.84\n", + " mitten 50 0.76 0.88\n", + " mixing bowl 50 0.8 0.92\n", + " mobile home 50 0.54 0.78\n", + " Model T 50 0.92 0.96\n", + " modem 50 0.58 0.86\n", + " monastery 50 0.44 0.9\n", + " monitor 50 0.4 0.86\n", + " moped 50 0.56 0.94\n", + " mortar 50 0.68 0.94\n", + " square academic cap 50 0.5 0.84\n", + " mosque 50 0.9 1\n", + " mosquito net 50 0.9 0.98\n", + " scooter 50 0.9 0.98\n", + " mountain bike 50 0.78 0.96\n", + " tent 50 0.88 0.96\n", + " computer mouse 50 0.42 0.82\n", + " mousetrap 50 0.76 0.88\n", + " moving van 50 0.4 0.72\n", + " muzzle 50 0.5 0.72\n", + " nail 50 0.68 0.74\n", + " neck brace 50 0.56 0.68\n", + " necklace 50 0.86 1\n", + " nipple 50 0.7 0.88\n", + " notebook computer 50 0.34 0.84\n", + " obelisk 50 0.8 0.92\n", + " oboe 50 0.6 0.84\n", + " ocarina 50 0.8 0.86\n", + " odometer 50 0.96 1\n", + " oil filter 50 0.58 0.82\n", + " organ 50 0.82 0.9\n", + " oscilloscope 50 0.9 0.96\n", + " overskirt 50 0.2 0.7\n", + " bullock cart 50 0.7 0.94\n", + " oxygen mask 50 0.46 0.84\n", + " packet 50 0.5 0.78\n", + " paddle 50 0.56 0.94\n", + " paddle wheel 50 0.86 0.96\n", + " padlock 50 0.74 0.78\n", + " paintbrush 50 0.62 0.8\n", + " pajamas 50 0.56 0.92\n", + " palace 50 0.64 0.96\n", + " pan flute 50 0.84 0.86\n", + " paper towel 50 0.66 0.84\n", + " parachute 50 0.92 0.94\n", + " parallel bars 50 0.62 0.96\n", + " park bench 50 0.74 0.9\n", + " parking meter 50 0.84 0.92\n", + " passenger car 50 0.5 0.82\n", + 
" patio 50 0.58 0.84\n", + " payphone 50 0.74 0.92\n", + " pedestal 50 0.52 0.9\n", + " pencil case 50 0.64 0.92\n", + " pencil sharpener 50 0.52 0.78\n", + " perfume 50 0.7 0.9\n", + " Petri dish 50 0.6 0.8\n", + " photocopier 50 0.88 0.98\n", + " plectrum 50 0.7 0.84\n", + " Pickelhaube 50 0.72 0.86\n", + " picket fence 50 0.84 0.94\n", + " pickup truck 50 0.64 0.92\n", + " pier 50 0.52 0.82\n", + " piggy bank 50 0.82 0.94\n", + " pill bottle 50 0.76 0.86\n", + " pillow 50 0.76 0.9\n", + " ping-pong ball 50 0.84 0.88\n", + " pinwheel 50 0.76 0.88\n", + " pirate ship 50 0.76 0.94\n", + " pitcher 50 0.46 0.84\n", + " hand plane 50 0.84 0.94\n", + " planetarium 50 0.88 0.98\n", + " plastic bag 50 0.36 0.62\n", + " plate rack 50 0.52 0.78\n", + " plow 50 0.78 0.88\n", + " plunger 50 0.42 0.7\n", + " Polaroid camera 50 0.84 0.92\n", + " pole 50 0.38 0.74\n", + " police van 50 0.76 0.94\n", + " poncho 50 0.58 0.86\n", + " billiard table 50 0.8 0.88\n", + " soda bottle 50 0.56 0.94\n", + " pot 50 0.78 0.92\n", + " potter's wheel 50 0.9 0.94\n", + " power drill 50 0.42 0.72\n", + " prayer rug 50 0.7 0.86\n", + " printer 50 0.54 0.86\n", + " prison 50 0.7 0.9\n", + " projectile 50 0.28 0.9\n", + " projector 50 0.62 0.84\n", + " hockey puck 50 0.92 0.96\n", + " punching bag 50 0.6 0.68\n", + " purse 50 0.42 0.78\n", + " quill 50 0.68 0.84\n", + " quilt 50 0.64 0.9\n", + " race car 50 0.72 0.92\n", + " racket 50 0.72 0.9\n", + " radiator 50 0.66 0.76\n", + " radio 50 0.64 0.92\n", + " radio telescope 50 0.9 0.96\n", + " rain barrel 50 0.8 0.98\n", + " recreational vehicle 50 0.84 0.94\n", + " reel 50 0.72 0.82\n", + " reflex camera 50 0.72 0.92\n", + " refrigerator 50 0.7 0.9\n", + " remote control 50 0.7 0.88\n", + " restaurant 50 0.5 0.66\n", + " revolver 50 0.82 1\n", + " rifle 50 0.38 0.7\n", + " rocking chair 50 0.62 0.84\n", + " rotisserie 50 0.88 0.92\n", + " eraser 50 0.54 0.76\n", + " rugby ball 50 0.86 0.94\n", + " ruler 50 0.68 0.86\n", + " running shoe 50 0.78 0.94\n", + " safe 50 0.82 0.92\n", + " safety pin 50 0.4 0.62\n", + " salt shaker 50 0.66 0.9\n", + " sandal 50 0.66 0.86\n", + " sarong 50 0.64 0.86\n", + " saxophone 50 0.66 0.88\n", + " scabbard 50 0.76 0.92\n", + " weighing scale 50 0.58 0.78\n", + " school bus 50 0.92 1\n", + " schooner 50 0.84 1\n", + " scoreboard 50 0.9 0.96\n", + " CRT screen 50 0.14 0.7\n", + " screw 50 0.9 0.98\n", + " screwdriver 50 0.3 0.58\n", + " seat belt 50 0.88 0.94\n", + " sewing machine 50 0.76 0.9\n", + " shield 50 0.56 0.82\n", + " shoe store 50 0.78 0.96\n", + " shoji 50 0.8 0.92\n", + " shopping basket 50 0.52 0.88\n", + " shopping cart 50 0.76 0.92\n", + " shovel 50 0.62 0.84\n", + " shower cap 50 0.7 0.84\n", + " shower curtain 50 0.64 0.82\n", + " ski 50 0.74 0.92\n", + " ski mask 50 0.72 0.88\n", + " sleeping bag 50 0.68 0.8\n", + " slide rule 50 0.72 0.88\n", + " sliding door 50 0.44 0.78\n", + " slot machine 50 0.94 0.98\n", + " snorkel 50 0.86 0.98\n", + " snowmobile 50 0.88 1\n", + " snowplow 50 0.84 0.98\n", + " soap dispenser 50 0.56 0.86\n", + " soccer ball 50 0.86 0.96\n", + " sock 50 0.62 0.76\n", + " solar thermal collector 50 0.72 0.96\n", + " sombrero 50 0.6 0.84\n", + " soup bowl 50 0.56 0.94\n", + " space bar 50 0.34 0.88\n", + " space heater 50 0.52 0.74\n", + " space shuttle 50 0.82 0.96\n", + " spatula 50 0.3 0.6\n", + " motorboat 50 0.86 1\n", + " spider web 50 0.7 0.9\n", + " spindle 50 0.86 0.98\n", + " sports car 50 0.6 0.94\n", + " spotlight 50 0.26 0.6\n", + " stage 50 0.68 0.86\n", + " steam locomotive 50 0.94 
1\n", + " through arch bridge 50 0.84 0.96\n", + " steel drum 50 0.82 0.9\n", + " stethoscope 50 0.6 0.82\n", + " scarf 50 0.5 0.92\n", + " stone wall 50 0.76 0.9\n", + " stopwatch 50 0.58 0.9\n", + " stove 50 0.46 0.74\n", + " strainer 50 0.64 0.84\n", + " tram 50 0.88 0.96\n", + " stretcher 50 0.6 0.8\n", + " couch 50 0.8 0.96\n", + " stupa 50 0.88 0.88\n", + " submarine 50 0.72 0.92\n", + " suit 50 0.4 0.78\n", + " sundial 50 0.58 0.74\n", + " sunglass 50 0.14 0.58\n", + " sunglasses 50 0.28 0.58\n", + " sunscreen 50 0.32 0.7\n", + " suspension bridge 50 0.6 0.94\n", + " mop 50 0.74 0.92\n", + " sweatshirt 50 0.28 0.66\n", + " swimsuit 50 0.52 0.82\n", + " swing 50 0.76 0.84\n", + " switch 50 0.56 0.76\n", + " syringe 50 0.62 0.82\n", + " table lamp 50 0.6 0.88\n", + " tank 50 0.8 0.96\n", + " tape player 50 0.46 0.76\n", + " teapot 50 0.84 1\n", + " teddy bear 50 0.82 0.94\n", + " television 50 0.6 0.9\n", + " tennis ball 50 0.7 0.94\n", + " thatched roof 50 0.88 0.9\n", + " front curtain 50 0.8 0.92\n", + " thimble 50 0.6 0.8\n", + " threshing machine 50 0.56 0.88\n", + " throne 50 0.72 0.82\n", + " tile roof 50 0.72 0.94\n", + " toaster 50 0.66 0.84\n", + " tobacco shop 50 0.42 0.7\n", + " toilet seat 50 0.62 0.88\n", + " torch 50 0.64 0.84\n", + " totem pole 50 0.92 0.98\n", + " tow truck 50 0.62 0.88\n", + " toy store 50 0.6 0.94\n", + " tractor 50 0.76 0.98\n", + " semi-trailer truck 50 0.78 0.92\n", + " tray 50 0.46 0.64\n", + " trench coat 50 0.54 0.72\n", + " tricycle 50 0.72 0.94\n", + " trimaran 50 0.7 0.98\n", + " tripod 50 0.58 0.86\n", + " triumphal arch 50 0.92 0.98\n", + " trolleybus 50 0.9 1\n", + " trombone 50 0.54 0.88\n", + " tub 50 0.24 0.82\n", + " turnstile 50 0.84 0.94\n", + " typewriter keyboard 50 0.68 0.98\n", + " umbrella 50 0.52 0.7\n", + " unicycle 50 0.74 0.96\n", + " upright piano 50 0.76 0.9\n", + " vacuum cleaner 50 0.62 0.9\n", + " vase 50 0.5 0.78\n", + " vault 50 0.76 0.92\n", + " velvet 50 0.2 0.42\n", + " vending machine 50 0.9 1\n", + " vestment 50 0.54 0.82\n", + " viaduct 50 0.78 0.86\n", + " violin 50 0.68 0.78\n", + " volleyball 50 0.86 1\n", + " waffle iron 50 0.72 0.88\n", + " wall clock 50 0.54 0.88\n", + " wallet 50 0.52 0.9\n", + " wardrobe 50 0.68 0.88\n", + " military aircraft 50 0.9 0.98\n", + " sink 50 0.72 0.96\n", + " washing machine 50 0.78 0.94\n", + " water bottle 50 0.54 0.74\n", + " water jug 50 0.22 0.74\n", + " water tower 50 0.9 0.96\n", + " whiskey jug 50 0.64 0.74\n", + " whistle 50 0.72 0.84\n", + " wig 50 0.84 0.9\n", + " window screen 50 0.68 0.8\n", + " window shade 50 0.52 0.76\n", + " Windsor tie 50 0.22 0.66\n", + " wine bottle 50 0.42 0.82\n", + " wing 50 0.54 0.96\n", + " wok 50 0.46 0.82\n", + " wooden spoon 50 0.58 0.8\n", + " wool 50 0.32 0.82\n", + " split-rail fence 50 0.74 0.9\n", + " shipwreck 50 0.84 0.96\n", + " yawl 50 0.78 0.96\n", + " yurt 50 0.84 1\n", + " website 50 0.98 1\n", + " comic book 50 0.62 0.9\n", + " crossword 50 0.84 0.88\n", + " traffic sign 50 0.78 0.9\n", + " traffic light 50 0.8 0.94\n", + " dust jacket 50 0.72 0.94\n", + " menu 50 0.82 0.96\n", + " plate 50 0.44 0.88\n", + " guacamole 50 0.8 0.92\n", + " consomme 50 0.54 0.88\n", + " hot pot 50 0.86 0.98\n", + " trifle 50 0.92 0.98\n", + " ice cream 50 0.68 0.94\n", + " ice pop 50 0.62 0.84\n", + " baguette 50 0.62 0.88\n", + " bagel 50 0.64 0.92\n", + " pretzel 50 0.72 0.88\n", + " cheeseburger 50 0.9 1\n", + " hot dog 50 0.74 0.94\n", + " mashed potato 50 0.74 0.9\n", + " cabbage 50 0.84 0.96\n", + " broccoli 50 0.9 0.96\n", + " 
cauliflower 50 0.82 1\n", + " zucchini 50 0.74 0.9\n", + " spaghetti squash 50 0.8 0.96\n", + " acorn squash 50 0.82 0.96\n", + " butternut squash 50 0.7 0.94\n", + " cucumber 50 0.6 0.96\n", + " artichoke 50 0.84 0.94\n", + " bell pepper 50 0.84 0.98\n", + " cardoon 50 0.88 0.94\n", + " mushroom 50 0.38 0.92\n", + " Granny Smith 50 0.9 0.96\n", + " strawberry 50 0.6 0.88\n", + " orange 50 0.7 0.92\n", + " lemon 50 0.78 0.98\n", + " fig 50 0.82 0.96\n", + " pineapple 50 0.86 0.96\n", + " banana 50 0.84 0.96\n", + " jackfruit 50 0.9 0.98\n", + " custard apple 50 0.86 0.96\n", + " pomegranate 50 0.82 0.98\n", + " hay 50 0.8 0.92\n", + " carbonara 50 0.88 0.94\n", + " chocolate syrup 50 0.46 0.84\n", + " dough 50 0.4 0.6\n", + " meatloaf 50 0.58 0.84\n", + " pizza 50 0.84 0.96\n", + " pot pie 50 0.68 0.9\n", + " burrito 50 0.8 0.98\n", + " red wine 50 0.54 0.82\n", + " espresso 50 0.64 0.88\n", + " cup 50 0.38 0.7\n", + " eggnog 50 0.38 0.7\n", + " alp 50 0.54 0.88\n", + " bubble 50 0.8 0.96\n", + " cliff 50 0.64 1\n", + " coral reef 50 0.72 0.96\n", + " geyser 50 0.94 1\n", + " lakeshore 50 0.54 0.88\n", + " promontory 50 0.58 0.94\n", + " shoal 50 0.6 0.96\n", + " seashore 50 0.44 0.78\n", + " valley 50 0.72 0.94\n", + " volcano 50 0.78 0.96\n", + " baseball player 50 0.72 0.94\n", + " bridegroom 50 0.72 0.88\n", + " scuba diver 50 0.8 1\n", + " rapeseed 50 0.94 0.98\n", + " daisy 50 0.96 0.98\n", + " yellow lady's slipper 50 1 1\n", + " corn 50 0.4 0.88\n", + " acorn 50 0.92 0.98\n", + " rose hip 50 0.92 0.98\n", + " horse chestnut seed 50 0.94 0.98\n", + " coral fungus 50 0.96 0.96\n", + " agaric 50 0.82 0.94\n", + " gyromitra 50 0.98 1\n", + " stinkhorn mushroom 50 0.8 0.94\n", + " earth star 50 0.98 1\n", + " hen-of-the-woods 50 0.8 0.96\n", + " bolete 50 0.74 0.94\n", + " ear 50 0.48 0.94\n", + " toilet paper 50 0.36 0.68\n", + "Speed: 0.1ms pre-process, 0.3ms inference, 0.0ms post-process per image at shape (1, 3, 224, 224)\n", + "Results saved to \u001b[1mruns/val-cls/exp\u001b[0m\n" + ] + } + ], + "source": [ + "# Validate YOLOv5s on Imagenet val\n", + "!python classify/val.py --weights yolov5s-cls.pt --data ../datasets/imagenet --img 224 --half" + ] }, { - "name": "stdout", - "output_type": "stream", - "text": [ - " patio 50 0.6 0.84\n", - " payphone 50 0.78 0.94\n", - " pedestal 50 0.66 0.86\n", - " pencil case 50 0.74 0.98\n", - " pencil sharpener 50 0.6 0.76\n", - " perfume 50 0.66 0.96\n", - " Petri dish 50 0.64 0.82\n", - " photocopier 50 0.94 1\n", - " plectrum 50 0.72 0.92\n", - " Pickelhaube 50 0.78 0.88\n", - " picket fence 50 0.86 0.94\n", - " pickup truck 50 0.72 0.94\n", - " pier 50 0.54 0.92\n", - " piggy bank 50 0.8 0.94\n", - " pill bottle 50 0.72 0.9\n", - " pillow 50 0.76 0.88\n", - " ping-pong ball 50 0.78 0.88\n", - " pinwheel 50 0.8 0.94\n", - " pirate ship 50 0.76 0.92\n", - " pitcher 50 0.48 0.86\n", - " hand plane 50 0.9 0.92\n", - " planetarium 50 0.9 0.98\n", - " plastic bag 50 0.42 0.66\n", - " plate rack 50 0.52 0.82\n", - " plow 50 0.8 0.94\n", - " plunger 50 0.42 0.72\n", - " Polaroid camera 50 0.84 0.94\n", - " pole 50 0.4 0.76\n", - " police van 50 0.84 0.94\n", - " poncho 50 0.64 0.88\n", - " billiard table 50 0.84 0.92\n", - " soda bottle 50 0.58 0.9\n", - " pot 50 0.86 0.94\n", - " potter's wheel 50 0.92 0.94\n", - " power drill 50 0.38 0.7\n", - " prayer rug 50 0.7 0.88\n", - " printer 50 0.52 0.86\n", - " prison 50 0.66 0.9\n", - " projectile 50 0.34 0.96\n", - " projector 50 0.6 0.82\n", - " hockey puck 50 0.9 0.98\n", - " punching bag 50 0.62 
0.72\n", - " purse 50 0.48 0.88\n", - " quill 50 0.78 0.86\n", - " quilt 50 0.6 0.9\n", - " race car 50 0.72 0.92\n", - " racket 50 0.78 0.94\n", - " radiator 50 0.7 0.84\n", - " radio 50 0.68 0.9\n", - " radio telescope 50 0.88 0.94\n", - " rain barrel 50 0.8 0.96\n", - " recreational vehicle 50 0.84 0.96\n", - " reel 50 0.72 0.8\n", - " reflex camera 50 0.76 0.96\n", - " refrigerator 50 0.76 0.92\n", - " remote control 50 0.72 0.94\n", - " restaurant 50 0.52 0.62\n", - " revolver 50 0.8 0.98\n", - " rifle 50 0.46 0.76\n", - " rocking chair 50 0.72 0.9\n", - " rotisserie 50 0.88 0.96\n", - " eraser 50 0.62 0.76\n", - " rugby ball 50 0.84 0.94\n", - " ruler 50 0.72 0.86\n", - " running shoe 50 0.84 0.94\n", - " safe 50 0.9 0.94\n", - " safety pin 50 0.48 0.8\n", - " salt shaker 50 0.62 0.8\n", - " sandal 50 0.7 0.82\n", - " sarong 50 0.62 0.8\n", - " saxophone 50 0.66 0.9\n", - " scabbard 50 0.78 0.92\n", - " weighing scale 50 0.62 0.84\n", - " school bus 50 0.92 1\n", - " schooner 50 0.8 1\n", - " scoreboard 50 0.86 0.98\n", - " CRT screen 50 0.16 0.8\n", - " screw 50 0.96 0.98\n", - " screwdriver 50 0.4 0.58\n", - " seat belt 50 0.9 0.92\n", - " sewing machine 50 0.74 0.94\n", - " shield 50 0.64 0.78\n", - " shoe store 50 0.84 0.98\n", - " shoji 50 0.76 0.92\n", - " shopping basket 50 0.52 0.84\n", - " shopping cart 50 0.76 0.9\n", - " shovel 50 0.7 0.84\n", - " shower cap 50 0.74 0.88\n", - " shower curtain 50 0.72 0.9\n", - " ski 50 0.68 0.94\n", - " ski mask 50 0.66 0.9\n", - " sleeping bag 50 0.66 0.8\n", - " slide rule 50 0.7 0.86\n", - " sliding door 50 0.54 0.76\n", - " slot machine 50 0.92 0.96\n", - " snorkel 50 0.86 1\n", - " snowmobile 50 0.86 0.96\n", - " snowplow 50 0.9 1\n", - " soap dispenser 50 0.52 0.9\n", - " soccer ball 50 0.84 0.98\n", - " sock 50 0.66 0.78\n", - " solar thermal collector 50 0.72 0.9\n", - " sombrero 50 0.7 0.84\n", - " soup bowl 50 0.6 0.94\n", - " space bar 50 0.32 0.84\n", - " space heater 50 0.64 0.74\n", - " space shuttle 50 0.86 0.98\n", - " spatula 50 0.28 0.6\n", - " motorboat 50 0.94 1\n", - " spider web 50 0.76 0.96\n", - " spindle 50 0.92 1\n", - " sports car 50 0.5 0.96\n", - " spotlight 50 0.34 0.66\n", - " stage 50 0.76 0.92\n", - " steam locomotive 50 0.96 1\n", - " through arch bridge 50 0.82 0.96\n", - " steel drum 50 0.8 0.94\n", - " stethoscope 50 0.52 0.84\n", - " scarf 50 0.54 0.92\n", - " stone wall 50 0.8 0.92\n", - " stopwatch 50 0.54 0.9\n", - " stove 50 0.46 0.78\n", - " strainer 50 0.58 0.84\n", - " tram 50 0.9 0.96\n", - " stretcher 50 0.46 0.74\n", - " couch 50 0.72 0.94\n", - " stupa 50 0.84 0.9\n", - " submarine 50 0.78 0.9\n", - " suit 50 0.62 0.88\n", - " sundial 50 0.46 0.78\n", - " sunglass 50 0.18 0.6\n", - " sunglasses 50 0.32 0.64\n", - " sunscreen 50 0.32 0.7\n", - " suspension bridge 50 0.64 0.94\n", - " mop 50 0.8 0.96\n", - " sweatshirt 50 0.26 0.68\n", - " swimsuit 50 0.6 0.84\n", - " swing 50 0.78 0.88\n", - " switch 50 0.62 0.8\n", - " syringe 50 0.68 0.8\n", - " table lamp 50 0.54 0.88\n", - " tank 50 0.78 0.94\n", - " tape player 50 0.38 0.88\n", - " teapot 50 0.82 1\n", - " teddy bear 50 0.82 0.92\n", - " television 50 0.6 0.9\n", - " tennis ball 50 0.7 0.94\n", - " thatched roof 50 0.86 0.94\n", - " front curtain 50 0.76 0.94\n", - " thimble 50 0.68 0.82\n", - " threshing machine 50 0.64 0.9\n", - " throne 50 0.68 0.82\n", - " tile roof 50 0.84 0.96\n", - " toaster 50 0.64 0.82\n", - " tobacco shop 50 0.44 0.74\n", - " toilet seat 50 0.64 0.88\n", - " torch 50 0.62 0.86\n", - " totem pole 50 0.9 1\n", - 
" tow truck 50 0.64 0.92\n", - " toy store 50 0.64 0.9\n", - " tractor 50 0.86 0.98\n", - " semi-trailer truck 50 0.76 0.96\n", - " tray 50 0.54 0.76\n", - " trench coat 50 0.6 0.78\n", - " tricycle 50 0.78 0.96\n", - " trimaran 50 0.78 0.98\n", - " tripod 50 0.66 0.86\n", - " triumphal arch 50 0.92 0.98\n", - " trolleybus 50 0.98 1\n", - " trombone 50 0.66 0.94\n", - " tub 50 0.3 0.86\n", - " turnstile 50 0.8 0.9\n", - " typewriter keyboard 50 0.74 0.98\n", - " umbrella 50 0.6 0.78\n", - " unicycle 50 0.78 0.96\n", - " upright piano 50 0.84 0.94\n", - " vacuum cleaner 50 0.84 0.92\n", - " vase 50 0.56 0.74\n", - " vault 50 0.78 0.9\n", - " velvet 50 0.22 0.5\n", - " vending machine 50 0.94 1\n", - " vestment 50 0.62 0.86\n", - " viaduct 50 0.78 0.88\n", - " violin 50 0.64 0.88\n" - ] + "cell_type": "markdown", + "metadata": { + "id": "ZY2VXXXu74w5" + }, + "source": [ + "# 3. Train\n", + "\n", + "

\n", + "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", + "

\n", + "\n", + "Train a YOLOv5s Classification model on the [Imagenette](https://image-net.org/) dataset with `--data imagenet`, starting from pretrained `--pretrained yolov5s-cls.pt`.\n", + "\n", + "- **Pretrained [Models](https://github.com/ultralytics/yolov5/tree/master/models)** are downloaded\n", + "automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\n", + "- **Training Results** are saved to `runs/train-cls/` with incrementing run directories, i.e. `runs/train-cls/exp2`, `runs/train-cls/exp3` etc.\n", + "

\n", + "\n", + "A **Mosaic Dataloader** is used for training which combines 4 images into 1 mosaic.\n", + "\n", + "## Train on Custom Data with Roboflow 🌟 NEW\n", + "\n", + "[Roboflow](https://roboflow.com/?ref=ultralytics) enables you to easily **organize, label, and prepare** a high quality dataset with your own custom data. Roboflow also makes it easy to establish an active learning pipeline, collaborate with your team on dataset improvement, and integrate directly into your model building workflow with the `roboflow` pip package.\n", + "\n", + "- Custom Training Example: [https://blog.roboflow.com/train-yolov5-classification-custom-data/](https://blog.roboflow.com/train-yolov5-classification-custom-data/?ref=ultralytics)\n", + "- Custom Training Notebook: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1KZiKUAjtARHAfZCXbJRv14-pOnIsBLPV?usp=sharing)\n", + "
\n", + "\n", + "

Label images lightning fast (including with model-assisted labeling)" + ] }, { - "name": "stdout", - "output_type": "stream", - "text": [ - " volleyball 50 0.96 1\n", - " waffle iron 50 0.72 0.84\n", - " wall clock 50 0.58 0.86\n", - " wallet 50 0.58 0.94\n", - " wardrobe 50 0.7 0.9\n", - " military aircraft 50 0.9 0.98\n", - " sink 50 0.74 0.94\n", - " washing machine 50 0.82 0.94\n", - " water bottle 50 0.54 0.68\n", - " water jug 50 0.3 0.78\n", - " water tower 50 0.94 0.96\n", - " whiskey jug 50 0.64 0.76\n", - " whistle 50 0.7 0.82\n", - " wig 50 0.86 0.88\n", - " window screen 50 0.7 0.82\n", - " window shade 50 0.54 0.9\n", - " Windsor tie 50 0.32 0.64\n", - " wine bottle 50 0.46 0.76\n", - " wing 50 0.52 0.96\n", - " wok 50 0.54 0.92\n", - " wooden spoon 50 0.62 0.86\n", - " wool 50 0.42 0.84\n", - " split-rail fence 50 0.7 0.92\n", - " shipwreck 50 0.86 0.98\n", - " yawl 50 0.76 0.92\n", - " yurt 50 0.86 0.96\n", - " website 50 0.98 1\n", - " comic book 50 0.72 0.88\n", - " crossword 50 0.8 0.88\n", - " traffic sign 50 0.72 0.9\n", - " traffic light 50 0.8 0.96\n", - " dust jacket 50 0.78 0.94\n", - " menu 50 0.8 0.96\n", - " plate 50 0.44 0.86\n", - " guacamole 50 0.76 0.96\n", - " consomme 50 0.52 0.92\n", - " hot pot 50 0.78 1\n", - " trifle 50 0.9 1\n", - " ice cream 50 0.68 0.94\n", - " ice pop 50 0.68 0.8\n", - " baguette 50 0.62 0.88\n", - " bagel 50 0.64 0.86\n", - " pretzel 50 0.68 0.9\n", - " cheeseburger 50 0.92 0.96\n", - " hot dog 50 0.74 0.96\n", - " mashed potato 50 0.72 0.88\n", - " cabbage 50 0.88 0.98\n", - " broccoli 50 0.88 0.96\n", - " cauliflower 50 0.84 0.98\n", - " zucchini 50 0.68 0.98\n", - " spaghetti squash 50 0.82 0.96\n", - " acorn squash 50 0.8 1\n", - " butternut squash 50 0.72 0.94\n", - " cucumber 50 0.66 0.94\n", - " artichoke 50 0.86 0.96\n", - " bell pepper 50 0.86 0.94\n", - " cardoon 50 0.92 0.94\n", - " mushroom 50 0.38 0.96\n", - " Granny Smith 50 0.9 0.98\n", - " strawberry 50 0.64 0.88\n", - " orange 50 0.74 0.94\n", - " lemon 50 0.78 0.98\n", - " fig 50 0.84 0.94\n", - " pineapple 50 0.9 1\n", - " banana 50 0.88 0.98\n", - " jackfruit 50 0.96 0.98\n", - " custard apple 50 0.86 0.96\n", - " pomegranate 50 0.8 0.96\n", - " hay 50 0.84 0.96\n", - " carbonara 50 0.88 0.96\n", - " chocolate syrup 50 0.58 0.94\n", - " dough 50 0.36 0.68\n", - " meatloaf 50 0.64 0.88\n", - " pizza 50 0.78 0.9\n", - " pot pie 50 0.66 0.92\n", - " burrito 50 0.88 0.98\n", - " red wine 50 0.66 0.84\n", - " espresso 50 0.66 0.9\n", - " cup 50 0.42 0.78\n", - " eggnog 50 0.36 0.64\n", - " alp 50 0.54 0.94\n", - " bubble 50 0.86 0.96\n", - " cliff 50 0.66 1\n", - " coral reef 50 0.74 0.94\n", - " geyser 50 0.92 1\n", - " lakeshore 50 0.52 0.86\n", - " promontory 50 0.58 0.92\n", - " shoal 50 0.66 0.98\n", - " seashore 50 0.44 0.86\n", - " valley 50 0.72 0.98\n", - " volcano 50 0.72 0.94\n", - " baseball player 50 0.74 0.96\n", - " bridegroom 50 0.78 0.92\n", - " scuba diver 50 0.82 1\n", - " rapeseed 50 0.98 0.98\n", - " daisy 50 0.96 0.98\n", - " yellow lady's slipper 50 1 1\n", - " corn 50 0.42 0.86\n", - " acorn 50 0.96 0.98\n", - " rose hip 50 0.9 0.96\n", - " horse chestnut seed 50 1 1\n", - " coral fungus 50 0.98 0.98\n", - " agaric 50 0.84 0.94\n", - " gyromitra 50 0.98 0.98\n", - " stinkhorn mushroom 50 0.84 0.92\n", - " earth star 50 1 1\n", - " hen-of-the-woods 50 0.9 0.96\n", - " bolete 50 0.8 0.94\n", - " ear 50 0.54 0.94\n", - " toilet paper 50 0.44 0.68\n", - "Speed: 0.1ms pre-process, 0.2ms inference, 0.0ms post-process per image at shape (1, 3, 320, 
320)\n", - "Results saved to \u001b[1mruns/val-cls/exp\u001b[0m\n" - ] - } - ], - "source": [ - "# Validate YOLOv5s on Imagenet val\n", - "!python classify/val.py --weights yolov5s-cls.pt --data ../datasets/imagenet --img 320 --half" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "ZY2VXXXu74w5" - }, - "source": [ - "# 3. Train\n", - "\n", - "

\n", - "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", - "

\n", - "\n", - "Train a YOLOv5s Classification model on the [Imagenette](https://image-net.org/) dataset with `--data imagenet`, starting from pretrained `--pretrained yolov5s-cls.pt`.\n", - "\n", - "- **Pretrained [Models](https://github.com/ultralytics/yolov5/tree/master/models)** are downloaded\n", - "automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\n", - "- **Training Results** are saved to `runs/train-cls/` with incrementing run directories, i.e. `runs/train-cls/exp2`, `runs/train-cls/exp3` etc.\n", - "

\n", - "\n", - "A **Mosaic Dataloader** is used for training which combines 4 images into 1 mosaic.\n", - "\n", - "## Train on Custom Data with Roboflow 🌟 NEW\n", - "\n", - "[Roboflow](https://roboflow.com/?ref=ultralytics) enables you to easily **organize, label, and prepare** a high quality dataset with your own custom data. Roboflow also makes it easy to establish an active learning pipeline, collaborate with your team on dataset improvement, and integrate directly into your model building workflow with the `roboflow` pip package.\n", - "\n", - "- Custom Training Example: [https://blog.roboflow.com/train-yolov5-classification-custom-data/](https://blog.roboflow.com/train-yolov5-classification-custom-data/?ref=ultralytics)\n", - "- Custom Training Notebook: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1KZiKUAjtARHAfZCXbJRv14-pOnIsBLPV?usp=sharing)\n", - "
\n", - "\n", - "

Label images lightning fast (including with model-assisted labeling)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "i3oKtE4g-aNn" - }, - "outputs": [], - "source": [ - "#@title Select YOLOv5 🚀 logger {run: 'auto'}\n", - "logger = 'TensorBoard' #@param ['TensorBoard', 'Comet', 'ClearML']\n", - "\n", - "if logger == 'TensorBoard':\n", - " %load_ext tensorboard\n", - " %tensorboard --logdir runs/train\n", - "elif logger == 'Comet':\n", - " %pip install -q comet_ml\n", - " import comet_ml; comet_ml.init()\n", - "elif logger == 'ClearML':\n", - " %pip install -q clearml && clearml-init" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "i3oKtE4g-aNn" + }, + "outputs": [], + "source": [ + "#@title Select YOLOv5 🚀 logger {run: 'auto'}\n", + "logger = 'TensorBoard' #@param ['TensorBoard', 'Comet', 'ClearML']\n", + "\n", + "if logger == 'TensorBoard':\n", + " %load_ext tensorboard\n", + " %tensorboard --logdir runs/train\n", + "elif logger == 'Comet':\n", + " %pip install -q comet_ml\n", + " import comet_ml; comet_ml.init()\n", + "elif logger == 'ClearML':\n", + " import clearml; clearml.browser_login()" + ] }, - "id": "1NcFxRcFdJ_O", - "outputId": "baa6d4be-3379-4aab-844a-d5a5396c0e49" - }, - "outputs": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[34m\u001b[1mclassify/train: \u001b[0mmodel=yolov5s-cls.pt, data=imagenette160, epochs=3, batch_size=16, imgsz=160, nosave=False, cache=ram, device=, workers=8, project=runs/train-cls, name=exp, exist_ok=False, pretrained=True, optimizer=Adam, lr0=0.001, decay=5e-05, label_smoothing=0.1, cutoff=None, dropout=None, verbose=False, seed=0, local_rank=-1\n", - "\u001b[34m\u001b[1mgithub: \u001b[0m⚠️ YOLOv5 is out of date by 7 commits. 
Use `git pull ultralytics master` or `git clone https://github.com/ultralytics/yolov5` to update.\n", - "YOLOv5 🚀 v6.2-237-g34c3e1c Python-3.7.12 torch-1.12.1+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16161MiB)\n", - "\n", - "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train-cls', view at http://localhost:6006/\n", - "\u001b[34m\u001b[1malbumentations: \u001b[0m⚠️ not found, install with `pip install albumentations` (recommended)\n", - "Model summary: 149 layers, 4185290 parameters, 4185290 gradients, 10.5 GFLOPs\n", - "\u001b[34m\u001b[1moptimizer:\u001b[0m Adam(lr=0.001) with parameter groups 32 weight(decay=0.0), 33 weight(decay=5e-05), 33 bias\n", - "Image sizes 160 train, 160 test\n", - "Using 3 dataloader workers\n", - "Logging results to \u001b[1mruns/train-cls/exp\u001b[0m\n", - "Starting yolov5s-cls.pt training on imagenette160 dataset with 10 classes for 3 epochs...\n", - "\n", - " Epoch GPU_mem train_loss val_loss top1_acc top5_acc\n", - " 1/3 0.369G 1.05 0.935 0.837 0.985: 100%|█████\n", - " 2/3 0.369G 0.767 0.873 0.859 0.982: 100%|█████\n", - " 3/3 0.369G 0.626 0.713 0.927 0.992: 100%|█████\n", - "\n", - "Training complete (0.025 hours)\n", - "Results saved to \u001b[1mruns/train-cls/exp\u001b[0m\n", - "Predict: python classify/predict.py --weights runs/train-cls/exp/weights/best.pt --source im.jpg\n", - "Validate: python classify/val.py --weights runs/train-cls/exp/weights/best.pt --data /home/paguerrie/datasets/imagenette160\n", - "Export: python export.py --weights runs/train-cls/exp/weights/best.pt --include onnx\n", - "PyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', 'runs/train-cls/exp/weights/best.pt')\n", - "Visualize: https://netron.app\n", - "\n" - ] - } - ], - "source": [ - "# Train YOLOv5s Classification on Imagenette160 for 3 epochs\n", - "!python classify/train.py --img 160 --batch 16 --epochs 3 --data imagenette160 --model yolov5s-cls.pt --cache" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "15glLzbQx5u0" - }, - "source": [ - "# 4. Visualize" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "nWOsI5wJR1o3" - }, - "source": [ - "## Comet Logging and Visualization 🌟 NEW\n", - "[Comet](https://bit.ly/yolov5-readme-comet) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", - "\n", - "Getting started is easy:\n", - "```shell\n", - "pip install comet_ml # 1. install\n", - "export COMET_API_KEY= # 2. paste API key\n", - "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n", - "```\n", - "\n", - "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://bit.ly/yolov5-colab-comet-docs). 
Get started by trying out the Comet Colab Notebook:\n", - "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", - "\n", - "\"yolo-ui\"" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "Lay2WsTjNJzP" - }, - "source": [ - "## ClearML Logging and Automation 🌟 NEW\n", - "\n", - "[ClearML](https://cutt.ly/yolov5-notebook-clearml) is completely integrated into YOLOv5 to track your experimentation, manage dataset versions and even remotely execute training runs. To enable ClearML (check cells above):\n", - "\n", - "- `pip install clearml`\n", - "- run `clearml-init` to connect to a ClearML server (**deploy your own [open-source server](https://github.com/allegroai/clearml-server)**, or use our [free hosted server](https://cutt.ly/yolov5-notebook-clearml))\n", - "\n", - "You'll get all the great expected features from an experiment manager: live updates, model upload, experiment comparison etc. but ClearML also tracks uncommitted changes and installed packages for example. Thanks to that ClearML Tasks (which is what we call experiments) are also reproducible on different machines! With only 1 extra line, we can schedule a YOLOv5 training task on a queue to be executed by any number of ClearML Agents (workers).\n", - "\n", - "You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) for details!\n", - "\n", - "\n", - "\"ClearML" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "-WPvRbS5Swl6" - }, - "source": [ - "## Local Logging\n", - "\n", - "Training results are automatically logged with [Tensorboard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) loggers to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc.\n", - "\n", - "This directory contains train and val statistics, mosaics, labels, predictions and augmentated mosaics, as well as metrics and charts including precision-recall (PR) curves and confusion matrices. \n", - "\n", - "\"Local\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "Zelyeqbyt3GD" - }, - "source": [ - "# Environments\n", - "\n", - "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n", - "\n", - "- **Notebooks** with free GPU: \"Run \"Open \"Open\n", - "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)\n", - "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)\n", - "- **Docker Image**. 
See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) \"Docker\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "6Qu7Iesl0p54" - }, - "source": [ - "# Status\n", - "\n", - "![YOLOv5 CI](https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg)\n", - "\n", - "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "IEijrePND_2I" - }, - "source": [ - "# Appendix\n", - "\n", - "Additional content below." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "GMusP4OAxFu6" - }, - "outputs": [], - "source": [ - "# YOLOv5 PyTorch HUB Inference (DetectionModels only)\n", - "import torch\n", - "\n", - "model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # yolov5n - yolov5x6 or custom\n", - "im = 'https://ultralytics.com/images/zidane.jpg' # file, Path, PIL.Image, OpenCV, nparray, list\n", - "results = model(im) # inference\n", - "results.print() # or .show(), .save(), .crop(), .pandas(), etc." - ] - } - ], - "metadata": { - "accelerator": "GPU", - "colab": { - "collapsed_sections": [], - "name": "YOLOv5 Classification Tutorial", - "provenance": [], - "toc_visible": true - }, - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.12" - }, - "widgets": { - "application/vnd.jupyter.widget-state+json": { - "0856bea36ec148b68522ff9c9eb258d8": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "DescriptionStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "description_width": "" - } + "cell_type": "code", + "execution_count": 5, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "1NcFxRcFdJ_O", + "outputId": "638c55b1-dc45-4eee-cabc-4921dc61faf5" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\u001b[34m\u001b[1mclassify/train: \u001b[0mmodel=yolov5s-cls.pt, data=imagenette160, epochs=3, batch_size=16, imgsz=224, nosave=False, cache=ram, device=, workers=8, project=runs/train-cls, name=exp, exist_ok=False, pretrained=True, optimizer=Adam, lr0=0.001, decay=5e-05, label_smoothing=0.1, cutoff=None, dropout=None, verbose=False, seed=0, local_rank=-1\n", + "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", + "YOLOv5 🚀 v6.2-258-g7fc7ed7 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "\n", + 
"\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train-cls', view at http://localhost:6006/\n", + "\n", + "Dataset not found ⚠️, missing path /content/datasets/imagenette160, attempting download...\n", + "Downloading https://github.com/ultralytics/yolov5/releases/download/v1.0/imagenette160.zip to /content/datasets/imagenette160.zip...\n", + "100% 103M/103M [00:09<00:00, 11.1MB/s]\n", + "Unzipping /content/datasets/imagenette160.zip...\n", + "Dataset download success ✅ (13.2s), saved to \u001b[1m/content/datasets/imagenette160\u001b[0m\n", + "\n", + "\u001b[34m\u001b[1malbumentations: \u001b[0mRandomResizedCrop(p=1.0, height=224, width=224, scale=(0.08, 1.0), ratio=(0.75, 1.3333333333333333), interpolation=1), HorizontalFlip(p=0.5), ColorJitter(p=0.5, brightness=[0.6, 1.4], contrast=[0.6, 1.4], saturation=[0.6, 1.4], hue=[0, 0]), Normalize(p=1.0, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_pixel_value=255.0), ToTensorV2(always_apply=True, p=1.0, transpose_mask=False)\n", + "Model summary: 149 layers, 4185290 parameters, 4185290 gradients, 10.5 GFLOPs\n", + "\u001b[34m\u001b[1moptimizer:\u001b[0m Adam(lr=0.001) with parameter groups 32 weight(decay=0.0), 33 weight(decay=5e-05), 33 bias\n", + "Image sizes 224 train, 224 test\n", + "Using 1 dataloader workers\n", + "Logging results to \u001b[1mruns/train-cls/exp\u001b[0m\n", + "Starting yolov5s-cls.pt training on imagenette160 dataset with 10 classes for 3 epochs...\n", + "\n", + " Epoch GPU_mem train_loss val_loss top1_acc top5_acc\n", + " 1/3 0.348G 1.31 1.09 0.794 0.979: 100% 592/592 [01:02<00:00, 9.47it/s]\n", + " 2/3 0.415G 1.09 0.852 0.883 0.99: 100% 592/592 [00:59<00:00, 10.00it/s]\n", + " 3/3 0.415G 0.954 0.776 0.907 0.994: 100% 592/592 [00:59<00:00, 9.89it/s]\n", + "\n", + "Training complete (0.051 hours)\n", + "Results saved to \u001b[1mruns/train-cls/exp\u001b[0m\n", + "Predict: python classify/predict.py --weights runs/train-cls/exp/weights/best.pt --source im.jpg\n", + "Validate: python classify/val.py --weights runs/train-cls/exp/weights/best.pt --data /content/datasets/imagenette160\n", + "Export: python export.py --weights runs/train-cls/exp/weights/best.pt --include onnx\n", + "PyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', 'runs/train-cls/exp/weights/best.pt')\n", + "Visualize: https://netron.app\n", + "\n" + ] + } + ], + "source": [ + "# Train YOLOv5s Classification on Imagenette160 for 3 epochs\n", + "!python classify/train.py --img 224 --batch 16 --epochs 3 --data imagenette160 --model yolov5s-cls.pt --cache" + ] }, - "0ace3934ec6f4d36a1b3a9e086390926": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "ProgressStyleModel", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "ProgressStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "bar_color": null, - "description_width": "" - } + { + "cell_type": "markdown", + "metadata": { + "id": "15glLzbQx5u0" + }, + "source": [ + "# 4. 
Visualize" + ] }, - "35e03ce5090346c9ae602891470fc555": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "FloatProgressModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "FloatProgressModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "ProgressView", - "bar_style": "success", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_76879f6f2aa54637a7a07faeea2bd684", - "max": 818322941, - "min": 0, - "orientation": "horizontal", - "style": "IPY_MODEL_0ace3934ec6f4d36a1b3a9e086390926", - "value": 818322941 - } + { + "cell_type": "markdown", + "metadata": { + "id": "nWOsI5wJR1o3" + }, + "source": [ + "## Comet Logging and Visualization 🌟 NEW\n", + "[Comet](https://bit.ly/yolov5-readme-comet) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", + "\n", + "Getting started is easy:\n", + "```shell\n", + "pip install comet_ml # 1. install\n", + "export COMET_API_KEY= # 2. paste API key\n", + "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n", + "```\n", + "\n", + "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://bit.ly/yolov5-colab-comet-docs). Get started by trying out the Comet Colab Notebook:\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", + "\n", + "\"yolo-ui\"" + ] }, - "574140e4c4bc48c9a171541a02cd0211": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_60b913d755b34d638478e30705a2dde1", - "placeholder": "​", - "style": "IPY_MODEL_0856bea36ec148b68522ff9c9eb258d8", - "value": "100%" - } + { + "cell_type": "markdown", + "metadata": { + "id": "Lay2WsTjNJzP" + }, + "source": [ + "## ClearML Logging and Automation 🌟 NEW\n", + "\n", + "[ClearML](https://cutt.ly/yolov5-notebook-clearml) is completely integrated into YOLOv5 to track your experimentation, manage dataset versions and even remotely execute training runs. 
To enable ClearML (check cells above):\n", + "\n", + "- `pip install clearml`\n", + "- run `clearml-init` to connect to a ClearML server (**deploy your own [open-source server](https://github.com/allegroai/clearml-server)**, or use our [free hosted server](https://cutt.ly/yolov5-notebook-clearml))\n", + "\n", + "You'll get all the great expected features from an experiment manager: live updates, model upload, experiment comparison, etc., but ClearML also tracks uncommitted changes and installed packages, for example. Thanks to that, ClearML Tasks (which is what we call experiments) are also reproducible on different machines! With only 1 extra line, we can schedule a YOLOv5 training task on a queue to be executed by any number of ClearML Agents (workers).\n", + "\n", + "You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) for details!\n", + "\n", + "\n", + "\"ClearML" + ] }, { "cell_type": "markdown", "metadata": { "id": "-WPvRbS5Swl6" }, "source": [ + "## Local Logging\n", + "\n", + "Training results are automatically logged with [TensorBoard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) loggers to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc.\n", + "\n", + "This directory contains train and val statistics, mosaics, labels, predictions and augmented mosaics, as well as metrics and charts including precision-recall (PR) curves and confusion matrices. 
\n", + "\n", + "\"Local\n" + ] }, - "60b913d755b34d638478e30705a2dde1": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } + { + "cell_type": "markdown", + "metadata": { + "id": "Zelyeqbyt3GD" + }, + "source": [ + "# Environments\n", + "\n", + "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n", + "\n", + "- **Notebooks** with free GPU: \"Run \"Open \"Open\n", + "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)\n", + "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)\n", + "- **Docker Image**. 
See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) \"Docker\n" + ] }, - "65881db1db8a4e9c930fab9172d45143": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } + { + "cell_type": "markdown", + "metadata": { + "id": "6Qu7Iesl0p54" + }, + "source": [ + "# Status\n", + "\n", + "![YOLOv5 CI](https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg)\n", + "\n", + "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.\n" + ] }, - "76879f6f2aa54637a7a07faeea2bd684": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } + { + "cell_type": "markdown", + "metadata": { + "id": "IEijrePND_2I" + }, + "source": [ + "# Appendix\n", + "\n", + "Additional 
content below." + ] }, - "9b8caa3522fc4cbab31e13b5dfc7808d": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "HBoxModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HBoxModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HBoxView", - "box_style": "", - "children": [ - "IPY_MODEL_574140e4c4bc48c9a171541a02cd0211", - "IPY_MODEL_35e03ce5090346c9ae602891470fc555", - "IPY_MODEL_c942c208e72d46568b476bb0f2d75496" - ], - "layout": "IPY_MODEL_65881db1db8a4e9c930fab9172d45143" - } + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "GMusP4OAxFu6" + }, + "outputs": [], + "source": [ + "# YOLOv5 PyTorch HUB Inference (DetectionModels only)\n", + "import torch\n", + "\n", + "model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # yolov5n - yolov5x6 or custom\n", + "im = 'https://ultralytics.com/images/zidane.jpg' # file, Path, PIL.Image, OpenCV, nparray, list\n", + "results = model(im) # inference\n", + "results.print() # or .show(), .save(), .crop(), .pandas(), etc." + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "name": "YOLOv5 Classification Tutorial", + "provenance": [] }, - "c942c208e72d46568b476bb0f2d75496": { - "model_module": "@jupyter-widgets/controls", - "model_module_version": "1.5.0", - "model_name": "HTMLModel", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_d6b7a2243e0c4beca714d99dceec23d6", - "placeholder": "​", - "style": "IPY_MODEL_5966ba6e6f114d8c9d8d1d6b1bd4f4c7", - "value": " 780M/780M [02:19<00:00, 6.24MB/s]" - } + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" }, - "d6b7a2243e0c4beca714d99dceec23d6": { - "model_module": "@jupyter-widgets/base", - "model_module_version": "1.2.0", - "model_name": "LayoutModel", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + 
"nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.12" } - } - } - }, - "nbformat": 4, - "nbformat_minor": 1 -} + }, + "nbformat": 4, + "nbformat_minor": 0 +} \ No newline at end of file From 9bf18554c3e4b250ba7063876f0191f573ffb7a4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 18 Nov 2022 23:46:15 +0100 Subject: [PATCH 1570/1976] Revert `--save-txt` to default False (#10213) * Revert `--save-txt` to default False Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- classify/predict.py | 2 +- classify/tutorial.ipynb | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/classify/predict.py b/classify/predict.py index 96508d633da8..a9104ed315ec 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -196,7 +196,7 @@ def parse_opt(): parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[224], help='inference size h,w') parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') parser.add_argument('--view-img', action='store_true', help='show results') - parser.add_argument('--save-txt', action='store_false', help='save results to *.txt') + parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') parser.add_argument('--nosave', action='store_true', help='do not save images/videos') parser.add_argument('--augment', action='store_true', help='augmented inference') parser.add_argument('--visualize', action='store_true', help='visualize features') diff --git a/classify/tutorial.ipynb b/classify/tutorial.ipynb index e035a7bda40d..9e65e53d8736 100644 --- a/classify/tutorial.ipynb +++ b/classify/tutorial.ipynb @@ -118,8 +118,7 @@ "image 1/2 /content/yolov5/data/images/bus.jpg: 224x224 minibus 0.39, police van 0.24, amphibious vehicle 0.05, recreational vehicle 0.04, trolleybus 0.03, 3.9ms\n", "image 2/2 /content/yolov5/data/images/zidane.jpg: 224x224 suit 0.38, bow tie 0.19, bridegroom 0.18, rugby ball 0.04, stage 0.02, 4.1ms\n", "Speed: 0.3ms pre-process, 4.0ms inference, 1.5ms NMS per image at shape (1, 3, 224, 224)\n", - "Results saved to \u001b[1mruns/predict-cls/exp\u001b[0m\n", - "2 labels saved to runs/predict-cls/exp/labels\n" + "Results saved to \u001b[1mruns/predict-cls/exp\u001b[0m\n" ] } ], @@ -1475,4 +1474,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} \ No newline at end of file +} From 9bc60349b62500096832d78989336fcda200d286 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 18 Nov 2022 23:48:47 +0100 Subject: [PATCH 1571/1976] Add `--source screen` Usage example (#10215) --- classify/predict.py | 1 + detect.py | 1 + segment/predict.py | 1 + 3 files changed, 3 insertions(+) diff --git a/classify/predict.py b/classify/predict.py index a9104ed315ec..9a6b00062932 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -6,6 +6,7 @@ $ python classify/predict.py --weights yolov5s-cls.pt --source 0 # webcam img.jpg # image vid.mp4 # video + screen # screenshot path/ # directory 'path/*.jpg' # glob 'https://youtu.be/Zgi9g1ksQHc' # YouTube diff --git a/detect.py b/detect.py index 8e42fbe159d0..58b02802e6d9 100644 --- a/detect.py +++ b/detect.py @@ -6,6 +6,7 @@ $ python detect.py --weights yolov5s.pt --source 0 # webcam img.jpg # image vid.mp4 # video + screen # screenshot path/ # directory 'path/*.jpg' # glob 'https://youtu.be/Zgi9g1ksQHc' # YouTube diff --git a/segment/predict.py b/segment/predict.py index da1097c047c1..42389938cee7 100644 --- a/segment/predict.py +++ 
b/segment/predict.py @@ -6,6 +6,7 @@ $ python segment/predict.py --weights yolov5s-seg.pt --source 0 # webcam img.jpg # image vid.mp4 # video + screen # screenshot path/ # directory 'path/*.jpg' # glob 'https://youtu.be/Zgi9g1ksQHc' # YouTube From 9286336cb49d577873b2113739788bbe3b90f83c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 19 Nov 2022 03:16:17 +0100 Subject: [PATCH 1572/1976] Add `git` info to training checkpoints (#9655) * Add git status on train checkpoints * Update * Update * Update * Update * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update general.py Signed-off-by: Glenn Jocher * Update general.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- requirements.txt | 1 + train.py | 3 ++- utils/general.py | 19 ++++++++++++++++++- 3 files changed, 21 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index 70dd7ce53ba3..85eb839df8a0 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,6 +2,7 @@ # Usage: pip install -r requirements.txt # Base ------------------------------------------------------------------------ +gitpython ipython # interactive notebook matplotlib>=3.2.2 numpy>=1.18.5 diff --git a/train.py b/train.py index bbbd6d07db00..6fa33f47d100 100644 --- a/train.py +++ b/train.py @@ -47,7 +47,7 @@ from utils.callbacks import Callbacks from utils.dataloaders import create_dataloader from utils.downloads import attempt_download, is_url -from utils.general import (LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_status, +from utils.general import (GIT, LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_status, check_img_size, check_requirements, check_suffix, check_yaml, colorstr, get_latest_run, increment_path, init_seeds, intersect_dicts, labels_to_class_weights, labels_to_image_weights, methods, one_cycle, print_args, print_mutation, strip_optimizer, @@ -376,6 +376,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio 'updates': ema.updates, 'optimizer': optimizer.state_dict(), 'opt': vars(opt), + 'git': GIT, 'date': datetime.now().isoformat()} # Save last, best and delete diff --git a/utils/general.py b/utils/general.py index 58181f00568d..57b6e4e78166 100644 --- a/utils/general.py +++ b/utils/general.py @@ -29,6 +29,7 @@ from zipfile import ZipFile, is_zipfile import cv2 +import git import IPython import numpy as np import pandas as pd @@ -344,6 +345,22 @@ def check_git_status(repo='ultralytics/yolov5', branch='master'): LOGGER.info(s) +@WorkingDirectory(ROOT) +def check_git(path='.'): + # YOLOv5 git check, return git {remote, branch, commit} + try: + repo = git.Repo(path) + remote = repo.remotes.origin.url.replace('.git', '') # i.e. 'https://github.com/ultralytics/yolov5' + commit = repo.head.commit.hexsha # i.e. '3134699c73af83aac2a481435550b968d5792c0d' + try: + branch = repo.active_branch.name # i.e. 'main' + except TypeError: # not on any branch + branch = None # i.e. 'detached HEAD' state + return {'remote': remote, 'branch': branch, 'commit': commit} + except git.exc.InvalidGitRepositoryError: # path is not a git dir + return {'remote': None, 'branch': None, 'commit': None} + + def check_python(minimum='3.7.0'): # Check current python version vs. 
required python version check_version(platform.python_version(), minimum, name='Python ', hard=True) @@ -1121,4 +1138,4 @@ def imshow(path, im): cv2.imread, cv2.imwrite, cv2.imshow = imread, imwrite, imshow # redefine # Variables ------------------------------------------------------------------------------------------------------------ -NCOLS = 0 if is_docker() else shutil.get_terminal_size().columns # terminal window size for tqdm +GIT = check_git() # repo, branch, commit From 0307954e4e17da66e6bf36950f02972d976ba621 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 19 Nov 2022 03:32:21 +0100 Subject: [PATCH 1573/1976] Add git info to cls, seg checkpoints (#10217) --- classify/train.py | 3 ++- segment/train.py | 9 ++------- train.py | 2 +- 3 files changed, 5 insertions(+), 9 deletions(-) diff --git a/classify/train.py b/classify/train.py index 4422ca26b0ae..5faef08e876c 100644 --- a/classify/train.py +++ b/classify/train.py @@ -40,7 +40,7 @@ from models.experimental import attempt_load from models.yolo import ClassificationModel, DetectionModel from utils.dataloaders import create_classification_dataloader -from utils.general import (DATASETS_DIR, LOGGER, TQDM_BAR_FORMAT, WorkingDirectory, check_git_status, +from utils.general import (DATASETS_DIR, GIT, LOGGER, TQDM_BAR_FORMAT, WorkingDirectory, check_git_status, check_requirements, colorstr, download, increment_path, init_seeds, print_args, yaml_save) from utils.loggers import GenericLogger from utils.plots import imshow_cls @@ -237,6 +237,7 @@ def train(opt, device): 'updates': ema.updates, 'optimizer': None, # optimizer.state_dict(), 'opt': vars(opt), + 'git': GIT, # {remote, branch, commit} if a git repo 'date': datetime.now().isoformat()} # Save last, best and delete diff --git a/segment/train.py b/segment/train.py index 2a0793d1aa3e..5d9ed78f527c 100644 --- a/segment/train.py +++ b/segment/train.py @@ -46,7 +46,7 @@ from utils.autobatch import check_train_batch_size from utils.callbacks import Callbacks from utils.downloads import attempt_download, is_url -from utils.general import (LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_status, +from utils.general import (GIT, LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_status, check_img_size, check_requirements, check_suffix, check_yaml, colorstr, get_latest_run, increment_path, init_seeds, intersect_dicts, labels_to_class_weights, labels_to_image_weights, one_cycle, print_args, print_mutation, strip_optimizer, yaml_save) @@ -390,6 +390,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio 'updates': ema.updates, 'optimizer': optimizer.state_dict(), 'opt': vars(opt), + 'git': GIT, # {remote, branch, commit} if a git repo 'date': datetime.now().isoformat()} # Save last, best and delete @@ -498,12 +499,6 @@ def parse_opt(known=False): parser.add_argument('--mask-ratio', type=int, default=4, help='Downsample the truth masks to saving memory') parser.add_argument('--no-overlap', action='store_true', help='Overlap masks train faster at slightly less mAP') - # Weights & Biases arguments - # parser.add_argument('--entity', default=None, help='W&B: Entity') - # parser.add_argument('--upload_dataset', nargs='?', const=True, default=False, help='W&B: Upload data, "val" option') - # parser.add_argument('--bbox_interval', type=int, default=-1, help='W&B: Set bounding-box image logging interval') - # parser.add_argument('--artifact_alias', type=str, default='latest', help='W&B: Version of dataset artifact to 
use') - return parser.parse_known_args()[0] if known else parser.parse_args() diff --git a/train.py b/train.py index 6fa33f47d100..1ea5c5bbeddd 100644 --- a/train.py +++ b/train.py @@ -376,7 +376,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio 'updates': ema.updates, 'optimizer': optimizer.state_dict(), 'opt': vars(opt), - 'git': GIT, + 'git': GIT, # {remote, branch, commit} if a git repo 'date': datetime.now().isoformat()} # Save last, best and delete From 6992dde4bd628f6bffe7d4c5025afadf79ed679b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 19 Nov 2022 13:44:46 +0100 Subject: [PATCH 1574/1976] Update Comet preview image (#10220) * Update Comet preview image Pass through tinyjpg: 2.2MB -> 497kB :) Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- classify/tutorial.ipynb | 2 +- segment/tutorial.ipynb | 4 ++-- tutorial.ipynb | 4 ++-- utils/loggers/comet/README.md | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/classify/tutorial.ipynb b/classify/tutorial.ipynb index 9e65e53d8736..956452a5aeda 100644 --- a/classify/tutorial.ipynb +++ b/classify/tutorial.ipynb @@ -1351,7 +1351,7 @@ "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://bit.ly/yolov5-colab-comet-docs). Get started by trying out the Comet Colab Notebook:\n", "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", "\n", - "\"yolo-ui\"" + "\"yolo-ui\"" ] }, { diff --git a/segment/tutorial.ipynb b/segment/tutorial.ipynb index 4192c69da628..70bbf857d02b 100644 --- a/segment/tutorial.ipynb +++ b/segment/tutorial.ipynb @@ -466,7 +466,7 @@ "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://bit.ly/yolov5-colab-comet-docs). Get started by trying out the Comet Colab Notebook:\n", "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", "\n", - "\"yolo-ui\"" + "\"yolo-ui\"" ] }, { @@ -590,4 +590,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} \ No newline at end of file +} diff --git a/tutorial.ipynb b/tutorial.ipynb index 9d5aa9c85c51..6cf99650ad45 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -872,7 +872,7 @@ "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://bit.ly/yolov5-colab-comet-docs). 
Get started by trying out the Comet Colab Notebook:\n", "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", "\n", - "\"yolo-ui\"" + "\"yolo-ui\"" ], "metadata": { "id": "nWOsI5wJR1o3" @@ -972,4 +972,4 @@ "outputs": [] } ] -} \ No newline at end of file +} diff --git a/utils/loggers/comet/README.md b/utils/loggers/comet/README.md index 3a51cb9b5a25..8f206cd9830e 100644 --- a/utils/loggers/comet/README.md +++ b/utils/loggers/comet/README.md @@ -51,7 +51,7 @@ python train.py --img 640 --batch 16 --epochs 5 --data coco128.yaml --weights yo That's it! Comet will automatically log your hyperparameters, command line arguments, training and validation metrics. You can visualize and analyze your runs in the Comet UI -yolo-ui +yolo-ui # Try out an Example! Check out an example of a [completed run here](https://www.comet.com/examples/comet-example-yolov5/a0e29e0e9b984e4a822db2a62d0cb357?experiment-tab=chart&showOutliers=true&smoothing=0&transformY=smoothing&xAxis=step&ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration) From 40bb8030f8468eb7145ff648588aa5f96e32447c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 19 Nov 2022 14:22:07 +0100 Subject: [PATCH 1575/1976] Scope gitpython import in `check_git_info()` (#10221) * Scope gitpython import in `check_git_info()` * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- classify/train.py | 5 +++-- segment/train.py | 9 +++++---- train.py | 9 +++++---- utils/general.py | 9 ++++----- 4 files changed, 17 insertions(+), 15 deletions(-) diff --git a/classify/train.py b/classify/train.py index 5faef08e876c..a50845a4f781 100644 --- a/classify/train.py +++ b/classify/train.py @@ -40,7 +40,7 @@ from models.experimental import attempt_load from models.yolo import ClassificationModel, DetectionModel from utils.dataloaders import create_classification_dataloader -from utils.general import (DATASETS_DIR, GIT, LOGGER, TQDM_BAR_FORMAT, WorkingDirectory, check_git_status, +from utils.general import (DATASETS_DIR, LOGGER, TQDM_BAR_FORMAT, WorkingDirectory, check_git_info, check_git_status, check_requirements, colorstr, download, increment_path, init_seeds, print_args, yaml_save) from utils.loggers import GenericLogger from utils.plots import imshow_cls @@ -50,6 +50,7 @@ LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html RANK = int(os.getenv('RANK', -1)) WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) +GIT_INFO = check_git_info() def train(opt, device): @@ -237,7 +238,7 @@ def train(opt, device): 'updates': ema.updates, 'optimizer': None, # optimizer.state_dict(), 'opt': vars(opt), - 'git': GIT, # {remote, branch, commit} if a git repo + 'git': GIT_INFO, # {remote, branch, commit} if a git repo 'date': datetime.now().isoformat()} # Save last, best and delete diff --git a/segment/train.py b/segment/train.py index 5d9ed78f527c..3f32d2100a75 100644 --- a/segment/train.py +++ b/segment/train.py @@ -46,9 +46,9 @@ from utils.autobatch import check_train_batch_size from utils.callbacks import Callbacks from utils.downloads import attempt_download, is_url -from utils.general import (GIT, LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_status, - check_img_size, check_requirements, check_suffix, check_yaml, colorstr,
get_latest_run, - increment_path, init_seeds, intersect_dicts, labels_to_class_weights, +from utils.general import (LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_info, + check_git_status, check_img_size, check_requirements, check_suffix, check_yaml, colorstr, + get_latest_run, increment_path, init_seeds, intersect_dicts, labels_to_class_weights, labels_to_image_weights, one_cycle, print_args, print_mutation, strip_optimizer, yaml_save) from utils.loggers import GenericLogger from utils.plots import plot_evolve, plot_labels @@ -62,6 +62,7 @@ LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html RANK = int(os.getenv('RANK', -1)) WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) +GIT_INFO = check_git_info() def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictionary @@ -390,7 +391,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio 'updates': ema.updates, 'optimizer': optimizer.state_dict(), 'opt': vars(opt), - 'git': GIT, # {remote, branch, commit} if a git repo + 'git': GIT_INFO, # {remote, branch, commit} if a git repo 'date': datetime.now().isoformat()} # Save last, best and delete diff --git a/train.py b/train.py index 1ea5c5bbeddd..8b5446e58f2d 100644 --- a/train.py +++ b/train.py @@ -47,9 +47,9 @@ from utils.callbacks import Callbacks from utils.dataloaders import create_dataloader from utils.downloads import attempt_download, is_url -from utils.general import (GIT, LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_status, - check_img_size, check_requirements, check_suffix, check_yaml, colorstr, get_latest_run, - increment_path, init_seeds, intersect_dicts, labels_to_class_weights, +from utils.general import (LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_info, + check_git_status, check_img_size, check_requirements, check_suffix, check_yaml, colorstr, + get_latest_run, increment_path, init_seeds, intersect_dicts, labels_to_class_weights, labels_to_image_weights, methods, one_cycle, print_args, print_mutation, strip_optimizer, yaml_save) from utils.loggers import Loggers @@ -63,6 +63,7 @@ LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html RANK = int(os.getenv('RANK', -1)) WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) +GIT_INFO = check_git_info() def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictionary @@ -376,7 +377,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio 'updates': ema.updates, 'optimizer': optimizer.state_dict(), 'opt': vars(opt), - 'git': GIT, # {remote, branch, commit} if a git repo + 'git': GIT_INFO, # {remote, branch, commit} if a git repo 'date': datetime.now().isoformat()} # Save last, best and delete diff --git a/utils/general.py b/utils/general.py index 57b6e4e78166..c5b738983719 100644 --- a/utils/general.py +++ b/utils/general.py @@ -13,7 +13,6 @@ import platform import random import re -import shutil import signal import sys import time @@ -29,7 +28,6 @@ from zipfile import ZipFile, is_zipfile import cv2 -import git import IPython import numpy as np import pandas as pd @@ -346,8 +344,10 @@ def check_git_status(repo='ultralytics/yolov5', branch='master'): @WorkingDirectory(ROOT) -def check_git(path='.'): - # YOLOv5 git check, return git {remote, branch, commit} +def check_git_info(path='.'): + # YOLOv5 git info check, return {remote, branch, commit} + check_requirements('gitpython') + import 
git try: repo = git.Repo(path) remote = repo.remotes.origin.url.replace('.git', '') # i.e. 'https://github.com/ultralytics/yolov5' @@ -1138,4 +1138,3 @@ def imshow(path, im): cv2.imread, cv2.imwrite, cv2.imshow = imread, imwrite, imshow # redefine # Variables ------------------------------------------------------------------------------------------------------------ -GIT = check_git() # repo, branch, commit From 72cad39854a7d9ebbd4d58994cefa966b0da8fc1 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 19 Nov 2022 16:44:56 +0100 Subject: [PATCH 1576/1976] Squeezenet reshape outputs fix (#10222) @AyushExel Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/torch_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index fe934abf118c..77549b005ceb 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -82,7 +82,7 @@ def reshape_classifier_output(model, n=1000): elif nn.Conv2d in types: i = types.index(nn.Conv2d) # nn.Conv2d index if m[i].out_channels != n: - m[i] = nn.Conv2d(m[i].in_channels, n, m[i].kernel_size, m[i].stride, bias=m[i].bias) + m[i] = nn.Conv2d(m[i].in_channels, n, m[i].kernel_size, m[i].stride, bias=m[i].bias is not None) @contextmanager From be348cc33925738825ab40dd6eacdfe4afd4e215 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 21 Nov 2022 16:54:36 +0100 Subject: [PATCH 1577/1976] Validate --task speed CPU fix (#10244) --- segment/val.py | 2 +- val.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/segment/val.py b/segment/val.py index 9bb8f9e4cf54..48bf28d4bf4f 100644 --- a/segment/val.py +++ b/segment/val.py @@ -444,7 +444,7 @@ def main(opt): else: weights = opt.weights if isinstance(opt.weights, list) else [opt.weights] - opt.half = True # FP16 for fastest results + opt.half = torch.cuda.is_available() and opt.device != 'cpu' # FP16 for fastest results if opt.task == 'speed': # speed benchmarks # python val.py --task speed --data coco.yaml --batch 1 --weights yolov5n.pt yolov5s.pt... opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False diff --git a/val.py b/val.py index ef282e37bdc1..7c610e83a856 100644 --- a/val.py +++ b/val.py @@ -380,7 +380,7 @@ def main(opt): else: weights = opt.weights if isinstance(opt.weights, list) else [opt.weights] - opt.half = True # FP16 for fastest results + opt.half = torch.cuda.is_available() and opt.device != 'cpu' # FP16 for fastest results if opt.task == 'speed': # speed benchmarks # python val.py --task speed --data coco.yaml --batch 1 --weights yolov5n.pt yolov5s.pt... 
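        # Half precision is only supported on CUDA devices, so the opt.half assignment
        # above evaluates False on CPU and speed benchmarks fall back to FP32 instead of erroring.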
opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False From 915bbf294bb74c859f0b41f1c23bc395014ea679 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 22 Nov 2022 16:23:47 +0100 Subject: [PATCH 1578/1976] YOLOv5 v7.0 release updates (#10245) * YOLOv5 v7.0 splash image update * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * readme segmentation section * readme segmentation section * readme segmentation section * readme segmentation section * readme segmentation section * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update download URLs to 7.0 assets Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/README_cn.md | 2 +- README.md | 114 +++++++++++++++++++++++++------ classify/tutorial.ipynb | 5 +- data/scripts/download_weights.sh | 5 +- segment/tutorial.ipynb | 2 +- tutorial.ipynb | 2 +- utils/downloads.py | 8 +-- 7 files changed, 107 insertions(+), 31 deletions(-) diff --git a/.github/README_cn.md b/.github/README_cn.md index 65ecd31a3e69..0a2f61ee35b2 100644 --- a/.github/README_cn.md +++ b/.github/README_cn.md @@ -1,7 +1,7 @@

- +

[English](../README.md) | 简体中文 diff --git a/README.md b/README.md index 0fa95f404117..298e14570860 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@

- +

English | [简体中文](.github/README_cn.md) @@ -50,6 +50,79 @@
+##
Segmentation ⭐ NEW
+ +
+ + +
+ +Our new YOLOv5 [release v7.0](https://github.com/ultralytics/yolov5/releases/v7.0) instance segmentation models are the fastest and most accurate in the world, beating all current [SOTA benchmarks](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco). We've made them super simple to train, validate and deploy. See full details in our [Release Notes](https://github.com/ultralytics/yolov5/releases/v7.0) and visit our [YOLOv5 Segmentation Colab Notebook](https://github.com/ultralytics/yolov5/blob/master/segment/tutorial.ipynb) for quickstart tutorials. + +
+ Segmentation Checkpoints + +
+ +We trained YOLOv5 segmentation models on COCO for 300 epochs at image size 640 using A100 GPUs. We exported all models to ONNX FP32 for CPU speed tests and to TensorRT FP16 for GPU speed tests. We ran all speed tests on Google [Colab Pro](https://colab.research.google.com/signup) notebooks for easy reproducibility.
+
+| Model | size (pixels) | mAPbox 50-95 | mAPmask 50-95 | Train time 300 epochs A100 (hours) | Speed ONNX CPU (ms) | Speed TRT A100 (ms) | params (M) | FLOPs @640 (B) |
+|-------|---------------|--------------|---------------|------------------------------------|---------------------|---------------------|------------|----------------|
+| [YOLOv5n-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-seg.pt) | 640 | 27.6 | 23.4 | 80:17 | **62.7** | **1.2** | **2.0** | **7.1** |
+| [YOLOv5s-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt) | 640 | 37.6 | 31.7 | 88:16 | 173.3 | 1.4 | 7.6 | 26.4 |
+| [YOLOv5m-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-seg.pt) | 640 | 45.0 | 37.1 | 108:36 | 427.0 | 2.2 | 22.0 | 70.8 |
+| [YOLOv5l-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-seg.pt) | 640 | 49.0 | 39.9 | 66:43 (2x) | 857.4 | 2.9 | 47.9 | 147.7 |
+| [YOLOv5x-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-seg.pt) | 640 | **50.7** | **41.4** | 62:56 (3x) | 1579.2 | 4.5 | 88.8 | 265.7 |
+
+- All checkpoints are trained to 300 epochs with the SGD optimizer (`lr0=0.01`, `weight_decay=5e-5`) at image size 640 and all default settings.
Runs logged to https://wandb.ai/glenn-jocher/YOLOv5_v70_official +- **Accuracy** values are for single-model single-scale on COCO dataset.
Reproduce by `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt` +- **Speed** averaged over 100 inference images using a [Colab Pro](https://colab.research.google.com/signup) A100 High-RAM instance. Values indicate inference speed only (NMS adds about 1ms per image).
Reproduce by `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt --batch 1` +- **Export** to ONNX at FP32 and TensorRT at FP16 done with `export.py`.
Reproduce by `python export.py --weights yolov5s-seg.pt --include engine --device 0 --half` + +
+ +
+ Segmentation Usage Examples  Open In Colab
+
+### Train
+YOLOv5 segmentation training supports auto-download of the COCO128-seg segmentation dataset with the `--data coco128-seg.yaml` argument and manual download of the COCO-segments dataset with `bash data/scripts/get_coco.sh --train --val --segments` and then `python train.py --data coco.yaml`.
+
+```bash
+# Single-GPU
+python segment/train.py --model yolov5s-seg.pt --data coco128-seg.yaml --epochs 5 --img 640
+
+# Multi-GPU DDP
+python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --model yolov5s-seg.pt --data coco128-seg.yaml --epochs 5 --img 640 --device 0,1,2,3
+```
+
+### Val
+Validate YOLOv5s-seg mask mAP on the COCO dataset:
+```bash
+bash data/scripts/get_coco.sh --val --segments  # download COCO val segments split (780MB, 5000 images)
+python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640  # validate
+```
+
+### Predict
+Use pretrained YOLOv5m-seg.pt to predict bus.jpg:
+```bash
+python segment/predict.py --weights yolov5m-seg.pt --source data/images/bus.jpg
+```
+```python
+model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5m-seg.pt')  # load from PyTorch Hub (WARNING: inference not yet supported)
+```
+
+![zidane](https://user-images.githubusercontent.com/26833433/203113421-decef4c4-183d-4a0a-a6c2-6435b33bc5d3.jpg) | ![bus](https://user-images.githubusercontent.com/26833433/203113416-11fe0025-69f7-4874-a0a6-65d0bfe2999a.jpg)
+--- |---
+
+### Export
+Export YOLOv5s-seg model to ONNX and TensorRT:
+```bash
+python export.py --weights yolov5s-seg.pt --include onnx engine --img 640 --device 0
+```
+
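If you then want to consume the exported ONNX file outside this repository, a minimal hedged sketch with ONNX Runtime is below; the `images` input name and the 1x3x640x640 float32 shape assume YOLOv5's default export settings, and `onnxruntime` is a separate install not covered by this README:

```python
# Sketch only: raw ONNX Runtime inference on the exported segmentation model.
# NMS and mask-prototype decoding are omitted; segment/predict.py implements the full pipeline.
import numpy as np
import onnxruntime as ort

session = ort.InferenceSession('yolov5s-seg.onnx', providers=['CPUExecutionProvider'])
print([i.name for i in session.get_inputs()])      # verify the input name before feeding data
im = np.zeros((1, 3, 640, 640), dtype=np.float32)  # stand-in for a letterboxed, 0-1 normalized RGB image
outputs = session.run(None, {'images': im})        # seg models return detections plus mask prototypes
print([o.shape for o in outputs])
```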
+ + ##
Documentation
See the [YOLOv5 Docs](https://docs.ultralytics.com) for full documentation on training, testing and deployment. See below for quickstart examples. @@ -200,12 +273,12 @@ YOLOv5 has been designed to be super easy to get started and simple to learn. We

- YOLOv5-P5 640 Figure (click to expand) + YOLOv5-P5 640 Figure

- Figure Notes (click to expand) + Figure Notes
- **COCO AP val** denotes mAP@0.5:0.95 metric measured on the 5000-image [COCO val2017](http://cocodataset.org) dataset over various inference sizes from 256 to 1536.
- **GPU Speed** measures average inference time per image on [COCO val2017](http://cocodataset.org) dataset using an [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) V100 instance at batch-size 32.
@@ -216,22 +289,22 @@ YOLOv5 has been designed to be super easy to get started and simple to learn. We
### Pretrained Checkpoints
-| Model | size (pixels) | mAPval 0.5:0.95 | mAPval 0.5 | Speed CPU b1 (ms) | Speed V100 b1 (ms) | Speed V100 b32 (ms) | params (M) | FLOPs @640 (B) |
-|-------|---------------|-----------------|------------|-------------------|-------------------|--------------------|------------|----------------|
-| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** |
-| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 |
-| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 |
-| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 |
-| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 |
-| | | | | | | | | |
-| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 |
-| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 |
-| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 |
-| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 |
-| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x6.pt) + [TTA][TTA] | 1280 1536 | 55.0 **55.8** | 72.7 **72.7** | 3136 - | 26.2 - | 19.4 - | 140.7 - | 209.8 - |
+| Model | size (pixels) | mAPval 50-95 | mAPval 50 | Speed CPU b1 (ms) | Speed V100 b1 (ms) | Speed V100 b32 (ms) | params (M) | FLOPs @640 (B) |
+|-------|---------------|--------------|-----------|-------------------|-------------------|--------------------|------------|----------------|
+| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** |
+| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 |
+| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 |
+| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 |
+| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 |
+| | | | | | | | | |
+| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 |
+| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 |
+| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 |
+| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 |
+| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x6.pt) + [TTA][TTA] | 1280 1536 | 55.0 **55.8** | 72.7 **72.7** | 3136 - | 26.2 - | 19.4 - | 140.7 - | 209.8 - |
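For quick experimentation, any checkpoint in the table above can also be pulled through PyTorch Hub; this mirrors the Hub inference cell that appears in the tutorial notebook earlier in this patch series:

```python
import torch

# load a pretrained checkpoint by name (yolov5n - yolov5x6, per the table above)
model = torch.hub.load('ultralytics/yolov5', 'yolov5s')
results = model('https://ultralytics.com/images/zidane.jpg')  # file, Path, PIL.Image, OpenCV, numpy array, or list
results.print()  # or .show(), .save(), .crop(), .pandas()
```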
- Table Notes (click to expand) + Table Notes - All checkpoints are trained to 300 epochs with default settings. Nano and Small models use [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml) hyps, all others use [hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml). - **mAPval** values are for single-model single-scale on [COCO val2017](http://cocodataset.org) dataset.
Reproduce by `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` @@ -240,12 +313,13 @@ YOLOv5 has been designed to be super easy to get started and simple to learn. We
+ ##
Classification ⭐ NEW
-YOLOv5 [release v6.2](https://github.com/ultralytics/yolov5/releases) brings support for classification model training, validation, prediction and export! We've made training classifier models super simple. Click below to get started. +YOLOv5 [release v6.2](https://github.com/ultralytics/yolov5/releases) brings support for classification model training, validation and deployment! See full details in our [Release Notes](https://github.com/ultralytics/yolov5/releases/v6.2) and visit our [YOLOv5 Classification Colab Notebook](https://github.com/ultralytics/yolov5/blob/master/classify/tutorial.ipynb) for quickstart tutorials.
- Classification Checkpoints (click to expand) + Classification Checkpoints
@@ -280,7 +354,7 @@ We trained YOLOv5-cls classification models on ImageNet for 90 epochs using a 4x
- Classification Usage Examples (click to expand) + Classification Usage Examples  Open In Colab ### Train YOLOv5 classification training supports auto-download of MNIST, Fashion-MNIST, CIFAR10, CIFAR100, Imagenette, Imagewoof, and ImageNet datasets with the `--data` argument. To start training on MNIST for example use `--data mnist`. diff --git a/classify/tutorial.ipynb b/classify/tutorial.ipynb index 956452a5aeda..a3da0dbd3231 100644 --- a/classify/tutorial.ipynb +++ b/classify/tutorial.ipynb @@ -9,7 +9,7 @@ "
\n", "\n", " \n", - " \n", + " \n", "\n", "\n", "
\n", @@ -1452,7 +1452,8 @@ "accelerator": "GPU", "colab": { "name": "YOLOv5 Classification Tutorial", - "provenance": [] + "provenance": [], + "toc_visible": true }, "kernelspec": { "display_name": "Python 3 (ipykernel)", diff --git a/data/scripts/download_weights.sh b/data/scripts/download_weights.sh index a4f3becfdbeb..31e0a15569f2 100755 --- a/data/scripts/download_weights.sh +++ b/data/scripts/download_weights.sh @@ -11,11 +11,12 @@ python - <\n", "\n", " \n", - " \n", + " \n", "\n", "\n", "
\n", diff --git a/tutorial.ipynb b/tutorial.ipynb index 6cf99650ad45..7d7f1649cc8d 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -369,7 +369,7 @@ "
\n", "\n", " \n", - " \n", + " \n", "\n", "\n", "
\n", diff --git a/utils/downloads.py b/utils/downloads.py index 21bb6608d5ba..72ea87340eb9 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -59,14 +59,14 @@ def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''): LOGGER.info('') -def attempt_download(file, repo='ultralytics/yolov5', release='v6.2'): - # Attempt file download from GitHub release assets if not found locally. release = 'latest', 'v6.2', etc. +def attempt_download(file, repo='ultralytics/yolov5', release='v7.0'): + # Attempt file download from GitHub release assets if not found locally. release = 'latest', 'v7.0', etc. from utils.general import LOGGER def github_assets(repository, version='latest'): - # Return GitHub repo tag (i.e. 'v6.2') and assets (i.e. ['yolov5s.pt', 'yolov5m.pt', ...]) + # Return GitHub repo tag (i.e. 'v7.0') and assets (i.e. ['yolov5s.pt', 'yolov5m.pt', ...]) if version != 'latest': - version = f'tags/{version}' # i.e. tags/v6.2 + version = f'tags/{version}' # i.e. tags/v7.0 response = requests.get(f'https://api.github.com/repos/{repository}/releases/{version}').json() # github api return response['tag_name'], [x['name'] for x in response['assets']] # tag, assets From b32f67f6beb4a921c98301fe7724003e23103728 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 22 Nov 2022 20:30:14 +0100 Subject: [PATCH 1579/1976] `--single-cls` segments fix (#10260) --single-cls segments fix May resolve #10230 Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/dataloaders.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index e107d1a2bccf..cc5f8843ef18 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -537,8 +537,6 @@ def __init__(self, self.segments[i] = segment[j] if single_cls: # single-class training, merge all classes into 0 self.labels[i][:, 0] = 0 - if segment: - self.segments[i][:, 0] = 0 # Rectangular Training if self.rect: From c9d47ae05632e2a42e560fbfeb22d3780224546c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 22 Nov 2022 20:37:45 +0100 Subject: [PATCH 1580/1976] Created using Colaboratory --- tutorial.ipynb | 142 ++++++++++++++++++++++++------------------------- 1 file changed, 71 insertions(+), 71 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 7d7f1649cc8d..657dc266da92 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -14,7 +14,7 @@ "accelerator": "GPU", "widgets": { "application/vnd.jupyter.widget-state+json": { - "300b4d5355ef4967bd5246afeef6eef5": { + "1f7df330663048998adcf8a45bc8f69b": { "model_module": "@jupyter-widgets/controls", "model_name": "HBoxModel", "model_module_version": "1.5.0", @@ -29,14 +29,14 @@ "_view_name": "HBoxView", "box_style": "", "children": [ - "IPY_MODEL_84e6829bb88845a8a4f42700b8496925", - "IPY_MODEL_c038e52d41bf4d5b9602930c3d074087", - "IPY_MODEL_2667604641764341b0bc8c6afea438fd" + "IPY_MODEL_e896e6096dd244c59d7955e2035cd729", + "IPY_MODEL_a6ff238c29984b24bf6d0bd175c19430", + "IPY_MODEL_3c085ba3f3fd4c3c8a6bb41b41ce1479" ], - "layout": "IPY_MODEL_98b3a4806ed14102b0d75e6c571d6134" + "layout": "IPY_MODEL_16b0c8aa6e0f427e8a54d3791abb7504" } }, - "84e6829bb88845a8a4f42700b8496925": { + "e896e6096dd244c59d7955e2035cd729": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", @@ -51,13 +51,13 @@ "_view_name": "HTMLView", "description": "", "description_tooltip": null, - "layout": "IPY_MODEL_c66a77395e42424d904699edcbb67291", + "layout": "IPY_MODEL_c7b2dd0f78384cad8e400b282996cdf5", "placeholder": 
"​", - "style": "IPY_MODEL_c4bbc15bf853439399dbcf1d40a5a407", + "style": "IPY_MODEL_6a27e43b0e434edd82ee63f0a91036ca", "value": "100%" } }, - "c038e52d41bf4d5b9602930c3d074087": { + "a6ff238c29984b24bf6d0bd175c19430": { "model_module": "@jupyter-widgets/controls", "model_name": "FloatProgressModel", "model_module_version": "1.5.0", @@ -73,15 +73,15 @@ "bar_style": "success", "description": "", "description_tooltip": null, - "layout": "IPY_MODEL_0aaabfac395b43afbdd6d752c502bbf6", + "layout": "IPY_MODEL_cce0e6c0c4ec442cb47e65c674e02e92", "max": 818322941, "min": 0, "orientation": "horizontal", - "style": "IPY_MODEL_3786d970492b4aa38f886f2572fd958c", + "style": "IPY_MODEL_c5b9f38e2f0d4f9aa97fe87265263743", "value": 818322941 } }, - "2667604641764341b0bc8c6afea438fd": { + "3c085ba3f3fd4c3c8a6bb41b41ce1479": { "model_module": "@jupyter-widgets/controls", "model_name": "HTMLModel", "model_module_version": "1.5.0", @@ -96,13 +96,13 @@ "_view_name": "HTMLView", "description": "", "description_tooltip": null, - "layout": "IPY_MODEL_b86d0f2d7be74cebbcaa884b53123eeb", + "layout": "IPY_MODEL_df554fb955c7454696beac5a82889386", "placeholder": "​", - "style": "IPY_MODEL_fa7b1497925a457f89286a71f073f416", - "value": " 780M/780M [00:57<00:00, 10.1MB/s]" + "style": "IPY_MODEL_74e9112a87a242f4831b7d68c7da6333", + "value": " 780M/780M [00:05<00:00, 126MB/s]" } }, - "98b3a4806ed14102b0d75e6c571d6134": { + "16b0c8aa6e0f427e8a54d3791abb7504": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -154,7 +154,7 @@ "width": null } }, - "c66a77395e42424d904699edcbb67291": { + "c7b2dd0f78384cad8e400b282996cdf5": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -206,7 +206,7 @@ "width": null } }, - "c4bbc15bf853439399dbcf1d40a5a407": { + "6a27e43b0e434edd82ee63f0a91036ca": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", @@ -221,7 +221,7 @@ "description_width": "" } }, - "0aaabfac395b43afbdd6d752c502bbf6": { + "cce0e6c0c4ec442cb47e65c674e02e92": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -273,7 +273,7 @@ "width": null } }, - "3786d970492b4aa38f886f2572fd958c": { + "c5b9f38e2f0d4f9aa97fe87265263743": { "model_module": "@jupyter-widgets/controls", "model_name": "ProgressStyleModel", "model_module_version": "1.5.0", @@ -289,7 +289,7 @@ "description_width": "" } }, - "b86d0f2d7be74cebbcaa884b53123eeb": { + "df554fb955c7454696beac5a82889386": { "model_module": "@jupyter-widgets/base", "model_name": "LayoutModel", "model_module_version": "1.2.0", @@ -341,7 +341,7 @@ "width": null } }, - "fa7b1497925a457f89286a71f073f416": { + "74e9112a87a242f4831b7d68c7da6333": { "model_module": "@jupyter-widgets/controls", "model_name": "DescriptionStyleModel", "model_module_version": "1.5.0", @@ -401,7 +401,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "32e3bc15-6d02-4352-f0a3-912059d134a5" + "outputId": "f9f016ad-3dcf-4bd2-e1c3-d5b79efc6f32" }, "source": [ "!git clone https://github.com/ultralytics/yolov5 # clone\n", @@ -418,7 +418,7 @@ "output_type": "stream", "name": "stderr", "text": [ - "YOLOv5 🚀 v6.2-256-g0051615 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n" + "YOLOv5 🚀 v7.0-1-gb32f67f Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n" ] }, { @@ -446,9 +446,9 @@ " vid.mp4 # video\n", " screen # screenshot\n", " path/ # directory\n", - 
" 'path/*.jpg' # glob\n", - " 'https://youtu.be/Zgi9g1ksQHc' # YouTube\n", - " 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream\n", + " 'path/*.jpg' # glob\n", + " 'https://youtu.be/Zgi9g1ksQHc' # YouTube\n", + " 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream\n", "```" ] }, @@ -459,7 +459,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "8e81d6e9-0360-4212-cd61-9a5a58d3f703" + "outputId": "b4db5c49-f501-4505-cf0d-a1d35236c485" }, "source": [ "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images\n", @@ -472,16 +472,16 @@ "name": "stdout", "text": [ "\u001b[34m\u001b[1mdetect: \u001b[0mweights=['yolov5s.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/detect, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False, vid_stride=1\n", - "YOLOv5 🚀 v6.2-256-g0051615 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "YOLOv5 🚀 v7.0-1-gb32f67f Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", - "Downloading https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt to yolov5s.pt...\n", - "100% 14.1M/14.1M [00:00<00:00, 19.5MB/s]\n", + "Downloading https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s.pt to yolov5s.pt...\n", + "100% 14.1M/14.1M [00:00<00:00, 116MB/s] \n", "\n", "Fusing layers... \n", "YOLOv5s summary: 213 layers, 7225885 parameters, 0 gradients\n", - "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 17.5ms\n", - "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, 18.0ms\n", - "Speed: 0.5ms pre-process, 17.8ms inference, 17.6ms NMS per image at shape (1, 3, 640, 640)\n", + "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 17.0ms\n", + "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, 14.3ms\n", + "Speed: 0.5ms pre-process, 15.7ms inference, 18.6ms NMS per image at shape (1, 3, 640, 640)\n", "Results saved to \u001b[1mruns/detect/exp\u001b[0m\n" ] } @@ -515,20 +515,20 @@ "base_uri": "https://localhost:8080/", "height": 49, "referenced_widgets": [ - "300b4d5355ef4967bd5246afeef6eef5", - "84e6829bb88845a8a4f42700b8496925", - "c038e52d41bf4d5b9602930c3d074087", - "2667604641764341b0bc8c6afea438fd", - "98b3a4806ed14102b0d75e6c571d6134", - "c66a77395e42424d904699edcbb67291", - "c4bbc15bf853439399dbcf1d40a5a407", - "0aaabfac395b43afbdd6d752c502bbf6", - "3786d970492b4aa38f886f2572fd958c", - "b86d0f2d7be74cebbcaa884b53123eeb", - "fa7b1497925a457f89286a71f073f416" + "1f7df330663048998adcf8a45bc8f69b", + "e896e6096dd244c59d7955e2035cd729", + "a6ff238c29984b24bf6d0bd175c19430", + "3c085ba3f3fd4c3c8a6bb41b41ce1479", + "16b0c8aa6e0f427e8a54d3791abb7504", + "c7b2dd0f78384cad8e400b282996cdf5", + "6a27e43b0e434edd82ee63f0a91036ca", + "cce0e6c0c4ec442cb47e65c674e02e92", + "c5b9f38e2f0d4f9aa97fe87265263743", + "df554fb955c7454696beac5a82889386", + "74e9112a87a242f4831b7d68c7da6333" ] }, - "outputId": "61ffec5e-90ea-44f6-c0ea-b006e6e7072f" + "outputId": "c7d0a0d2-abfb-44c3-d60d-f99d0e7aabad" }, "source": [ "# Download COCO val\n", @@ -546,7 +546,7 @@ "application/vnd.jupyter.widget-view+json": { "version_major": 2, "version_minor": 0, - "model_id": "300b4d5355ef4967bd5246afeef6eef5" 
+ "model_id": "1f7df330663048998adcf8a45bc8f69b" } }, "metadata": {} @@ -560,7 +560,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "aa5d5cea-14c1-4a19-bfdf-95b7164962cf" + "outputId": "5fc61358-7bc5-4310-a310-9059f66c6322" }, "source": [ "# Validate YOLOv5s on COCO val\n", @@ -573,30 +573,30 @@ "name": "stdout", "text": [ "\u001b[34m\u001b[1mval: \u001b[0mdata=/content/yolov5/data/coco.yaml, weights=['yolov5s.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=val, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=True, project=runs/val, name=exp, exist_ok=False, half=True, dnn=False\n", - "YOLOv5 🚀 v6.2-256-g0051615 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "YOLOv5 🚀 v7.0-1-gb32f67f Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", "Fusing layers... \n", "YOLOv5s summary: 213 layers, 7225885 parameters, 0 gradients\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco/val2017... 4952 images, 48 backgrounds, 0 corrupt: 100% 5000/5000 [00:02<00:00, 2066.57it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco/val2017... 4952 images, 48 backgrounds, 0 corrupt: 100% 5000/5000 [00:02<00:00, 1977.30it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: /content/datasets/coco/val2017.cache\n", - " Class Images Instances P R mAP50 mAP50-95: 100% 157/157 [01:09<00:00, 2.26it/s]\n", + " Class Images Instances P R mAP50 mAP50-95: 100% 157/157 [01:12<00:00, 2.17it/s]\n", " all 5000 36335 0.67 0.521 0.566 0.371\n", - "Speed: 0.1ms pre-process, 2.7ms inference, 1.9ms NMS per image at shape (32, 3, 640, 640)\n", + "Speed: 0.1ms pre-process, 2.9ms inference, 2.0ms NMS per image at shape (32, 3, 640, 640)\n", "\n", "Evaluating pycocotools mAP... 
saving runs/val/exp/yolov5s_predictions.json...\n", "loading annotations into memory...\n", - "Done (t=0.82s)\n", + "Done (t=0.43s)\n", "creating index...\n", "index created!\n", "Loading and preparing results...\n", - "DONE (t=5.49s)\n", + "DONE (t=5.85s)\n", "creating index...\n", "index created!\n", "Running per image evaluation...\n", "Evaluate annotation type *bbox*\n", - "DONE (t=74.26s).\n", + "DONE (t=82.22s).\n", "Accumulating evaluation results...\n", - "DONE (t=13.46s).\n", + "DONE (t=14.92s).\n", " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.374\n", " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.572\n", " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.402\n", @@ -676,7 +676,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "f0fcdc77-5326-41e1-bacc-be5432eefa2a" + "outputId": "721b9028-767f-4a05-c964-692c245f7398" }, "source": [ "# Train YOLOv5s on COCO128 for 3 epochs\n", @@ -690,7 +690,7 @@ "text": [ "\u001b[34m\u001b[1mtrain: \u001b[0mweights=yolov5s.pt, cfg=, data=coco128.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=-1, artifact_alias=latest\n", "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 🚀 v6.2-256-g0051615 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "YOLOv5 🚀 v7.0-1-gb32f67f Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n", "\u001b[34m\u001b[1mClearML: \u001b[0mrun 'pip install clearml' to automatically track, visualize and remotely train YOLOv5 🚀 in ClearML\n", @@ -699,8 +699,8 @@ "\n", "Dataset not found ⚠️, missing paths ['/content/datasets/coco128/images/train2017']\n", "Downloading https://ultralytics.com/assets/coco128.zip to coco128.zip...\n", - "100% 6.66M/6.66M [00:00<00:00, 39.8MB/s]\n", - "Dataset download success ✅ (0.8s), saved to \u001b[1m/content/datasets\u001b[0m\n", + "100% 6.66M/6.66M [00:00<00:00, 261MB/s]\n", + "Dataset download success ✅ (0.3s), saved to \u001b[1m/content/datasets\u001b[0m\n", "\n", " from n params module arguments \n", " 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2] \n", @@ -734,11 +734,11 @@ "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n", "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n", "\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /content/datasets/coco128/labels/train2017... 
126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<00:00, 2084.63it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /content/datasets/coco128/labels/train2017... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<00:00, 1911.57it/s]\n", "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /content/datasets/coco128/labels/train2017.cache\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 255.09it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 229.69it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco128/labels/train2017.cache... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00 Date: Tue, 22 Nov 2022 20:47:54 +0100 Subject: [PATCH 1581/1976] Created using Colaboratory --- segment/tutorial.ipynb | 62 +++++++++++++++++++++--------------------- 1 file changed, 31 insertions(+), 31 deletions(-) diff --git a/segment/tutorial.ipynb b/segment/tutorial.ipynb index ad44f31d3833..09ca963d4b98 100644 --- a/segment/tutorial.ipynb +++ b/segment/tutorial.ipynb @@ -42,14 +42,14 @@ "base_uri": "https://localhost:8080/" }, "id": "wbvMlHd_QwMG", - "outputId": "664f49fa-554a-4dca-8d0e-5c9dd60f6d28" + "outputId": "171b23f0-71b9-4cbf-b666-6fa2ecef70c8" }, "outputs": [ { "output_type": "stream", "name": "stderr", "text": [ - "YOLOv5 🚀 v6.2-257-g2ecaa96 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n" + "YOLOv5 🚀 v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n" ] }, { @@ -100,7 +100,7 @@ "base_uri": "https://localhost:8080/" }, "id": "zR9ZbuQCH7FX", - "outputId": "6392c9ff-0863-4665-faf9-b3af9881c305" + "outputId": "3f67f1c7-f15e-4fa5-d251-967c3b77eaad" }, "outputs": [ { @@ -108,16 +108,16 @@ "name": "stdout", "text": [ "\u001b[34m\u001b[1msegment/predict: \u001b[0mweights=['yolov5s-seg.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/predict-seg, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False, vid_stride=1, retina_masks=False\n", - "YOLOv5 🚀 v6.2-257-g2ecaa96 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "YOLOv5 🚀 v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", - "Downloading https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-seg.pt to yolov5s-seg.pt...\n", - "100% 14.9M/14.9M [00:01<00:00, 9.09MB/s]\n", + "Downloading https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt to yolov5s-seg.pt...\n", + "100% 14.9M/14.9M [00:01<00:00, 12.0MB/s]\n", "\n", "Fusing layers... 
\n", "YOLOv5s-seg summary: 224 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\n", - "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 18.0ms\n", - "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 1 tie, 13.5ms\n", - "Speed: 0.5ms pre-process, 15.7ms inference, 18.5ms NMS per image at shape (1, 3, 640, 640)\n", + "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 18.2ms\n", + "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 1 tie, 13.4ms\n", + "Speed: 0.5ms pre-process, 15.8ms inference, 18.5ms NMS per image at shape (1, 3, 640, 640)\n", "Results saved to \u001b[1mruns/predict-seg/exp\u001b[0m\n" ] } @@ -155,7 +155,7 @@ "base_uri": "https://localhost:8080/" }, "id": "WQPtK1QYVaD_", - "outputId": "4707734e-00c7-43da-d642-32c3c3fe3090" + "outputId": "9d751d8c-bee8-4339-cf30-9854ca530449" }, "outputs": [ { @@ -182,7 +182,7 @@ "base_uri": "https://localhost:8080/" }, "id": "X58w8JLpMnjH", - "outputId": "f96b700d-c779-4a34-930b-e85be4e58974" + "outputId": "a140d67a-02da-479e-9ddb-7d54bf9e407a" }, "outputs": [ { @@ -190,15 +190,15 @@ "name": "stdout", "text": [ "\u001b[34m\u001b[1msegment/val: \u001b[0mdata=/content/yolov5/data/coco.yaml, weights=['yolov5s-seg.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=val, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val-seg, name=exp, exist_ok=False, half=True, dnn=False\n", - "YOLOv5 🚀 v6.2-257-g2ecaa96 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "YOLOv5 🚀 v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", "Fusing layers... \n", "YOLOv5s-seg summary: 224 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco/val2017... 4952 images, 48 backgrounds, 0 corrupt: 100% 5000/5000 [00:03<00:00, 1409.04it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco/val2017... 
4952 images, 48 backgrounds, 0 corrupt: 100% 5000/5000 [00:03<00:00, 1361.31it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: /content/datasets/coco/val2017.cache\n", - " Class Images Instances Box(P R mAP50 mAP50-95) Mask(P R mAP50 mAP50-95): 100% 157/157 [01:53<00:00, 1.38it/s]\n", + " Class Images Instances Box(P R mAP50 mAP50-95) Mask(P R mAP50 mAP50-95): 100% 157/157 [01:54<00:00, 1.37it/s]\n", " all 5000 36335 0.673 0.517 0.566 0.373 0.672 0.49 0.532 0.319\n", - "Speed: 0.8ms pre-process, 4.0ms inference, 2.8ms NMS per image at shape (32, 3, 640, 640)\n", + "Speed: 0.6ms pre-process, 4.4ms inference, 2.9ms NMS per image at shape (32, 3, 640, 640)\n", "Results saved to \u001b[1mruns/val-seg/exp\u001b[0m\n" ] } @@ -270,7 +270,7 @@ "base_uri": "https://localhost:8080/" }, "id": "1NcFxRcFdJ_O", - "outputId": "2cdb19cc-69af-4c90-f8de-af02dfedba91" + "outputId": "3a3e0cf7-e79c-47a5-c8e7-2d26eeeab988" }, "outputs": [ { @@ -279,15 +279,15 @@ "text": [ "\u001b[34m\u001b[1msegment/train: \u001b[0mweights=yolov5s-seg.pt, cfg=, data=coco128-seg.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train-seg, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, mask_ratio=4, no_overlap=False\n", "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 🚀 v6.2-257-g2ecaa96 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "YOLOv5 🚀 v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n", "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train-seg', view at http://localhost:6006/\n", "\n", "Dataset not found ⚠️, missing paths ['/content/datasets/coco128-seg/images/train2017']\n", "Downloading https://ultralytics.com/assets/coco128-seg.zip to coco128-seg.zip...\n", - "100% 6.79M/6.79M [00:01<00:00, 5.87MB/s]\n", - "Dataset download success ✅ (2.1s), saved to \u001b[1m/content/datasets\u001b[0m\n", + "100% 6.79M/6.79M [00:01<00:00, 6.73MB/s]\n", + "Dataset download success ✅ (1.9s), saved to \u001b[1m/content/datasets\u001b[0m\n", "\n", " from n params module arguments \n", " 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2] \n", @@ -321,11 +321,11 @@ "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n", "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 60 weight(decay=0.0), 63 weight(decay=0.0005), 63 bias\n", "\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /content/datasets/coco128-seg/labels/train2017... 
126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<00:00, 1439.54it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /content/datasets/coco128-seg/labels/train2017... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<00:00, 1389.59it/s]\n", "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /content/datasets/coco128-seg/labels/train2017.cache\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 253.53it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 238.86it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco128-seg/labels/train2017.cache... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00 Date: Tue, 22 Nov 2022 21:27:33 +0100 Subject: [PATCH 1582/1976] Created using Colaboratory --- classify/tutorial.ipynb | 63 +++++++++++++++++++++-------------------- 1 file changed, 32 insertions(+), 31 deletions(-) diff --git a/classify/tutorial.ipynb b/classify/tutorial.ipynb index a3da0dbd3231..c6f5d0d88a2d 100644 --- a/classify/tutorial.ipynb +++ b/classify/tutorial.ipynb @@ -42,14 +42,14 @@ "base_uri": "https://localhost:8080/" }, "id": "wbvMlHd_QwMG", - "outputId": "43b2e1b5-78d9-4e1d-8530-ee9779bba160" + "outputId": "0806e375-610d-4ec0-c867-763dbb518279" }, "outputs": [ { "output_type": "stream", "name": "stderr", "text": [ - "YOLOv5 🚀 v6.2-258-g7fc7ed7 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n" + "YOLOv5 🚀 v7.0-3-g61ebf5e Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n" ] }, { @@ -100,24 +100,24 @@ "base_uri": "https://localhost:8080/" }, "id": "zR9ZbuQCH7FX", - "outputId": "1b610787-7cf7-4c33-aac2-aa50fbb84a94" + "outputId": "50504ef7-aa3e-4281-a4e3-d0c7df3c0ffe" }, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ - "\u001b[34m\u001b[1mclassify/predict: \u001b[0mweights=['yolov5s-cls.pt'], source=data/images, data=data/coco128.yaml, imgsz=[224, 224], device=, view_img=False, save_txt=True, nosave=False, augment=False, visualize=False, update=False, project=runs/predict-cls, name=exp, exist_ok=False, half=False, dnn=False, vid_stride=1\n", - "YOLOv5 🚀 v6.2-258-g7fc7ed7 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "\u001b[34m\u001b[1mclassify/predict: \u001b[0mweights=['yolov5s-cls.pt'], source=data/images, data=data/coco128.yaml, imgsz=[224, 224], device=, view_img=False, save_txt=False, nosave=False, augment=False, visualize=False, update=False, project=runs/predict-cls, name=exp, exist_ok=False, half=False, dnn=False, vid_stride=1\n", + "YOLOv5 🚀 v7.0-3-g61ebf5e Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", - "Downloading https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-cls.pt to yolov5s-cls.pt...\n", - "100% 10.5M/10.5M [00:03<00:00, 2.94MB/s]\n", + "Downloading https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-cls.pt to yolov5s-cls.pt...\n", + "100% 10.5M/10.5M [00:00<00:00, 12.3MB/s]\n", "\n", "Fusing layers... 
\n", "Model summary: 117 layers, 5447688 parameters, 0 gradients, 11.4 GFLOPs\n", "image 1/2 /content/yolov5/data/images/bus.jpg: 224x224 minibus 0.39, police van 0.24, amphibious vehicle 0.05, recreational vehicle 0.04, trolleybus 0.03, 3.9ms\n", - "image 2/2 /content/yolov5/data/images/zidane.jpg: 224x224 suit 0.38, bow tie 0.19, bridegroom 0.18, rugby ball 0.04, stage 0.02, 4.1ms\n", - "Speed: 0.3ms pre-process, 4.0ms inference, 1.5ms NMS per image at shape (1, 3, 224, 224)\n", + "image 2/2 /content/yolov5/data/images/zidane.jpg: 224x224 suit 0.38, bow tie 0.19, bridegroom 0.18, rugby ball 0.04, stage 0.02, 4.6ms\n", + "Speed: 0.3ms pre-process, 4.3ms inference, 1.5ms NMS per image at shape (1, 3, 224, 224)\n", "Results saved to \u001b[1mruns/predict-cls/exp\u001b[0m\n" ] } @@ -155,23 +155,23 @@ "base_uri": "https://localhost:8080/" }, "id": "WQPtK1QYVaD_", - "outputId": "92de5f34-cf41-49e7-b679-41db94e995ac" + "outputId": "20fc0630-141e-4a90-ea06-342cbd7ce496" }, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ - "--2022-11-18 21:48:38-- https://image-net.org/data/ILSVRC/2012/ILSVRC2012_img_val.tar\n", + "--2022-11-22 19:53:40-- https://image-net.org/data/ILSVRC/2012/ILSVRC2012_img_val.tar\n", "Resolving image-net.org (image-net.org)... 171.64.68.16\n", "Connecting to image-net.org (image-net.org)|171.64.68.16|:443... connected.\n", "HTTP request sent, awaiting response... 200 OK\n", "Length: 6744924160 (6.3G) [application/x-tar]\n", "Saving to: ‘ILSVRC2012_img_val.tar’\n", "\n", - "ILSVRC2012_img_val. 100%[===================>] 6.28G 7.15MB/s in 11m 13s \n", + "ILSVRC2012_img_val. 100%[===================>] 6.28G 16.1MB/s in 10m 52s \n", "\n", - "2022-11-18 21:59:52 (9.55 MB/s) - ‘ILSVRC2012_img_val.tar’ saved [6744924160/6744924160]\n", + "2022-11-22 20:04:32 (9.87 MB/s) - ‘ILSVRC2012_img_val.tar’ saved [6744924160/6744924160]\n", "\n" ] } @@ -189,7 +189,7 @@ "base_uri": "https://localhost:8080/" }, "id": "X58w8JLpMnjH", - "outputId": "9961ad87-d639-4489-b578-0a0578fefaab" + "outputId": "41843132-98e2-4c25-d474-4cd7b246fb8e" }, "outputs": [ { @@ -197,11 +197,11 @@ "name": "stdout", "text": [ "\u001b[34m\u001b[1mclassify/val: \u001b[0mdata=../datasets/imagenet, weights=['yolov5s-cls.pt'], batch_size=128, imgsz=224, device=, workers=8, verbose=True, project=runs/val-cls, name=exp, exist_ok=False, half=True, dnn=False\n", - "YOLOv5 🚀 v6.2-258-g7fc7ed7 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "YOLOv5 🚀 v7.0-3-g61ebf5e Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", "Fusing layers... 
\n", "Model summary: 117 layers, 5447688 parameters, 0 gradients, 11.4 GFLOPs\n", - "validating: 100% 391/391 [04:48<00:00, 1.35it/s]\n", + "validating: 100% 391/391 [04:57<00:00, 1.31it/s]\n", " Class Images top1_acc top5_acc\n", " all 50000 0.715 0.902\n", " tench 50 0.94 0.98\n", @@ -1269,30 +1269,30 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 10, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "1NcFxRcFdJ_O", - "outputId": "638c55b1-dc45-4eee-cabc-4921dc61faf5" + "outputId": "77c8d487-16db-4073-b3ea-06cabf2e7766" }, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ - "\u001b[34m\u001b[1mclassify/train: \u001b[0mmodel=yolov5s-cls.pt, data=imagenette160, epochs=3, batch_size=16, imgsz=224, nosave=False, cache=ram, device=, workers=8, project=runs/train-cls, name=exp, exist_ok=False, pretrained=True, optimizer=Adam, lr0=0.001, decay=5e-05, label_smoothing=0.1, cutoff=None, dropout=None, verbose=False, seed=0, local_rank=-1\n", + "\u001b[34m\u001b[1mclassify/train: \u001b[0mmodel=yolov5s-cls.pt, data=imagenette160, epochs=5, batch_size=64, imgsz=224, nosave=False, cache=ram, device=, workers=8, project=runs/train-cls, name=exp, exist_ok=False, pretrained=True, optimizer=Adam, lr0=0.001, decay=5e-05, label_smoothing=0.1, cutoff=None, dropout=None, verbose=False, seed=0, local_rank=-1\n", "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 🚀 v6.2-258-g7fc7ed7 Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "YOLOv5 🚀 v7.0-3-g61ebf5e Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", "\n", "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train-cls', view at http://localhost:6006/\n", "\n", "Dataset not found ⚠️, missing path /content/datasets/imagenette160, attempting download...\n", "Downloading https://github.com/ultralytics/yolov5/releases/download/v1.0/imagenette160.zip to /content/datasets/imagenette160.zip...\n", - "100% 103M/103M [00:09<00:00, 11.1MB/s]\n", + "100% 103M/103M [00:00<00:00, 347MB/s] \n", "Unzipping /content/datasets/imagenette160.zip...\n", - "Dataset download success ✅ (13.2s), saved to \u001b[1m/content/datasets/imagenette160\u001b[0m\n", + "Dataset download success ✅ (3.3s), saved to \u001b[1m/content/datasets/imagenette160\u001b[0m\n", "\n", "\u001b[34m\u001b[1malbumentations: \u001b[0mRandomResizedCrop(p=1.0, height=224, width=224, scale=(0.08, 1.0), ratio=(0.75, 1.3333333333333333), interpolation=1), HorizontalFlip(p=0.5), ColorJitter(p=0.5, brightness=[0.6, 1.4], contrast=[0.6, 1.4], saturation=[0.6, 1.4], hue=[0, 0]), Normalize(p=1.0, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_pixel_value=255.0), ToTensorV2(always_apply=True, p=1.0, transpose_mask=False)\n", "Model summary: 149 layers, 4185290 parameters, 4185290 gradients, 10.5 GFLOPs\n", @@ -1300,14 +1300,16 @@ "Image sizes 224 train, 224 test\n", "Using 1 dataloader workers\n", "Logging results to \u001b[1mruns/train-cls/exp\u001b[0m\n", - "Starting yolov5s-cls.pt training on imagenette160 dataset with 10 classes for 3 epochs...\n", + "Starting yolov5s-cls.pt training on imagenette160 dataset with 10 classes for 5 epochs...\n", "\n", " Epoch GPU_mem train_loss val_loss top1_acc top5_acc\n", - " 1/3 0.348G 1.31 1.09 0.794 0.979: 100% 592/592 [01:02<00:00, 9.47it/s]\n", - " 2/3 0.415G 1.09 0.852 0.883 0.99: 100% 592/592 [00:59<00:00, 10.00it/s]\n", - " 3/3 0.415G 0.954 0.776 0.907 0.994: 100% 592/592 
[00:59<00:00, 9.89it/s]\n", + " 1/5 1.47G 1.05 0.974 0.828 0.975: 100% 148/148 [00:38<00:00, 3.82it/s]\n", + " 2/5 1.73G 0.895 0.766 0.911 0.994: 100% 148/148 [00:36<00:00, 4.03it/s]\n", + " 3/5 1.73G 0.82 0.704 0.934 0.996: 100% 148/148 [00:35<00:00, 4.20it/s]\n", + " 4/5 1.73G 0.766 0.664 0.951 0.998: 100% 148/148 [00:36<00:00, 4.05it/s]\n", + " 5/5 1.73G 0.724 0.634 0.959 0.997: 100% 148/148 [00:37<00:00, 3.94it/s]\n", "\n", - "Training complete (0.051 hours)\n", + "Training complete (0.052 hours)\n", "Results saved to \u001b[1mruns/train-cls/exp\u001b[0m\n", "Predict: python classify/predict.py --weights runs/train-cls/exp/weights/best.pt --source im.jpg\n", "Validate: python classify/val.py --weights runs/train-cls/exp/weights/best.pt --data /content/datasets/imagenette160\n", @@ -1320,7 +1322,7 @@ ], "source": [ "# Train YOLOv5s Classification on Imagenette160 for 3 epochs\n", - "!python classify/train.py --img 224 --batch 16 --epochs 3 --data imagenette160 --model yolov5s-cls.pt --cache" + "!python classify/train.py --model yolov5s-cls.pt --data imagenette160 --epochs 5 --img 224 --cache" ] }, { @@ -1452,8 +1454,7 @@ "accelerator": "GPU", "colab": { "name": "YOLOv5 Classification Tutorial", - "provenance": [], - "toc_visible": true + "provenance": [] }, "kernelspec": { "display_name": "Python 3 (ipykernel)", @@ -1475,4 +1476,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} +} \ No newline at end of file From bfa1f23045c7c4136a9b8ced9d6be8249ed72692 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 24 Nov 2022 16:34:14 +0100 Subject: [PATCH 1583/1976] FROM nvcr.io/nvidia/pytorch:22.11-py3 (#10279) * Update Docker usage examples * Update Dockerfile Signed-off-by: Glenn Jocher * Update DEBIAN_FRONTEND Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile | 2 +- utils/docker/Dockerfile-arm64 | 8 +++++--- utils/docker/Dockerfile-cpu | 4 +++- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index a5035c6abc33..1ecf4c64f75f 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -3,7 +3,7 @@ # Image is CUDA-optimized for YOLOv5 single/multi-GPU training and inference # Start FROM NVIDIA PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch -FROM nvcr.io/nvidia/pytorch:22.10-py3 +FROM nvcr.io/nvidia/pytorch:22.11-py3 RUN rm -rf /opt/pytorch # remove 1.2GB dir # Downloads to user config dir diff --git a/utils/docker/Dockerfile-arm64 b/utils/docker/Dockerfile-arm64 index 8ec71622d9b6..eed1410793a1 100644 --- a/utils/docker/Dockerfile-arm64 +++ b/utils/docker/Dockerfile-arm64 @@ -9,8 +9,9 @@ FROM arm64v8/ubuntu:20.04 ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ # Install linux packages +ENV DEBIAN_FRONTEND noninteractive RUN apt update -RUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt install -y tzdata +RUN TZ=Etc/UTC apt install -y tzdata RUN apt install --no-install-recommends -y python3-pip git zip curl htop gcc libgl1-mesa-glx libglib2.0-0 libpython3-dev # RUN alias python=python3 @@ -30,12 +31,13 @@ WORKDIR /usr/src/app # Copy contents # COPY . 
/usr/src/app (issues as not a .git directory) RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app +ENV DEBIAN_FRONTEND teletype # Usage Examples ------------------------------------------------------------------------------------------------------- # Build and Push -# t=ultralytics/yolov5:latest-M1 && sudo docker build --platform linux/arm64 -f utils/docker/Dockerfile-arm64 -t $t . && sudo docker push $t +# t=ultralytics/yolov5:latest-arm64 && sudo docker build --platform linux/arm64 -f utils/docker/Dockerfile-arm64 -t $t . && sudo docker push $t # Pull and Run -# t=ultralytics/yolov5:latest-M1 && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/datasets:/usr/src/datasets $t +# t=ultralytics/yolov5:latest-arm64 && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/datasets:/usr/src/datasets $t diff --git a/utils/docker/Dockerfile-cpu b/utils/docker/Dockerfile-cpu index 017e2826458b..558f81f00584 100644 --- a/utils/docker/Dockerfile-cpu +++ b/utils/docker/Dockerfile-cpu @@ -9,8 +9,9 @@ FROM ubuntu:20.04 ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ # Install linux packages +ENV DEBIAN_FRONTEND noninteractive RUN apt update -RUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt install -y tzdata +RUN TZ=Etc/UTC apt install -y tzdata RUN apt install --no-install-recommends -y python3-pip git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg # RUN alias python=python3 @@ -29,6 +30,7 @@ WORKDIR /usr/src/app # Copy contents # COPY . /usr/src/app (issues as not a .git directory) RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app +ENV DEBIAN_FRONTEND teletype # Usage Examples ------------------------------------------------------------------------------------------------------- From 31c1f111868fc0dd7140ddce13e743f79bfaa9d4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 26 Nov 2022 00:28:50 +0100 Subject: [PATCH 1584/1976] `bbox_iou()` optimizations (#10296) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/metrics.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/utils/metrics.py b/utils/metrics.py index 65ea463c0dab..0be462551b89 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -234,12 +234,12 @@ def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7 else: # x1, y1, x2, y2 = box1 b1_x1, b1_y1, b1_x2, b1_y2 = box1.chunk(4, -1) b2_x1, b2_y1, b2_x2, b2_y2 = box2.chunk(4, -1) - w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps - w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps + w1, h1 = b1_x2 - b1_x1, (b1_y2 - b1_y1).clamp(eps) + w2, h2 = b2_x2 - b2_x1, (b2_y2 - b2_y1).clamp(eps) # Intersection area - inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \ - (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0) + inter = (b1_x2.minimum(b2_x2) - b1_x1.maximum(b2_x1)).clamp(0) * \ + (b1_y2.minimum(b2_y2) - b1_y1.maximum(b2_y1)).clamp(0) # Union Area union = w1 * h1 + w2 * h2 - inter + eps @@ -247,13 +247,13 @@ def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7 # IoU iou = inter / union if CIoU or DIoU or GIoU: - cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width - ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height + cw = b1_x2.maximum(b2_x2) - b1_x1.minimum(b2_x1) # convex (smallest enclosing box) width + ch = b1_y2.maximum(b2_y2) - b1_y1.minimum(b2_y1) # convex 
height if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1 c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center dist ** 2 if CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 - v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2) + v = (4 / math.pi ** 2) * (torch.atan(w2 / h2) - torch.atan(w1 / h1)).pow(2) with torch.no_grad(): alpha = v / (v - iou + (1 + eps)) return iou - (rho2 / c2 + v * alpha) # CIoU From 85f8379a68193cd9a9298e31035f01d304ac21f9 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 26 Nov 2022 06:06:22 +0100 Subject: [PATCH 1585/1976] README Segmentation Usage fixes (#10298) Fixes per https://github.com/ultralytics/yolov5/issues/10288 Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 298e14570860..028a1c2f064c 100644 --- a/README.md +++ b/README.md @@ -89,14 +89,14 @@ YOLOv5 segmentation training supports auto-download COCO128-seg segmentation dat ```bash # Single-GPU -python segment/train.py --model yolov5s-seg.pt --data coco128-seg.yaml --epochs 5 --img 640 +python segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 # Multi-GPU DDP -python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --model yolov5s-seg.pt --data coco128-seg.yaml --epochs 5 --img 640 --device 0,1,2,3 +python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --device 0,1,2,3 ``` ### Val -Validate YOLOv5m-seg accuracy on ImageNet-1k dataset: +Validate YOLOv5s-seg mask mAP on COCO dataset: ```bash bash data/scripts/get_coco.sh --val --segments # download COCO val segments split (780MB, 5000 images) python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # validate From 350e8eb69e01bb162ec0b22d1d13a1d1c2752853 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 26 Nov 2022 17:33:43 +0100 Subject: [PATCH 1586/1976] Fix SegmentationModel Usage (#10303) Update export.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- export.py | 1 + 1 file changed, 1 insertion(+) diff --git a/export.py b/export.py index e43d9b730fc6..3028e6581e63 100644 --- a/export.py +++ b/export.py @@ -596,6 +596,7 @@ def run( f = [str(x) for x in f if x] # filter out '' and None if any(f): cls, det, seg = (isinstance(model, x) for x in (ClassificationModel, DetectionModel, SegmentationModel)) # type + det &= not seg # segmentation models inherit from SegmentationModel(DetectionModel) dir = Path('segment' if seg else 'classify' if cls else '') h = '--half' if half else '' # --half FP16 inference arg s = "# WARNING ⚠️ ClassificationModel not yet supported for PyTorch Hub AutoShape inference" if cls else \ From f9ca3657f822da65a784aae7d750d86b69244ecb Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 28 Nov 2022 09:20:59 -0800 Subject: [PATCH 1587/1976] Ultralytics Live 1 - ClearML https://youtu.be/KS4weDInJYs (#10324) * Ultralytics Live Session banner - ClearML @taliabender @thepycoder @pderrenger Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn 
Jocher Signed-off-by: Glenn Jocher --- README.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/README.md b/README.md index 028a1c2f064c..96f40e0f040a 100644 --- a/README.md +++ b/README.md @@ -50,6 +50,15 @@
+##
Ultralytics Live - November 29th
+ +
+ +We're excited to announce our very first [Ultralytics Live](https://www.youtube.com/@Ultralytics/streams) session ✨ streaming live at https://youtu.be/KS4weDInJYs on **Tuesday, November 29th** with [@thepycoder](https://github.com/thepycoder) of [ClearML](https://cutt.ly/yolov5-readme-clearml). Glenn and Victor will discuss all things experiment tracking and how you can maximize the benefits of the new YOLOv5 + ClearML [integration](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml). 🔥 + + +
+ ##
Segmentation ⭐ NEW
From 10c025d794ca395a2ca0b2a00aff65f3a92ecd8d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 28 Nov 2022 14:50:29 -0800 Subject: [PATCH 1588/1976] Add README License section (#10327) * Add README License section @pderrenger @AyushExel Signed-off-by: Glenn Jocher * live fix Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- README.md | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 96f40e0f040a..53d37d2bcb35 100644 --- a/README.md +++ b/README.md @@ -21,7 +21,7 @@ YOLOv5 🚀 is the world's most loved vision AI, representing Ultralytics open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development.

- To request a commercial license please complete the form at Ultralytics Licensing. + To request an Enterprise License please complete the form at Ultralytics Licensing.

@@ -50,11 +50,11 @@
-##
Ultralytics Live - November 29th
+##
Ultralytics Live Session
-We're excited to announce our very first [Ultralytics Live](https://www.youtube.com/@Ultralytics/streams) session ✨ streaming live at https://youtu.be/KS4weDInJYs on **Tuesday, November 29th** with [@thepycoder](https://github.com/thepycoder) of [ClearML](https://cutt.ly/yolov5-readme-clearml). Glenn and Victor will discuss all things experiment tracking and how you can maximize the benefits of the new YOLOv5 + ClearML [integration](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml). 🔥 +We're excited to announce our very first [Ultralytics Live Session](https://www.youtube.com/@Ultralytics/streams) ✨ streaming live at https://youtu.be/KS4weDInJYs on **Tuesday, November 29th at 16:00 CET** with [@thepycoder](https://github.com/thepycoder) of [ClearML](https://cutt.ly/yolov5-readme-clearml). Glenn and Victor will discuss all things experiment tracking and how you can maximize the benefits of the new YOLOv5 + ClearML [integration](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml). 🔥
@@ -432,9 +432,18 @@ We love your input! We want to make contributing to YOLOv5 as easy and transpare + +##
License
+ +YOLOv5 is available under two different licenses: + +- **GPL-3.0 License**: See [LICENSE](https://github.com/ultralytics/yolov5/blob/master/LICENSE) file for details. +- **Enterprise License**: Provides greater flexibility for commercial product development without the open-source requirements of GPL-3.0. Typical use cases are embedding Ultralytics software and AI models in commercial products and applications. Request an Enterprise License at [Ultralytics Licensing](https://ultralytics.com/license). + + ##
Contact
-For YOLOv5 bugs and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues). For professional support please [Contact Us](https://ultralytics.com/contact). To request a commercial license please complete the form at [Ultralytics Licensing](https://ultralytics.com/license). +For YOLOv5 bugs and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues). For professional support please [Contact Us](https://ultralytics.com/contact).
From e808f2267d0164edb7bc45588c4fcda68c3dd8cb Mon Sep 17 00:00:00 2001 From: Hu Ye Date: Wed, 30 Nov 2022 11:32:34 +0800 Subject: [PATCH 1589/1976] Eliminate unused `ConfusionMatrix.matrix()` method (#10309) * fix bug in confusion_matrix Signed-off-by: Hu Ye * Update metrics.py * Update metrics.py * Update metrics.py Signed-off-by: Hu Ye Co-authored-by: Glenn Jocher --- utils/metrics.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/utils/metrics.py b/utils/metrics.py index 0be462551b89..c01f823a77a1 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -177,9 +177,6 @@ def process_batch(self, detections, labels): if not any(m1 == i): self.matrix[dc, self.nc] += 1 # predicted background - def matrix(self): - return self.matrix - def tp_fp(self): tp = self.matrix.diagonal() # true positives fp = self.matrix.sum(1) - tp # false positives From 7f5724ba4b3e421d4c9162742810c52248d06ecd Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 29 Nov 2022 19:38:29 -0800 Subject: [PATCH 1590/1976] Correct Segmentation Comparison Plot (#10344) @AyushExel @Laughing-q updated plot here in README Addresses https://github.com/ultralytics/yolov5/pull/10245#issuecomment-1328482213 Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 53d37d2bcb35..dd24a938a060 100644 --- a/README.md +++ b/README.md @@ -63,7 +63,7 @@ We're excited to announce our very first [Ultralytics Live Session](https://www.
- +
Our new YOLOv5 [release v7.0](https://github.com/ultralytics/yolov5/releases/v7.0) instance segmentation models are the fastest and most accurate in the world, beating all current [SOTA benchmarks](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco). We've made them super simple to train, validate and deploy. See full details in our [Release Notes](https://github.com/ultralytics/yolov5/releases/v7.0) and visit our [YOLOv5 Segmentation Colab Notebook](https://github.com/ultralytics/yolov5/blob/master/segment/tutorial.ipynb) for quickstart tutorials. From b412696ff339fc573320f143290d4fb7146832b3 Mon Sep 17 00:00:00 2001 From: Laughing <61612323+Laughing-q@users.noreply.github.com> Date: Thu, 1 Dec 2022 10:39:24 -0600 Subject: [PATCH 1591/1976] Fix & speed up segment plot (#10350) * fix plot&&speed up * fix segment save-txt * fix channel * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- segment/predict.py | 21 ++++++++++++------ utils/plots.py | 48 ++++++++++++++-------------------------- utils/segment/general.py | 23 +++++++++++++++++++ 3 files changed, 53 insertions(+), 39 deletions(-) diff --git a/segment/predict.py b/segment/predict.py index 42389938cee7..4d8458fd879e 100644 --- a/segment/predict.py +++ b/segment/predict.py @@ -46,7 +46,7 @@ increment_path, non_max_suppression, print_args, scale_boxes, scale_segments, strip_optimizer, xyxy2xywh) from utils.plots import Annotator, colors, save_one_box -from utils.segment.general import masks2segments, process_mask +from utils.segment.general import masks2segments, process_mask, process_mask_native from utils.torch_utils import select_device, smart_inference_mode @@ -151,13 +151,20 @@ def run( imc = im0.copy() if save_crop else im0 # for save_crop annotator = Annotator(im0, line_width=line_thickness, example=str(names)) if len(det): - masks = process_mask(proto[i], det[:, 6:], det[:, :4], im.shape[2:], upsample=True) # HWC - det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() # rescale boxes to im0 size + if retina_masks: + # scale bbox first the crop masks + det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() # rescale boxes to im0 size + masks = process_mask_native(proto[i], det[:, 6:], det[:, :4], im0.shape[:2]) # HWC + else: + masks = process_mask(proto[i], det[:, 6:], det[:, :4], im.shape[2:], upsample=True) # HWC + det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() # rescale boxes to im0 size # Segments if save_txt: segments = reversed(masks2segments(masks)) - segments = [scale_segments(im.shape[2:], x, im0.shape, normalize=True) for x in segments] + segments = [ + scale_segments(im0.shape if retina_masks else im.shape[2:], x, im0.shape, normalize=True) + for x in segments] # Print results for c in det[:, 5].unique(): @@ -165,9 +172,9 @@ def run( s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string # Mask plotting - annotator.masks(masks, - colors=[colors(x, True) for x in det[:, 5]], - im_gpu=None if retina_masks else im[i]) + plot_img = torch.as_tensor(im0, dtype=torch.float16).to(device).permute(2, 0, 1).flip(0).contiguous() / 255. 
\ + if retina_masks else im[i] + annotator.masks(masks, colors=[colors(x, True) for x in det[:, 5]], im_gpu=plot_img) # Write results for j, (*xyxy, conf, cls) in enumerate(reversed(det[:, :6])): diff --git a/utils/plots.py b/utils/plots.py index 36df271c60e1..d2f232de0e97 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -114,7 +114,7 @@ def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 2 thickness=tf, lineType=cv2.LINE_AA) - def masks(self, masks, colors, im_gpu=None, alpha=0.5): + def masks(self, masks, colors, im_gpu, alpha=0.5, retina_masks=False): """Plot masks at once. Args: masks (tensor): predicted masks on cuda, shape: [n, h, w] @@ -125,37 +125,21 @@ def masks(self, masks, colors, im_gpu=None, alpha=0.5): if self.pil: # convert to numpy first self.im = np.asarray(self.im).copy() - if im_gpu is None: - # Add multiple masks of shape(h,w,n) with colors list([r,g,b], [r,g,b], ...) - if len(masks) == 0: - return - if isinstance(masks, torch.Tensor): - masks = torch.as_tensor(masks, dtype=torch.uint8) - masks = masks.permute(1, 2, 0).contiguous() - masks = masks.cpu().numpy() - # masks = np.ascontiguousarray(masks.transpose(1, 2, 0)) - masks = scale_image(masks.shape[:2], masks, self.im.shape) - masks = np.asarray(masks, dtype=np.float32) - colors = np.asarray(colors, dtype=np.float32) # shape(n,3) - s = masks.sum(2, keepdims=True).clip(0, 1) # add all masks together - masks = (masks @ colors).clip(0, 255) # (h,w,n) @ (n,3) = (h,w,3) - self.im[:] = masks * alpha + self.im * (1 - s * alpha) - else: - if len(masks) == 0: - self.im[:] = im_gpu.permute(1, 2, 0).contiguous().cpu().numpy() * 255 - colors = torch.tensor(colors, device=im_gpu.device, dtype=torch.float32) / 255.0 - colors = colors[:, None, None] # shape(n,1,1,3) - masks = masks.unsqueeze(3) # shape(n,h,w,1) - masks_color = masks * (colors * alpha) # shape(n,h,w,3) - - inv_alph_masks = (1 - masks * alpha).cumprod(0) # shape(n,h,w,1) - mcs = (masks_color * inv_alph_masks).sum(0) * 2 # mask color summand shape(n,h,w,3) - - im_gpu = im_gpu.flip(dims=[0]) # flip channel - im_gpu = im_gpu.permute(1, 2, 0).contiguous() # shape(h,w,3) - im_gpu = im_gpu * inv_alph_masks[-1] + mcs - im_mask = (im_gpu * 255).byte().cpu().numpy() - self.im[:] = scale_image(im_gpu.shape, im_mask, self.im.shape) + if len(masks) == 0: + self.im[:] = im_gpu.permute(1, 2, 0).contiguous().cpu().numpy() * 255 + colors = torch.tensor(colors, device=im_gpu.device, dtype=torch.float32) / 255.0 + colors = colors[:, None, None] # shape(n,1,1,3) + masks = masks.unsqueeze(3) # shape(n,h,w,1) + masks_color = masks * (colors * alpha) # shape(n,h,w,3) + + inv_alph_masks = (1 - masks * alpha).cumprod(0) # shape(n,h,w,1) + mcs = (masks_color * inv_alph_masks).sum(0) * 2 # mask color summand shape(n,h,w,3) + + im_gpu = im_gpu.flip(dims=[0]) # flip channel + im_gpu = im_gpu.permute(1, 2, 0).contiguous() # shape(h,w,3) + im_gpu = im_gpu * inv_alph_masks[-1] + mcs + im_mask = (im_gpu * 255).byte().cpu().numpy() + self.im[:] = im_mask if retina_masks else scale_image(im_gpu.shape, im_mask, self.im.shape) if self.pil: # convert im back to PIL and update draw self.fromarray(self.im) diff --git a/utils/segment/general.py b/utils/segment/general.py index b526333dc5a1..6ebfd27bd9d3 100644 --- a/utils/segment/general.py +++ b/utils/segment/general.py @@ -67,6 +67,29 @@ def process_mask(protos, masks_in, bboxes, shape, upsample=False): return masks.gt_(0.5) +def process_mask_native(protos, masks_in, bboxes, dst_shape): + """ + Crop after upsample. 
+ proto_out: [mask_dim, mask_h, mask_w] + out_masks: [n, mask_dim], n is number of masks after nms + bboxes: [n, 4], n is number of masks after nms + shape:input_image_size, (h, w) + + return: h, w, n + """ + c, mh, mw = protos.shape # CHW + masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) + gain = min(mh / dst_shape[0], mw / dst_shape[1]) # gain = old / new + pad = (mw - dst_shape[1] * gain) / 2, (mh - dst_shape[0] * gain) / 2 # wh padding + top, left = int(pad[1]), int(pad[0]) # y, x + bottom, right = int(mh - pad[1]), int(mw - pad[0]) + masks = masks[:, top:bottom, left:right] + + masks = F.interpolate(masks[None], dst_shape, mode='bilinear', align_corners=False)[0] # CHW + masks = crop_mask(masks, bboxes) # CHW + return masks.gt_(0.5) + + def scale_image(im1_shape, masks, im0_shape, ratio_pad=None): """ img1_shape: model input shape, [h, w] From 028b7cdb5a2e650b4d9e79eaa90a00c1efdcbcba Mon Sep 17 00:00:00 2001 From: Michael Ben ami <31584614+mbenami@users.noreply.github.com> Date: Thu, 1 Dec 2022 22:44:14 +0200 Subject: [PATCH 1592/1976] fix_reading_nan_in_evolve (#10358) when there is `nan` in evolve.csv, pandas reads it as str; removing the leading space before the value fixes that Signed-off-by: Michael Ben ami <31584614+mbenami@users.noreply.github.com> Signed-off-by: Michael Ben ami <31584614+mbenami@users.noreply.github.com> Co-authored-by: Glenn Jocher --- utils/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index c5b738983719..efe8590f85a1 100644 --- a/utils/general.py +++ b/utils/general.py @@ -1036,7 +1036,7 @@ def print_mutation(keys, results, hyp, save_dir, bucket, prefix=colorstr('evolve # Save yaml with open(evolve_yaml, 'w') as f: - data = pd.read_csv(evolve_csv) + data = pd.read_csv(evolve_csv, skipinitialspace=True) data = data.rename(columns=lambda x: x.strip()) # strip keys i = np.argmax(fitness(data.values[:, :4])) # generations = len(data) From 185d475d93ebd4c03b53b4eb6057a62a52018b24 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 1 Dec 2022 13:01:46 -0800 Subject: [PATCH 1593/1976] Add DNN warning comment (#10368) Update export.py Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- export.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/export.py b/export.py index 3028e6581e63..928992903b0b 100644 --- a/export.py +++ b/export.py @@ -153,7 +153,7 @@ def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr('ONNX f, verbose=False, opset_version=opset, - do_constant_folding=True, + do_constant_folding=True, # WARNING: DNN inference with torch>=1.12 may require do_constant_folding=False input_names=['images'], output_names=output_names, dynamic_axes=dynamic or None) From 1ce464f6890ed1afe887ab8eed78804ae5933aa8 Mon Sep 17 00:00:00 2001 From: Victor Sonck Date: Thu, 1 Dec 2022 22:32:55 +0100 Subject: [PATCH 1594/1976] Add docker info for ClearML remote execution (#10142) * Add docker info for ClearML remote execution * add additional clearml options to handle different python versions * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/loggers/clearml/clearml_utils.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/utils/loggers/clearml/clearml_utils.py b/utils/loggers/clearml/clearml_utils.py index fe5f597a87a6..08aa9fd3327f 100644 ---
a/utils/loggers/clearml/clearml_utils.py +++ b/utils/loggers/clearml/clearml_utils.py @@ -97,6 +97,11 @@ def __init__(self, opt, hyp): # will have to be added manually! self.task.connect(hyp, name='Hyperparameters') + # Make sure the code is easily remotely runnable by setting the docker image to use by the remote agent + self.task.set_base_docker("ultralytics/yolov5:latest", + docker_arguments='--ipc=host -e="CLEARML_AGENT_SKIP_PYTHON_ENV_INSTALL=1"', + docker_setup_bash_script='pip install clearml') + # Get ClearML Dataset Version if requested if opt.data.startswith('clearml://'): # data_dict should have the following keys: From 7845cea91343e430566689deff6e50f6c2b473fa Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 1 Dec 2022 13:56:33 -0800 Subject: [PATCH 1595/1976] Fix ClearML unconfigured error (#10369) @thepycoder adds Try Except for installed but unconfigured clearml environments. Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/loggers/__init__.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index bc8dd7621579..22da87034f24 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -118,7 +118,14 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, # ClearML if clearml and 'clearml' in self.include: - self.clearml = ClearmlLogger(self.opt, self.hyp) + try: + self.clearml = ClearmlLogger(self.opt, self.hyp) + except Exception: + self.clearml = None + prefix = colorstr('ClearML: ') + LOGGER.warning(f'{prefix}WARNING ⚠️ ClearML is installed but not configured, skipping ClearML logging.' + f' See https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml#readme') + else: self.clearml = None From d7955fe438cbc4ca9fd735b79fa99545ffa81575 Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Fri, 2 Dec 2022 00:00:43 +0100 Subject: [PATCH 1596/1976] Fix clearml args logging when training is launch with run() (#10359) * Connect opt to clearml args * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update clearml_utils.py Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/loggers/clearml/clearml_utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/loggers/clearml/clearml_utils.py b/utils/loggers/clearml/clearml_utils.py index 08aa9fd3327f..7ad40ea5f987 100644 --- a/utils/loggers/clearml/clearml_utils.py +++ b/utils/loggers/clearml/clearml_utils.py @@ -96,6 +96,7 @@ def __init__(self, opt, hyp): # Only the hyperparameters coming from the yaml config file # will have to be added manually! self.task.connect(hyp, name='Hyperparameters') + self.task.connect(opt, name='Args') # Make sure the code is easily remotely runnable by setting the docker image to use by the remote agent self.task.set_base_docker("ultralytics/yolov5:latest", From d1ffc3a3a72b438175d3b4cd6e84ef1bc8df2703 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 2 Dec 2022 13:18:39 -0800 Subject: [PATCH 1597/1976] Create CITATION.cff (#10387) * Create CITATION.cff @AyushExel @pderrenger new citation file!! 
:) Signed-off-by: Glenn Jocher * Update CITATION.cff Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- CITATION.cff | 14 ++++++++++++++ 1 file changed, 14 insertions(+) create mode 100644 CITATION.cff diff --git a/CITATION.cff b/CITATION.cff new file mode 100644 index 000000000000..f8d5fdc3785d --- /dev/null +++ b/CITATION.cff @@ -0,0 +1,14 @@ +cff-version: 1.2.0 +preferred-citation: + type: software + message: If you use YOLOv5, please cite it as below. + authors: + - family-names: Jocher + given-names: Glenn + orcid: "https://orcid.org/0000-0001-5950-6979" + title: "YOLOv5 by Ultralytics" + version: 7.0.0 + doi: 10.5281/zenodo.3908559 + date-released: 2020-5-29 + license: GPL-3.0 + url: "https://github.com/ultralytics/yolov5" From e96113e48591f246620a3696b7de84423c3c1e42 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 2 Dec 2022 13:27:45 -0800 Subject: [PATCH 1598/1976] Update CITATION.cff to version: v7.0 (#10389) Update version: v7.0 Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- CITATION.cff | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CITATION.cff b/CITATION.cff index f8d5fdc3785d..8e2cf1148b92 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -7,7 +7,7 @@ preferred-citation: given-names: Glenn orcid: "https://orcid.org/0000-0001-5950-6979" title: "YOLOv5 by Ultralytics" - version: 7.0.0 + version: 7.0 doi: 10.5281/zenodo.3908559 date-released: 2020-5-29 license: GPL-3.0 From a1b6e79ccf0b66f201720d82d79da14bc44bad6d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 2 Dec 2022 13:28:33 -0800 Subject: [PATCH 1599/1976] Revert TQDM bar format changes (#10343) Per https://github.com/ultralytics/yolov5/issues/10342 Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index efe8590f85a1..99a96576c3fd 100644 --- a/utils/general.py +++ b/utils/general.py @@ -49,7 +49,7 @@ DATASETS_DIR = Path(os.getenv('YOLOv5_DATASETS_DIR', ROOT.parent / 'datasets')) # global datasets directory AUTOINSTALL = str(os.getenv('YOLOv5_AUTOINSTALL', True)).lower() == 'true' # global auto-install mode VERBOSE = str(os.getenv('YOLOv5_VERBOSE', True)).lower() == 'true' # global verbose mode -TQDM_BAR_FORMAT = '{l_bar}{bar:10}| {n_fmt}/{total_fmt} {elapsed}' # tqdm bar format +TQDM_BAR_FORMAT = '{l_bar}{bar:10}{r_bar}' # tqdm bar format FONT = 'Arial.ttf' # https://ultralytics.com/assets/Arial.ttf torch.set_printoptions(linewidth=320, precision=5, profile='long') From 9722e6ffe5926fa20387c678d4ca0aef410a0c05 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 3 Dec 2022 14:41:08 -0800 Subject: [PATCH 1600/1976] `process_mask_native()` cleanup (#10366) * process_mask_native() cleanup * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix arg name * cleanup anno_json * Remove scale_image * Remove scale_image * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update to native Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- segment/predict.py | 17 +++++++++-------- segment/val.py | 10 +++++----- utils/segment/general.py | 20 ++++++++++---------- val.py | 4 ++-- 4 files changed, 26 insertions(+), 25 deletions(-) diff --git a/segment/predict.py b/segment/predict.py index 4d8458fd879e..4ba9e46ddab0 100644 --- a/segment/predict.py +++ b/segment/predict.py @@ -44,7 +44,7 @@ from utils.dataloaders 
import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, increment_path, non_max_suppression, print_args, scale_boxes, scale_segments, - strip_optimizer, xyxy2xywh) + strip_optimizer) from utils.plots import Annotator, colors, save_one_box from utils.segment.general import masks2segments, process_mask, process_mask_native from utils.torch_utils import select_device, smart_inference_mode @@ -161,10 +161,9 @@ def run( # Segments if save_txt: - segments = reversed(masks2segments(masks)) segments = [ scale_segments(im0.shape if retina_masks else im.shape[2:], x, im0.shape, normalize=True) - for x in segments] + for x in reversed(masks2segments(masks))] # Print results for c in det[:, 5].unique(): @@ -172,15 +171,17 @@ def run( s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string # Mask plotting - plot_img = torch.as_tensor(im0, dtype=torch.float16).to(device).permute(2, 0, 1).flip(0).contiguous() / 255. \ - if retina_masks else im[i] - annotator.masks(masks, colors=[colors(x, True) for x in det[:, 5]], im_gpu=plot_img) + annotator.masks( + masks, + colors=[colors(x, True) for x in det[:, 5]], + im_gpu=torch.as_tensor(im0, dtype=torch.float16).to(device).permute(2, 0, 1).flip(0).contiguous() / + 255 if retina_masks else im[i]) # Write results for j, (*xyxy, conf, cls) in enumerate(reversed(det[:, :6])): if save_txt: # Write to file - segj = segments[j].reshape(-1) # (n,2) to (n*2) - line = (cls, *segj, conf) if save_conf else (cls, *segj) # label format + seg = segments[j].reshape(-1) # (n,2) to (n*2) + line = (cls, *seg, conf) if save_conf else (cls, *seg) # label format with open(f'{txt_path}.txt', 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') diff --git a/segment/val.py b/segment/val.py index 48bf28d4bf4f..368a058f9ced 100644 --- a/segment/val.py +++ b/segment/val.py @@ -48,7 +48,7 @@ from utils.metrics import ConfusionMatrix, box_iou from utils.plots import output_to_target, plot_val_study from utils.segment.dataloaders import create_dataloader -from utils.segment.general import mask_iou, process_mask, process_mask_upsample, scale_image +from utils.segment.general import mask_iou, process_mask, process_mask_native, scale_image from utils.segment.metrics import Metrics, ap_per_class_box_and_mask from utils.segment.plots import plot_images_and_masks from utils.torch_utils import de_parallel, select_device, smart_inference_mode @@ -160,7 +160,7 @@ def run( ): if save_json: check_requirements(['pycocotools']) - process = process_mask_upsample # more accurate + process = process_mask_native # more accurate else: process = process_mask # faster @@ -312,7 +312,7 @@ def run( pred_masks = torch.as_tensor(pred_masks, dtype=torch.uint8) if plots and batch_i < 3: - plot_masks.append(pred_masks[:15].cpu()) # filter top 15 to plot + plot_masks.append(pred_masks[:15]) # filter top 15 to plot # Save/log if save_txt: @@ -367,8 +367,8 @@ def run( # Save JSON if save_json and len(jdict): w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights - anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json') # annotations json - pred_json = str(save_dir / f"{w}_predictions.json") # predictions json + anno_json = str(Path('../datasets/coco/annotations/instances_val2017.json')) # annotations + pred_json = str(save_dir / f"{w}_predictions.json") # predictions 
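For reference, a minimal sketch of the pycocotools scoring step that consumes the `pred_json` file written above — `anno.json` and `pred.json` are hypothetical stand-ins for the real annotation and prediction paths:

```python
# Hedged sketch of COCO-style evaluation with pycocotools; the file names are
# placeholders and must point at real COCO-format JSON files to run.
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

anno = COCO('anno.json')  # ground-truth annotations
pred = anno.loadRes('pred.json')  # predictions attached to the same image index
coco_eval = COCOeval(anno, pred, 'bbox')  # pass 'segm' to score mask mAP instead
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()  # prints the AP/AR table, including mAP@0.5:0.95
```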
LOGGER.info(f'\nEvaluating pycocotools mAP... saving {pred_json}...') with open(pred_json, 'w') as f: json.dump(jdict, f) diff --git a/utils/segment/general.py b/utils/segment/general.py index 6ebfd27bd9d3..9da894538665 100644 --- a/utils/segment/general.py +++ b/utils/segment/general.py @@ -25,10 +25,10 @@ def crop_mask(masks, boxes): def process_mask_upsample(protos, masks_in, bboxes, shape): """ Crop after upsample. - proto_out: [mask_dim, mask_h, mask_w] - out_masks: [n, mask_dim], n is number of masks after nms + protos: [mask_dim, mask_h, mask_w] + masks_in: [n, mask_dim], n is number of masks after nms bboxes: [n, 4], n is number of masks after nms - shape:input_image_size, (h, w) + shape: input_image_size, (h, w) return: h, w, n """ @@ -67,25 +67,25 @@ def process_mask(protos, masks_in, bboxes, shape, upsample=False): return masks.gt_(0.5) -def process_mask_native(protos, masks_in, bboxes, dst_shape): +def process_mask_native(protos, masks_in, bboxes, shape): """ Crop after upsample. - proto_out: [mask_dim, mask_h, mask_w] - out_masks: [n, mask_dim], n is number of masks after nms + protos: [mask_dim, mask_h, mask_w] + masks_in: [n, mask_dim], n is number of masks after nms bboxes: [n, 4], n is number of masks after nms - shape:input_image_size, (h, w) + shape: input_image_size, (h, w) return: h, w, n """ c, mh, mw = protos.shape # CHW masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) - gain = min(mh / dst_shape[0], mw / dst_shape[1]) # gain = old / new - pad = (mw - dst_shape[1] * gain) / 2, (mh - dst_shape[0] * gain) / 2 # wh padding + gain = min(mh / shape[0], mw / shape[1]) # gain = old / new + pad = (mw - shape[1] * gain) / 2, (mh - shape[0] * gain) / 2 # wh padding top, left = int(pad[1]), int(pad[0]) # y, x bottom, right = int(mh - pad[1]), int(mw - pad[0]) masks = masks[:, top:bottom, left:right] - masks = F.interpolate(masks[None], dst_shape, mode='bilinear', align_corners=False)[0] # CHW + masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW masks = crop_mask(masks, bboxes) # CHW return masks.gt_(0.5) diff --git a/val.py b/val.py index 7c610e83a856..e84249ed383f 100644 --- a/val.py +++ b/val.py @@ -302,8 +302,8 @@ def run( # Save JSON if save_json and len(jdict): w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights - anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json') # annotations json - pred_json = str(save_dir / f"{w}_predictions.json") # predictions json + anno_json = str(Path('../datasets/coco/annotations/instances_val2017.json')) # annotations + pred_json = str(save_dir / f"{w}_predictions.json") # predictions LOGGER.info(f'\nEvaluating pycocotools mAP... 
saving {pred_json}...') with open(pred_json, 'w') as f: json.dump(jdict, f) From 5dc1ce4e865960f5b5dfe4e4f5148a4731433bca Mon Sep 17 00:00:00 2001 From: Colin Wong Date: Sat, 3 Dec 2022 16:58:58 -0600 Subject: [PATCH 1601/1976] Support `.txt` files as a line-by-line media list rather than streams (#10059) * Update streams.txt default Signed-off-by: Colin Wong * Change streams list extension to .streams * Read txt as media per line * Missed one * Missed another one * Update dataloaders.py * Update detect.py * Update dataloaders.py * Update detect.py * Update predict.py * Update predict.py * Update README.md Signed-off-by: Colin Wong Co-authored-by: Glenn Jocher --- README.md | 18 ++++++++++-------- classify/predict.py | 4 +++- detect.py | 4 +++- segment/predict.py | 4 +++- utils/dataloaders.py | 4 +++- 5 files changed, 22 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index dd24a938a060..3c163b3e1742 100644 --- a/README.md +++ b/README.md @@ -182,14 +182,16 @@ results.print() # or .show(), .save(), .crop(), .pandas(), etc. the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) and saving results to `runs/detect`. ```bash -python detect.py --source 0 # webcam - img.jpg # image - vid.mp4 # video - screen # screenshot - path/ # directory - 'path/*.jpg' # glob - 'https://youtu.be/Zgi9g1ksQHc' # YouTube - 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream +python detect.py --weights yolov5s.pt --source 0 # webcam + img.jpg # image + vid.mp4 # video + screen # screenshot + path/ # directory + list.txt # list of images + list.streams # list of streams + 'path/*.jpg' # glob + 'https://youtu.be/Zgi9g1ksQHc' # YouTube + 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream ```
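For reference, the new `list.txt` source type above holds one image or video path per line; a minimal sketch of that expansion, mirroring the `LoadImages` change later in this patch (the file name and its contents are made up):

```python
# Sketch of *.txt media-list expansion; 'sources.txt' and its entries are made up.
from pathlib import Path

Path('sources.txt').write_text('img1.jpg\nvid1.mp4\n')  # one media path per line

path = 'sources.txt'
if isinstance(path, str) and Path(path).suffix == '.txt':
    path = Path(path).read_text().rsplit()  # whitespace-split into individual sources

print(path)  # ['img1.jpg', 'vid1.mp4']
```

Because the split is on whitespace, paths containing spaces are not supported by this scheme.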
diff --git a/classify/predict.py b/classify/predict.py index 9a6b00062932..5a5edabda42c 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -8,6 +8,8 @@ vid.mp4 # video screen # screenshot path/ # directory + list.txt # list of images + list.streams # list of streams 'path/*.jpg' # glob 'https://youtu.be/Zgi9g1ksQHc' # YouTube 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream @@ -74,7 +76,7 @@ def run( save_img = not nosave and not source.endswith('.txt') # save inference images is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) - webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file) + webcam = source.isnumeric() or source.endswith('.streams') or (is_url and not is_file) screenshot = source.lower().startswith('screen') if is_url and is_file: source = check_file(source) # download diff --git a/detect.py b/detect.py index 58b02802e6d9..2d13401f78bd 100644 --- a/detect.py +++ b/detect.py @@ -8,6 +8,8 @@ vid.mp4 # video screen # screenshot path/ # directory + list.txt # list of images + list.streams # list of streams 'path/*.jpg' # glob 'https://youtu.be/Zgi9g1ksQHc' # YouTube 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream @@ -82,7 +84,7 @@ def run( save_img = not nosave and not source.endswith('.txt') # save inference images is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) - webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file) + webcam = source.isnumeric() or source.endswith('.streams') or (is_url and not is_file) screenshot = source.lower().startswith('screen') if is_url and is_file: source = check_file(source) # download diff --git a/segment/predict.py b/segment/predict.py index 4ba9e46ddab0..e9093baa1cc7 100644 --- a/segment/predict.py +++ b/segment/predict.py @@ -8,6 +8,8 @@ vid.mp4 # video screen # screenshot path/ # directory + list.txt # list of images + list.streams # list of streams 'path/*.jpg' # glob 'https://youtu.be/Zgi9g1ksQHc' # YouTube 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream @@ -85,7 +87,7 @@ def run( save_img = not nosave and not source.endswith('.txt') # save inference images is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) - webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file) + webcam = source.isnumeric() or source.endswith('.streams') or (is_url and not is_file) screenshot = source.lower().startswith('screen') if is_url and is_file: source = check_file(source) # download diff --git a/utils/dataloaders.py b/utils/dataloaders.py index cc5f8843ef18..6d2b27ea5e60 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -238,6 +238,8 @@ def __next__(self): class LoadImages: # YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4` def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None, vid_stride=1): + if isinstance(path, str) and Path(path).suffix == ".txt": # *.txt file with img/vid/dir on each line + path = Path(path).read_text().rsplit() files = [] for p in sorted(path) if isinstance(path, (list, tuple)) else [path]: p = str(Path(p).resolve()) @@ -338,7 +340,7 @@ def __len__(self): class LoadStreams: # YOLOv5 streamloader, i.e. 
`python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams` - def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True, transforms=None, vid_stride=1): + def __init__(self, sources='file.streams', img_size=640, stride=32, auto=True, transforms=None, vid_stride=1): torch.backends.cudnn.benchmark = True # faster for fixed-size inference self.mode = 'stream' self.img_size = img_size From f8539a680041a9f4fbcc4fcdd8f540724da453af Mon Sep 17 00:00:00 2001 From: Victor Sonck Date: Mon, 5 Dec 2022 21:12:19 +0100 Subject: [PATCH 1602/1976] Allow users to specify how to override a ClearML Task (#10363) * Added basic flag to enable reusing last task clearml * Added option to provide task ID to override * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Use exist_ok argument instead Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- utils/loggers/clearml/clearml_utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/loggers/clearml/clearml_utils.py b/utils/loggers/clearml/clearml_utils.py index 7ad40ea5f987..3457727a96a4 100644 --- a/utils/loggers/clearml/clearml_utils.py +++ b/utils/loggers/clearml/clearml_utils.py @@ -89,6 +89,7 @@ def __init__(self, opt, hyp): task_name=opt.name if opt.name != 'exp' else 'Training', tags=['YOLOv5'], output_uri=True, + reuse_last_task_id=opt.exist_ok, auto_connect_frameworks={'pytorch': False} # We disconnect pytorch auto-detection, because we added manual model save points in the code ) From 0a1fdcd8ebaebf48d95d795c3693a0148f3ec0f9 Mon Sep 17 00:00:00 2001 From: Leander van Eekelen <47320151+leandervaneekelen@users.noreply.github.com> Date: Tue, 6 Dec 2022 23:48:17 +0100 Subject: [PATCH 1603/1976] Add catch for misspelled `--task` (#10420) * Add catch for misspelled task Signed-off-by: Leander van Eekelen <47320151+leandervaneekelen@users.noreply.github.com> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update val.py Signed-off-by: Glenn Jocher * Update val.py Signed-off-by: Glenn Jocher Signed-off-by: Leander van Eekelen <47320151+leandervaneekelen@users.noreply.github.com> Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- segment/val.py | 2 ++ val.py | 2 ++ 2 files changed, 4 insertions(+) diff --git a/segment/val.py b/segment/val.py index 368a058f9ced..5cf8ae8b41c1 100644 --- a/segment/val.py +++ b/segment/val.py @@ -463,6 +463,8 @@ def main(opt): np.savetxt(f, y, fmt='%10.4g') # save os.system('zip -r study.zip study_*.txt') plot_val_study(x=x) # plot + else: + raise NotImplementedError(f'--task {opt.task} not in ("train", "val", "test", "speed", "study")') if __name__ == "__main__": diff --git a/val.py b/val.py index e84249ed383f..8d27d9d3dab1 100644 --- a/val.py +++ b/val.py @@ -399,6 +399,8 @@ def main(opt): np.savetxt(f, y, fmt='%10.4g') # save os.system('zip -r study.zip study_*.txt') plot_val_study(x=x) # plot + else: + raise NotImplementedError(f'--task {opt.task} not in ("train", "val", "test", "speed", "study")') if __name__ == "__main__": From 06243845b3b7f367350ee93323e47740d40e560d Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 6 Dec 2022 15:12:20 -0800 Subject: [PATCH 1604/1976] [pre-commit.ci] pre-commit suggestions (#10409) MIME-Version: 1.0 
Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit suggestions updates: - [github.com/pre-commit/pre-commit-hooks: v4.3.0 → v4.4.0](https://github.com/pre-commit/pre-commit-hooks/compare/v4.3.0...v4.4.0) - [github.com/asottile/pyupgrade: v3.2.0 → v3.3.0](https://github.com/asottile/pyupgrade/compare/v3.2.0...v3.3.0) - [github.com/PyCQA/flake8: 5.0.4 → 6.0.0](https://github.com/PyCQA/flake8/compare/5.0.4...6.0.0) * Fix flake8 ignore syntax Signed-off-by: Glenn Jocher * spacing Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- .pre-commit-config.yaml | 6 +++--- setup.cfg | 24 ++++++++++-------------- 2 files changed, 13 insertions(+), 17 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 0106b4aab523..72c3cc67e59f 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -13,7 +13,7 @@ ci: repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.3.0 + rev: v4.4.0 hooks: # - id: end-of-file-fixer - id: trailing-whitespace @@ -24,7 +24,7 @@ repos: - id: check-docstring-first - repo: https://github.com/asottile/pyupgrade - rev: v3.2.0 + rev: v3.3.0 hooks: - id: pyupgrade name: Upgrade code @@ -58,7 +58,7 @@ repos: - id: yesqa - repo: https://github.com/PyCQA/flake8 - rev: 5.0.4 + rev: 6.0.0 hooks: - id: flake8 name: PEP8 diff --git a/setup.cfg b/setup.cfg index f12995da3e8e..d7c4cb3e1a4d 100644 --- a/setup.cfg +++ b/setup.cfg @@ -6,7 +6,6 @@ license_file = LICENSE description_file = README.md - [tool:pytest] norecursedirs = .git @@ -17,7 +16,6 @@ addopts = --durations=25 --color=yes - [flake8] max-line-length = 120 exclude = .tox,*.egg,build,temp @@ -27,17 +25,16 @@ verbose = 2 # https://pep8.readthedocs.io/en/latest/intro.html#error-codes format = pylint # see: https://www.flake8rules.com/ -ignore = - E731 # Do not assign a lambda expression, use a def - F405 # name may be undefined, or defined from star imports: module - E402 # module level import not at top of file - F401 # module imported but unused - W504 # line break after binary operator - E127 # continuation line over-indented for visual indent - E231 # missing whitespace after ‘,’, ‘;’, or ‘:’ - E501 # line too long - F403 # ‘from module import *’ used; unable to detect undefined names - +ignore = E731,F405,E402,F401,W504,E127,E231,E501,F403 + # E731: Do not assign a lambda expression, use a def + # F405: name may be undefined, or defined from star imports: module + # E402: module level import not at top of file + # F401: module imported but unused + # W504: line break after binary operator + # E127: continuation line over-indented for visual indent + # E231: missing whitespace after ‘,’, ‘;’, or ‘:’ + # E501: line too long + # F403: ‘from module import *’ used; unable to detect undefined names [isort] # https://pycqa.github.io/isort/docs/configuration/options.html @@ -45,7 +42,6 @@ line_length = 120 # see: https://pycqa.github.io/isort/docs/configuration/multi_line_output_modes.html multi_line_output = 0 - [yapf] based_on_style = pep8 spaces_before_comment = 2 From 454dae1301abb3fbf4fd1f54d5dc706cc69f8e7e Mon Sep 17 00:00:00 2001 From: Talia Bender <85292283+taliabender@users.noreply.github.com> Date: Wed, 7 Dec 2022 00:45:24 +0100 Subject: [PATCH 1605/1976] Ultralytics Live Session 2 - Roboflow https://youtu.be/LKpuzZllNpA (#10426) * Update README.md Info for Ep 2 of Ultralytics Live Sessions 
Signed-off-by: Talia Bender <85292283+taliabender@users.noreply.github.com> * Update README.md Signed-off-by: Glenn Jocher * Update image link Make sure we update the href field in the image so when users click the image they go directly to the YouTube live page. Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher Signed-off-by: Talia Bender <85292283+taliabender@users.noreply.github.com> Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- README.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 3c163b3e1742..91ffcb1f95a9 100644 --- a/README.md +++ b/README.md @@ -54,9 +54,10 @@
-We're excited to announce our very first [Ultralytics Live Session](https://www.youtube.com/@Ultralytics/streams) ✨ streaming live at https://youtu.be/KS4weDInJYs on **Tuesday, November 29th at 16:00 CET** with [@thepycoder](https://github.com/thepycoder) of [ClearML](https://cutt.ly/yolov5-readme-clearml). Glenn and Victor will discuss all things experiment tracking and how you can maximize the benefits of the new YOLOv5 + ClearML [integration](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml). 🔥
+[Ultralytics Live Session Ep. 2](https://youtu.be/LKpuzZllNpA) ✨ will be streaming live on **Tuesday, December 13th at 19:00 CET** with [Joseph Nelson](https://github.com/josephofiowa) of [Roboflow](https://roboflow.com/?ref=ultralytics) who will join us to discuss the brand new Roboflow x Ultralytics HUB integration. Tune in to ask Glenn and Joseph about how you can speed up workflows with seamless dataset integration! 🔥
-
+
+
##
Segmentation ⭐ NEW
From de812396fe94996cfc0e8c75cfdcc446b61e3439 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 8 Dec 2022 17:25:45 -0800 Subject: [PATCH 1606/1976] Add README App section (#10446) * Add README App section @AyushExel @pderrenger this should increase our app visibility per https://github.com/ultralytics/yolov5/issues/10431 Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- README.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/README.md b/README.md index 91ffcb1f95a9..f00cb76c6ce9 100644 --- a/README.md +++ b/README.md @@ -427,6 +427,13 @@ Get started in seconds with our verified environments. Click each icon below for
+##
App
+ +Run YOLOv5 models on your iOS or Android device by downloading the [Ultralytics App](https://ultralytics.com/app_install)! + + +Ultralytics mobile app + ##
Contribute
From 1607aec4312719db820a026792223acad915015f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 8 Dec 2022 17:27:36 -0800 Subject: [PATCH 1607/1976] Automatic README translation to Simplified Chinese (#10445) * Create translate-readme.yml @AyushExel @pderrenger @Laughing-q adding README translation action since we are unable to manually maintain our Chinese-translated README Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update README.md Signed-off-by: Glenn Jocher * Double hyperlinks Signed-off-by: Glenn Jocher * Delete README_cn.md Signed-off-by: Glenn Jocher * Create README.zh-CN.md Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update README.md Signed-off-by: Glenn Jocher * Update .pre-commit-config.yaml Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/README_cn.md | 344 ------------------ .github/workflows/translate-readme.yml | 27 ++ .pre-commit-config.yaml | 2 +- README.md | 17 +- README.zh-CN.md | 479 +++++++++++++++++++++++++ 5 files changed, 513 insertions(+), 356 deletions(-) delete mode 100644 .github/README_cn.md create mode 100644 .github/workflows/translate-readme.yml create mode 100644 README.zh-CN.md diff --git a/.github/README_cn.md b/.github/README_cn.md deleted file mode 100644 index 0a2f61ee35b2..000000000000 --- a/.github/README_cn.md +++ /dev/null @@ -1,344 +0,0 @@ -
-

- - -

- - [English](../README.md) | 简体中文 -
-
- YOLOv5 CI - YOLOv5 Citation - Docker Pulls -
- Run on Gradient - Open In Colab - Open In Kaggle -
- -
-

- YOLOv5🚀是一个在COCO数据集上预训练的物体检测架构和模型系列,它代表了Ultralytics对未来视觉AI方法的公开研究,其中包含了在数千小时的研究和开发中所获得的经验和最佳实践。 -

- -
- - - - - - - - - - - - - - - - - - - - -
-
- - -##
文件
- -请参阅[YOLOv5 Docs](https://docs.ultralytics.com),了解有关训练、测试和部署的完整文件。 - -##
快速开始案例
- -
-安装 - -在[**Python>=3.7.0**](https://www.python.org/) 的环境中克隆版本仓并安装 [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt),包括[**PyTorch>=1.7**](https://pytorch.org/get-started/locally/)。 -```bash -git clone https://github.com/ultralytics/yolov5 # 克隆 -cd yolov5 -pip install -r requirements.txt # 安装 -``` - -
- -
-推理 - -YOLOv5 [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36) 推理. [模型](https://github.com/ultralytics/yolov5/tree/master/models) 自动从最新YOLOv5 [版本](https://github.com/ultralytics/yolov5/releases)下载。 - -```python -import torch - -# 模型 -model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # or yolov5n - yolov5x6, custom - -# 图像 -img = 'https://ultralytics.com/images/zidane.jpg' # or file, Path, PIL, OpenCV, numpy, list - -# 推理 -results = model(img) - -# 结果 -results.print() # or .show(), .save(), .crop(), .pandas(), etc. -``` - -
- -
-用 detect.py 进行推理 - -`detect.py` 在各种数据源上运行推理, 其会从最新的 YOLOv5 [版本](https://github.com/ultralytics/yolov5/releases) 中自动下载 [模型](https://github.com/ultralytics/yolov5/tree/master/models) 并将检测结果保存到 `runs/detect` 目录。 - -```bash -python detect.py --source 0 # 网络摄像头 - img.jpg # 图像 - vid.mp4 # 视频 - path/ # 文件夹 - 'path/*.jpg' # glob - 'https://youtu.be/Zgi9g1ksQHc' # YouTube - 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP 流 -``` - -
- -
-训练 - -以下指令再现了 YOLOv5 [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) -数据集结果. [模型](https://github.com/ultralytics/yolov5/tree/master/models) 和 [数据集](https://github.com/ultralytics/yolov5/tree/master/data) 自动从最新的YOLOv5 [版本](https://github.com/ultralytics/yolov5/releases) 中下载。YOLOv5n/s/m/l/x的训练时间在V100 GPU上是 1/2/4/6/8天(多GPU倍速). 尽可能使用最大的 `--batch-size`, 或通过 `--batch-size -1` 来实现 YOLOv5 [自动批处理](https://github.com/ultralytics/yolov5/pull/5092). 批量大小显示为 V100-16GB。 - -```bash -python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml --batch-size 128 - yolov5s 64 - yolov5m 40 - yolov5l 24 - yolov5x 16 -``` - - - -
- -
-教程 - -- [训练自定义数据集](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data) 🚀 推荐 -- [获得最佳训练效果的技巧](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results) ☘️ - 推荐 -- [多GPU训练](https://github.com/ultralytics/yolov5/issues/475) -- [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36) 🌟 新 -- [TFLite, ONNX, CoreML, TensorRT 输出](https://github.com/ultralytics/yolov5/issues/251) 🚀 -- [测试时数据增强 (TTA)](https://github.com/ultralytics/yolov5/issues/303) -- [模型集成](https://github.com/ultralytics/yolov5/issues/318) -- [模型剪枝/稀疏性](https://github.com/ultralytics/yolov5/issues/304) -- [超参数进化](https://github.com/ultralytics/yolov5/issues/607) -- [带有冻结层的迁移学习](https://github.com/ultralytics/yolov5/issues/1314) -- [架构概要](https://github.com/ultralytics/yolov5/issues/6998) 🌟 新 -- [使用Weights & Biases 记录实验](https://github.com/ultralytics/yolov5/issues/1289) -- [Roboflow:数据集,标签和主动学习](https://github.com/ultralytics/yolov5/issues/4975) 🌟 新 -- [使用ClearML 记录实验](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) 🌟 新 -- [Deci 平台](https://github.com/ultralytics/yolov5/wiki/Deci-Platform) 🌟 新 - -
- - -##
Integrations
- -
- - -
-
- -
- - - - - - - - - - - -
- -|Roboflow|ClearML ⭐ NEW|Comet ⭐ NEW|Deci ⭐ NEW| -|:-:|:-:|:-:|:-:| -|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Free forever, [Comet](https://bit.ly/yolov5-readme-comet) lets you save YOLOv5 models, resume training, and interactively visualise and debug predictions|Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)| - - -##
Ultralytics HUB
- -[Ultralytics HUB](https://bit.ly/ultralytics_hub) is our ⭐ **NEW** no-code solution to visualize datasets, train YOLOv5 🚀 models, and deploy to the real world in a seamless experience. Get started for **Free** now! - - - - - -##
为什么选择 YOLOv5
- -

-
- YOLOv5-P5 640 图像 (点击扩展) - -

-
-
- 图片注释 (点击扩展) - -- **COCO AP val** 表示 mAP@0.5:0.95 在5000张图像的[COCO val2017](http://cocodataset.org)数据集上,在256到1536的不同推理大小上测量的指标。 -- **GPU Speed** 衡量的是在 [COCO val2017](http://cocodataset.org) 数据集上使用 [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) V100实例在批量大小为32时每张图像的平均推理时间。 -- **EfficientDet** 数据来自 [google/automl](https://github.com/google/automl) ,批量大小设置为 8。 -- 复现 mAP 方法: `python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt` - -
- -### 预训练检查点 - -| 模型 | 规模
(像素) | mAP验证
0.5:0.95 | mAP验证
0.5 | 速度
CPU b1
(ms) | 速度
V100 b1
(ms) | 速度
V100 b32
(ms) | 参数
(M) | 浮点运算
@640 (B) | -|------------------------------------------------------------------------------------------------------|-----------------------|-------------------------|--------------------|------------------------------|-------------------------------|--------------------------------|--------------------|------------------------| -| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** | -| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 | -| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 | -| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 | -| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 | -| | | | | | | | | | -| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 | -| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | -| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | -| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | -| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v6.1/yolov5x6.pt)
+ [TTA][TTA] | 1280
1536 | 55.0
**55.8** | 72.7
**72.7** | 3136
- | 26.2
- | 19.4
- | 140.7
- | 209.8
- | - -
- 表格注释 (点击扩展) - -- 所有检查点都以默认设置训练到300个时期. Nano和Small模型用 [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml) hyps, 其他模型使用 [hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml). -- **mAPval** 值是 [COCO val2017](http://cocodataset.org) 数据集上的单模型单尺度的值。 -
复现方法: `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` -- 使用 [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) 实例对COCO val图像的平均速度。不包括NMS时间(~1 ms/img) -
复现方法: `python val.py --data coco.yaml --img 640 --task speed --batch 1` -- **TTA** [测试时数据增强](https://github.com/ultralytics/yolov5/issues/303) 包括反射和比例增强. -
复现方法: `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment` - -
- - -##
分类 ⭐ 新
- -YOLOv5发布的[v6.2版本](https://github.com/ultralytics/yolov5/releases) 支持训练,验证,预测和输出分类模型!这使得训练分类器模型非常简单。点击下面开始尝试! - -
- 分类检查点 (点击展开) - -
- -我们在ImageNet上使用了4xA100的实例训练YOLOv5-cls分类模型90个epochs,并以相同的默认设置同时训练了ResNet和EfficientNet模型来进行比较。我们将所有的模型导出到ONNX FP32进行CPU速度测试,又导出到TensorRT FP16进行GPU速度测试。最后,为了方便重现,我们在[Google Colab Pro](https://colab.research.google.com/signup)上进行了所有的速度测试。 - -| 模型 | 规模
(像素) | 准确度
第一 | 准确度
前五 | 训练
90 epochs
4xA100 (小时) | 速度
ONNX CPU
(ms) | 速度
TensorRT V100
(ms) | 参数
(M) | 浮点运算
@224 (B) | -|----------------------------------------------------------------------------------------------------|-----------------------|------------------|------------------|----------------------------------------------|--------------------------------|-------------------------------------|--------------------|------------------------| -| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** | -| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 | -| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 | -| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 | -| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 | -| | -| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 | -| [ResNet34](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 | -| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 | -| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 | -| | -| [EfficientNet_b0](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 | -| [EfficientNet_b1](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 | -| [EfficientNet_b2](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 | -| [EfficientNet_b3](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 | - -
- 表格注释 (点击扩展) - -- 所有检查点都被SGD优化器训练到90 epochs, `lr0=0.001` 和 `weight_decay=5e-5`, 图像大小为224,全为默认设置。
运行数据记录于 https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2。 -- **准确度** 值为[ImageNet-1k](https://www.image-net.org/index.php)数据集上的单模型单尺度。
通过`python classify/val.py --data ../datasets/imagenet --img 224`进行复制。 -- 使用Google [Colab Pro](https://colab.research.google.com/signup) V100 High-RAM实例得出的100张推理图像的平均**速度**。
通过 `python classify/val.py --data ../datasets/imagenet --img 224 --batch 1`进行复制。 -- 用`export.py`**导出**到FP32的ONNX和FP16的TensorRT。
通过 `python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224`进行复制。 -
-
- -
- 分类使用实例 (点击展开) - -### 训练 -YOLOv5分类训练支持自动下载MNIST, Fashion-MNIST, CIFAR10, CIFAR100, Imagenette, Imagewoof和ImageNet数据集,并使用`--data` 参数. 打个比方,在MNIST上使用`--data mnist`开始训练。 - -```bash -# 单GPU -python classify/train.py --model yolov5s-cls.pt --data cifar100 --epochs 5 --img 224 --batch 128 - -# 多-GPU DDP -python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3 -``` - -### 验证 -在ImageNet-1k数据集上验证YOLOv5m-cl的准确性: -```bash -bash data/scripts/get_imagenet.sh --val # download ImageNet val split (6.3G, 50000 images) -python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224 # validate -``` - -### 预测 -用提前训练好的YOLOv5s-cls.pt去预测bus.jpg: -```bash -python classify/predict.py --weights yolov5s-cls.pt --data data/images/bus.jpg -``` -```python -model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s-cls.pt') # load from PyTorch Hub -``` - -### 导出 -导出一组训练好的YOLOv5s-cls, ResNet和EfficientNet模型到ONNX和TensorRT: -```bash -python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --include onnx engine --img 224 -``` -
- - -##
贡献
- -我们重视您的意见! 我们希望给大家提供尽可能的简单和透明的方式对 YOLOv5 做出贡献。开始之前请先点击并查看我们的 [贡献指南](CONTRIBUTING.md),填写[YOLOv5调查问卷](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) 来向我们发送您的经验反馈。真诚感谢我们所有的贡献者! - - - - -##
联系
- -关于YOLOv5的漏洞和功能问题,请访问 [GitHub Issues](https://github.com/ultralytics/yolov5/issues)。商业咨询或技术支持服务请访问[https://ultralytics.com/contact](https://ultralytics.com/contact)。 - -
-
- - - - - - - - - - - - - - - - - - - - -
</div>

-[assets]: https://github.com/ultralytics/yolov5/releases
-[tta]: https://github.com/ultralytics/yolov5/issues/303
diff --git a/.github/workflows/translate-readme.yml b/.github/workflows/translate-readme.yml
new file mode 100644
index 000000000000..76f59b83e65f
--- /dev/null
+++ b/.github/workflows/translate-readme.yml
@@ -0,0 +1,27 @@
+# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+# README translation action to translate README.md to Chinese as README.zh-CN.md on any change to README.md
+
+name: Translate README
+
+on:
+  push:
+    branches:
+      - main
+      - master
+    paths:
+      - README.md
+
+jobs:
+  Translate:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - name: Setup Node.js
+        uses: actions/setup-node@v3
+        with:
+          node-version: 16
+      # ISO Language Codes: https://cloud.google.com/translate/docs/languages
+      - name: Adding README - Chinese Simplified
+        uses: dephraiim/translate-readme@main
+        with:
+          LANG: zh-CN
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 72c3cc67e59f..28dbc89223cf 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -50,7 +50,7 @@ repos:
       additional_dependencies:
         - mdformat-gfm
         - mdformat-black
-    exclude: "README.md|README_cn.md"
+    exclude: "README.md|README.zh-CN.md"

 - repo: https://github.com/asottile/yesqa
   rev: v1.4.0
diff --git a/README.md b/README.md
index f00cb76c6ce9..9ee97321082e 100644
--- a/README.md
+++ b/README.md
@@ -4,7 +4,7 @@

- English | [简体中文](.github/README_cn.md) + [English](README.md) | [简体中文](README.zh-CN.md)
YOLOv5 CI @@ -15,15 +15,11 @@ Open In Colab Open In Kaggle
-
-

- YOLOv5 🚀 is the world's most loved vision AI, representing Ultralytics - open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development. -

- To request an Enterprise License please complete the form at Ultralytics Licensing. -

-

+ +YOLOv5 🚀 is the world's most loved vision AI, representing Ultralytics open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development. + +To request an Enterprise License please complete the form at Ultralytics Licensing.
@@ -313,7 +309,7 @@ YOLOv5 has been designed to be super easy to get started and simple to learn. We | [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | | [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | | [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | -| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x6.pt)
+ [TTA][TTA] | 1280
1536 | 55.0
**55.8** | 72.7
**72.7** | 3136
- | 26.2
- | 19.4
- | 140.7
- | 209.8
- | +| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x6.pt)
+ [TTA][tta] | 1280
1536 | 55.0
**55.8** | 72.7
**72.7** | 3136
- | 26.2
- | 19.4
- | 140.7
- | 209.8
- |
Table Notes @@ -479,5 +475,4 @@ For YOLOv5 bugs and feature requests please visit [GitHub Issues](https://github
-[assets]: https://github.com/ultralytics/yolov5/releases [tta]: https://github.com/ultralytics/yolov5/issues/303 diff --git a/README.zh-CN.md b/README.zh-CN.md new file mode 100644 index 000000000000..09cfc9472d9a --- /dev/null +++ b/README.zh-CN.md @@ -0,0 +1,479 @@ +
+

+ + +

+ +[英语](README.md)|[简体中文](README.zh-CN.md)
+ +
+ YOLOv5 CI + YOLOv5 Citation + Docker Pulls +
+ Run on Gradient + Open In Colab + Open In Kaggle +
+
+ +YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表超力对未来视觉 AI 方法的开源研究,结合在数千小时的研究和开发中积累的经验教训和最佳实践。 + +要申请企业许可证,请填写表格Ultralytics 许可. + +
+ + + + + + + + + + + + + + + + + + + + +
+
+ +##
Ultralytics 现场会议
+ +
+ +[Ultralytics Live Session Ep。 2个](https://youtu.be/LKpuzZllNpA)✨将直播**欧洲中部时间 12 月 13 日星期二 19:00**和[约瑟夫·纳尔逊](https://github.com/josephofiowa)的[机器人流](https://roboflow.com/?ref=ultralytics)谁将与我们一起讨论全新的 Roboflow x Ultralytics HUB 集成。收听 Glenn 和 Joseph 询问如何通过无缝数据集集成来加快工作流程! 🔥 + + + +
+ +##
细分 ⭐ 新
+ +
+ + +
+ +我们新的 YOLOv5[发布 v7.0](https://github.com/ultralytics/yolov5/releases/v7.0)实例分割模型是世界上最快和最准确的,击败所有当前[SOTA 基准](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco).我们使它们非常易于训练、验证和部署。查看我们的完整详细信息[发行说明](https://github.com/ultralytics/yolov5/releases/v7.0)并访问我们的[YOLOv5 分割 Colab 笔记本](https://github.com/ultralytics/yolov5/blob/master/segment/tutorial.ipynb)快速入门教程。 + +
+ Segmentation Checkpoints + +
+ +我们使用 A100 GPU 在 COCO 上以 640 图像大小训练了 300 个时期的 YOLOv5 分割模型。我们将所有模型导出到 ONNX FP32 以进行 CPU 速度测试,并导出到 TensorRT FP16 以进行 GPU 速度测试。我们在 Google 上进行了所有速度测试[协作临](https://colab.research.google.com/signup)便于重现的笔记本。 + +| 模型 | 尺寸
(像素) | 地图盒子
50-95 | 地图面具
50-95 | 火车时间
300个纪元
A100(小时) | 速度
ONNX 中央处理器
(小姐) | 速度
同仁堂A100
(小姐) | 参数
(男) | 失败者
@640(二) | +| ------------------------------------------------------------------------------------------ | --------------- | ------------------ | ------------------ | ------------------------------- | ----------------------------- | -------------------------- | -------------- | ------------------- | +| [YOLOv5n-se](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-seg.pt) | 640 | 27.6 | 23.4 | 80:17 | **62.7** | **1.2** | **2.0** | **7.1** | +| [YOLOv5s-se](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt) | 640 | 37.6 | 31.7 | 88:16 | 173.3 | 1.4 | 7.6 | 26.4 | +| [YOLOv5m段](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-seg.pt) | 640 | 45.0 | 37.1 | 108:36 | 427.0 | 2.2 | 22.0 | 70.8 | +| [YOLOv5l-se](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-seg.pt) | 640 | 49.0 | 39.9 | 我:43(X) | 857.4 | 2.9 | 47.9 | 147.7 | +| [YOLOv5x-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-seg.pt) | 640 | **50.7** | **41.4** | 62:56 (zks) | 1579.2 | 4.5 | 88.8 | 265.7 | + +- 使用 SGD 优化器将所有检查点训练到 300 个时期`lr0=0.01`和`weight_decay=5e-5`在图像大小 640 和所有默认设置。
运行记录到[HTTPS://玩豆瓣.爱/Glenn-就ocher/yo lo V5_V70_official](https://wandb.ai/glenn-jocher/YOLOv5_v70_official) +- **准确性**值适用于 COCO 数据集上的单模型单尺度。
重现者`python segment/val.py --data coco.yaml --weights yolov5s-seg.pt` +- **速度**使用 a 对超过 100 个推理图像进行平均[协作临](https://colab.research.google.com/signup)A100 高 RAM 实例。值仅表示推理速度(NMS 每张图像增加约 1 毫秒)。
重现者`python segment/val.py --data coco.yaml --weights yolov5s-seg.pt --batch 1` +- **出口**到 FP32 的 ONNX 和 FP16 的 TensorRT 完成`export.py`.
重现者`python export.py --weights yolov5s-seg.pt --include engine --device 0 --half` + +
+ +
+ Segmentation Usage Examples  Open In Colab + +### 火车 + +YOLOv5分割训练支持自动下载COCO128-seg分割数据集`--data coco128-seg.yaml`COCO-segments 数据集的参数和手动下载`bash data/scripts/get_coco.sh --train --val --segments`接着`python train.py --data coco.yaml`. + +```bash +# Single-GPU +python segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 + +# Multi-GPU DDP +python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --device 0,1,2,3 +``` + +### 瓦尔 + +在 COCO 数据集上验证 YOLOv5s-seg mask mAP: + +```bash +bash data/scripts/get_coco.sh --val --segments # download COCO val segments split (780MB, 5000 images) +python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # validate +``` + +### 预测 + +使用预训练的 YOLOv5m-seg.pt 来预测 bus.jpg: + +```bash +python segment/predict.py --weights yolov5m-seg.pt --data data/images/bus.jpg +``` + +```python +model = torch.hub.load( + "ultralytics/yolov5", "custom", "yolov5m-seg.pt" +) # load from PyTorch Hub (WARNING: inference not yet supported) +``` + +| ![zidane](https://user-images.githubusercontent.com/26833433/203113421-decef4c4-183d-4a0a-a6c2-6435b33bc5d3.jpg) | ![bus](https://user-images.githubusercontent.com/26833433/203113416-11fe0025-69f7-4874-a0a6-65d0bfe2999a.jpg) | +| ---------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | + +### 出口 + +将 YOLOv5s-seg 模型导出到 ONNX 和 TensorRT: + +```bash +python export.py --weights yolov5s-seg.pt --include onnx engine --img 640 --device 0 +``` + +
+ +##
文档
+ +见[YOLOv5 文档](https://docs.ultralytics.com)有关培训、测试和部署的完整文档。请参阅下面的快速入门示例。 + +
+Install + +克隆回购并安装[要求.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt)在一个[**Python>=3.7.0**](https://www.python.org/)环境,包括[**PyTorch>=1.7**](https://pytorch.org/get-started/locally/). + +```bash +git clone https://github.com/ultralytics/yolov5 # clone +cd yolov5 +pip install -r requirements.txt # install +``` + +
+ +
+Inference + +YOLOv5[PyTorch 中心](https://github.com/ultralytics/yolov5/issues/36)推理。[楷模](https://github.com/ultralytics/yolov5/tree/master/models)自动从最新下载 +YOLOv5[发布](https://github.com/ultralytics/yolov5/releases). + +```python +import torch + +# Model +model = torch.hub.load("ultralytics/yolov5", "yolov5s") # or yolov5n - yolov5x6, custom + +# Images +img = "https://ultralytics.com/images/zidane.jpg" # or file, Path, PIL, OpenCV, numpy, list + +# Inference +results = model(img) + +# Results +results.print() # or .show(), .save(), .crop(), .pandas(), etc. +``` + +
+ +
+Inference with detect.py + +`detect.py`在各种来源上运行推理,下载[楷模](https://github.com/ultralytics/yolov5/tree/master/models)自动从 +最新的YOLOv5[发布](https://github.com/ultralytics/yolov5/releases)并将结果保存到`runs/detect`. + +```bash +python detect.py --weights yolov5s.pt --source 0 # webcam + img.jpg # image + vid.mp4 # video + screen # screenshot + path/ # directory + list.txt # list of images + list.streams # list of streams + 'path/*.jpg' # glob + 'https://youtu.be/Zgi9g1ksQHc' # YouTube + 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream +``` + +
+ +
+Training + +下面的命令重现 YOLOv5[可可](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh)结果。[楷模](https://github.com/ultralytics/yolov5/tree/master/models)和[数据集](https://github.com/ultralytics/yolov5/tree/master/data)自动从最新下载 +YOLOv5[发布](https://github.com/ultralytics/yolov5/releases). YOLOv5n/s/m/l/x 的训练时间为 +V100 GPU 上 1/2/4/6/8 天([多GPU](https://github.com/ultralytics/yolov5/issues/475)倍快)。使用 +最大的`--batch-size`可能,或通过`--batch-size -1`为了 +YOLOv5[自动批处理](https://github.com/ultralytics/yolov5/pull/5092).显示的批量大小适用于 V100-16GB。 + +```bash +python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml --batch-size 128 + yolov5s 64 + yolov5m 40 + yolov5l 24 + yolov5x 16 +``` + + + +
+ +
+Tutorials + +- [训练自定义数据](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data)🚀 推荐 +- [获得最佳训练结果的技巧](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)☘️ + 推荐的 +- [多 GPU 训练](https://github.com/ultralytics/yolov5/issues/475) +- [PyTorch 中心](https://github.com/ultralytics/yolov5/issues/36)🌟 新 +- [TFLite、ONNX、CoreML、TensorRT 导出](https://github.com/ultralytics/yolov5/issues/251)🚀 +- [NVIDIA Jetson Nano 部署](https://github.com/ultralytics/yolov5/issues/9627)🌟 新 +- [测试时间增强 (TTA)](https://github.com/ultralytics/yolov5/issues/303) +- [模型集成](https://github.com/ultralytics/yolov5/issues/318) +- [模型修剪/稀疏度](https://github.com/ultralytics/yolov5/issues/304) +- [超参数进化](https://github.com/ultralytics/yolov5/issues/607) +- [使用冻结层进行迁移学习](https://github.com/ultralytics/yolov5/issues/1314) +- [架构总结](https://github.com/ultralytics/yolov5/issues/6998)🌟 新 +- [用于数据集、标签和主动学习的 Roboflow](https://github.com/ultralytics/yolov5/issues/4975)🌟 新 +- [ClearML 记录](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml)🌟 新 +- [所以平台](https://github.com/ultralytics/yolov5/wiki/Deci-Platform)🌟 新 +- [彗星记录](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet)🌟 新 + +
+ +##
集成
+ +
+ + +
+
+ +
+ + + + + + + + + + + +
+ +| 机器人流 | ClearML ⭐ 新 | 彗星⭐新 | 所以⭐新 | +| :-------------------------------------------------------------------------: | :-----------------------------------------------------------------------: | :----------------------------------------------------------------------------: | :---------------------------------------------------------------: | +| 将您的自定义数据集标记并直接导出到 YOLOv5 以进行训练[机器人流](https://roboflow.com/?ref=ultralytics) | 使用自动跟踪、可视化甚至远程训练 YOLOv5[清除ML](https://cutt.ly/yolov5-readme-clearml)(开源!) | 永远免费,[彗星](https://bit.ly/yolov5-readme-comet)可让您保存 YOLOv5 模型、恢复训练以及交互式可视化和调试预测 | 一键自动编译量化YOLOv5以获得更好的推理性能[所以](https://bit.ly/yolov5-deci-platform) | + +##
Ultralytics 集线器
+ +[Ultralytics 集线器](https://bit.ly/ultralytics_hub)是我们的⭐**新的**用于可视化数据集、训练 YOLOv5 🚀 模型并以无缝体验部署到现实世界的无代码解决方案。开始使用**自由的**现在! + + + + +##
为什么选择 YOLOv5
+ +YOLOv5 被设计为超级容易上手和简单易学。我们优先考虑现实世界的结果。 + +

+
+ YOLOv5-P5 640 Figure + +

+
+
+ Figure Notes + +- **COCO AP 值**表示[map@0.5](mailto:mAP@0.5):0.95 指标在 5000 张图像上测得[COCO val2017](http://cocodataset.org)从 256 到 1536 的各种推理大小的数据集。 +- **显卡速度**测量每张图像的平均推理时间[COCO val2017](http://cocodataset.org)数据集使用[美国销售.Excelerge](https://aws.amazon.com/ec2/instance-types/p3/)批量大小为 32 的 V100 实例。 +- **高效**数据来自[谷歌/汽车](https://github.com/google/automl)批量大小为 8。 +- **复制**经过`python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt` + +
+ +### 预训练检查点 + +| 模型 | 尺寸
(像素) | 地图
50-95 | 地图
50 | 速度
处理器b1
(小姐) | 速度
V100 b1
(小姐) | 速度
V100 b32
(小姐) | 参数
(男) | 失败者
@640(二) | +| --------------------------------------------------------------------------------------------------- | --------------- | ----------------- | ---------------- | ------------------------ | -------------------------- | --------------------------- | -------------- | ------------------- | +| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** | +| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 | +| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 | +| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 | +| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 | +| | | | | | | | | | +| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 | +| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | +| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | +| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | +| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x6.pt)
+[电讯局][tta] | 1280
1536 | 55.0
**55.8** | 72.7
**72.7** | 3136
- | 26.2
- | 19.4
- | 140.7
- | 209.8
- | + +
+ Table Notes + +- 所有检查点都使用默认设置训练到 300 个时期。纳米和小型型号使用[hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml)hyps,所有其他人都使用[hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml). +- \*\*地图\*\*值适用于单模型单尺度[COCO val2017](http://cocodataset.org)数据集。
重现者`python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` +- **速度**使用 a 对 COCO val 图像进行平均[美国销售.Excelerge](https://aws.amazon.com/ec2/instance-types/p3/)实例。 NMS 时间 (~1 ms/img) 不包括在内。
重现者`python val.py --data coco.yaml --img 640 --task speed --batch 1` +- **电讯局**[测试时间增加](https://github.com/ultralytics/yolov5/issues/303)包括反射和尺度增强。
重现者`python val.py --data coco.yaml --img 1536 --iou 0.7 --augment` + +
+ +##
分类⭐新
+ +YOLOv5[发布 v6.2](https://github.com/ultralytics/yolov5/releases)带来对分类模型训练、验证和部署的支持!查看我们的完整详细信息[发行说明](https://github.com/ultralytics/yolov5/releases/v6.2)并访问我们的[YOLOv5 分类 Colab 笔记本](https://github.com/ultralytics/yolov5/blob/master/classify/tutorial.ipynb)快速入门教程。 + +
+ Classification Checkpoints + +
+ +我们使用 4xA100 实例在 ImageNet 上训练了 90 个时期的 YOLOv5-cls 分类模型,我们训练了 ResNet 和 EfficientNet 模型以及相同的默认训练设置以进行比较。我们将所有模型导出到 ONNX FP32 以进行 CPU 速度测试,并导出到 TensorRT FP16 以进行 GPU 速度测试。我们在 Google 上进行了所有速度测试[协作临](https://colab.research.google.com/signup)为了便于重现。 + +| 模型 | 尺寸
(像素) | acc
top1 | acc
烹饪 | 训练
90个纪元
4xA100(小时) | 速度
ONNX 中央处理器
(小姐) | 速度
TensorRT V100
(小姐) | 参数
(男) | 失败者
@224(二) | +| ------------------------------------------------------------------------------------------ | --------------- | ---------------- | -------------- | ------------------------------ | ----------------------------- | -------------------------------- | -------------- | ------------------- | +| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** | +| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 | +| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 | +| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 | +| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 | +| | | | | | | | | | +| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 | +| [Resnetzch](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 | +| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 | +| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 | +| | | | | | | | | | +| [高效网络_b0](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 | +| [高效网络 b1](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 | +| [我们将预测](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 | +| [高效Netb3](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 | + +
+ Table Notes (click to expand) + +- 使用 SGD 优化器将所有检查点训练到 90 个时期`lr0=0.001`和`weight_decay=5e-5`在图像大小 224 和所有默认设置。
运行记录到[HTTPS://玩豆瓣.爱/Glenn-就ocher/yo lo V5-classifier-V6-2](https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2) +- **准确性**值适用于单模型单尺度[ImageNet-1k](https://www.image-net.org/index.php)数据集。
重现者`python classify/val.py --data ../datasets/imagenet --img 224` +- **速度**使用谷歌平均超过 100 个推理图像[协作临](https://colab.research.google.com/signup)V100 高 RAM 实例。
重现者`python classify/val.py --data ../datasets/imagenet --img 224 --batch 1` +- **出口**到 FP32 的 ONNX 和 FP16 的 TensorRT 完成`export.py`.
重现者`python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224` +
+
+ +
+ Classification Usage Examples  Open In Colab + +### 火车 + +YOLOv5 分类训练支持自动下载 MNIST、Fashion-MNIST、CIFAR10、CIFAR100、Imagenette、Imagewoof 和 ImageNet 数据集`--data`争论。开始使用 MNIST 进行训练`--data mnist`. + +```bash +# Single-GPU +python classify/train.py --model yolov5s-cls.pt --data cifar100 --epochs 5 --img 224 --batch 128 + +# Multi-GPU DDP +python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3 +``` + +### 瓦尔 + +在 ImageNet-1k 数据集上验证 YOLOv5m-cls 的准确性: + +```bash +bash data/scripts/get_imagenet.sh --val # download ImageNet val split (6.3G, 50000 images) +python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224 # validate +``` + +### 预测 + +使用预训练的 YOLOv5s-cls.pt 来预测 bus.jpg: + +```bash +python classify/predict.py --weights yolov5s-cls.pt --data data/images/bus.jpg +``` + +```python +model = torch.hub.load( + "ultralytics/yolov5", "custom", "yolov5s-cls.pt" +) # load from PyTorch Hub +``` + +### 出口 + +将一组经过训练的 YOLOv5s-cls、ResNet 和 EfficientNet 模型导出到 ONNX 和 TensorRT: + +```bash +python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --include onnx engine --img 224 +``` + +
+ +##
环境
+ +在几秒钟内开始使用我们经过验证的环境。单击下面的每个图标了解详细信息。 + +
+ + + + + + + + + + + + + + + + + +
+ +##
贡献
+ +我们喜欢您的意见!我们希望尽可能简单和透明地为 YOLOv5 做出贡献。请看我们的[投稿指南](CONTRIBUTING.md)开始,并填写[YOLOv5调查](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey)向我们发送您的体验反馈。感谢我们所有的贡献者! + + + + + +##
执照
+ +YOLOv5 在两种不同的许可下可用: + +- **GPL-3.0 许可证**: 看[执照](https://github.com/ultralytics/yolov5/blob/master/LICENSE)文件的详细信息。 +- **企业执照**:在没有 GPL-3.0 开源要求的情况下为商业产品开发提供更大的灵活性。典型用例是将 Ultralytics 软件和 AI 模型嵌入到商业产品和应用程序中。在以下位置申请企业许可证[Ultralytics 许可](https://ultralytics.com/license). + +##
接触
+ +对于 YOLOv5 错误和功能请求,请访问[GitHub 问题](https://github.com/ultralytics/yolov5/issues).如需专业支持,请[联系我们](https://ultralytics.com/contact). + +
+
+ + + + + + + + + + + + + + + + + + + + +
+ +[tta]: https://github.com/ultralytics/yolov5/issues/303 From 342fe05e6c88221750ce7e90b7d2e8baabd397dc Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 9 Dec 2022 01:47:14 +0000 Subject: [PATCH 1608/1976] docs: Added README."zh-CN".md translation via https://github.com/dephraiim/translate-readme --- README.zh-CN.md | 135 +++++++++++++++++++++++++----------------------- 1 file changed, 69 insertions(+), 66 deletions(-) diff --git a/README.zh-CN.md b/README.zh-CN.md index 09cfc9472d9a..0fc77565c5ef 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -4,9 +4,9 @@

-[英语](README.md)|[简体中文](README.zh-CN.md)
+[英语](README.md)\|[简体中文](README.zh-CN.md)
-
+
YOLOv5 CI YOLOv5 Citation Docker Pulls @@ -21,7 +21,7 @@ YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表Ultralytics 许可. -
+
@@ -79,10 +79,10 @@ YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表 Tutorials -- [训练自定义数据](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data)🚀 推荐 -- [获得最佳训练结果的技巧](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)☘️ - 推荐的 -- [多 GPU 训练](https://github.com/ultralytics/yolov5/issues/475) -- [PyTorch 中心](https://github.com/ultralytics/yolov5/issues/36)🌟 新 -- [TFLite、ONNX、CoreML、TensorRT 导出](https://github.com/ultralytics/yolov5/issues/251)🚀 -- [NVIDIA Jetson Nano 部署](https://github.com/ultralytics/yolov5/issues/9627)🌟 新 -- [测试时间增强 (TTA)](https://github.com/ultralytics/yolov5/issues/303) -- [模型集成](https://github.com/ultralytics/yolov5/issues/318) -- [模型修剪/稀疏度](https://github.com/ultralytics/yolov5/issues/304) -- [超参数进化](https://github.com/ultralytics/yolov5/issues/607) -- [使用冻结层进行迁移学习](https://github.com/ultralytics/yolov5/issues/1314) -- [架构总结](https://github.com/ultralytics/yolov5/issues/6998)🌟 新 -- [用于数据集、标签和主动学习的 Roboflow](https://github.com/ultralytics/yolov5/issues/4975)🌟 新 -- [ClearML 记录](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml)🌟 新 -- [所以平台](https://github.com/ultralytics/yolov5/wiki/Deci-Platform)🌟 新 -- [彗星记录](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet)🌟 新 +- [训练自定义数据](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data)🚀 推荐 +- [获得最佳训练结果的技巧](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)☘️ + 推荐的 +- [多 GPU 训练](https://github.com/ultralytics/yolov5/issues/475) +- [PyTorch 中心](https://github.com/ultralytics/yolov5/issues/36)🌟 新 +- [TFLite、ONNX、CoreML、TensorRT 导出](https://github.com/ultralytics/yolov5/issues/251)🚀 +- [NVIDIA Jetson Nano 部署](https://github.com/ultralytics/yolov5/issues/9627)🌟 新 +- [测试时间增强 (TTA)](https://github.com/ultralytics/yolov5/issues/303) +- [模型集成](https://github.com/ultralytics/yolov5/issues/318) +- [模型修剪/稀疏度](https://github.com/ultralytics/yolov5/issues/304) +- [超参数进化](https://github.com/ultralytics/yolov5/issues/607) +- [使用冻结层进行迁移学习](https://github.com/ultralytics/yolov5/issues/1314) +- [架构总结](https://github.com/ultralytics/yolov5/issues/6998)🌟 新 +- [用于数据集、标签和主动学习的 Roboflow](https://github.com/ultralytics/yolov5/issues/4975)🌟 新 +- [ClearML 记录](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml)🌟 新 +- [所以平台](https://github.com/ultralytics/yolov5/wiki/Deci-Platform)🌟 新 +- [彗星记录](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet)🌟 新
@@ -265,7 +263,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml -
-| 机器人流 | ClearML ⭐ 新 | 彗星⭐新 | 所以⭐新 | +| 机器人流 | ClearML ⭐ 新 | 彗星⭐新 | 所以⭐新 | | :-------------------------------------------------------------------------: | :-----------------------------------------------------------------------: | :----------------------------------------------------------------------------: | :---------------------------------------------------------------: | | 将您的自定义数据集标记并直接导出到 YOLOv5 以进行训练[机器人流](https://roboflow.com/?ref=ultralytics) | 使用自动跟踪、可视化甚至远程训练 YOLOv5[清除ML](https://cutt.ly/yolov5-readme-clearml)(开源!) | 永远免费,[彗星](https://bit.ly/yolov5-readme-comet)可让您保存 YOLOv5 模型、恢复训练以及交互式可视化和调试预测 | 一键自动编译量化YOLOv5以获得更好的推理性能[所以](https://bit.ly/yolov5-deci-platform) | @@ -289,10 +287,10 @@ YOLOv5 被设计为超级容易上手和简单易学。我们优先考虑现实
Figure Notes -- **COCO AP 值**表示[map@0.5](mailto:mAP@0.5):0.95 指标在 5000 张图像上测得[COCO val2017](http://cocodataset.org)从 256 到 1536 的各种推理大小的数据集。 -- **显卡速度**测量每张图像的平均推理时间[COCO val2017](http://cocodataset.org)数据集使用[美国销售.Excelerge](https://aws.amazon.com/ec2/instance-types/p3/)批量大小为 32 的 V100 实例。 -- **高效**数据来自[谷歌/汽车](https://github.com/google/automl)批量大小为 8。 -- **复制**经过`python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt` +- **COCO AP 值**表示[map@0.5](mailto:mAP@0.5):0.95 指标在 5000 张图像上测得[COCO val2017](http://cocodataset.org)从 256 到 1536 的各种推理大小的数据集。 +- **显卡速度**测量每张图像的平均推理时间[COCO val2017](http://cocodataset.org)数据集使用[美国销售.Excelerge](https://aws.amazon.com/ec2/instance-types/p3/)批量大小为 32 的 V100 实例。 +- **高效**数据来自[谷歌/汽车](https://github.com/google/automl)批量大小为 8。 +- **复制**经过`python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt`
@@ -315,10 +313,10 @@ YOLOv5 被设计为超级容易上手和简单易学。我们优先考虑现实
Table Notes -- 所有检查点都使用默认设置训练到 300 个时期。纳米和小型型号使用[hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml)hyps,所有其他人都使用[hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml). -- \*\*地图\*\*值适用于单模型单尺度[COCO val2017](http://cocodataset.org)数据集。
重现者`python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` -- **速度**使用 a 对 COCO val 图像进行平均[美国销售.Excelerge](https://aws.amazon.com/ec2/instance-types/p3/)实例。 NMS 时间 (~1 ms/img) 不包括在内。
重现者`python val.py --data coco.yaml --img 640 --task speed --batch 1` -- **电讯局**[测试时间增加](https://github.com/ultralytics/yolov5/issues/303)包括反射和尺度增强。
重现者`python val.py --data coco.yaml --img 1536 --iou 0.7 --augment` +- 所有检查点都使用默认设置训练到 300 个时期。纳米和小型型号使用[hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml)hyps,所有其他人都使用[hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml). +- **地图**值适用于单模型单尺度[COCO val2017](http://cocodataset.org)数据集。
重现者`python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` +- **速度**使用 a 对 COCO val 图像进行平均[美国销售.Excelerge](https://aws.amazon.com/ec2/instance-types/p3/)实例。 NMS 时间 (~1 ms/img) 不包括在内。
重现者`python val.py --data coco.yaml --img 640 --task speed --batch 1` +- **电讯局**[测试时间增加](https://github.com/ultralytics/yolov5/issues/303)包括反射和尺度增强。
重现者`python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`
@@ -333,33 +331,33 @@ YOLOv5[发布 v6.2](https://github.com/ultralytics/yolov5/releases)带来对分 我们使用 4xA100 实例在 ImageNet 上训练了 90 个时期的 YOLOv5-cls 分类模型,我们训练了 ResNet 和 EfficientNet 模型以及相同的默认训练设置以进行比较。我们将所有模型导出到 ONNX FP32 以进行 CPU 速度测试,并导出到 TensorRT FP16 以进行 GPU 速度测试。我们在 Google 上进行了所有速度测试[协作临](https://colab.research.google.com/signup)为了便于重现。 -| 模型 | 尺寸
(像素) | acc
top1 | acc
烹饪 | 训练
90个纪元
4xA100(小时) | 速度
ONNX 中央处理器
(小姐) | 速度
TensorRT V100
(小姐) | 参数
(男) | 失败者
@224(二) | -| ------------------------------------------------------------------------------------------ | --------------- | ---------------- | -------------- | ------------------------------ | ----------------------------- | -------------------------------- | -------------- | ------------------- | -| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** | -| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 | -| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 | -| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 | -| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 | -| | | | | | | | | | -| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 | -| [Resnetzch](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 | -| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 | -| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 | -| | | | | | | | | | -| [高效网络_b0](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 | -| [高效网络 b1](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 | -| [我们将预测](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 | -| [高效Netb3](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 | +| 模型 | 尺寸
(像素) | acc
top1 | acc
烹饪 | 训练
90个纪元
4xA100(小时) | 速度
ONNX 中央处理器
(小姐) | 速度
TensorRT V100
(小姐) | 参数
(男) | 失败者
@224(乙) | +| ------------------------------------------------------------------------------------------- | --------------- | ---------------- | -------------- | ------------------------------ | ----------------------------- | -------------------------------- | -------------- | ------------------- | +| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** | +| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 | +| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 | +| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 | +| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 | +| | | | | | | | | | +| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 | +| [Resnetzch](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 | +| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 | +| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 | +| | | | | | | | | | +| [高效网络\_b0](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 | +| [高效网络 b1](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 | +| [我们将预测](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 | +| [高效Netb3](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 |
Table Notes (click to expand) -- 使用 SGD 优化器将所有检查点训练到 90 个时期`lr0=0.001`和`weight_decay=5e-5`在图像大小 224 和所有默认设置。
运行记录到[HTTPS://玩豆瓣.爱/Glenn-就ocher/yo lo V5-classifier-V6-2](https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2) -- **准确性**值适用于单模型单尺度[ImageNet-1k](https://www.image-net.org/index.php)数据集。
重现者`python classify/val.py --data ../datasets/imagenet --img 224` -- **速度**使用谷歌平均超过 100 个推理图像[协作临](https://colab.research.google.com/signup)V100 高 RAM 实例。
重现者`python classify/val.py --data ../datasets/imagenet --img 224 --batch 1` -- **出口**到 FP32 的 ONNX 和 FP16 的 TensorRT 完成`export.py`.
重现者`python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224` -
- +- 使用 SGD 优化器将所有检查点训练到 90 个时期`lr0=0.001`和`weight_decay=5e-5`在图像大小 224 和所有默认设置。
运行记录到[HTTPS://玩豆瓣.爱/Glenn-就ocher/yo lo V5-classifier-V6-2](https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2) +- **准确性**值适用于单模型单尺度[ImageNet-1k](https://www.image-net.org/index.php)数据集。
重现者`python classify/val.py --data ../datasets/imagenet --img 224` +- **速度**使用谷歌平均超过 100 个推理图像[协作临](https://colab.research.google.com/signup)V100 高 RAM 实例。
重现者`python classify/val.py --data ../datasets/imagenet --img 224 --batch 1` +- **出口**到 FP32 的 ONNX 和 FP16 的 TensorRT 完成`export.py`.
重现者`python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224` + +
Classification Usage Examples  Open In Colab @@ -394,9 +392,7 @@ python classify/predict.py --weights yolov5s-cls.pt --data data/images/bus.jpg ``` ```python -model = torch.hub.load( - "ultralytics/yolov5", "custom", "yolov5s-cls.pt" -) # load from PyTorch Hub +model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s-cls.pt') # load from PyTorch Hub ``` ### 出口 @@ -433,6 +429,13 @@ python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --inclu
+##
应用程序
+ +在您的 iOS 或 Android 设备上运行 YOLOv5 模型[Ultralytics 应用程序](https://ultralytics.com/app_install)! + + +Ultralytics mobile app + ##
贡献
我们喜欢您的意见!我们希望尽可能简单和透明地为 YOLOv5 做出贡献。请看我们的[投稿指南](CONTRIBUTING.md)开始,并填写[YOLOv5调查](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey)向我们发送您的体验反馈。感谢我们所有的贡献者! @@ -445,8 +448,8 @@ python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --inclu YOLOv5 在两种不同的许可下可用: -- **GPL-3.0 许可证**: 看[执照](https://github.com/ultralytics/yolov5/blob/master/LICENSE)文件的详细信息。 -- **企业执照**:在没有 GPL-3.0 开源要求的情况下为商业产品开发提供更大的灵活性。典型用例是将 Ultralytics 软件和 AI 模型嵌入到商业产品和应用程序中。在以下位置申请企业许可证[Ultralytics 许可](https://ultralytics.com/license). +- **GPL-3.0 许可证**: 看[执照](https://github.com/ultralytics/yolov5/blob/master/LICENSE)文件的详细信息。 +- **企业执照**:在没有 GPL-3.0 开源要求的情况下为商业产品开发提供更大的灵活性。典型用例是将 Ultralytics 软件和 AI 模型嵌入到商业产品和应用程序中。在以下位置申请企业许可证[Ultralytics 许可](https://ultralytics.com/license). ##
接触
From 443ef7f33e0943ccc5e5c8ff922c6fe7a0cb7053 Mon Sep 17 00:00:00 2001 From: Yonghye Kwon Date: Mon, 12 Dec 2022 12:29:19 +0900 Subject: [PATCH 1609/1976] Modify a comment for OpenCV File I/O Functions (#10467) Modify comment for OpenCV File I/O Functions Signed-off-by: Yonghye Kwon Signed-off-by: Yonghye Kwon --- utils/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index 99a96576c3fd..e5a843c4a758 100644 --- a/utils/general.py +++ b/utils/general.py @@ -1115,7 +1115,7 @@ def increment_path(path, exist_ok=False, sep='', mkdir=False): return path -# OpenCV Chinese-friendly functions ------------------------------------------------------------------------------------ +# OpenCV Multilanguage-friendly functions ------------------------------------------------------------------------------------ imshow_ = cv2.imshow # copy to avoid recursion errors From 357cde9ee7da13ba3095995488c5a23631467f1a Mon Sep 17 00:00:00 2001 From: Yonghye Kwon Date: Tue, 13 Dec 2022 05:05:20 +0900 Subject: [PATCH 1610/1976] add force_reload=True when loading model using torch hub (#10460) Signed-off-by: Yonghye Kwon Signed-off-by: Yonghye Kwon Co-authored-by: Glenn Jocher --- tutorial.ipynb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 657dc266da92..6ab0a33366a5 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -963,7 +963,7 @@ "# YOLOv5 PyTorch HUB Inference (DetectionModels only)\n", "import torch\n", "\n", - "model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # yolov5n - yolov5x6 or custom\n", + "model = torch.hub.load('ultralytics/yolov5', 'yolov5s', force_reload=True) # yolov5n - yolov5x6 or custom\n", "im = 'https://ultralytics.com/images/zidane.jpg' # file, Path, PIL.Image, OpenCV, nparray, list\n", "results = model(im) # inference\n", "results.print() # or .show(), .save(), .crop(), .pandas(), etc." @@ -972,4 +972,4 @@ "outputs": [] } ] -} \ No newline at end of file +} From 1752768fb3b3ff4f842eaaecf7eba4808ac124a9 Mon Sep 17 00:00:00 2001 From: Nioolek <40284075+Nioolek@users.noreply.github.com> Date: Wed, 14 Dec 2022 06:48:15 +0800 Subject: [PATCH 1611/1976] Fix Chinese README (#10465) * Beautify Chinese Documents * Beautify Chinese Documents * Beautify Chinese Documents * Beautify Chinese Documents * add blank * Update translate-readme.yml Disable auto-translation by changing on-push branch to 'translate_readme'. This prevents overwriting of manual fixes. Signed-off-by: Glenn Jocher * Update translate-readme.yml Signed-off-by: Glenn Jocher * fix live doc * Update README.md Signed-off-by: Glenn Jocher * Update README.zh-CN.md Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- .github/workflows/translate-readme.yml | 3 +- README.md | 4 +- README.zh-CN.md | 260 ++++++++++++------------- 3 files changed, 133 insertions(+), 134 deletions(-) diff --git a/.github/workflows/translate-readme.yml b/.github/workflows/translate-readme.yml index 76f59b83e65f..538ff375097e 100644 --- a/.github/workflows/translate-readme.yml +++ b/.github/workflows/translate-readme.yml @@ -6,8 +6,7 @@ name: Translate README on: push: branches: - - main - - master + - translate_readme # replace with 'master' to enable action paths: - README.md diff --git a/README.md b/README.md index 9ee97321082e..21bdc83f349e 100644 --- a/README.md +++ b/README.md @@ -50,9 +50,9 @@ To request an Enterprise License please complete the form at
-[Ultralytics Live Session Ep. 2](https://youtu.be/LKpuzZllNpA) ✨ will be streaming live on **Tuesday, December 13th at 19:00 CET** with [Joseph Nelson](https://github.com/josephofiowa) of [Roboflow](https://roboflow.com/?ref=ultralytics) who will join us to discuss the brand new Roboflow x Ultralytics HUB integration. Tune in to ask Glenn and Joseph about how you can make speed up workflows with seamless dataset integration! 🔥
+[Ultralytics Live Session Ep. 2](https://youtu.be/QGRtEG7UjtE) ✨ will be streaming live on **Tuesday, December 13th at 19:00 CET** with [Joseph Nelson](https://github.com/josephofiowa) of [Roboflow](https://roboflow.com/?ref=ultralytics) who will join us to discuss the brand new Roboflow x Ultralytics HUB integration. Tune in to ask Glenn and Joseph about how you can speed up workflows with seamless dataset integration! 🔥

-
+
diff --git a/README.zh-CN.md b/README.zh-CN.md index 0fc77565c5ef..15232be3aa4f 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -4,7 +4,7 @@

-[英语](README.md)\|[简体中文](README.zh-CN.md)
+[英文](README.md)\|[简体中文](README.zh-CN.md)
YOLOv5 CI @@ -17,9 +17,9 @@

-YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表超力对未来视觉 AI 方法的开源研究,结合在数千小时的研究和开发中积累的经验教训和最佳实践。 +YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表 Ultralytics 对未来视觉 AI 方法的开源研究,结合在数千小时的研究和开发中积累的经验教训和最佳实践。 -要申请企业许可证,请填写表格Ultralytics 许可. +如果要申请企业许可证,请填写表格Ultralytics 许可. +##
Ultralytics 直播会议

-[Ultralytics Live Session Ep。 2个](https://youtu.be/LKpuzZllNpA)✨将直播**欧洲中部时间 12 月 13 日星期二 19:00**和[约瑟夫·纳尔逊](https://github.com/josephofiowa)的[机器人流](https://roboflow.com/?ref=ultralytics)谁将与我们一起讨论全新的 Roboflow x Ultralytics HUB 集成。收听 Glenn 和 Joseph 询问如何通过无缝数据集集成来加快工作流程! 🔥
+[Ultralytics Live Session Ep. 2](https://youtu.be/QGRtEG7UjtE) ✨ 将于 **欧洲中部时间 12 月 13 日星期二 19:00** 直播,[Roboflow](https://roboflow.com/?ref=ultralytics) 的 [Joseph Nelson](https://github.com/josephofiowa) 将加入我们,一起讨论全新的 Roboflow x Ultralytics HUB 集成。欢迎收听,向 Glenn 和 Joseph 了解如何通过无缝数据集集成来加快工作流程!🔥

-
+
-##
细分 ⭐ 新
+##
实例分割模型 ⭐ 新
-我们新的 YOLOv5[发布 v7.0](https://github.com/ultralytics/yolov5/releases/v7.0)实例分割模型是世界上最快和最准确的,击败所有当前[SOTA 基准](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco).我们使它们非常易于训练、验证和部署。查看我们的完整详细信息[发行说明](https://github.com/ultralytics/yolov5/releases/v7.0)并访问我们的[YOLOv5 分割 Colab 笔记本](https://github.com/ultralytics/yolov5/blob/master/segment/tutorial.ipynb)快速入门教程。 +我们新的 YOLOv5 [release v7.0](https://github.com/ultralytics/yolov5/releases/v7.0) 实例分割模型是世界上最快和最准确的模型,击败所有当前 [SOTA 基准](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco)。我们使它非常易于训练、验证和部署。更多细节请查看 [发行说明](https://github.com/ultralytics/yolov5/releases/v7.0) 或访问我们的 [YOLOv5 分割 Colab 笔记本](https://github.com/ultralytics/yolov5/blob/master/segment/tutorial.ipynb) 以快速入门。
- Segmentation Checkpoints + 实例分割模型列表
-我们使用 A100 GPU 在 COCO 上以 640 图像大小训练了 300 个时期的 YOLOv5 分割模型。我们将所有模型导出到 ONNX FP32 以进行 CPU 速度测试,并导出到 TensorRT FP16 以进行 GPU 速度测试。我们在 Google 上进行了所有速度测试[协作临](https://colab.research.google.com/signup)便于重现的笔记本。 +我们使用 A100 GPU 在 COCO 上以 640 图像大小训练了 300 epochs 得到 YOLOv5 分割模型。我们将所有模型导出到 ONNX FP32 以进行 CPU 速度测试,并导出到 TensorRT FP16 以进行 GPU 速度测试。为了便于再现,我们在 Google [Colab Pro](https://colab.research.google.com/signup) 上进行了所有速度测试。 -| 模型 | 尺寸
(像素) | 地图盒子
50-95 | 地图面具
50-95 | 火车时间
300个纪元
A100(小时) | 速度
ONNX 中央处理器
(小姐) | 速度
同仁堂A100
(小姐) | 参数
(男) | 失败者
@640(二) | -| ------------------------------------------------------------------------------------------ | --------------- | ------------------ | ------------------ | ------------------------------- | ----------------------------- | -------------------------- | -------------- | ------------------- | -| [YOLOv5n-se](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-seg.pt) | 640 | 27.6 | 23.4 | 80:17 | **62.7** | **1.2** | **2.0** | **7.1** | -| [YOLOv5s-se](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt) | 640 | 37.6 | 31.7 | 88:16 | 173.3 | 1.4 | 7.6 | 26.4 | -| [YOLOv5m段](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-seg.pt) | 640 | 45.0 | 37.1 | 108:36 | 427.0 | 2.2 | 22.0 | 70.8 | -| [YOLOv5l-se](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-seg.pt) | 640 | 49.0 | 39.9 | 我:43(X) | 857.4 | 2.9 | 47.9 | 147.7 | -| [YOLOv5x-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-seg.pt) | 640 | **50.7** | **41.4** | 62:56 (zks) | 1579.2 | 4.5 | 88.8 | 265.7 | +| 模型 | 尺寸
(像素) | mAPbox
50-95 | mAPmask
50-95 | 训练时长
300 epochs
A100 GPU(小时) | 推理速度
ONNX CPU
(ms) | 推理速度
TRT A100
(ms) | 参数量
(M) | FLOPs
@640 (B) | +| ------------------------------------------------------------------------------------------ | ------------------- | -------------------- | --------------------- | --------------------------------------------- | --------------------------------- | --------------------------------- | ----------------- | ---------------------- | +| [YOLOv5n-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-seg.pt) | 640 | 27.6 | 23.4 | 80:17 | **62.7** | **1.2** | **2.0** | **7.1** | +| [YOLOv5s-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt) | 640 | 37.6 | 31.7 | 88:16 | 173.3 | 1.4 | 7.6 | 26.4 | +| [YOLOv5m-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-seg.pt) | 640 | 45.0 | 37.1 | 108:36 | 427.0 | 2.2 | 22.0 | 70.8 | +| [YOLOv5l-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-seg.pt) | 640 | 49.0 | 39.9 | 66:43 (2x) | 857.4 | 2.9 | 47.9 | 147.7 | +| [YOLOv5x-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-seg.pt) | 640 | **50.7** | **41.4** | 62:56 (3x) | 1579.2 | 4.5 | 88.8 | 265.7 | -- 使用 SGD 优化器将所有检查点训练到 300 个时期`lr0=0.01`和`weight_decay=5e-5`在图像大小 640 和所有默认设置。
运行记录到[HTTPS://玩豆瓣.爱/Glenn-就ocher/yo lo V5_V70_official](https://wandb.ai/glenn-jocher/YOLOv5_v70_official) -- **准确性**值适用于 COCO 数据集上的单模型单尺度。
重现者`python segment/val.py --data coco.yaml --weights yolov5s-seg.pt` -- **速度**使用 a 对超过 100 个推理图像进行平均[协作临](https://colab.research.google.com/signup)A100 高 RAM 实例。值仅表示推理速度(NMS 每张图像增加约 1 毫秒)。
重现者`python segment/val.py --data coco.yaml --weights yolov5s-seg.pt --batch 1` -- **出口**到 FP32 的 ONNX 和 FP16 的 TensorRT 完成`export.py`.
重现者`python export.py --weights yolov5s-seg.pt --include engine --device 0 --half`
+- 所有模型使用 SGD 优化器训练 300 epochs, 都使用 `lr0=0.01` 和 `weight_decay=5e-5` 参数, 图像大小为 640 。
训练 log 可以查看 https://wandb.ai/glenn-jocher/YOLOv5_v70_official +- **准确性**结果都在 COCO 数据集上,使用单模型单尺度测试得到。
复现命令 `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt` +- **推理速度**是使用 100 张图像推理时间进行平均得到,测试环境使用 [Colab Pro](https://colab.research.google.com/signup) 上 A100 高 RAM 实例。结果仅表示推理速度(NMS 每张图像增加约 1 毫秒)。
复现命令 `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt --batch 1` +- **模型转换**到 FP32 的 ONNX 和 FP16 的 TensorRT 脚本为 `export.py`.
运行命令 `python export.py --weights yolov5s-seg.pt --include engine --device 0 --half`


- Segmentation Usage Examples  Open In Colab
+ 分割模型使用示例  Open In Colab

-### 火车
+### 训练

-YOLOv5分割训练支持自动下载COCO128-seg分割数据集`--data coco128-seg.yaml`COCO-segments 数据集的参数和手动下载`bash data/scripts/get_coco.sh --train --val --segments`接着`python train.py --data coco.yaml`.
+YOLOv5分割训练支持自动下载 COCO128-seg 分割数据集,用户仅需在启动指令中包含 `--data coco128-seg.yaml` 参数。 若要手动下载,使用命令 `bash data/scripts/get_coco.sh --train --val --segments`, 在下载完毕后,使用命令 `python train.py --data coco.yaml` 开启训练。

```bash
-# Single-GPU
+# 单 GPU
python segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640

-# Multi-GPU DDP
+# 多 GPU, DDP 模式
python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --device 0,1,2,3
```

-### 瓦尔
+### 验证

在 COCO 数据集上验证 YOLOv5s-seg mask mAP:

```bash
-bash data/scripts/get_coco.sh --val --segments # download COCO val segments split (780MB, 5000 images)
-python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # validate
+bash data/scripts/get_coco.sh --val --segments # 下载 COCO val segments 数据集 (780MB, 5000 images)
+python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # 验证
```

### 预测

@@ -119,13 +119,13 @@ python segment/predict.py --weights yolov5m-seg.pt --data data/images/bus.jpg
```

```python
-model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5m-seg.pt') # load from PyTorch Hub (WARNING: inference not yet supported)
+model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5m-seg.pt') # 从 PyTorch Hub 加载模型 (WARNING: 推理暂未支持)
```

| ![zidane](https://user-images.githubusercontent.com/26833433/203113421-decef4c4-183d-4a0a-a6c2-6435b33bc5d3.jpg) | ![bus](https://user-images.githubusercontent.com/26833433/203113416-11fe0025-69f7-4874-a0a6-65d0bfe2999a.jpg) |
| ---------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- |

-### 出口
+### 模型导出

将 YOLOv5s-seg 模型导出到 ONNX 和 TensorRT:

```bash
python export.py --weights yolov5s-seg.pt --include onnx engine --img 640 --device 0
```

@@ -137,12 +137,12 @@

##
文档
-见[YOLOv5 文档](https://docs.ultralytics.com)有关培训、测试和部署的完整文档。请参阅下面的快速入门示例。 +有关训练、测试和部署的完整文档见[YOLOv5 文档](https://docs.ultralytics.com)。请参阅下面的快速入门示例。
-Install +安装 -克隆回购并安装[要求.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt)在一个[**Python>=3.7.0**](https://www.python.org/)环境,包括[**PyTorch>=1.7**](https://pytorch.org/get-started/locally/). +克隆 repo,并要求在 [**Python>=3.7.0**](https://www.python.org/) 环境中安装 [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) ,且要求 [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/) 。 ```bash git clone https://github.com/ultralytics/yolov5 # clone @@ -153,10 +153,10 @@ pip install -r requirements.txt # install
-Inference +推理 -YOLOv5[PyTorch 中心](https://github.com/ultralytics/yolov5/issues/36)推理。[楷模](https://github.com/ultralytics/yolov5/tree/master/models)自动从最新下载 -YOLOv5[发布](https://github.com/ultralytics/yolov5/releases). +使用 YOLOv5 [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36) 推理。最新 [模型](https://github.com/ultralytics/yolov5/tree/master/models) 将自动的从 +YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) 中下载。 ```python import torch @@ -177,10 +177,10 @@ results.print() # or .show(), .save(), .crop(), .pandas(), etc.
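除上面的 Hub 推理代码外,`results` 对象还可以转换为更便于程序处理的格式。下面是一个最小示例(仅作演示;假设可以联网,以便自动下载模型和示例图片):

```python
import torch

# 从 PyTorch Hub 加载 YOLOv5s 检测模型
model = torch.hub.load('ultralytics/yolov5', 'yolov5s')

# 对一张示例图片推理
results = model('https://ultralytics.com/images/zidane.jpg')

# 以 pandas DataFrame 读取检测框 (xmin, ymin, xmax, ymax, confidence, class, name)
df = results.pandas().xyxy[0]
print(df)
```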
-Inference with detect.py +使用 detect.py 推理 -`detect.py`在各种来源上运行推理,下载[楷模](https://github.com/ultralytics/yolov5/tree/master/models)自动从 -最新的YOLOv5[发布](https://github.com/ultralytics/yolov5/releases)并将结果保存到`runs/detect`. +`detect.py` 在各种来源上运行推理, [模型](https://github.com/ultralytics/yolov5/tree/master/models) 自动从 +最新的YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) 中下载,并将结果保存到 `runs/detect` 。 ```bash python detect.py --weights yolov5s.pt --source 0 # webcam @@ -198,13 +198,14 @@ python detect.py --weights yolov5s.pt --source 0 #
-Training +训练 -下面的命令重现 YOLOv5[可可](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh)结果。[楷模](https://github.com/ultralytics/yolov5/tree/master/models)和[数据集](https://github.com/ultralytics/yolov5/tree/master/data)自动从最新下载 -YOLOv5[发布](https://github.com/ultralytics/yolov5/releases). YOLOv5n/s/m/l/x 的训练时间为 -V100 GPU 上 1/2/4/6/8 天([多GPU](https://github.com/ultralytics/yolov5/issues/475)倍快)。使用 -最大的`--batch-size`可能,或通过`--batch-size -1`为了 -YOLOv5[自动批处理](https://github.com/ultralytics/yolov5/pull/5092).显示的批量大小适用于 V100-16GB。 +下面的命令重现 YOLOv5 在 [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) 数据集上的结果。 +最新的 [模型](https://github.com/ultralytics/yolov5/tree/master/models) 和 [数据集](https://github.com/ultralytics/yolov5/tree/master/data) +将自动的从 YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) 中下载。 +YOLOv5n/s/m/l/x 在 V100 GPU 的训练时间为 1/2/4/6/8 天( [多GPU](https://github.com/ultralytics/yolov5/issues/475) 训练速度更快)。 +尽可能使用更大的 `--batch-size` ,或通过 `--batch-size -1` 实现 +YOLOv5 [自动批处理](https://github.com/ultralytics/yolov5/pull/5092) 。下方显示的 batchsize 适用于 V100-16GB。 ```bash python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml --batch-size 128 @@ -219,16 +220,15 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml -
-Tutorials +教程 - [训练自定义数据](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data)🚀 推荐 -- [获得最佳训练结果的技巧](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)☘️ - 推荐的 +- [获得最佳训练结果的技巧](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)☘️ 推荐 - [多 GPU 训练](https://github.com/ultralytics/yolov5/issues/475) -- [PyTorch 中心](https://github.com/ultralytics/yolov5/issues/36)🌟 新 +- [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36)🌟 新 - [TFLite、ONNX、CoreML、TensorRT 导出](https://github.com/ultralytics/yolov5/issues/251)🚀 - [NVIDIA Jetson Nano 部署](https://github.com/ultralytics/yolov5/issues/9627)🌟 新 -- [测试时间增强 (TTA)](https://github.com/ultralytics/yolov5/issues/303) +- [测试时数据增强 (TTA)](https://github.com/ultralytics/yolov5/issues/303) - [模型集成](https://github.com/ultralytics/yolov5/issues/318) - [模型修剪/稀疏度](https://github.com/ultralytics/yolov5/issues/304) - [超参数进化](https://github.com/ultralytics/yolov5/issues/607) @@ -236,12 +236,12 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - - [架构总结](https://github.com/ultralytics/yolov5/issues/6998)🌟 新 - [用于数据集、标签和主动学习的 Roboflow](https://github.com/ultralytics/yolov5/issues/4975)🌟 新 - [ClearML 记录](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml)🌟 新 -- [所以平台](https://github.com/ultralytics/yolov5/wiki/Deci-Platform)🌟 新 -- [彗星记录](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet)🌟 新 +- [Deci 平台](https://github.com/ultralytics/yolov5/wiki/Deci-Platform)🌟 新 +- [Comet Logging](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet)🌟 新
-##
集成
+##
模块集成

@@ -263,118 +263,118 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - -| 机器人流 | ClearML ⭐ 新 | 彗星⭐新 | 所以⭐新 | -| :-------------------------------------------------------------------------: | :-----------------------------------------------------------------------: | :----------------------------------------------------------------------------: | :---------------------------------------------------------------: | -| 将您的自定义数据集标记并直接导出到 YOLOv5 以进行训练[机器人流](https://roboflow.com/?ref=ultralytics) | 使用自动跟踪、可视化甚至远程训练 YOLOv5[清除ML](https://cutt.ly/yolov5-readme-clearml)(开源!) | 永远免费,[彗星](https://bit.ly/yolov5-readme-comet)可让您保存 YOLOv5 模型、恢复训练以及交互式可视化和调试预测 | 一键自动编译量化YOLOv5以获得更好的推理性能[所以](https://bit.ly/yolov5-deci-platform) | +| Roboflow | ClearML ⭐ 新 | Comet ⭐ 新 | Deci ⭐ 新 | +| :-----------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------: | +| 将您的自定义数据集进行标注并直接导出到 YOLOv5 以进行训练 [Roboflow](https://roboflow.com/?ref=ultralytics) | 自动跟踪、可视化甚至远程训练 YOLOv5 [ClearML](https://cutt.ly/yolov5-readme-clearml)(开源!) | 永远免费,[Comet](https://bit.ly/yolov5-readme-comet)可让您保存 YOLOv5 模型、恢复训练以及交互式可视化和调试预测 | 一键自动编译量化 YOLOv5 以获得更好的推理性能[Deci](https://bit.ly/yolov5-deci-platform) | -##
Ultralytics 集线器
+##
Ultralytics HUB

-[Ultralytics 集线器](https://bit.ly/ultralytics_hub)是我们的⭐**新的**用于可视化数据集、训练 YOLOv5 🚀 模型并以无缝体验部署到现实世界的无代码解决方案。开始使用**自由的**现在!
+[Ultralytics HUB](https://bit.ly/ultralytics_hub) 是我们 ⭐ **全新的** 无代码解决方案,用于可视化数据集、训练 YOLOv5 🚀 模型,并以无缝体验部署到现实世界。现在开始 **免费** 使用它!

##
为什么选择 YOLOv5
-YOLOv5 被设计为超级容易上手和简单易学。我们优先考虑现实世界的结果。 +YOLOv5 超级容易上手,简单易学。我们优先考虑现实世界的结果。

- YOLOv5-P5 640 Figure + YOLOv5-P5 640 图

- Figure Notes + 图表笔记 -- **COCO AP 值**表示[map@0.5](mailto:mAP@0.5):0.95 指标在 5000 张图像上测得[COCO val2017](http://cocodataset.org)从 256 到 1536 的各种推理大小的数据集。 -- **显卡速度**测量每张图像的平均推理时间[COCO val2017](http://cocodataset.org)数据集使用[美国销售.Excelerge](https://aws.amazon.com/ec2/instance-types/p3/)批量大小为 32 的 V100 实例。 -- **高效**数据来自[谷歌/汽车](https://github.com/google/automl)批量大小为 8。 -- **复制**经过`python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt` +- **COCO AP val** 表示 mAP@0.5:0.95 指标,在 [COCO val2017](http://cocodataset.org) 数据集的 5000 张图像上测得, 图像包含 256 到 1536 各种推理大小。 +- **显卡推理速度** 为在 [COCO val2017](http://cocodataset.org) 数据集上的平均推理时间,使用 [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) V100实例,batchsize 为 32 。 +- **EfficientDet** 数据来自 [google/automl](https://github.com/google/automl) , batchsize 为32。 +- **复现命令** 为 `python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt`
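上面的 mAP@0.5:0.95,即在 IoU 阈值 0.50 至 0.95(步长 0.05)共 10 个阈值下分别计算 mAP,再取平均。下面用占位数值演示这一平均方式(数值仅为示例,并非真实结果):

```python
import numpy as np

iou_thresholds = np.arange(0.5, 1.0, 0.05)  # 0.50, 0.55, ..., 0.95,共 10 个阈值
map_per_iou = np.linspace(0.60, 0.30, 10)   # 占位数值:各 IoU 阈值下的 mAP
map_50_95 = map_per_iou.mean()              # mAP@0.5:0.95 即各阈值 mAP 的平均值
print(f'mAP@0.5:0.95 = {map_50_95:.3f}')    # 0.450
```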
-### 预训练检查点 - -| 模型 | 尺寸
(像素) | 地图
50-95 | 地图
50 | 速度
处理器b1
(小姐) | 速度
V100 b1
(小姐) | 速度
V100 b32
(小姐) | 参数
(男) | 失败者
@640(二) | -| --------------------------------------------------------------------------------------------------- | --------------- | ----------------- | ---------------- | ------------------------ | -------------------------- | --------------------------- | -------------- | ------------------- | -| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** | -| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 | -| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 | -| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 | -| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 | -| | | | | | | | | | -| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 | -| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | -| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | -| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | -| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x6.pt)
+[电讯局][tta] | 1280
1536 | 55.0
**55.8** | 72.7
**72.7** | 3136
- | 26.2
- | 19.4
- | 140.7
- | 209.8
- | +### 预训练模型 + +| 模型 | 尺寸
(像素) | mAPval
50-95 | mAPval
50 | 推理速度
CPU b1
(ms) | 推理速度
V100 b1
(ms) | 速度
V100 b32
(ms) | 参数量
(M) | FLOPs
@640 (B) | +| --------------------------------------------------------------------------------------------------- | ------------------- | -------------------- | ------------------- | ------------------------------- | -------------------------------- | ------------------------------ | ----------------- | ---------------------- | +| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** | +| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 | +| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 | +| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 | +| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 | +| | | | | | | | | | +| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 | +| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | +| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | +| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | +| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x6.pt)
+[TTA][tta] | 1280
1536 | 55.0
**55.8** | 72.7
**72.7** | 3136
- | 26.2
- | 19.4
- | 140.7
- | 209.8
- |
- Table Notes + 笔记 -- 所有检查点都使用默认设置训练到 300 个时期。纳米和小型型号使用[hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml)hyps,所有其他人都使用[hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml). -- **地图**值适用于单模型单尺度[COCO val2017](http://cocodataset.org)数据集。
重现者`python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` -- **速度**使用 a 对 COCO val 图像进行平均[美国销售.Excelerge](https://aws.amazon.com/ec2/instance-types/p3/)实例。 NMS 时间 (~1 ms/img) 不包括在内。
重现者`python val.py --data coco.yaml --img 640 --task speed --batch 1` -- **电讯局**[测试时间增加](https://github.com/ultralytics/yolov5/issues/303)包括反射和尺度增强。
重现者`python val.py --data coco.yaml --img 1536 --iou 0.7 --augment` +- 所有模型都使用默认配置,训练 300 epochs。n和s模型使用 [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml) ,其他模型都使用 [hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml) 。 +- **mAPval**在单模型单尺度上计算,数据集使用 [COCO val2017](http://cocodataset.org) 。
复现命令 `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` +- **推理速度**在 COCO val 图像总体时间上进行平均得到,测试环境使用[AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/)实例。 NMS 时间 (大约 1 ms/img) 不包括在内。
复现命令 `python val.py --data coco.yaml --img 640 --task speed --batch 1` +- **TTA** [测试时数据增强](https://github.com/ultralytics/yolov5/issues/303) 包括反射和尺度变换。
复现命令 `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`
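上面的 **TTA** 除了通过 `val.py --augment` 使用外,也可以在 PyTorch Hub 推理时启用。下面是一个简单示例(仅作演示;`augment=True` 会在推理时融合反射与多尺度增强的预测,推理速度会相应变慢):

```python
import torch

model = torch.hub.load('ultralytics/yolov5', 'yolov5s')

# augment=True 启用测试时数据增强 (TTA)
results = model('https://ultralytics.com/images/zidane.jpg', augment=True)
results.print()
```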
-##
分类⭐新
+##
分类网络 ⭐ 新
-YOLOv5[发布 v6.2](https://github.com/ultralytics/yolov5/releases)带来对分类模型训练、验证和部署的支持!查看我们的完整详细信息[发行说明](https://github.com/ultralytics/yolov5/releases/v6.2)并访问我们的[YOLOv5 分类 Colab 笔记本](https://github.com/ultralytics/yolov5/blob/master/classify/tutorial.ipynb)快速入门教程。 +YOLOv5 [release v6.2](https://github.com/ultralytics/yolov5/releases) 带来对分类模型训练、验证和部署的支持!详情请查看 [发行说明](https://github.com/ultralytics/yolov5/releases/v6.2) 或访问我们的 [YOLOv5 分类 Colab 笔记本](https://github.com/ultralytics/yolov5/blob/master/classify/tutorial.ipynb) 以快速入门。
- Classification Checkpoints + 分类网络模型
-我们使用 4xA100 实例在 ImageNet 上训练了 90 个时期的 YOLOv5-cls 分类模型,我们训练了 ResNet 和 EfficientNet 模型以及相同的默认训练设置以进行比较。我们将所有模型导出到 ONNX FP32 以进行 CPU 速度测试,并导出到 TensorRT FP16 以进行 GPU 速度测试。我们在 Google 上进行了所有速度测试[协作临](https://colab.research.google.com/signup)为了便于重现。 - -| 模型 | 尺寸
(像素) | acc
top1 | acc
烹饪 | 训练
90个纪元
4xA100(小时) | 速度
ONNX 中央处理器
(小姐) | 速度
TensorRT V100
(小姐) | 参数
(男) | 失败者
@224(乙) | -| ------------------------------------------------------------------------------------------- | --------------- | ---------------- | -------------- | ------------------------------ | ----------------------------- | -------------------------------- | -------------- | ------------------- | -| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** | -| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 | -| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 | -| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 | -| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 | -| | | | | | | | | | -| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 | -| [Resnetzch](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 | -| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 | -| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 | -| | | | | | | | | | -| [高效网络\_b0](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 | -| [高效网络 b1](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 | -| [我们将预测](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 | -| [高效Netb3](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 | +我们使用 4xA100 实例在 ImageNet 上训练了 90 个 epochs 得到 YOLOv5-cls 分类模型,我们训练了 ResNet 和 EfficientNet 模型以及相同的默认训练设置以进行比较。我们将所有模型导出到 ONNX FP32 以进行 CPU 速度测试,并导出到 TensorRT FP16 以进行 GPU 速度测试。为了便于重现,我们在 Google 上进行了所有速度测试 [Colab Pro](https://colab.research.google.com/signup) 。 + +| 模型 | 尺寸
(像素) | acc
top1 | acc
top5 | 训练时长
90 epochs
4xA100(小时) | 推理速度
ONNX CPU
(ms) | 推理速度
TensorRT V100
(ms) | 参数
(M) | FLOPs
@224 (B) |
+| -------------------------------------------------------------------------------------------------- | ------------------- | ---------------- | ---------------- | ------------------------------------------ | --------------------------------- | -------------------------------------- | --------------- | -----------------------|
+| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** |
+| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 |
+| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 |
+| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 |
+| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 |
+| | | | | | | | | |
+| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 |
+| [ResNet34](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 |
+| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 |
+| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 |
+| | | | | | | | | |
+| [EfficientNet_b0](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 |
+| [EfficientNet_b1](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 |
+| [EfficientNet_b2](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 |
+| [EfficientNet_b3](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 |

- Table Notes (click to expand) + Table Notes (点击以展开) -- 使用 SGD 优化器将所有检查点训练到 90 个时期`lr0=0.001`和`weight_decay=5e-5`在图像大小 224 和所有默认设置。
运行记录到[HTTPS://玩豆瓣.爱/Glenn-就ocher/yo lo V5-classifier-V6-2](https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2) -- **准确性**值适用于单模型单尺度[ImageNet-1k](https://www.image-net.org/index.php)数据集。
重现者`python classify/val.py --data ../datasets/imagenet --img 224` -- **速度**使用谷歌平均超过 100 个推理图像[协作临](https://colab.research.google.com/signup)V100 高 RAM 实例。
重现者`python classify/val.py --data ../datasets/imagenet --img 224 --batch 1` -- **出口**到 FP32 的 ONNX 和 FP16 的 TensorRT 完成`export.py`.
重现者`python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224` +- 所有模型都使用 SGD 优化器训练 90 个 epochs,都使用 `lr0=0.001` 和 `weight_decay=5e-5` 参数, 图像大小为 224 ,且都使用默认设置。
训练 log 可以查看 https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2 +- **准确性**都在单模型单尺度上计算,数据集使用 [ImageNet-1k](https://www.image-net.org/index.php) 。
复现命令 `python classify/val.py --data ../datasets/imagenet --img 224` +- **推理速度**是使用 100 个推理图像进行平均得到,测试环境使用谷歌 [Colab Pro](https://colab.research.google.com/signup) V100 高 RAM 实例。
复现命令 `python classify/val.py --data ../datasets/imagenet --img 224 --batch 1` +- **模型导出**到 FP32 的 ONNX 和 FP16 的 TensorRT 使用 `export.py` 。
复现命令 `python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224`
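按上面的导出命令得到 ONNX 模型后,可以脱离 PyTorch、直接用 onnxruntime 推理。下面是一个最小示例(假设已 `pip install onnxruntime`,且当前目录存在导出的 `yolov5s-cls.onnx`;这里用随机张量代替真实预处理,实际使用时预处理应与训练保持一致):

```python
import numpy as np
import onnxruntime as ort

session = ort.InferenceSession('yolov5s-cls.onnx')     # 加载导出的分类模型
input_name = session.get_inputs()[0].name              # YOLOv5 导出的输入名通常为 'images'

x = np.random.rand(1, 3, 224, 224).astype(np.float32)  # 占位输入:1x3x224x224
logits = session.run(None, {input_name: x})[0]         # ImageNet 模型输出形状为 (1, 1000)
top5 = logits[0].argsort()[-5:][::-1]                  # top-5 类别索引
print(top5)
```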
- Classification Usage Examples  Open In Colab + 分类训练示例  Open In Colab -### 火车 +### 训练 -YOLOv5 分类训练支持自动下载 MNIST、Fashion-MNIST、CIFAR10、CIFAR100、Imagenette、Imagewoof 和 ImageNet 数据集`--data`争论。开始使用 MNIST 进行训练`--data mnist`. +YOLOv5 分类训练支持自动下载 MNIST、Fashion-MNIST、CIFAR10、CIFAR100、Imagenette、Imagewoof 和 ImageNet 数据集,命令中使用 `--data` 即可。 MNIST 示例 `--data mnist` 。 ```bash -# Single-GPU +# 单 GPU python classify/train.py --model yolov5s-cls.pt --data cifar100 --epochs 5 --img 224 --batch 128 -# Multi-GPU DDP +# 多 GPU, DDP 模式 python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3 ``` -### 瓦尔 +### 验证 在 ImageNet-1k 数据集上验证 YOLOv5m-cls 的准确性: @@ -395,7 +395,7 @@ python classify/predict.py --weights yolov5s-cls.pt --data data/images/bus.jpg model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s-cls.pt') # load from PyTorch Hub ``` -### 出口 +### 模型导出 将一组经过训练的 YOLOv5s-cls、ResNet 和 EfficientNet 模型导出到 ONNX 和 TensorRT: @@ -407,7 +407,7 @@ python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --inclu ##
环境
-在几秒钟内开始使用我们经过验证的环境。单击下面的每个图标了解详细信息。 +使用下面我们经过验证的环境,在几秒钟内开始使用 YOLOv5 。单击下面的图标了解详细信息。 -##
应用程序
+##
APP

+下载 [Ultralytics APP](https://ultralytics.com/app_install),即可在您的 iOS 或 Android 设备上运行 YOLOv5 模型!

Ultralytics mobile app

##
贡献
-我们喜欢您的意见!我们希望尽可能简单和透明地为 YOLOv5 做出贡献。请看我们的[投稿指南](CONTRIBUTING.md)开始,并填写[YOLOv5调查](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey)向我们发送您的体验反馈。感谢我们所有的贡献者! +我们喜欢您的意见或建议!我们希望尽可能简单和透明地为 YOLOv5 做出贡献。请看我们的 [投稿指南](CONTRIBUTING.md),并填写 [YOLOv5调查](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) 向我们发送您的体验反馈。感谢我们所有的贡献者!
-##
执照
+##
License
-YOLOv5 在两种不同的许可下可用: +YOLOv5 在两种不同的 License 下可用: -- **GPL-3.0 许可证**: 看[执照](https://github.com/ultralytics/yolov5/blob/master/LICENSE)文件的详细信息。 -- **企业执照**:在没有 GPL-3.0 开源要求的情况下为商业产品开发提供更大的灵活性。典型用例是将 Ultralytics 软件和 AI 模型嵌入到商业产品和应用程序中。在以下位置申请企业许可证[Ultralytics 许可](https://ultralytics.com/license). +- **GPL-3.0 License**: 查看 [License](https://github.com/ultralytics/yolov5/blob/master/LICENSE) 文件的详细信息。 +- **企业License**:在没有 GPL-3.0 开源要求的情况下为商业产品开发提供更大的灵活性。典型用例是将 Ultralytics 软件和 AI 模型嵌入到商业产品和应用程序中。在以下位置申请企业许可证 [Ultralytics 许可](https://ultralytics.com/license) 。 -##
接触
+##
联系我们
-对于 YOLOv5 错误和功能请求,请访问[GitHub 问题](https://github.com/ultralytics/yolov5/issues).如需专业支持,请[联系我们](https://ultralytics.com/contact). +若发现 YOLOv5 的 bug 或有功能需求,请访问 [GitHub 问题](https://github.com/ultralytics/yolov5/issues) 。如需专业支持,请 [联系我们](https://ultralytics.com/contact) 。
From 1ae91940abe9ca3e064784bb18c12271ab3157b4 Mon Sep 17 00:00:00 2001 From: nerdyespresso <106761627+nerdyespresso@users.noreply.github.com> Date: Thu, 15 Dec 2022 07:56:42 -0500 Subject: [PATCH 1612/1976] Update Comet hyperlinks (#10500) * Update README.md Signed-off-by: nerdyespresso <106761627+nerdyespresso@users.noreply.github.com> * Update README.md Signed-off-by: nerdyespresso <106761627+nerdyespresso@users.noreply.github.com> * Update README.md Signed-off-by: nerdyespresso <106761627+nerdyespresso@users.noreply.github.com> * Update README.md Signed-off-by: nerdyespresso <106761627+nerdyespresso@users.noreply.github.com> * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher Signed-off-by: nerdyespresso <106761627+nerdyespresso@users.noreply.github.com> Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- README.md | 2 +- classify/tutorial.ipynb | 4 ++-- segment/tutorial.ipynb | 4 ++-- tutorial.ipynb | 2 +- utils/loggers/comet/README.md | 12 ++++++------ 5 files changed, 12 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index 21bdc83f349e..56015b239fc9 100644 --- a/README.md +++ b/README.md @@ -264,7 +264,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - |Roboflow|ClearML ⭐ NEW|Comet ⭐ NEW|Deci ⭐ NEW| |:-:|:-:|:-:|:-:| -|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Free forever, [Comet](https://bit.ly/yolov5-readme-comet) lets you save YOLOv5 models, resume training, and interactively visualise and debug predictions|Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)| +|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Free forever, [Comet](https://bit.ly/yolov5-readme-comet2) lets you save YOLOv5 models, resume training, and interactively visualise and debug predictions|Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)| ##
Ultralytics HUB
diff --git a/classify/tutorial.ipynb b/classify/tutorial.ipynb index c6f5d0d88a2d..94bafba00204 100644 --- a/classify/tutorial.ipynb +++ b/classify/tutorial.ipynb @@ -1341,7 +1341,7 @@ }, "source": [ "## Comet Logging and Visualization 🌟 NEW\n", - "[Comet](https://bit.ly/yolov5-readme-comet) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", + "[Comet](https://bit.ly/yolov5-readme-comet2) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", "\n", "Getting started is easy:\n", "```shell\n", @@ -1476,4 +1476,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} \ No newline at end of file +} diff --git a/segment/tutorial.ipynb b/segment/tutorial.ipynb index 09ca963d4b98..e1179ffc1cc6 100644 --- a/segment/tutorial.ipynb +++ b/segment/tutorial.ipynb @@ -454,7 +454,7 @@ }, "source": [ "## Comet Logging and Visualization 🌟 NEW\n", - "[Comet](https://bit.ly/yolov5-readme-comet) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", + "[Comet](https://bit.ly/yolov5-readme-comet2) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", "\n", "Getting started is easy:\n", "```shell\n", @@ -590,4 +590,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} \ No newline at end of file +} diff --git a/tutorial.ipynb b/tutorial.ipynb index 6ab0a33366a5..cebcee3dfd24 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -860,7 +860,7 @@ "cell_type": "markdown", "source": [ "## Comet Logging and Visualization 🌟 NEW\n", - "[Comet](https://bit.ly/yolov5-readme-comet) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", + "[Comet](https://bit.ly/yolov5-readme-comet2) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! 
Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", "\n", "Getting started is easy:\n", "```shell\n", diff --git a/utils/loggers/comet/README.md b/utils/loggers/comet/README.md index 8f206cd9830e..8a361e2b211d 100644 --- a/utils/loggers/comet/README.md +++ b/utils/loggers/comet/README.md @@ -2,13 +2,13 @@ # YOLOv5 with Comet -This guide will cover how to use YOLOv5 with [Comet](https://bit.ly/yolov5-readme-comet) +This guide will cover how to use YOLOv5 with [Comet](https://bit.ly/yolov5-readme-comet2) # About Comet Comet builds tools that help data scientists, engineers, and team leaders accelerate and optimize machine learning and deep learning models. -Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! +Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://www.comet.com/docs/v2/guides/comet-dashboard/code-panels/about-panels/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! # Getting Started @@ -54,7 +54,7 @@ That's it! Comet will automatically log your hyperparameters, command line argum yolo-ui # Try out an Example! -Check out an example of a [completed run here](https://www.comet.com/examples/comet-example-yolov5/a0e29e0e9b984e4a822db2a62d0cb357?experiment-tab=chart&showOutliers=true&smoothing=0&transformY=smoothing&xAxis=step&ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration) +Check out an example of a [completed run here](https://www.comet.com/examples/comet-example-yolov5/a0e29e0e9b984e4a822db2a62d0cb357?experiment-tab=chart&showOutliers=true&smoothing=0&transformY=smoothing&xAxis=step&utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github) Or better yet, try it out yourself in this Colab Notebook @@ -119,7 +119,7 @@ You can control the frequency of logged predictions and the associated images by **Note:** The YOLOv5 validation dataloader will default to a batch size of 32, so you will have to set the logging frequency accordingly. -Here is an [example project using the Panel](https://www.comet.com/examples/comet-example-yolov5?shareable=YcwMiJaZSXfcEXpGOHDD12vA1&ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration) +Here is an [example project using the Panel](https://www.comet.com/examples/comet-example-yolov5?shareable=YcwMiJaZSXfcEXpGOHDD12vA1&utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github) ```shell @@ -161,7 +161,7 @@ env COMET_LOG_PER_CLASS_METRICS=true python train.py \ ## Uploading a Dataset to Comet Artifacts -If you would like to store your data using [Comet Artifacts](https://www.comet.com/docs/v2/guides/data-management/using-artifacts/#learn-more?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration), you can do so using the `upload_dataset` flag. 
+If you would like to store your data using [Comet Artifacts](https://www.comet.com/docs/v2/guides/data-management/using-artifacts/#learn-more?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github), you can do so using the `upload_dataset` flag. The dataset be organized in the way described in the [YOLOv5 documentation](https://docs.ultralytics.com/tutorials/train-custom-datasets/#3-organize-directories). The dataset config `yaml` file must follow the same format as that of the `coco128.yaml` file. @@ -251,6 +251,6 @@ comet optimizer -j utils/loggers/comet/hpo.py \ ### Visualizing Results -Comet provides a number of ways to visualize the results of your sweep. Take a look at a [project with a completed sweep here](https://www.comet.com/examples/comet-example-yolov5/view/PrlArHGuuhDTKC1UuBmTtOSXD/panels?ref=yolov5&utm_source=yolov5&utm_medium=affilliate&utm_campaign=yolov5_comet_integration) +Comet provides a number of ways to visualize the results of your sweep. Take a look at a [project with a completed sweep here](https://www.comet.com/examples/comet-example-yolov5/view/PrlArHGuuhDTKC1UuBmTtOSXD/panels?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github) hyperparameter-yolo From b564c1f3653a9b11038a80e348a34afbf59943be Mon Sep 17 00:00:00 2001 From: Yonghye Kwon Date: Sat, 17 Dec 2022 20:05:00 +0900 Subject: [PATCH 1613/1976] Check `conf_thres` and `iou_thres` prior to use (#10515) * Checks conf_thres and iou_thres at beign Why checks conf_thres after operation with it? Signed-off-by: Yonghye Kwon * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update general.py Signed-off-by: Yonghye Kwon Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- utils/general.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/utils/general.py b/utils/general.py index e5a843c4a758..6145801ca47f 100644 --- a/utils/general.py +++ b/utils/general.py @@ -898,6 +898,9 @@ def non_max_suppression( list of detections, on (n,6) tensor per image [xyxy, conf, cls] """ + # Checks + assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0' + assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0' if isinstance(prediction, (list, tuple)): # YOLOv5 model in validation model, output = (inference_out, loss_out) prediction = prediction[0] # select only inference output @@ -909,10 +912,6 @@ def non_max_suppression( nc = prediction.shape[2] - nm - 5 # number of classes xc = prediction[..., 4] > conf_thres # candidates - # Checks - assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0' - assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0' - # Settings # min_wh = 2 # (pixels) minimum box width and height max_wh = 7680 # (pixels) maximum box width and height From 8d65f9d8ce274f78949ab88b7359580cc8cabacc Mon Sep 17 00:00:00 2001 From: Yonghye Kwon Date: Sat, 17 Dec 2022 20:10:26 +0900 Subject: [PATCH 1614/1976] Support extensive shape for functions related to bounding box localization (#10516) * support extensive shape for functions related to bounding box localization Signed-off-by: Yonghye Kwon * merge exp branch updates Signed-off-by: Yonghye Kwon Co-authored-by: Glenn Jocher --- utils/general.py | 54 
++++++++++++++++++++++++------------------------ 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/utils/general.py b/utils/general.py index 6145801ca47f..744abb439ed1 100644 --- a/utils/general.py +++ b/utils/general.py @@ -750,30 +750,30 @@ def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper) def xyxy2xywh(x): # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center - y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center - y[:, 2] = x[:, 2] - x[:, 0] # width - y[:, 3] = x[:, 3] - x[:, 1] # height + y[..., 0] = (x[..., 0] + x[..., 2]) / 2 # x center + y[..., 1] = (x[..., 1] + x[..., 3]) / 2 # y center + y[..., 2] = x[..., 2] - x[..., 0] # width + y[..., 3] = x[..., 3] - x[..., 1] # height return y def xywh2xyxy(x): # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x - y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y - y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x - y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y + y[..., 0] = x[..., 0] - x[..., 2] / 2 # top left x + y[..., 1] = x[..., 1] - x[..., 3] / 2 # top left y + y[..., 2] = x[..., 0] + x[..., 2] / 2 # bottom right x + y[..., 3] = x[..., 1] + x[..., 3] / 2 # bottom right y return y def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0): # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw # top left x - y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh # top left y - y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw # bottom right x - y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh # bottom right y + y[..., 0] = w * (x[..., 0] - x[..., 2] / 2) + padw # top left x + y[..., 1] = h * (x[..., 1] - x[..., 3] / 2) + padh # top left y + y[..., 2] = w * (x[..., 0] + x[..., 2] / 2) + padw # bottom right x + y[..., 3] = h * (x[..., 1] + x[..., 3] / 2) + padh # bottom right y return y @@ -782,18 +782,18 @@ def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0): if clip: clip_boxes(x, (h - eps, w - eps)) # warning: inplace clip y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - y[:, 0] = ((x[:, 0] + x[:, 2]) / 2) / w # x center - y[:, 1] = ((x[:, 1] + x[:, 3]) / 2) / h # y center - y[:, 2] = (x[:, 2] - x[:, 0]) / w # width - y[:, 3] = (x[:, 3] - x[:, 1]) / h # height + y[..., 0] = ((x[..., 0] + x[..., 2]) / 2) / w # x center + y[..., 1] = ((x[..., 1] + x[..., 3]) / 2) / h # y center + y[..., 2] = (x[..., 2] - x[..., 0]) / w # width + y[..., 3] = (x[..., 3] - x[..., 1]) / h # height return y def xyn2xy(x, w=640, h=640, padw=0, padh=0): # Convert normalized segments into pixel segments, shape (n,2) y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - y[:, 0] = w * x[:, 0] + padw # top left x - y[:, 1] = h * x[:, 1] + padh # top left y + y[..., 0] = w * x[..., 0] + padw # top left x + y[..., 1] = h * x[..., 1] + padh # top left y return y @@ -833,9 +833,9 @@ def scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None): gain = ratio_pad[0][0] pad = ratio_pad[1] - boxes[:, [0, 2]] -= pad[0] # x padding - boxes[:, [1, 3]] -= pad[1] # y padding - boxes[:, :4] /= gain + boxes[..., [0, 2]] -= pad[0] # x padding + boxes[..., [1, 3]] -= pad[1] # y padding + boxes[..., :4] /= 
gain clip_boxes(boxes, img0_shape) return boxes @@ -862,13 +862,13 @@ def scale_segments(img1_shape, segments, img0_shape, ratio_pad=None, normalize=F def clip_boxes(boxes, shape): # Clip boxes (xyxy) to image shape (height, width) if isinstance(boxes, torch.Tensor): # faster individually - boxes[:, 0].clamp_(0, shape[1]) # x1 - boxes[:, 1].clamp_(0, shape[0]) # y1 - boxes[:, 2].clamp_(0, shape[1]) # x2 - boxes[:, 3].clamp_(0, shape[0]) # y2 + boxes[..., 0].clamp_(0, shape[1]) # x1 + boxes[..., 1].clamp_(0, shape[0]) # y1 + boxes[..., 2].clamp_(0, shape[1]) # x2 + boxes[..., 3].clamp_(0, shape[0]) # y2 else: # np.array (faster grouped) - boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(0, shape[1]) # x1, x2 - boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, shape[0]) # y1, y2 + boxes[..., [0, 2]] = boxes[..., [0, 2]].clip(0, shape[1]) # x1, x2 + boxes[..., [1, 3]] = boxes[..., [1, 3]].clip(0, shape[0]) # y1, y2 def clip_segments(segments, shape): From b2f94e8c356083bb85d76a60ea2b54d5ad9fbe36 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 17 Dec 2022 12:26:57 +0100 Subject: [PATCH 1615/1976] Update to ONNX opset 17 (#10522) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- export.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/export.py b/export.py index 928992903b0b..baf86f1d9297 100644 --- a/export.py +++ b/export.py @@ -624,7 +624,7 @@ def parse_opt(): parser.add_argument('--int8', action='store_true', help='CoreML/TF INT8 quantization') parser.add_argument('--dynamic', action='store_true', help='ONNX/TF/TensorRT: dynamic axes') parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model') - parser.add_argument('--opset', type=int, default=12, help='ONNX: opset version') + parser.add_argument('--opset', type=int, default=17, help='ONNX: opset version') parser.add_argument('--verbose', action='store_true', help='TensorRT: verbose log') parser.add_argument('--workspace', type=int, default=4, help='TensorRT: workspace size (GB)') parser.add_argument('--nms', action='store_true', help='TF: add NMS to model') From 43623729cc634d690cece1f1d015e4d59e0b9d98 Mon Sep 17 00:00:00 2001 From: Wang Xin Date: Sat, 17 Dec 2022 19:55:08 +0800 Subject: [PATCH 1616/1976] Update train.py (#10485) Setting `master_port` to 1 may cause `Permission denied` due to failure to bind the port. So it is better to set it to a port greater than 1024. 
Signed-off-by: Wang Xin Signed-off-by: Wang Xin Co-authored-by: Ayush Chaurasia --- classify/train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/classify/train.py b/classify/train.py index a50845a4f781..4767be77bd61 100644 --- a/classify/train.py +++ b/classify/train.py @@ -6,7 +6,7 @@ $ python classify/train.py --model yolov5s-cls.pt --data imagenette160 --epochs 5 --img 224 Usage - Multi-GPU DDP training: - $ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3 + $ python -m torch.distributed.run --nproc_per_node 4 --master_port 2022 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3 Datasets: --data mnist, fashion-mnist, cifar10, cifar100, imagenette, imagewoof, imagenet, or 'path/to/data' YOLOv5-cls models: --model yolov5n-cls.pt, yolov5s-cls.pt, yolov5m-cls.pt, yolov5l-cls.pt, yolov5x-cls.pt From 2c35c1b318ecd4856275039220c052a976d2cfe2 Mon Sep 17 00:00:00 2001 From: Yonghye Kwon Date: Sun, 18 Dec 2022 21:03:01 +0900 Subject: [PATCH 1617/1976] Limit detections without explicit if condition (#10502) * limit detections without explicit if condition Signed-off-by: Yonghye Kwon * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * cleanup indexing code for limit detections Signed-off-by: Yonghye Kwon Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- utils/general.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/utils/general.py b/utils/general.py index 744abb439ed1..70b6f6446f23 100644 --- a/utils/general.py +++ b/utils/general.py @@ -978,8 +978,7 @@ def non_max_suppression( c = x[:, 5:6] * (0 if agnostic else max_wh) # classes boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS - if i.shape[0] > max_det: # limit detections - i = i[:max_det] + i = i[:max_det] # limit detections if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean) # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4) iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix From b2a0f1cdc579bd81b3c4543752abaa4a90a53c8b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 18 Dec 2022 20:06:01 +0100 Subject: [PATCH 1618/1976] Update `onnx>=1.12.0` (#10526) --- export.py | 2 +- requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/export.py b/export.py index baf86f1d9297..7910178b2338 100644 --- a/export.py +++ b/export.py @@ -132,7 +132,7 @@ def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:' @try_export def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr('ONNX:')): # YOLOv5 ONNX export - check_requirements('onnx') + check_requirements('onnx>=1.12.0') import onnx LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...') diff --git a/requirements.txt b/requirements.txt index 85eb839df8a0..4a8649c696a8 100644 --- a/requirements.txt +++ b/requirements.txt @@ -29,7 +29,7 @@ seaborn>=0.11.0 # Export ---------------------------------------------------------------------- # coremltools>=6.0 # CoreML export -# onnx>=1.9.0 # ONNX export +# onnx>=1.12.0 # ONNX export # onnx-simplifier>=0.4.1 # ONNX simplifier # nvidia-pyindex # TensorRT export # nvidia-tensorrt # TensorRT export From 
10e93d295fed1459666409751b4a897521c31b90 Mon Sep 17 00:00:00 2001 From: Yonghye Kwon Date: Mon, 19 Dec 2022 18:27:34 +0900 Subject: [PATCH 1619/1976] Set a seed of generator with an option for more randomness when training several models with different seeds (#10486) * set seed with parameter Signed-off-by: Yonghye Kwon * make seed to be a large number * set seed with a parameter * set a seed of dataloader with opt for more randomness Signed-off-by: Yonghye Kwon Co-authored-by: Glenn Jocher --- train.py | 3 ++- utils/dataloaders.py | 5 +++-- utils/segment/dataloaders.py | 5 +++-- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/train.py b/train.py index 8b5446e58f2d..5d75f22b6335 100644 --- a/train.py +++ b/train.py @@ -198,7 +198,8 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio image_weights=opt.image_weights, quad=opt.quad, prefix=colorstr('train: '), - shuffle=True) + shuffle=True, + seed=opt.seed) labels = np.concatenate(dataset.labels, 0) mlc = int(labels[:, 0].max()) # max label class assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. Possible class labels are 0-{nc - 1}' diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 6d2b27ea5e60..302cc3300d35 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -115,7 +115,8 @@ def create_dataloader(path, image_weights=False, quad=False, prefix='', - shuffle=False): + shuffle=False, + seed=0): if rect and shuffle: LOGGER.warning('WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False') shuffle = False @@ -140,7 +141,7 @@ def create_dataloader(path, sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates generator = torch.Generator() - generator.manual_seed(6148914691236517205 + RANK) + generator.manual_seed(6148914691236517205 + seed + RANK) return loader(dataset, batch_size=batch_size, shuffle=shuffle and sampler is None, diff --git a/utils/segment/dataloaders.py b/utils/segment/dataloaders.py index 9de6f0fbf903..d66b36115e3f 100644 --- a/utils/segment/dataloaders.py +++ b/utils/segment/dataloaders.py @@ -37,7 +37,8 @@ def create_dataloader(path, prefix='', shuffle=False, mask_downsample_ratio=1, - overlap_mask=False): + overlap_mask=False, + seed=0): if rect and shuffle: LOGGER.warning('WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False') shuffle = False @@ -64,7 +65,7 @@ def create_dataloader(path, sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates generator = torch.Generator() - generator.manual_seed(6148914691236517205 + RANK) + generator.manual_seed(6148914691236517205 + seed + RANK) return loader( dataset, batch_size=batch_size, From 5545ff3545d886417b4eff12203d1af4d758cc10 Mon Sep 17 00:00:00 2001 From: Yonghye Kwon Date: Tue, 20 Dec 2022 01:19:14 +0900 Subject: [PATCH 1620/1976] Sort by confidence and remove excess boxes without explicit if (#10517) * sort by confidence and remove excess boxes without explicit if Signed-off-by: Yonghye Kwon * cleanup indexing boxes for remove excess boxes it is related to https://github.com/ultralytics/yolov5/pull/10502. 
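Like #10502 above, this works because tensor slicing clamps to the available length, so the explicit `if`/`elif` bounds checks are redundant. A quick standalone illustration of the PyTorch behaviour this relies on (not part of the patch):

```python
import torch

scores = torch.rand(5)
i = scores.argsort(descending=True)  # only 5 indices exist
print(i[:30000].shape)  # torch.Size([5]): slicing past the end clamps, no IndexError
print(i[:3].shape)      # torch.Size([3]): truncates normally when there is an excess
```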
Signed-off-by: Yonghye Kwon Co-authored-by: Glenn Jocher --- utils/general.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/utils/general.py b/utils/general.py index 70b6f6446f23..0bbcb6e7334c 100644 --- a/utils/general.py +++ b/utils/general.py @@ -969,10 +969,7 @@ def non_max_suppression( n = x.shape[0] # number of boxes if not n: # no boxes continue - elif n > max_nms: # excess boxes - x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence - else: - x = x[x[:, 4].argsort(descending=True)] # sort by confidence + x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence and remove excess boxes # Batched NMS c = x[:, 5:6] * (0 if agnostic else max_wh) # classes From f72f0fec980b35d7f9575d15b326f529b5a9ac0d Mon Sep 17 00:00:00 2001 From: Amir Pourmand Date: Tue, 20 Dec 2022 18:37:43 +0330 Subject: [PATCH 1621/1976] Add Albumentation Default hyperparameter file (#10529) * add albumentation hyps * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Rename hyp.noAugmentation.yaml to hyp.no-augmentation.yaml * Update hyp.no-augmentation.yaml Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- data/hyps/hyp.no-augmentation.yaml | 35 ++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) create mode 100644 data/hyps/hyp.no-augmentation.yaml diff --git a/data/hyps/hyp.no-augmentation.yaml b/data/hyps/hyp.no-augmentation.yaml new file mode 100644 index 000000000000..8fbd5b262afa --- /dev/null +++ b/data/hyps/hyp.no-augmentation.yaml @@ -0,0 +1,35 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# Hyperparameters when using the Albumentations framework +# python train.py --hyp hyp.no-augmentation.yaml +# See https://github.com/ultralytics/yolov5/pull/3882 for YOLOv5 + Albumentations usage examples + +lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3) +lrf: 0.1 # final OneCycleLR learning rate (lr0 * lrf) +momentum: 0.937 # SGD momentum/Adam beta1 +weight_decay: 0.0005 # optimizer weight decay 5e-4 +warmup_epochs: 3.0 # warmup epochs (fractions ok) +warmup_momentum: 0.8 # warmup initial momentum +warmup_bias_lr: 0.1 # warmup initial bias lr +box: 0.05 # box loss gain +cls: 0.3 # cls loss gain +cls_pw: 1.0 # cls BCELoss positive_weight +obj: 0.7 # obj loss gain (scale with pixels) +obj_pw: 1.0 # obj BCELoss positive_weight +iou_t: 0.20 # IoU training threshold +anchor_t: 4.0 # anchor-multiple threshold +# anchors: 3 # anchors per output layer (0 to ignore) +# these parameters are all zero since we want to use the Albumentations framework +fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5) +hsv_h: 0 # image HSV-Hue augmentation (fraction) +hsv_s: 00 # image HSV-Saturation augmentation (fraction) +hsv_v: 0 # image HSV-Value augmentation (fraction) +degrees: 0.0 # image rotation (+/- deg) +translate: 0 # image translation (+/- fraction) +scale: 0 # image scale (+/- gain) +shear: 0 # image shear (+/- deg) +perspective: 0.0 # image perspective (+/- fraction), range 0-0.001 +flipud: 0.0 # image flip up-down (probability) +fliplr: 0.0 # image flip left-right (probability) +mosaic: 0.0 # image mosaic (probability) +mixup: 0.0 # image mixup (probability) +copy_paste: 0.0 # segment copy-paste (probability) From 887d95296642b2fdee1cafa80c0c59618ca3c2e7 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 21 Dec 2022 02:17:19 +0100 Subject: [PATCH 1622/1976] Created using Colaboratory --- segment/tutorial.ipynb | 21
+++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/segment/tutorial.ipynb b/segment/tutorial.ipynb index e1179ffc1cc6..dc6599415480 100644 --- a/segment/tutorial.ipynb +++ b/segment/tutorial.ipynb @@ -36,7 +36,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -94,7 +94,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -149,7 +149,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -176,7 +176,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -264,7 +264,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -454,7 +454,8 @@ }, "source": [ "## Comet Logging and Visualization 🌟 NEW\n", - "[Comet](https://bit.ly/yolov5-readme-comet2) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", + "\n", + "[Comet](https://www.comet.com/site/lp/yolov5-with-comet/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://www.comet.com/docs/v2/guides/comet-dashboard/code-panels/about-panels/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes!\n", "\n", "Getting started is easy:\n", "```shell\n", @@ -462,11 +463,11 @@ "export COMET_API_KEY= # 2. paste API key\n", "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n", "```\n", - "\n", - "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://bit.ly/yolov5-colab-comet-docs). Get started by trying out the Comet Colab Notebook:\n", + "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://www.comet.com/docs/v2/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab). 
Get started by trying out the Comet Colab Notebook:\n", "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", "\n", - "\"yolo-ui\"" + "\n", + "\"Comet" ] }, { @@ -590,4 +591,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} +} \ No newline at end of file From c765b8c274c78676ae351f159953652152725fcc Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 21 Dec 2022 02:18:09 +0100 Subject: [PATCH 1623/1976] Created using Colaboratory --- classify/tutorial.ipynb | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/classify/tutorial.ipynb b/classify/tutorial.ipynb index 94bafba00204..06af62a1b4c1 100644 --- a/classify/tutorial.ipynb +++ b/classify/tutorial.ipynb @@ -36,7 +36,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -94,7 +94,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -149,7 +149,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -183,7 +183,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -1269,7 +1269,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -1341,7 +1341,8 @@ }, "source": [ "## Comet Logging and Visualization 🌟 NEW\n", - "[Comet](https://bit.ly/yolov5-readme-comet2) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", + "\n", + "[Comet](https://www.comet.com/site/lp/yolov5-with-comet/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://www.comet.com/docs/v2/guides/comet-dashboard/code-panels/about-panels/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes!\n", "\n", "Getting started is easy:\n", "```shell\n", @@ -1349,11 +1350,11 @@ "export COMET_API_KEY= # 2. paste API key\n", "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n", "```\n", - "\n", - "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://bit.ly/yolov5-colab-comet-docs). 
Get started by trying out the Comet Colab Notebook:\n", + "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://www.comet.com/docs/v2/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab). Get started by trying out the Comet Colab Notebook:\n", "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", "\n", - "\"yolo-ui\"" + "\n", + "\"Comet" ] }, { @@ -1476,4 +1477,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} +} \ No newline at end of file From 96a71b17a276fa0a0b6fbdf68d579ce0603bfa2f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 21 Dec 2022 02:19:45 +0100 Subject: [PATCH 1624/1976] Created using Colaboratory --- tutorial.ipynb | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index cebcee3dfd24..e83617e9dce7 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -412,7 +412,7 @@ "import utils\n", "display = utils.notebook_init() # checks" ], - "execution_count": 1, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -465,7 +465,7 @@ "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images\n", "# display.Image(filename='runs/detect/exp/zidane.jpg', width=600)" ], - "execution_count": 2, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -535,7 +535,7 @@ "torch.hub.download_url_to_file('https://ultralytics.com/assets/coco2017val.zip', 'tmp.zip') # download (780M - 5000 images)\n", "!unzip -q tmp.zip -d ../datasets && rm tmp.zip # unzip" ], - "execution_count": 3, + "execution_count": null, "outputs": [ { "output_type": "display_data", @@ -566,7 +566,7 @@ "# Validate YOLOv5s on COCO val\n", "!python val.py --weights yolov5s.pt --data coco.yaml --img 640 --half" ], - "execution_count": 4, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -682,7 +682,7 @@ "# Train YOLOv5s on COCO128 for 3 epochs\n", "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache" ], - "execution_count": 5, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -860,7 +860,8 @@ "cell_type": "markdown", "source": [ "## Comet Logging and Visualization 🌟 NEW\n", - "[Comet](https://bit.ly/yolov5-readme-comet2) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://bit.ly/yolov5-colab-comet-panels)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! \n", + "\n", + "[Comet](https://www.comet.com/site/lp/yolov5-with-comet/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://www.comet.com/docs/v2/guides/comet-dashboard/code-panels/about-panels/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab)! 
Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes!\n", "\n", "Getting started is easy:\n", "```shell\n", @@ -868,11 +869,11 @@ "export COMET_API_KEY= # 2. paste API key\n", "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n", "```\n", - "\n", - "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://bit.ly/yolov5-colab-comet-docs). Get started by trying out the Comet Colab Notebook:\n", + "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://www.comet.com/docs/v2/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab). Get started by trying out the Comet Colab Notebook:\n", "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", "\n", - "\"yolo-ui\"" + "\n", + "\"Comet" ], "metadata": { "id": "nWOsI5wJR1o3" @@ -972,4 +973,4 @@ "outputs": [] } ] -} +} \ No newline at end of file From 2370a5513ebf67bd10b8d15fd6353e008380bc43 Mon Sep 17 00:00:00 2001 From: "Mr.Li" <1055271769@qq.com> Date: Thu, 22 Dec 2022 21:55:09 +0800 Subject: [PATCH 1625/1976] Bugfix: update dataloaders.py to fix "resize to 0" (#10558) * fix bug "resize to 0" * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Use math.ceil() for resize to enforce min floor of 1 pixel Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- utils/dataloaders.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 302cc3300d35..cbb3114e94d8 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -737,7 +737,7 @@ def load_image(self, i): r = self.img_size / max(h0, w0) # ratio if r != 1: # if sizes are not equal interp = cv2.INTER_LINEAR if (self.augment or r > 1) else cv2.INTER_AREA - im = cv2.resize(im, (int(w0 * r), int(h0 * r)), interpolation=interp) + im = cv2.resize(im, (math.ceil(w0 * r), math.ceil(h0 * r)), interpolation=interp) return im, (h0, w0), im.shape[:2] # im, hw_original, hw_resized return self.ims[i], self.im_hw0[i], self.im_hw[i] # im, hw_original, hw_resized From 5f8054c47c4938c6df6c3f1344de774f15a18404 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 24 Dec 2022 18:15:33 +0100 Subject: [PATCH 1626/1976] FROM nvcr.io/nvidia/pytorch:22.12-py3 (#10588) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index 1ecf4c64f75f..26b3439c1941 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -3,7 +3,7 @@ # Image is CUDA-optimized for YOLOv5 single/multi-GPU training and inference # Start FROM NVIDIA PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch -FROM nvcr.io/nvidia/pytorch:22.11-py3 +FROM 
nvcr.io/nvidia/pytorch:22.12-py3 RUN rm -rf /opt/pytorch # remove 1.2GB dir # Downloads to user config dir From 3c1afd9ab69f289f46f6ad291e7be3cae15f6c35 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 26 Dec 2022 14:54:43 +0100 Subject: [PATCH 1627/1976] ENV OMP_NUM_THREADS=1 (#10593) @Laughing-q @AyushExel setting to 1 due to recent issues Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index 26b3439c1941..e0d4411118f0 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -29,7 +29,7 @@ WORKDIR /usr/src/app RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app # Set environment variables -ENV OMP_NUM_THREADS=8 +ENV OMP_NUM_THREADS=1 # Usage Examples ------------------------------------------------------------------------------------------------------- From e72dc1fabaaa47273a825f35ba3a8884bcc2e16b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 27 Dec 2022 14:32:21 +0100 Subject: [PATCH 1628/1976] Dockerfile uninstall torch nightly in favor of stable (#10604) @AyushExel @Laughing-q fix for Docker error ``` AttributeError: Can't get attribute '_rebuild_parameter_v2' on ``` Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index e0d4411118f0..abc3da0ee502 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -12,10 +12,10 @@ ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Aria # Install linux packages RUN apt update && apt install --no-install-recommends -y zip htop screen libgl1-mesa-glx -# Install pip packages +# Install pip packages (uninstall torch nightly in favor of stable) COPY requirements.txt . RUN python -m pip install --upgrade pip wheel -RUN pip uninstall -y Pillow torchtext # torch torchvision +RUN pip uninstall -y Pillow torchtext torch torchvision RUN pip install --no-cache -r requirements.txt ultralytics albumentations comet gsutil notebook Pillow>=9.1.0 \ 'opencv-python<4.6.0.66' \ --extra-index-url https://download.pytorch.org/whl/cu113 From b1e997642cec09f55ce71af8af874b9e7463aeba Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 27 Dec 2022 14:36:32 +0100 Subject: [PATCH 1629/1976] Bump actions/stale from 6 to 7 (#10590) Bumps [actions/stale](https://github.com/actions/stale) from 6 to 7. - [Release notes](https://github.com/actions/stale/releases) - [Changelog](https://github.com/actions/stale/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/stale/compare/v6...v7) --- updated-dependencies: - dependency-name: actions/stale dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- .github/workflows/stale.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 9067c343608b..b21e9c00e6c5 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -9,7 +9,7 @@ jobs: stale: runs-on: ubuntu-latest steps: - - uses: actions/stale@v6 + - uses: actions/stale@v7 with: repo-token: ${{ secrets.GITHUB_TOKEN }} stale-issue-message: | From 8ca182613499c323a411f559b7b5ea072122c897 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 27 Dec 2022 15:41:03 +0100 Subject: [PATCH 1630/1976] Update `pycocotools>=2.0.6` (#10605) * Update `pycocotools>=2.0.6` Signed-off-by: Glenn Jocher * Update val.py Signed-off-by: Glenn Jocher * Update val.py Signed-off-by: Glenn Jocher * Update Dockerfile Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- requirements.txt | 2 +- segment/val.py | 2 +- utils/docker/Dockerfile | 4 ++-- val.py | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/requirements.txt b/requirements.txt index 4a8649c696a8..c6bd0f26cabb 100644 --- a/requirements.txt +++ b/requirements.txt @@ -44,6 +44,6 @@ seaborn>=0.11.0 # Extras ---------------------------------------------------------------------- # mss # screenshots # albumentations>=1.0.3 -# pycocotools>=2.0 # COCO mAP +# pycocotools>=2.0.6 # COCO mAP # roboflow # ultralytics # HUB https://hub.ultralytics.com diff --git a/segment/val.py b/segment/val.py index 5cf8ae8b41c1..248d2bee9be1 100644 --- a/segment/val.py +++ b/segment/val.py @@ -159,7 +159,7 @@ def run( callbacks=Callbacks(), ): if save_json: - check_requirements(['pycocotools']) + check_requirements('pycocotools>=2.0.6') process = process_mask_native # more accurate else: process = process_mask # faster diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index abc3da0ee502..6f9de5208e7f 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -16,8 +16,8 @@ RUN apt update && apt install --no-install-recommends -y zip htop screen libgl1- COPY requirements.txt . RUN python -m pip install --upgrade pip wheel RUN pip uninstall -y Pillow torchtext torch torchvision -RUN pip install --no-cache -r requirements.txt ultralytics albumentations comet gsutil notebook Pillow>=9.1.0 \ - 'opencv-python<4.6.0.66' \ +RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook 'opencv-python<4.6.0.66' \ + Pillow>=9.1.0 pycocotools>=2.0.6 ultralytics \ --extra-index-url https://download.pytorch.org/whl/cu113 # Create working directory diff --git a/val.py b/val.py index 8d27d9d3dab1..599aa1afdd4a 100644 --- a/val.py +++ b/val.py @@ -309,7 +309,7 @@ def run( json.dump(jdict, f) try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb - check_requirements('pycocotools') + check_requirements('pycocotools>=2.0.6') from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval From 65071da7181e2ede9d3514f20c88e6bd646af07c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 27 Dec 2022 16:47:26 +0100 Subject: [PATCH 1631/1976] Update Dockerfile `pip install -U pycocotools` (#10606) * Update Dockerfile `pip install -U pycocotools` Previous command not working. 
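A plausible explanation (an assumption; the commit message does not say): in a shell, and therefore in a Dockerfile `RUN` line, an unquoted `>` inside a version specifier is parsed as output redirection, so the `>=2.0.6` pin never reaches pip. A standalone demonstration of the shell behaviour:

```python
import pathlib
import subprocess

# The shell splits 'pycocotools>=2.0.6' at the unquoted '>', so the command that
# actually runs is `echo pip install pycocotools` with stdout redirected to a file.
subprocess.run('echo pip install pycocotools>=2.0.6', shell=True)
print(pathlib.Path('=2.0.6').read_text())  # the pin ended up in a file named '=2.0.6', not in pip

# Quoting the specifier ('pycocotools>=2.0.6') avoids this, and so does the
# separate `pip install --no-cache -U pycocotools` step used in this patch.
```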
Signed-off-by: Glenn Jocher * Update Dockerfile Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index 6f9de5208e7f..98e9c2927b87 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -16,8 +16,9 @@ RUN apt update && apt install --no-install-recommends -y zip htop screen libgl1- COPY requirements.txt . RUN python -m pip install --upgrade pip wheel RUN pip uninstall -y Pillow torchtext torch torchvision +RUN pip install --no-cache -U pycocotools # install --upgrade RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook 'opencv-python<4.6.0.66' \ - Pillow>=9.1.0 pycocotools>=2.0.6 ultralytics \ + Pillow>=9.1.0 ultralytics \ --extra-index-url https://download.pytorch.org/whl/cu113 # Create working directory From a389bff3cb0209c4f74c512fc340a414056fc45d Mon Sep 17 00:00:00 2001 From: Hisam Fahri Date: Tue, 3 Jan 2023 03:09:02 +0700 Subject: [PATCH 1632/1976] docs: remove past Ultralytics Live Session event from readme (#10635) Signed-off-by: Hisam Fahri Signed-off-by: Hisam Fahri --- README.md | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/README.md b/README.md index 56015b239fc9..c32f3d6fe4ae 100644 --- a/README.md +++ b/README.md @@ -45,17 +45,6 @@ To request an Enterprise License please complete the form at Ultralytics Live Session
- - - ##
Segmentation ⭐ NEW
From 632bf485b4ab2adbaef71f4eced5e6b59ecef7e2 Mon Sep 17 00:00:00 2001 From: Yonghye Kwon Date: Tue, 3 Jan 2023 05:10:13 +0900 Subject: [PATCH 1633/1976] Remove rocket emoji causes cp949 codec errors (#10646) Signed-off-by: Yonghye Kwon Signed-off-by: Yonghye Kwon Co-authored-by: Glenn Jocher --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index c6bd0f26cabb..c0e4a91d7dd1 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -# YOLOv5 🚀 requirements +# YOLOv5 requirements # Usage: pip install -r requirements.txt # Base ------------------------------------------------------------------------ From c0ca1d21f24ced15fcc3ec6e80f5e55d78fde9d8 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 3 Jan 2023 19:21:31 +0100 Subject: [PATCH 1634/1976] `ultralytics/assets` update `master` to `main` (#10663) --- README.md | 74 ++++++++++++++++++++--------------------- README.zh-CN.md | 74 ++++++++++++++++++++--------------------- classify/tutorial.ipynb | 4 +-- segment/tutorial.ipynb | 4 +-- tutorial.ipynb | 4 +-- 5 files changed, 80 insertions(+), 80 deletions(-) diff --git a/README.md b/README.md index c32f3d6fe4ae..8044252cb74b 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@

[-/+ image/link HTML lines: ultralytics/assets URLs updated from 'master' to 'main']

[English](README.md) | [简体中文](README.zh-CN.md) @@ -23,25 +23,25 @@ To request an Enterprise License please complete the form at [-/+ badge and link HTML rows: ultralytics/assets URLs updated from 'master' to 'main']
@@ -233,20 +233,20 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml -
- +

- + - + - +
@@ -261,7 +261,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - [Ultralytics HUB](https://bit.ly/ultralytics_hub) is our ⭐ **NEW** no-code solution to visualize datasets, train YOLOv5 🚀 models, and deploy to the real world in a seamless experience. Get started for **Free** now! - + ##
Why YOLOv5
@@ -395,19 +395,19 @@ Get started in seconds with our verified environments. Click each icon below for
- + - + - + - + - +
@@ -443,25 +443,25 @@ For YOLOv5 bugs and feature requests please visit [GitHub Issues](https://github
- - + + - - + + - - + + - - + + - - + + - - + + - +
[tta]: https://github.com/ultralytics/yolov5/issues/303 diff --git a/README.zh-CN.md b/README.zh-CN.md index 15232be3aa4f..ab76afbc5252 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -1,7 +1,7 @@

- +

[英文](README.md)\|[简体中文](README.zh-CN.md)
@@ -23,25 +23,25 @@ YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表 - - + + - - + + - - + + - - + + - - + + - - + + - +
@@ -245,20 +245,20 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml -
- +

- + - + - +
@@ -272,7 +272,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - [Ultralytics HUB](https://bit.ly/ultralytics_hub) 是我们的⭐**新的**用于可视化数据集、训练 YOLOv5 🚀 模型并以无缝体验部署到现实世界的无代码解决方案。现在开始 **免费** 使用他! - + ##
为什么选择 YOLOv5
@@ -412,19 +412,19 @@ python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --inclu
- + - + - + - + - +
@@ -458,25 +458,25 @@ YOLOv5 在两种不同的 License 下可用:
- - + + - - + + - - + + - - + + - - + + - - + + - +
[tta]: https://github.com/ultralytics/yolov5/issues/303 diff --git a/classify/tutorial.ipynb b/classify/tutorial.ipynb index 06af62a1b4c1..03c1dd0bc0de 100644 --- a/classify/tutorial.ipynb +++ b/classify/tutorial.ipynb @@ -9,7 +9,7 @@ "
\n", "\n", " \n", - " \n", + " \n", "\n", "\n", "
\n", @@ -1222,7 +1222,7 @@ "source": [ "# 3. Train\n", "\n", - "

\n", + "

\n", "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", "

\n", "\n", diff --git a/segment/tutorial.ipynb b/segment/tutorial.ipynb index dc6599415480..cb1af34d9f17 100644 --- a/segment/tutorial.ipynb +++ b/segment/tutorial.ipynb @@ -9,7 +9,7 @@ "
\n", "\n", " \n", - " \n", + " \n", "\n", "\n", "
\n", @@ -216,7 +216,7 @@ "source": [ "# 3. Train\n", "\n", - "

\n", + "

\n", "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", "

\n", "\n", diff --git a/tutorial.ipynb b/tutorial.ipynb index e83617e9dce7..6308898b8b71 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -369,7 +369,7 @@ "
\n", "\n", " \n", - " \n", + " \n", "\n", "\n", "
\n", @@ -622,7 +622,7 @@ "source": [ "# 3. Train\n", "\n", - "

\n", + "

\n", "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", "

\n", "\n", From 9fcbf93a1f0afacecb8b41b86fb1304db1942928 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 6 Jan 2023 18:45:31 +0100 Subject: [PATCH 1635/1976] Created using Colaboratory --- tutorial.ipynb | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 6308898b8b71..c320d699a940 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -651,17 +651,17 @@ "cell_type": "code", "source": [ "#@title Select YOLOv5 🚀 logger {run: 'auto'}\n", - "logger = 'TensorBoard' #@param ['TensorBoard', 'Comet', 'ClearML']\n", + "logger = 'ClearML' #@param ['ClearML', 'Comet', 'TensorBoard']\n", "\n", - "if logger == 'TensorBoard':\n", - " %load_ext tensorboard\n", - " %tensorboard --logdir runs/train\n", + "if logger == 'ClearML':\n", + " %pip install -q clearml\n", + " import clearml; clearml.browser_login()\n", "elif logger == 'Comet':\n", " %pip install -q comet_ml\n", " import comet_ml; comet_ml.init()\n", - "elif logger == 'ClearML':\n", - " %pip install -q clearml\n", - " import clearml; clearml.browser_login()" + "elif logger == 'TensorBoard':\n", + " %load_ext tensorboard\n", + " %tensorboard --logdir runs/train" ], "metadata": { "id": "i3oKtE4g-aNn" From 79c05e5689817645bb12b7f77a3d8318582c0f05 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Sat, 7 Jan 2023 00:19:14 +0530 Subject: [PATCH 1636/1976] Add Neural Magic DeepSparse tutorial to README (#10698) * Update README.md Signed-off-by: Ayush Chaurasia * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher Signed-off-by: Ayush Chaurasia Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- README.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 8044252cb74b..a2245db46c14 100644 --- a/README.md +++ b/README.md @@ -223,7 +223,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - - [Architecture Summary](https://github.com/ultralytics/yolov5/issues/6998) 🌟 NEW - [Roboflow for Datasets, Labeling, and Active Learning](https://github.com/ultralytics/yolov5/issues/4975)  🌟 NEW - [ClearML Logging](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) 🌟 NEW -- [Deci Platform](https://github.com/ultralytics/yolov5/wiki/Deci-Platform) 🌟 NEW +- [YOLOv5 with Neural Magic's Deepsparse](https://bit.ly/yolov5-neuralmagic) 🌟 NEW - [Comet Logging](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet) 🌟 NEW
@@ -247,13 +247,13 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - - - + + -|Roboflow|ClearML ⭐ NEW|Comet ⭐ NEW|Deci ⭐ NEW| +|Roboflow|ClearML ⭐ NEW|Comet ⭐ NEW|Neural Magic ⭐ NEW| |:-:|:-:|:-:|:-:| -|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Free forever, [Comet](https://bit.ly/yolov5-readme-comet2) lets you save YOLOv5 models, resume training, and interactively visualise and debug predictions|Automatically compile and quantize YOLOv5 for better inference performance in one click at [Deci](https://bit.ly/yolov5-deci-platform)| +|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Free forever, [Comet](https://bit.ly/yolov5-readme-comet2) lets you save YOLOv5 models, resume training, and interactively visualise and debug predictions|Run YOLOv5 inference up to 6x faster with [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic)| ##
Ultralytics HUB
From fdc35b119ad21c7f205596dbb238f780c87040ec Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 6 Jan 2023 20:04:42 +0100 Subject: [PATCH 1637/1976] Update Ultralytics App banner URL (#10704) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index a2245db46c14..80a4da2ade8e 100644 --- a/README.md +++ b/README.md @@ -417,7 +417,7 @@ Get started in seconds with our verified environments. Click each icon below for Run YOLOv5 models on your iOS or Android device by downloading the [Ultralytics App](https://ultralytics.com/app_install)! -Ultralytics mobile app +Ultralytics mobile app ##
Contribute
From 1ea901bd5257e8688a122a27afcb21d74b7c5fbc Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 9 Jan 2023 14:42:57 +0100 Subject: [PATCH 1638/1976] Migrate policies to ultralytics/.github (#10721) --- .github/CODE_OF_CONDUCT.md | 128 ------------------------------------- .github/SECURITY.md | 7 -- 2 files changed, 135 deletions(-) delete mode 100644 .github/CODE_OF_CONDUCT.md delete mode 100644 .github/SECURITY.md diff --git a/.github/CODE_OF_CONDUCT.md b/.github/CODE_OF_CONDUCT.md deleted file mode 100644 index 27e59e9aab38..000000000000 --- a/.github/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,128 +0,0 @@ -# YOLOv5 🚀 Contributor Covenant Code of Conduct - -## Our Pledge - -We as members, contributors, and leaders pledge to make participation in our -community a harassment-free experience for everyone, regardless of age, body -size, visible or invisible disability, ethnicity, sex characteristics, gender -identity and expression, level of experience, education, socio-economic status, -nationality, personal appearance, race, religion, or sexual identity -and orientation. - -We pledge to act and interact in ways that contribute to an open, welcoming, -diverse, inclusive, and healthy community. - -## Our Standards - -Examples of behavior that contributes to a positive environment for our -community include: - -- Demonstrating empathy and kindness toward other people -- Being respectful of differing opinions, viewpoints, and experiences -- Giving and gracefully accepting constructive feedback -- Accepting responsibility and apologizing to those affected by our mistakes, - and learning from the experience -- Focusing on what is best not just for us as individuals, but for the - overall community - -Examples of unacceptable behavior include: - -- The use of sexualized language or imagery, and sexual attention or - advances of any kind -- Trolling, insulting or derogatory comments, and personal or political attacks -- Public or private harassment -- Publishing others' private information, such as a physical or email - address, without their explicit permission -- Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Enforcement Responsibilities - -Community leaders are responsible for clarifying and enforcing our standards of -acceptable behavior and will take appropriate and fair corrective action in -response to any behavior that they deem inappropriate, threatening, offensive, -or harmful. - -Community leaders have the right and responsibility to remove, edit, or reject -comments, commits, code, wiki edits, issues, and other contributions that are -not aligned to this Code of Conduct, and will communicate reasons for moderation -decisions when appropriate. - -## Scope - -This Code of Conduct applies within all community spaces, and also applies when -an individual is officially representing the community in public spaces. -Examples of representing our community include using an official e-mail address, -posting via an official social media account, or acting as an appointed -representative at an online or offline event. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported to the community leaders responsible for enforcement at -hello@ultralytics.com. -All complaints will be reviewed and investigated promptly and fairly. - -All community leaders are obligated to respect the privacy and security of the -reporter of any incident. 
- -## Enforcement Guidelines - -Community leaders will follow these Community Impact Guidelines in determining -the consequences for any action they deem in violation of this Code of Conduct: - -### 1. Correction - -**Community Impact**: Use of inappropriate language or other behavior deemed -unprofessional or unwelcome in the community. - -**Consequence**: A private, written warning from community leaders, providing -clarity around the nature of the violation and an explanation of why the -behavior was inappropriate. A public apology may be requested. - -### 2. Warning - -**Community Impact**: A violation through a single incident or series -of actions. - -**Consequence**: A warning with consequences for continued behavior. No -interaction with the people involved, including unsolicited interaction with -those enforcing the Code of Conduct, for a specified period of time. This -includes avoiding interactions in community spaces as well as external channels -like social media. Violating these terms may lead to a temporary or -permanent ban. - -### 3. Temporary Ban - -**Community Impact**: A serious violation of community standards, including -sustained inappropriate behavior. - -**Consequence**: A temporary ban from any sort of interaction or public -communication with the community for a specified period of time. No public or -private interaction with the people involved, including unsolicited interaction -with those enforcing the Code of Conduct, is allowed during this period. -Violating these terms may lead to a permanent ban. - -### 4. Permanent Ban - -**Community Impact**: Demonstrating a pattern of violation of community -standards, including sustained inappropriate behavior, harassment of an -individual, or aggression toward or disparagement of classes of individuals. - -**Consequence**: A permanent ban from any sort of public interaction within -the community. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], -version 2.0, available at -https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. - -Community Impact Guidelines were inspired by [Mozilla's code of conduct -enforcement ladder](https://github.com/mozilla/diversity). - -For answers to common questions about this code of conduct, see the FAQ at -https://www.contributor-covenant.org/faq. Translations are available at -https://www.contributor-covenant.org/translations. - -[homepage]: https://www.contributor-covenant.org diff --git a/.github/SECURITY.md b/.github/SECURITY.md deleted file mode 100644 index aa3e8409da6b..000000000000 --- a/.github/SECURITY.md +++ /dev/null @@ -1,7 +0,0 @@ -# Security Policy - -We aim to make YOLOv5 🚀 as secure as possible! If you find potential vulnerabilities or have any concerns please let us know so we can investigate and take corrective action if needed. - -### Reporting a Vulnerability - -To report vulnerabilities please email us at hello@ultralytics.com or visit https://ultralytics.com/contact. Thank you! 
From caba2aed4a6c2ad85712acb7cb1dd22ed886dc95 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 9 Jan 2023 20:35:02 +0100 Subject: [PATCH 1639/1976] Update translate-readme.yml (#10725) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- .github/workflows/translate-readme.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/translate-readme.yml b/.github/workflows/translate-readme.yml index 538ff375097e..2bb351ec7e81 100644 --- a/.github/workflows/translate-readme.yml +++ b/.github/workflows/translate-readme.yml @@ -19,7 +19,7 @@ jobs: uses: actions/setup-node@v3 with: node-version: 16 - # ISO Langusge Codes: https://cloud.google.com/translate/docs/languages + # ISO Language Codes: https://cloud.google.com/translate/docs/languages - name: Adding README - Chinese Simplified uses: dephraiim/translate-readme@main with: From 37d1e5e5df33f4a9bef75661e5a075927b058540 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9B=BE=E9=80=B8=E5=A4=AB=EF=BC=88Zeng=20Yifu=EF=BC=89?= <41098760+Zengyf-CVer@users.noreply.github.com> Date: Tue, 10 Jan 2023 16:40:17 +0800 Subject: [PATCH 1640/1976] Update some Chinese content of Neural Magic (#10727) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Update README.zh-CN.md Signed-off-by: 曾逸夫(Zeng Yifu) <41098760+Zengyf-CVer@users.noreply.github.com> Signed-off-by: 曾逸夫(Zeng Yifu) <41098760+Zengyf-CVer@users.noreply.github.com> --- README.zh-CN.md | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) diff --git a/README.zh-CN.md b/README.zh-CN.md index ab76afbc5252..8c6efadfd242 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -45,16 +45,6 @@ YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表
Ultralytics 直播会议 - - - ##
实例分割模型 ⭐ 新
@@ -260,12 +250,12 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - - +
-| Roboflow | ClearML ⭐ 新 | Comet ⭐ 新 | Deci ⭐ 新 | +| Roboflow | ClearML ⭐ 新 | Comet ⭐ 新 | Neural Magic ⭐ 新 | | :-----------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------: | -| 将您的自定义数据集进行标注并直接导出到 YOLOv5 以进行训练 [Roboflow](https://roboflow.com/?ref=ultralytics) | 自动跟踪、可视化甚至远程训练 YOLOv5 [ClearML](https://cutt.ly/yolov5-readme-clearml)(开源!) | 永远免费,[Comet](https://bit.ly/yolov5-readme-comet)可让您保存 YOLOv5 模型、恢复训练以及交互式可视化和调试预测 | 一键自动编译量化 YOLOv5 以获得更好的推理性能[Deci](https://bit.ly/yolov5-deci-platform) | +| 将您的自定义数据集进行标注并直接导出到 YOLOv5 以进行训练 [Roboflow](https://roboflow.com/?ref=ultralytics) | 自动跟踪、可视化甚至远程训练 YOLOv5 [ClearML](https://cutt.ly/yolov5-readme-clearml)(开源!) | 永远免费,[Comet](https://bit.ly/yolov5-readme-comet)可让您保存 YOLOv5 模型、恢复训练以及交互式可视化和调试预测 | 使用 [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic),运行 YOLOv5 推理的速度最高可提高6倍 | ##
Ultralytics HUB
From cdd804d39ff84b413bde36a84006f51769b6043b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9B=BE=E9=80=B8=E5=A4=AB=EF=BC=88Zeng=20Yifu=EF=BC=89?= <41098760+Zengyf-CVer@users.noreply.github.com> Date: Tue, 10 Jan 2023 22:05:41 +0800 Subject: [PATCH 1641/1976] Fix logo-neuralmagic.png image link (#10731) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Update README.zh-CN.md Signed-off-by: 曾逸夫(Zeng Yifu) <41098760+Zengyf-CVer@users.noreply.github.com> Signed-off-by: 曾逸夫(Zeng Yifu) <41098760+Zengyf-CVer@users.noreply.github.com> --- README.zh-CN.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.zh-CN.md b/README.zh-CN.md index 8c6efadfd242..c406f35820a7 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -249,7 +249,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - - + From 0e24b7e2f584beea3f573ddb82c3b93558daeb1f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 12 Jan 2023 17:43:12 +0100 Subject: [PATCH 1642/1976] PIL `.get_size()` deprecation fix (#10754) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/plots.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/plots.py b/utils/plots.py index d2f232de0e97..41a387200ba4 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -88,7 +88,7 @@ def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 2 if self.pil or not is_ascii(label): self.draw.rectangle(box, width=self.lw, outline=color) # box if label: - w, h = self.font.getsize(label) # text width, height + _, _, w, h = self.font.getbbox(label) # text width, height outside = box[1] - h >= 0 # label fits outside box self.draw.rectangle( (box[0], box[1] - h if outside else box[1], box[0] + w + 1, From bd10f0f6c72d3a0135b72f31b51057eb74c116eb Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 12 Jan 2023 18:01:36 +0100 Subject: [PATCH 1643/1976] Revert PIL deprecation fix Signed-off-by: Glenn Jocher --- utils/plots.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/plots.py b/utils/plots.py index 41a387200ba4..f84aed9fb5c7 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -88,7 +88,8 @@ def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 2 if self.pil or not is_ascii(label): self.draw.rectangle(box, width=self.lw, outline=color) # box if label: - _, _, w, h = self.font.getbbox(label) # text width, height + w, h = self.font.getsize(label) # text width, height (WARNING: deprecated) in 9.2.0 + # _, _, w, h = self.font.getbbox(label) # text width, height (New) outside = box[1] - h >= 0 # label fits outside box self.draw.rectangle( (box[0], box[1] - h if outside else box[1], box[0] + w + 1, From 9650f16f41248b24a72276e2287185350939285d Mon Sep 17 00:00:00 2001 From: Wang Xin Date: Fri, 13 Jan 2023 02:35:05 +0800 Subject: [PATCH 1644/1976] Ignore *_paddle_model/ dir (#10745) Signed-off-by: Wang Xin Signed-off-by: Wang Xin --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 69a00843ea42..6bcedfac610d 100755 --- a/.gitignore +++ b/.gitignore @@ -60,6 +60,7 @@ VOC/ *_saved_model/ *_web_model/ *_openvino_model/ +*_paddle_model/ darknet53.conv.74 yolov3-tiny.conv.15 From 2b356c0ab24bc945d69ab66b67e8af755697b611 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 14 Jan 2023 12:40:15 +0100 Subject: [PATCH 1645/1976] Update Dockerfile (#10768) Signed-off-by: Glenn Jocher Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile | 2 +- 1 file 
changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index 98e9c2927b87..c8b88357cb6d 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -54,7 +54,7 @@ ENV OMP_NUM_THREADS=1 # t=ultralytics/yolov5:latest tnew=ultralytics/yolov5:v6.2 && sudo docker pull $t && sudo docker tag $t $tnew && sudo docker push $tnew # Clean up -# docker system prune -a --volumes +# sudo docker system prune -a --volumes # Update Ubuntu drivers # https://www.maketecheasier.com/install-nvidia-drivers-ubuntu/ From 3a059125dd7b01c76b7a02b59814ed5bc32d9ac4 Mon Sep 17 00:00:00 2001 From: ZhuGeRoastedFish <77224640+ZhuGeRoastedFish@users.noreply.github.com> Date: Sat, 14 Jan 2023 21:43:27 +0800 Subject: [PATCH 1646/1976] Fx confusion-matrix xlabel typo (#10692) fix confusion-matrix xlabel typo Signed-off-by: ZhuGeRoastedFish <77224640+ZhuGeRoastedFish@users.noreply.github.com> Signed-off-by: ZhuGeRoastedFish <77224640+ZhuGeRoastedFish@users.noreply.github.com> Co-authored-by: Glenn Jocher --- utils/metrics.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/metrics.py b/utils/metrics.py index c01f823a77a1..7fb077774384 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -208,7 +208,7 @@ def plot(self, normalize=True, save_dir='', names=()): vmin=0.0, xticklabels=ticklabels, yticklabels=ticklabels).set_facecolor((1, 1, 1)) - ax.set_ylabel('True') + ax.set_xlabel('True') ax.set_ylabel('Predicted') ax.set_title('Confusion Matrix') fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250) From 3b6e27ad0ad990cc69c519e969a6094aacfb9e3e Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Sat, 14 Jan 2023 14:46:56 +0100 Subject: [PATCH 1647/1976] [pre-commit.ci] pre-commit suggestions (#10655) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/asottile/pyupgrade: v3.3.0 → v3.3.1](https://github.com/asottile/pyupgrade/compare/v3.3.0...v3.3.1) - [github.com/PyCQA/isort: 5.10.1 → 5.11.4](https://github.com/PyCQA/isort/compare/5.10.1...5.11.4) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 28dbc89223cf..f7ae077ee272 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -24,14 +24,14 @@ repos: - id: check-docstring-first - repo: https://github.com/asottile/pyupgrade - rev: v3.3.0 + rev: v3.3.1 hooks: - id: pyupgrade name: Upgrade code args: [ --py37-plus ] - repo: https://github.com/PyCQA/isort - rev: 5.10.1 + rev: 5.11.4 hooks: - id: isort name: Sort imports From 589edc7b012d45a5c8ad6231d7716f88cb6e43ca Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 15 Jan 2023 18:43:06 +0100 Subject: [PATCH 1648/1976] Ultralytics Live Session 3 - YOLOv8 https://youtu.be/IPcpYO5ITa8 (#10769) * Ultralytics Live Session 3 - YOLOv8 https://youtu.be/IPcpYO5ITa8 Ultralytics Live Session Ep.3 is here! Join us on January 18th at 18 CET as we dive into the latest advancements in YOLOv8, and demonstrate how to use this cutting-edge, SOTA model to improve your object detection, image segmentation, and image classification projects. See firsthand how YOLOv8's speed, accuracy, and ease of use make it a top choice for professionals and researchers alike. 
In addition to learning about the exciting new features and improvements of Ultralytics YOLOv8, you will also have the opportunity to ask questions and interact with our team during the live Q&A session. We encourage all of you to come prepared with any questions you may have. Don't miss out on this opportunity! To join the webinar, visit our YouTube Channel and turn on your notifications! https://youtu.be/IPcpYO5ITa8 Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Paula Derrenger <107626595+pderrenger@users.noreply.github.com> Signed-off-by: Glenn Jocher Signed-off-by: Paula Derrenger <107626595+pderrenger@users.noreply.github.com> Co-authored-by: Paula Derrenger <107626595+pderrenger@users.noreply.github.com> --- README.md | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/README.md b/README.md index 80a4da2ade8e..399ebe5666e2 100644 --- a/README.md +++ b/README.md @@ -45,6 +45,20 @@ To request an Enterprise License please complete the form at Ultralytics Live Session + +
+ +[Ultralytics Live Session 3](https://youtu.be/IPcpYO5ITa8) ✨ is here! Join us on January 18th at 18 CET as we dive into the latest advancements in YOLOv8, and demonstrate how to use this cutting-edge, SOTA model to improve your object detection, instance segmentation, and image classification projects. See firsthand how YOLOv8's speed, accuracy, and ease of use make it a top choice for professionals and researchers alike. + +In addition to learning about the exciting new features and improvements of Ultralytics YOLOv8, you will also have the opportunity to ask questions and interact with our team during the live Q&A session. We encourage all of you to come prepared with any questions you may have. + +Don't miss out on this opportunity! To join the webinar, visit our YouTube Channel and turn on your notifications! https://youtu.be/IPcpYO5ITa8 + + + +
+

## Segmentation ⭐ NEW
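A quick aside on the pair of PIL patches above (1642 and 1643): `font.getsize()` was deprecated in Pillow 9.2.0, but its replacement `font.getbbox()` returns a bounding box, not a size, so the two calls are not interchangeable one-for-one. A minimal sketch of the difference; the font path and label string are placeholders, and it assumes Pillow >= 8.0 for `getbbox()`:

```python
from PIL import ImageFont

font = ImageFont.truetype("Arial.ttf", 24)  # placeholder font path
label = "person 0.87"  # placeholder box label

# Deprecated since Pillow 9.2.0 (removed in 10.0): returns (width, height)
w, h = font.getsize(label)

# Replacement: returns (left, top, right, bottom) of the rendered text
left, top, right, bottom = font.getbbox(label)
w2, h2 = right - left, bottom - top  # need not match (w, h) exactly
```

The fix in 1642 unpacked `_, _, w, h = font.getbbox(label)`, i.e. it took `right` and `bottom` as the size; that matches `getsize()` only when the `left`/`top` offsets are zero, which may be why the change was reverted for a closer look.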
From c442a2e99321ebd72b242bc961824f82d46e4fd3 Mon Sep 17 00:00:00 2001 From: Paula Derrenger <107626595+pderrenger@users.noreply.github.com> Date: Tue, 17 Jan 2023 14:40:03 +0100 Subject: [PATCH 1649/1976] Update Ultralytics Live Session 3 - https://youtu.be/IPcpYO5ITa8 (#10782) * Update Date of Ultralytics Live Session 3 Signed-off-by: Paula Derrenger <107626595+pderrenger@users.noreply.github.com> * Update README.md Signed-off-by: Glenn Jocher Signed-off-by: Paula Derrenger <107626595+pderrenger@users.noreply.github.com> Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 399ebe5666e2..f19130f6f094 100644 --- a/README.md +++ b/README.md @@ -49,14 +49,14 @@ To request an Enterprise License please complete the form at -[Ultralytics Live Session 3](https://youtu.be/IPcpYO5ITa8) ✨ is here! Join us on January 18th at 18 CET as we dive into the latest advancements in YOLOv8, and demonstrate how to use this cutting-edge, SOTA model to improve your object detection, instance segmentation, and image classification projects. See firsthand how YOLOv8's speed, accuracy, and ease of use make it a top choice for professionals and researchers alike. +[Ultralytics Live Session 3](https://youtu.be/IPcpYO5ITa8) ✨ is here! Join us on January 24th at 18 CET as we dive into the latest advancements in YOLOv8, and demonstrate how to use this cutting-edge, SOTA model to improve your object detection, instance segmentation, and image classification projects. See firsthand how YOLOv8's speed, accuracy, and ease of use make it a top choice for professionals and researchers alike. -In addition to learning about the exciting new features and improvements of Ultralytics YOLOv8, you will also have the opportunity to ask questions and interact with our team during the live Q&A session. We encourage all of you to come prepared with any questions you may have. +In addition to learning about the exciting new features and improvements of Ultralytics YOLOv8, you will also have the opportunity to ask questions and interact with our team during the live Q&A session. We encourage you to come prepared with any questions you may have. -Don't miss out on this opportunity! To join the webinar, visit our YouTube Channel and turn on your notifications! https://youtu.be/IPcpYO5ITa8 +To join the webinar, visit our [YouTube Channel](https://www.youtube.com/@Ultralytics/streams) and turn on your notifications! - +
## Segmentation ⭐ NEW
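The one-line fix in patch 1646 above is easy to miss: the original plotting code called `ax.set_ylabel()` twice, so the second call overwrote the first, 'True' never appeared anywhere, and the x-axis was left unlabelled. A standalone matplotlib sketch of the bug and the fix (not the repo's full `ConfusionMatrix.plot()`, just the axis-labelling step):

```python
import matplotlib.pyplot as plt

fig, ax = plt.subplots()

# Bug: the second set_ylabel() call overwrites the first, so 'True' is lost
ax.set_ylabel('True')
ax.set_ylabel('Predicted')

# Fix: one label per axis, as in patch 1646
ax.set_xlabel('True')
ax.set_ylabel('Predicted')
ax.set_title('Confusion Matrix')
```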
From 064365d8683fd002e9ad789c1e91fa3d021b44f0 Mon Sep 17 00:00:00 2001 From: Johan Bergman <35481994+duran67@users.noreply.github.com> Date: Fri, 20 Jan 2023 23:49:43 +0100 Subject: [PATCH 1650/1976] Update parse_opt() in export.py to work as in train.py (#10789) Update parse_opt() to work as in train.py Change parse_opt() be able to use parse_known_args(), same as in train.py, so export.main() can be called from other script without error. e.g.: from yolov5 import export opt = export.parse_opt(True) opt.weights = opt.include = ("torchscript", "onnx") opt.data = opt.imgsz = [, ] export.main(opt) Signed-off-by: Johan Bergman <35481994+duran67@users.noreply.github.com> Signed-off-by: Johan Bergman <35481994+duran67@users.noreply.github.com> --- export.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/export.py b/export.py index 7910178b2338..9ca3441bc66a 100644 --- a/export.py +++ b/export.py @@ -610,7 +610,7 @@ def run( return f # return list of exported files/dirs -def parse_opt(): +def parse_opt(known=False): parser = argparse.ArgumentParser() parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model.pt path(s)') @@ -638,7 +638,7 @@ def parse_opt(): nargs='+', default=['torchscript'], help='torchscript, onnx, openvino, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle') - opt = parser.parse_args() + opt = parser.parse_known_args()[0] if known else parser.parse_args() print_args(vars(opt)) return opt From 6a62c94190583cca257bb091c6ced9d9c3b2dd3d Mon Sep 17 00:00:00 2001 From: Laughing <61612323+Laughing-q@users.noreply.github.com> Date: Sat, 4 Feb 2023 20:21:30 +0800 Subject: [PATCH 1651/1976] fix zero labels (#10820) update --- utils/augmentations.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/augmentations.py b/utils/augmentations.py index 1eae5db8f816..7ab75f17fb18 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -201,7 +201,7 @@ def random_perspective(im, # Transform label coordinates n = len(targets) if n: - use_segments = any(x.any() for x in segments) + use_segments = any(x.any() for x in segments) and len(segments) == n new = np.zeros((n, 4)) if use_segments: # warp segments segments = resample_segments(segments) # upsample From d02ee60512c50d9573bb7a136d8baade8a0bd332 Mon Sep 17 00:00:00 2001 From: Talia Bender <85292283+taliabender@users.noreply.github.com> Date: Sat, 4 Feb 2023 13:30:10 +0100 Subject: [PATCH 1652/1976] Update README.md (#10893) * Update README.md Signed-off-by: Talia Bender <85292283+taliabender@users.noreply.github.com> * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher --------- Signed-off-by: Talia Bender <85292283+taliabender@users.noreply.github.com> Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- README.md | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index f19130f6f094..2938710214b4 100644 --- a/README.md +++ b/README.md @@ -49,14 +49,17 @@ To request an Enterprise License please complete the form at -[Ultralytics Live Session 3](https://youtu.be/IPcpYO5ITa8) ✨ is here! Join us on January 24th at 18 CET as we dive into the latest advancements in YOLOv8, and demonstrate how to use this cutting-edge, SOTA model to improve your object detection, instance segmentation, and image classification projects. 
See firsthand how YOLOv8's speed, accuracy, and ease of use make it a top choice for professionals and researchers alike. +⚡️ Stay tuned for [Ultralytics Live Session 4](https://www.youtube.com/watch?v=FXIbVnat2eU) ⚡️ -In addition to learning about the exciting new features and improvements of Ultralytics YOLOv8, you will also have the opportunity to ask questions and interact with our team during the live Q&A session. We encourage you to come prepared with any questions you may have. +Over the past couple of years we found that 22% percent of you experience difficulty in deploying your vision AI models. To improve this step in the ML pipeline, we've partnered with [Neural Magic](https://bit.ly/yolov5-neuralmagic), whose DeepSparse tool takes advantage of sparsity and low-precision arithmetic within neural networks to offer exceptional performance on commodity hardware. + +Glenn will be joined by Michael Goin of Neural Magic on February 8th at 12 EST/18 CET to discuss how to achieve GPU-class performance for YOLOv5 on CPUs. Be sure to come prepared with any questions you have about the model deployment process! To join the webinar, visit our [YouTube Channel](https://www.youtube.com/@Ultralytics/streams) and turn on your notifications! - - + + + ##
Segmentation ⭐ NEW
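Two of the patches above reward a closer look. Patch 1650's commit message appears to have lost its placeholder values in transit (`opt.data = opt.imgsz = [, ]` likely carried angle-bracketed placeholders that were stripped), so here is a self-contained sketch of the `parse_known_args()` pattern it introduces; the argument names mirror export.py, but the defaults and values are placeholders:

```python
import argparse


def parse_opt(known=False):
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt')
    parser.add_argument('--imgsz', '--img', nargs='+', type=int, default=[640, 640])
    parser.add_argument('--include', nargs='+', default=['torchscript'])
    # parse_known_args() returns (namespace, leftovers) and, unlike parse_args(),
    # does not abort on arguments it does not recognize
    return parser.parse_known_args()[0] if known else parser.parse_args()


# Programmatic use from another script (placeholder values):
opt = parse_opt(known=True)
opt.weights = 'yolov5s.pt'
opt.include = ('torchscript', 'onnx')
opt.imgsz = [640, 640]
# export.main(opt)  # hand the populated namespace to main()
```

And patch 1651's extra `len(segments) == n` guard makes sure the segment polygons line up one-to-one with the targets; when they do not, `random_perspective()` falls back to warping plain boxes instead of indexing a mismatched segments list.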
From b8a2c47fa94011260e0980a217dd7ec0d537414e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 6 Feb 2023 15:11:32 +0400 Subject: [PATCH 1653/1976] Update Dockerfile `FROM pytorch/pytorch:latest` (#10902) * Update Dockerfile `FROM pytorch/pytorch:latest` Signed-off-by: Glenn Jocher * isort * precommit * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * spelling * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update .pre-commit-config.yaml Signed-off-by: Glenn Jocher * Cleanup * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Cleanup * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Cleanup * Cleanup * Cleanup * Cleanup --------- Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 34 ++- README.md | 107 ++++--- README.zh-CN.md | 168 +++++------ classify/tutorial.ipynb | 2 +- classify/val.py | 6 +- utils/docker/Dockerfile | 33 ++- utils/loggers/__init__.py | 24 +- utils/loggers/clearml/README.md | 39 +-- utils/loggers/comet/README.md | 10 +- utils/loggers/wandb/README.md | 162 ----------- utils/loggers/wandb/log_dataset.py | 27 -- utils/loggers/wandb/sweep.py | 41 --- utils/loggers/wandb/sweep.yaml | 143 ---------- utils/loggers/wandb/wandb_utils.py | 434 ++--------------------------- 14 files changed, 250 insertions(+), 980 deletions(-) delete mode 100644 utils/loggers/wandb/README.md delete mode 100644 utils/loggers/wandb/log_dataset.py delete mode 100644 utils/loggers/wandb/sweep.py delete mode 100644 utils/loggers/wandb/sweep.yaml diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index f7ae077ee272..83425ad6cf78 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -4,6 +4,7 @@ default_language_version: python: python3.8 +exclude: 'docs/' # Define bot property if installed via https://github.com/marketplace/pre-commit-ci ci: autofix_prs: true @@ -28,13 +29,13 @@ repos: hooks: - id: pyupgrade name: Upgrade code - args: [ --py37-plus ] + args: [--py37-plus] - - repo: https://github.com/PyCQA/isort - rev: 5.11.4 - hooks: - - id: isort - name: Sort imports + # - repo: https://github.com/PyCQA/isort + # rev: 5.11.4 + # hooks: + # - id: isort + # name: Sort imports - repo: https://github.com/pre-commit/mirrors-yapf rev: v0.32.0 @@ -50,15 +51,22 @@ repos: additional_dependencies: - mdformat-gfm - mdformat-black - exclude: "README.md|README.zh-CN.md" - - - repo: https://github.com/asottile/yesqa - rev: v1.4.0 - hooks: - - id: yesqa + # exclude: "README.md|README.zh-CN.md|CONTRIBUTING.md" - repo: https://github.com/PyCQA/flake8 - rev: 6.0.0 + rev: 5.0.4 hooks: - id: flake8 name: PEP8 + + #- repo: https://github.com/codespell-project/codespell + # rev: v2.2.2 + # hooks: + # - id: codespell + # args: + # - --ignore-words-list=crate,nd + + #- repo: https://github.com/asottile/yesqa + # rev: v1.4.0 + # hooks: + # - id: yesqa diff --git a/README.md b/README.md index 2938710214b4..e836abf6d551 100644 --- a/README.md +++ b/README.md @@ -4,9 +4,10 @@

- [English](README.md) | [简体中文](README.zh-CN.md) -
-
+[English](README.md) | [简体中文](README.zh-CN.md) +
+ +
YOLOv5 CI YOLOv5 Citation Docker Pulls @@ -21,7 +22,7 @@ YOLOv5 🚀 is the world's most loved vision AI, representing Ultralytics Licensing. -
+
@@ -49,7 +50,7 @@ To request an Enterprise License please complete the form at -⚡️ Stay tuned for [Ultralytics Live Session 4](https://www.youtube.com/watch?v=FXIbVnat2eU) ⚡️ +⚡️ Stay tuned for [Ultralytics Live Session 4](https://www.youtube.com/watch?v=FXIbVnat2eU) ⚡️ Over the past couple of years we found that 22% percent of you experience difficulty in deploying your vision AI models. To improve this step in the ML pipeline, we've partnered with [Neural Magic](https://bit.ly/yolov5-neuralmagic), whose DeepSparse tool takes advantage of sparsity and low-precision arithmetic within neural networks to offer exceptional performance on commodity hardware. @@ -78,13 +79,13 @@ Our new YOLOv5 [release v7.0](https://github.com/ultralytics/yolov5/releases/v7. We trained YOLOv5 segmentations models on COCO for 300 epochs at image size 640 using A100 GPUs. We exported all models to ONNX FP32 for CPU speed tests and to TensorRT FP16 for GPU speed tests. We ran all speed tests on Google [Colab Pro](https://colab.research.google.com/signup) notebooks for easy reproducibility. -| Model | size
(pixels) | mAPbox 50-95 | mAPmask 50-95 | Train time 300 epochs A100 (hours) | Speed ONNX CPU (ms) | Speed TRT A100 (ms) | params (M) | FLOPs
@640 (B) | -|----------------------------------------------------------------------------------------------------|-----------------------|----------------------|-----------------------|-----------------------------------------------|--------------------------------|--------------------------------|--------------------|------------------------| -| [YOLOv5n-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-seg.pt) | 640 | 27.6 | 23.4 | 80:17 | **62.7** | **1.2** | **2.0** | **7.1** | -| [YOLOv5s-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt) | 640 | 37.6 | 31.7 | 88:16 | 173.3 | 1.4 | 7.6 | 26.4 | -| [YOLOv5m-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-seg.pt) | 640 | 45.0 | 37.1 | 108:36 | 427.0 | 2.2 | 22.0 | 70.8 | -| [YOLOv5l-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-seg.pt) | 640 | 49.0 | 39.9 | 66:43 (2x) | 857.4 | 2.9 | 47.9 | 147.7 | -| [YOLOv5x-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-seg.pt) | 640 | **50.7** | **41.4** | 62:56 (3x) | 1579.2 | 4.5 | 88.8 | 265.7 | +| Model | size
(pixels) | mAPbox 50-95 | mAPmask 50-95 | Train time 300 epochs A100 (hours) | Speed ONNX CPU (ms) | Speed TRT A100 (ms) | params (M) | FLOPs
@640 (B) | +| ------------------------------------------------------------------------------------------ | --------------------- | -------------------- | --------------------- | --------------------------------------------- | ------------------------------ | ------------------------------ | ------------------ | ---------------------- | +| [YOLOv5n-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-seg.pt) | 640 | 27.6 | 23.4 | 80:17 | **62.7** | **1.2** | **2.0** | **7.1** | +| [YOLOv5s-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt) | 640 | 37.6 | 31.7 | 88:16 | 173.3 | 1.4 | 7.6 | 26.4 | +| [YOLOv5m-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-seg.pt) | 640 | 45.0 | 37.1 | 108:36 | 427.0 | 2.2 | 22.0 | 70.8 | +| [YOLOv5l-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-seg.pt) | 640 | 49.0 | 39.9 | 66:43 (2x) | 857.4 | 2.9 | 47.9 | 147.7 | +| [YOLOv5x-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-seg.pt) | 640 | **50.7** | **41.4** | 62:56 (3x) | 1579.2 | 4.5 | 88.8 | 265.7 | - All checkpoints are trained to 300 epochs with SGD optimizer with `lr0=0.01` and `weight_decay=5e-5` at image size 640 and all default settings.
Runs logged to https://wandb.ai/glenn-jocher/YOLOv5_v70_official - **Accuracy** values are for single-model single-scale on COCO dataset.
Reproduce by `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt` @@ -97,6 +98,7 @@ We trained YOLOv5 segmentations models on COCO for 300 epochs at image size 640
Segmentation Usage Examples  Open In Colab ### Train + YOLOv5 segmentation training supports auto-download COCO128-seg segmentation dataset with `--data coco128-seg.yaml` argument and manual download of COCO-segments dataset with `bash data/scripts/get_coco.sh --train --val --segments` and then `python train.py --data coco.yaml`. ```bash @@ -108,33 +110,41 @@ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train ``` ### Val + Validate YOLOv5s-seg mask mAP on COCO dataset: + ```bash bash data/scripts/get_coco.sh --val --segments # download COCO val segments split (780MB, 5000 images) python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # validate ``` ### Predict + Use pretrained YOLOv5m-seg.pt to predict bus.jpg: + ```bash python segment/predict.py --weights yolov5m-seg.pt --data data/images/bus.jpg ``` + ```python -model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5m-seg.pt') # load from PyTorch Hub (WARNING: inference not yet supported) +model = torch.hub.load( + "ultralytics/yolov5", "custom", "yolov5m-seg.pt" +) # load from PyTorch Hub (WARNING: inference not yet supported) ``` -![zidane](https://user-images.githubusercontent.com/26833433/203113421-decef4c4-183d-4a0a-a6c2-6435b33bc5d3.jpg) | ![bus](https://user-images.githubusercontent.com/26833433/203113416-11fe0025-69f7-4874-a0a6-65d0bfe2999a.jpg) ---- |--- +| ![zidane](https://user-images.githubusercontent.com/26833433/203113421-decef4c4-183d-4a0a-a6c2-6435b33bc5d3.jpg) | ![bus](https://user-images.githubusercontent.com/26833433/203113416-11fe0025-69f7-4874-a0a6-65d0bfe2999a.jpg) | +| ---------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ### Export + Export YOLOv5s-seg model to ONNX and TensorRT: + ```bash python export.py --weights yolov5s-seg.pt --include onnx engine --img 640 --device 0 ```
-

## Documentation
See the [YOLOv5 Docs](https://docs.ultralytics.com) for full documentation on training, testing and deployment. See below for quickstart examples. @@ -164,10 +174,10 @@ YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). import torch # Model -model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # or yolov5n - yolov5x6, custom +model = torch.hub.load("ultralytics/yolov5", "yolov5s") # or yolov5n - yolov5x6, custom # Images -img = 'https://ultralytics.com/images/zidane.jpg' # or file, Path, PIL, OpenCV, numpy, list +img = "https://ultralytics.com/images/zidane.jpg" # or file, Path, PIL, OpenCV, numpy, list # Inference results = model(img) @@ -245,7 +255,6 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - - ##
Integrations

@@ -268,10 +277,9 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - -|Roboflow|ClearML ⭐ NEW|Comet ⭐ NEW|Neural Magic ⭐ NEW| -|:-:|:-:|:-:|:-:| -|Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics)|Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!)|Free forever, [Comet](https://bit.ly/yolov5-readme-comet2) lets you save YOLOv5 models, resume training, and interactively visualise and debug predictions|Run YOLOv5 inference up to 6x faster with [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic)| - +| Roboflow | ClearML ⭐ NEW | Comet ⭐ NEW | Neural Magic ⭐ NEW | +| :--------------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------: | +| Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) | Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!) | Free forever, [Comet](https://bit.ly/yolov5-readme-comet2) lets you save YOLOv5 models, resume training, and interactively visualise and debug predictions | Run YOLOv5 inference up to 6x faster with [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic) | ##
Ultralytics HUB
@@ -280,7 +288,6 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - - ##
Why YOLOv5
YOLOv5 has been designed to be super easy to get started and simple to learn. We prioritize real-world results. @@ -303,19 +310,19 @@ YOLOv5 has been designed to be super easy to get started and simple to learn. We ### Pretrained Checkpoints -| Model | size
(pixels) | mAPval 50-95 | mAPval 50 | Speed CPU b1 (ms) | Speed V100 b1 (ms) | Speed V100 b32 (ms) | params (M) | FLOPs
@640 (B) | -|------------------------------------------------------------------------------------------------------|-----------------------|----------------------|-------------------|------------------------------|-------------------------------|--------------------------------|--------------------|------------------------| -| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** | -| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 | -| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 | -| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 | -| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 | -| | | | | | | | | | -| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 | -| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | -| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | -| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | -| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x6.pt)
+ [TTA][tta] | 1280 1536 | 55.0 **55.8** | 72.7 **72.7** | 3136 - | 26.2 - | 19.4 - | 140.7 - | 209.8
- | +| Model | size
(pixels) | mAPval 50-95 | mAPval 50 | Speed CPU b1 (ms) | Speed V100 b1 (ms) | Speed V100 b32 (ms) | params (M) | FLOPs
@640 (B) | +| ----------------------------------------------------------------------------------------------- | --------------------- | -------------------- | ----------------- | ---------------------------- | ----------------------------- | ------------------------------ | ------------------ | ---------------------- | +| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** | +| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 | +| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 | +| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 | +| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 | +| | | | | | | | | | +| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 | +| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | +| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | +| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | +| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x6.pt)
+ [TTA] | 1280 1536 | 55.0 **55.8** | 72.7 **72.7** | 3136 - | 26.2 - | 19.4 - | 140.7 - | 209.8
- |
Table Notes @@ -327,7 +334,6 @@ YOLOv5 has been designed to be super easy to get started and simple to learn. We
-

## Classification ⭐ NEW
YOLOv5 [release v6.2](https://github.com/ultralytics/yolov5/releases) brings support for classification model training, validation and deployment! See full details in our [Release Notes](https://github.com/ultralytics/yolov5/releases/v6.2) and visit our [YOLOv5 Classification Colab Notebook](https://github.com/ultralytics/yolov5/blob/master/classify/tutorial.ipynb) for quickstart tutorials. @@ -340,18 +346,18 @@ YOLOv5 [release v6.2](https://github.com/ultralytics/yolov5/releases) brings sup We trained YOLOv5-cls classification models on ImageNet for 90 epochs using a 4xA100 instance, and we trained ResNet and EfficientNet models alongside with the same default training settings to compare. We exported all models to ONNX FP32 for CPU speed tests and to TensorRT FP16 for GPU speed tests. We ran all speed tests on Google [Colab Pro](https://colab.research.google.com/signup) for easy reproducibility. | Model | size
(pixels) | acc top1 | acc top5 | Training 90 epochs 4xA100 (hours) | Speed ONNX CPU (ms) | Speed TensorRT V100 (ms) | params (M) | FLOPs
@224 (B) | -|----------------------------------------------------------------------------------------------------|-----------------------|------------------|------------------|----------------------------------------------|--------------------------------|-------------------------------------|--------------------|------------------------| +| -------------------------------------------------------------------------------------------------- | --------------------- | ---------------- | ---------------- | -------------------------------------------- | ------------------------------ | ----------------------------------- | ------------------ | ---------------------- | | [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** | | [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 | | [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 | | [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 | | [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 | -| | +| | | | | | | | | | | [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 | | [ResNet34](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 | | [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 | | [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 | -| | +| | | | | | | | | | | [EfficientNet_b0](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 | | [EfficientNet_b1](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 | | [EfficientNet_b2](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 | @@ -364,6 +370,7 @@ We trained YOLOv5-cls classification models on ImageNet for 90 epochs using a 4x - **Accuracy** values are for single-model single-scale on [ImageNet-1k](https://www.image-net.org/index.php) dataset.
Reproduce by `python classify/val.py --data ../datasets/imagenet --img 224` - **Speed** averaged over 100 inference images using a Google [Colab Pro](https://colab.research.google.com/signup) V100 High-RAM instance.
Reproduce by `python classify/val.py --data ../datasets/imagenet --img 224 --batch 1` - **Export** to ONNX at FP32 and TensorRT at FP16 done with `export.py`.
Reproduce by `python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224` + @@ -371,6 +378,7 @@ We trained YOLOv5-cls classification models on ImageNet for 90 epochs using a 4x Classification Usage Examples  Open In Colab ### Train + YOLOv5 classification training supports auto-download of MNIST, Fashion-MNIST, CIFAR10, CIFAR100, Imagenette, Imagewoof, and ImageNet datasets with the `--data` argument. To start training on MNIST for example use `--data mnist`. ```bash @@ -382,28 +390,37 @@ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/trai ``` ### Val + Validate YOLOv5m-cls accuracy on ImageNet-1k dataset: + ```bash bash data/scripts/get_imagenet.sh --val # download ImageNet val split (6.3G, 50000 images) python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224 # validate ``` ### Predict + Use pretrained YOLOv5s-cls.pt to predict bus.jpg: + ```bash python classify/predict.py --weights yolov5s-cls.pt --data data/images/bus.jpg ``` + ```python -model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s-cls.pt') # load from PyTorch Hub +model = torch.hub.load( + "ultralytics/yolov5", "custom", "yolov5s-cls.pt" +) # load from PyTorch Hub ``` ### Export + Export a group of trained YOLOv5s-cls, ResNet and EfficientNet models to ONNX and TensorRT: + ```bash python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --include onnx engine --img 224 ``` - + ##
Environments
@@ -436,14 +453,13 @@ Run YOLOv5 models on your iOS or Android device by downloading the [Ultralytics Ultralytics mobile app - ##
Contribute
We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible. Please see our [Contributing Guide](CONTRIBUTING.md) to get started, and fill out the [YOLOv5 Survey](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experiences. Thank you to all our contributors! -
+

## License
@@ -452,7 +468,6 @@ YOLOv5 is available under two different licenses: - **GPL-3.0 License**: See [LICENSE](https://github.com/ultralytics/yolov5/blob/master/LICENSE) file for details. - **Enterprise License**: Provides greater flexibility for commercial product development without the open-source requirements of GPL-3.0. Typical use cases are embedding Ultralytics software and AI models in commercial products and applications. Request an Enterprise License at [Ultralytics Licensing](https://ultralytics.com/license). - ##
Contact
For YOLOv5 bugs and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues). For professional support please [Contact Us](https://ultralytics.com/contact). diff --git a/README.zh-CN.md b/README.zh-CN.md index c406f35820a7..b69d3921df99 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -4,9 +4,9 @@

-[英文](README.md)\|[简体中文](README.zh-CN.md)
+[英文](README.md)|[简体中文](README.zh-CN.md)
-
+
YOLOv5 CI YOLOv5 Citation Docker Pulls @@ -21,7 +21,7 @@ YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表Ultralytics 许可. -
+
@@ -61,18 +61,18 @@ YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表 教程 -- [训练自定义数据](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data)🚀 推荐 -- [获得最佳训练结果的技巧](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)☘️ 推荐 -- [多 GPU 训练](https://github.com/ultralytics/yolov5/issues/475) -- [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36)🌟 新 -- [TFLite、ONNX、CoreML、TensorRT 导出](https://github.com/ultralytics/yolov5/issues/251)🚀 -- [NVIDIA Jetson Nano 部署](https://github.com/ultralytics/yolov5/issues/9627)🌟 新 -- [测试时数据增强 (TTA)](https://github.com/ultralytics/yolov5/issues/303) -- [模型集成](https://github.com/ultralytics/yolov5/issues/318) -- [模型修剪/稀疏度](https://github.com/ultralytics/yolov5/issues/304) -- [超参数进化](https://github.com/ultralytics/yolov5/issues/607) -- [使用冻结层进行迁移学习](https://github.com/ultralytics/yolov5/issues/1314) -- [架构总结](https://github.com/ultralytics/yolov5/issues/6998)🌟 新 -- [用于数据集、标签和主动学习的 Roboflow](https://github.com/ultralytics/yolov5/issues/4975)🌟 新 -- [ClearML 记录](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml)🌟 新 -- [Deci 平台](https://github.com/ultralytics/yolov5/wiki/Deci-Platform)🌟 新 -- [Comet Logging](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet)🌟 新 +- [训练自定义数据](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data)🚀 推荐 +- [获得最佳训练结果的技巧](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)☘️ 推荐 +- [多 GPU 训练](https://github.com/ultralytics/yolov5/issues/475) +- [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36)🌟 新 +- [TFLite、ONNX、CoreML、TensorRT 导出](https://github.com/ultralytics/yolov5/issues/251)🚀 +- [NVIDIA Jetson Nano 部署](https://github.com/ultralytics/yolov5/issues/9627)🌟 新 +- [测试时数据增强 (TTA)](https://github.com/ultralytics/yolov5/issues/303) +- [模型集成](https://github.com/ultralytics/yolov5/issues/318) +- [模型修剪/稀疏度](https://github.com/ultralytics/yolov5/issues/304) +- [超参数进化](https://github.com/ultralytics/yolov5/issues/607) +- [使用冻结层进行迁移学习](https://github.com/ultralytics/yolov5/issues/1314) +- [架构总结](https://github.com/ultralytics/yolov5/issues/6998)🌟 新 +- [用于数据集、标签和主动学习的 Roboflow](https://github.com/ultralytics/yolov5/issues/4975)🌟 新 +- [ClearML 记录](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml)🌟 新 +- [Deci 平台](https://github.com/ultralytics/yolov5/wiki/Deci-Platform)🌟 新 +- [Comet Logging](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet)🌟 新 @@ -253,8 +255,8 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml -
-| Roboflow | ClearML ⭐ 新 | Comet ⭐ 新 | Neural Magic ⭐ 新 | -| :-----------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------: | +| Roboflow | ClearML ⭐ 新 | Comet ⭐ 新 | Neural Magic ⭐ 新 | +| :--------------------------------------------------------------------------------: | :-------------------------------------------------------------------------: | :-------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------: | | 将您的自定义数据集进行标注并直接导出到 YOLOv5 以进行训练 [Roboflow](https://roboflow.com/?ref=ultralytics) | 自动跟踪、可视化甚至远程训练 YOLOv5 [ClearML](https://cutt.ly/yolov5-readme-clearml)(开源!) | 永远免费,[Comet](https://bit.ly/yolov5-readme-comet)可让您保存 YOLOv5 模型、恢复训练以及交互式可视化和调试预测 | 使用 [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic),运行 YOLOv5 推理的速度最高可提高6倍 | ##
Ultralytics HUB
@@ -277,36 +279,36 @@ YOLOv5 超级容易上手,简单易学。我们优先考虑现实世界的结
图表笔记 -- **COCO AP val** 表示 mAP@0.5:0.95 指标,在 [COCO val2017](http://cocodataset.org) 数据集的 5000 张图像上测得, 图像包含 256 到 1536 各种推理大小。 -- **显卡推理速度** 为在 [COCO val2017](http://cocodataset.org) 数据集上的平均推理时间,使用 [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) V100实例,batchsize 为 32 。 -- **EfficientDet** 数据来自 [google/automl](https://github.com/google/automl) , batchsize 为32。 -- **复现命令** 为 `python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt` +- **COCO AP val** 表示 mAP@0.5:0.95 指标,在 [COCO val2017](http://cocodataset.org) 数据集的 5000 张图像上测得, 图像包含 256 到 1536 各种推理大小。 +- **显卡推理速度** 为在 [COCO val2017](http://cocodataset.org) 数据集上的平均推理时间,使用 [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) V100实例,batchsize 为 32 。 +- **EfficientDet** 数据来自 [google/automl](https://github.com/google/automl) , batchsize 为32。 +- **复现命令** 为 `python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt`
### 预训练模型 -| 模型 | 尺寸
(像素) | mAPval 50-95 | mAPval 50 | 推理速度 CPU b1 (ms) | 推理速度 V100 b1 (ms) | 速度 V100 b32 (ms) | 参数量 (M) | FLOPs
@640 (B) | -| --------------------------------------------------------------------------------------------------- | ------------------- | -------------------- | ------------------- | ------------------------------- | -------------------------------- | ------------------------------ | ----------------- | ---------------------- | -| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** | -| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 | -| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 | -| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 | -| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 | -| | | | | | | | | | -| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 | -| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | -| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | -| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | -| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x6.pt)
+[TTA][tta] | 1280 1536 | 55.0 **55.8** | 72.7 **72.7** | 3136 - | 26.2 - | 19.4 - | 140.7 - | 209.8
- | +| 模型 | 尺寸
(像素) | mAPval 50-95 | mAPval 50 | 推理速度 CPU b1 (ms) | 推理速度 V100 b1 (ms) | 速度 V100 b32 (ms) | 参数量 (M) | FLOPs
@640 (B) | +| ---------------------------------------------------------------------------------------------- | --------------- | -------------------- | ----------------- | --------------------------- | ---------------------------- | --------------------------- | --------------- | ---------------------- | +| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** | +| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 | +| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 | +| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 | +| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 | +| | | | | | | | | | +| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 | +| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | +| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | +| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | +| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x6.pt)
+[TTA] | 1280 1536 | 55.0 **55.8** | 72.7 **72.7** | 3136 - | 26.2 - | 19.4 - | 140.7 - | 209.8
- |
笔记 -- 所有模型都使用默认配置,训练 300 epochs。n和s模型使用 [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml) ,其他模型都使用 [hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml) 。 -- **mAPval**在单模型单尺度上计算,数据集使用 [COCO val2017](http://cocodataset.org) 。
复现命令 `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` -- **推理速度**在 COCO val 图像总体时间上进行平均得到,测试环境使用[AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/)实例。 NMS 时间 (大约 1 ms/img) 不包括在内。
复现命令 `python val.py --data coco.yaml --img 640 --task speed --batch 1` -- **TTA** [测试时数据增强](https://github.com/ultralytics/yolov5/issues/303) 包括反射和尺度变换。
复现命令 `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment` +- 所有模型都使用默认配置,训练 300 epochs。n和s模型使用 [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml) ,其他模型都使用 [hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml) 。 +- \*\*mAPval\*\*在单模型单尺度上计算,数据集使用 [COCO val2017](http://cocodataset.org) 。
复现命令 `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` +- **推理速度**在 COCO val 图像总体时间上进行平均得到,测试环境使用[AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/)实例。 NMS 时间 (大约 1 ms/img) 不包括在内。
复现命令 `python val.py --data coco.yaml --img 640 --task speed --batch 1` +- **TTA** [测试时数据增强](https://github.com/ultralytics/yolov5/issues/303) 包括反射和尺度变换。
复现命令 `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`
@@ -321,33 +323,33 @@ YOLOv5 [release v6.2](https://github.com/ultralytics/yolov5/releases) 带来对 我们使用 4xA100 实例在 ImageNet 上训练了 90 个 epochs 得到 YOLOv5-cls 分类模型,我们训练了 ResNet 和 EfficientNet 模型以及相同的默认训练设置以进行比较。我们将所有模型导出到 ONNX FP32 以进行 CPU 速度测试,并导出到 TensorRT FP16 以进行 GPU 速度测试。为了便于重现,我们在 Google 上进行了所有速度测试 [Colab Pro](https://colab.research.google.com/signup) 。 -| 模型 | 尺寸
(像素) | acc top1 | acc top5 | 训练时长 90 epochs 4xA100(小时) | 推理速度 ONNX CPU (ms) | 推理速度 TensorRT V100 (ms) | 参数 (M) | FLOPs
@640 (B) | -| -------------------------------------------------------------------------------------------------- | ------------------- | ---------------- | ---------------- | ------------------------------------------ | --------------------------------- | -------------------------------------- | --------------- | -----------------------| -| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** | -| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 | -| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 | -| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 | -| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 | -| | | | | | | | | | -| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 | -| [Resnetzch](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 | -| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 | -| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 | -| | | | | | | | | | -| [EfficientNet_b0](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 | -| [EfficientNet_b1](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 | -| [EfficientNet_b2](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 | -| [EfficientNet_b3](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 | +| 模型 | 尺寸
(像素) | acc top1 | acc top5 | 训练时长 90 epochs 4xA100(小时) | 推理速度 ONNX CPU (ms) | 推理速度 TensorRT V100 (ms) | 参数 (M) | FLOPs
@640 (B) | +| -------------------------------------------------------------------------------------------------- | --------------- | ---------------- | ---------------- | ------------------------------------ | ----------------------------- | ---------------------------------- | -------------- | ---------------------- | +| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** | +| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 | +| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 | +| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 | +| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 | +| | | | | | | | | | +| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 | +| [Resnetzch](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 | +| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 | +| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 | +| | | | | | | | | | +| [EfficientNet_b0](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 | +| [EfficientNet_b1](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 | +| [EfficientNet_b2](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 | +| [EfficientNet_b3](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 |
Table Notes (点击以展开) -- 所有模型都使用 SGD 优化器训练 90 个 epochs,都使用 `lr0=0.001` 和 `weight_decay=5e-5` 参数, 图像大小为 224 ,且都使用默认设置。
训练 log 可以查看 https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2 -- **准确性**都在单模型单尺度上计算,数据集使用 [ImageNet-1k](https://www.image-net.org/index.php) 。
复现命令 `python classify/val.py --data ../datasets/imagenet --img 224` -- **推理速度**是使用 100 个推理图像进行平均得到,测试环境使用谷歌 [Colab Pro](https://colab.research.google.com/signup) V100 高 RAM 实例。
复现命令 `python classify/val.py --data ../datasets/imagenet --img 224 --batch 1` -- **模型导出**到 FP32 的 ONNX 和 FP16 的 TensorRT 使用 `export.py` 。
复现命令 `python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224` -
- +- 所有模型都使用 SGD 优化器训练 90 个 epochs,都使用 `lr0=0.001` 和 `weight_decay=5e-5` 参数, 图像大小为 224 ,且都使用默认设置。
训练 log 可以查看 https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2 +- **准确性**都在单模型单尺度上计算,数据集使用 [ImageNet-1k](https://www.image-net.org/index.php) 。
复现命令 `python classify/val.py --data ../datasets/imagenet --img 224` +- **推理速度**是使用 100 个推理图像进行平均得到,测试环境使用谷歌 [Colab Pro](https://colab.research.google.com/signup) V100 高 RAM 实例。
复现命令 `python classify/val.py --data ../datasets/imagenet --img 224 --batch 1` +- **模型导出**到 FP32 的 ONNX 和 FP16 的 TensorRT 使用 `export.py` 。
复现命令 `python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224` + +
分类训练示例  Open In Colab @@ -382,7 +384,9 @@ python classify/predict.py --weights yolov5s-cls.pt --data data/images/bus.jpg ``` ```python -model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s-cls.pt') # load from PyTorch Hub +model = torch.hub.load( + "ultralytics/yolov5", "custom", "yolov5s-cls.pt" +) # load from PyTorch Hub ``` ### 模型导出 @@ -438,8 +442,8 @@ python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --inclu YOLOv5 在两种不同的 License 下可用: -- **GPL-3.0 License**: 查看 [License](https://github.com/ultralytics/yolov5/blob/master/LICENSE) 文件的详细信息。 -- **企业License**:在没有 GPL-3.0 开源要求的情况下为商业产品开发提供更大的灵活性。典型用例是将 Ultralytics 软件和 AI 模型嵌入到商业产品和应用程序中。在以下位置申请企业许可证 [Ultralytics 许可](https://ultralytics.com/license) 。 +- **GPL-3.0 License**: 查看 [License](https://github.com/ultralytics/yolov5/blob/master/LICENSE) 文件的详细信息。 +- **企业License**:在没有 GPL-3.0 开源要求的情况下为商业产品开发提供更大的灵活性。典型用例是将 Ultralytics 软件和 AI 模型嵌入到商业产品和应用程序中。在以下位置申请企业许可证 [Ultralytics 许可](https://ultralytics.com/license) 。 ##
联系我们
diff --git a/classify/tutorial.ipynb b/classify/tutorial.ipynb index 03c1dd0bc0de..cc18aa934039 100644 --- a/classify/tutorial.ipynb +++ b/classify/tutorial.ipynb @@ -78,7 +78,7 @@ "source": [ "# 1. Predict\n", "\n", - "`classify/predict.py` runs YOLOv5 Classifcation inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/predict-cls`. Example inference sources are:\n", + "`classify/predict.py` runs YOLOv5 Classification inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/predict-cls`. Example inference sources are:\n", "\n", "```shell\n", "python classify/predict.py --source 0 # webcam\n", diff --git a/classify/val.py b/classify/val.py index 8657036fb2a2..03ba817d5ea2 100644 --- a/classify/val.py +++ b/classify/val.py @@ -128,9 +128,9 @@ def run( LOGGER.info(f"{'Class':>24}{'Images':>12}{'top1_acc':>12}{'top5_acc':>12}") LOGGER.info(f"{'all':>24}{targets.shape[0]:>12}{top1:>12.3g}{top5:>12.3g}") for i, c in model.names.items(): - aci = acc[targets == i] - top1i, top5i = aci.mean(0).tolist() - LOGGER.info(f"{c:>24}{aci.shape[0]:>12}{top1i:>12.3g}{top5i:>12.3g}") + acc_i = acc[targets == i] + top1i, top5i = acc_i.mean(0).tolist() + LOGGER.info(f"{c:>24}{acc_i.shape[0]:>12}{top1i:>12.3g}{top5i:>12.3g}") # Print results t = tuple(x.t / len(dataloader.dataset.samples) * 1E3 for x in dt) # speeds per image diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index c8b88357cb6d..e18b2ac69678 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -3,23 +3,33 @@ # Image is CUDA-optimized for YOLOv5 single/multi-GPU training and inference # Start FROM NVIDIA PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch -FROM nvcr.io/nvidia/pytorch:22.12-py3 -RUN rm -rf /opt/pytorch # remove 1.2GB dir +# FROM docker.io/pytorch/pytorch:latest +FROM pytorch/pytorch:latest # Downloads to user config dir ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ # Install linux packages -RUN apt update && apt install --no-install-recommends -y zip htop screen libgl1-mesa-glx +ENV DEBIAN_FRONTEND noninteractive +RUN apt update +RUN TZ=Etc/UTC apt install -y tzdata +RUN apt install --no-install-recommends -y gcc git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg +# RUN alias python=python3 -# Install pip packages (uninstall torch nightly in favor of stable) +# Create working directory +RUN mkdir -p /usr/src/app +WORKDIR /usr/src/app + +# Copy contents +# COPY . /usr/src/app (issues as not a .git directory) +RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app + +# Install pip packages COPY requirements.txt . 
-RUN python -m pip install --upgrade pip wheel -RUN pip uninstall -y Pillow torchtext torch torchvision -RUN pip install --no-cache -U pycocotools # install --upgrade -RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook 'opencv-python<4.6.0.66' \ - Pillow>=9.1.0 ultralytics \ - --extra-index-url https://download.pytorch.org/whl/cu113 +RUN python3 -m pip install --upgrade pip wheel +RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook \ + coremltools onnx onnx-simplifier onnxruntime openvino-dev>=2022.3 + # tensorflow tensorflowjs \ # Create working directory RUN mkdir -p /usr/src/app @@ -32,6 +42,9 @@ RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app # Set environment variables ENV OMP_NUM_THREADS=1 +# Cleanup +ENV DEBIAN_FRONTEND teletype + # Usage Examples ------------------------------------------------------------------------------------------------------- diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 22da87034f24..1e7f38e0d677 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -84,10 +84,6 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, self.csv = True # always log to csv # Messages - # if not wandb: - # prefix = colorstr('Weights & Biases: ') - # s = f"{prefix}run 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs in Weights & Biases" - # self.logger.info(s) if not clearml: prefix = colorstr('ClearML: ') s = f"{prefix}run 'pip install clearml' to automatically track, visualize and remotely train YOLOv5 🚀 in ClearML" @@ -105,14 +101,8 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, # W&B if wandb and 'wandb' in self.include: - wandb_artifact_resume = isinstance(self.opt.resume, str) and self.opt.resume.startswith('wandb-artifact://') - run_id = torch.load(self.weights).get('wandb_id') if self.opt.resume and not wandb_artifact_resume else None self.opt.hyp = self.hyp # add hyperparameters - self.wandb = WandbLogger(self.opt, run_id) - # temp warn. because nested artifacts not supported after 0.12.10 - # if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.11'): - # s = "YOLOv5 temporarily requires wandb version 0.12.10 or below. Some features may not work as expected." 
- # self.logger.warning(s) + self.wandb = WandbLogger(self.opt) else: self.wandb = None @@ -175,7 +165,7 @@ def on_pretrain_routine_end(self, labels, names): self.comet_logger.on_pretrain_routine_end(paths) def on_train_batch_end(self, model, ni, imgs, targets, paths, vals): - log_dict = dict(zip(self.keys[0:3], vals)) + log_dict = dict(zip(self.keys[:3], vals)) # Callback runs on train batch end # ni: number integrated batches (since train start) if self.plots: @@ -221,10 +211,10 @@ def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix) # Callback runs on val end if self.wandb or self.clearml: files = sorted(self.save_dir.glob('val*.jpg')) - if self.wandb: - self.wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in files]}) - if self.clearml: - self.clearml.log_debug_samples(files, title='Validation') + if self.wandb: + self.wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in files]}) + if self.clearml: + self.clearml.log_debug_samples(files, title='Validation') if self.comet_logger: self.comet_logger.on_val_end(nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix) @@ -253,7 +243,7 @@ def on_fit_epoch_end(self, vals, epoch, best_fitness, fi): for i, name in enumerate(self.best_keys): self.wandb.wandb_run.summary[name] = best_results[i] # log best results in the summary self.wandb.log(x) - self.wandb.end_epoch(best_result=best_fitness == fi) + self.wandb.end_epoch() if self.clearml: self.clearml.current_epoch_logged_images = set() # reset epoch image limit diff --git a/utils/loggers/clearml/README.md b/utils/loggers/clearml/README.md index 3cf4c268583f..ca41c040193c 100644 --- a/utils/loggers/clearml/README.md +++ b/utils/loggers/clearml/README.md @@ -23,7 +23,6 @@ And so much more. It's up to you how many of these tools you want to use, you ca ![ClearML scalars dashboard](https://github.com/thepycoder/clearml_screenshots/raw/main/experiment_manager_with_compare.gif) -

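As background for the ClearML README hunks that follow, here is a minimal sketch of the SDK calls the integration builds on. This is purely illustrative — YOLOv5 wires this up automatically through its logger once `clearml` is installed — and only the documented `Task.init` / `Task.connect` APIs are assumed:

```python
from clearml import Task

# Illustrative only: YOLOv5's ClearML logger performs the equivalent of this
# for you; you do not need to add these lines to train.py yourself.
task = Task.init(project_name='YOLOv5', task_name='Training')  # register the run
task.connect({'epochs': 3, 'batch_size': 16})  # attach (hyper)parameters to the task
```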
@@ -35,15 +34,15 @@ Either sign up for free to the [ClearML Hosted Service](https://cutt.ly/yolov5-t

1. Install the `clearml` python package:

-   ```bash
-   pip install clearml
-   ```
+   ```bash
+   pip install clearml
+   ```

1. Connect the ClearML SDK to the server by [creating credentials](https://app.clear.ml/settings/workspace-configuration) (go to the top right: Settings -> Workspace -> Create new credentials), then execute the command below and follow the instructions:

-   ```bash
-   clearml-init
-   ```
+   ```bash
+   clearml-init
+   ```

That's it! You're done 😎

@@ -60,18 +59,20 @@ pip install clearml>=1.2.0

This will enable integration with the YOLOv5 training script. Every training run from now on will be captured and stored by the ClearML experiment manager. If you want to change the `project_name` or `task_name`, use the `--project` and `--name` arguments of the `train.py` script; by default the project will be called `YOLOv5` and the task `Training`.

-PLEASE NOTE: ClearML uses `/` as a delimter for subprojects, so be careful when using `/` in your project name!
+PLEASE NOTE: ClearML uses `/` as a delimiter for subprojects, so be careful when using `/` in your project name!

```bash
python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache
```

or with custom project and task name:
+
```bash
python train.py --project my_project --name my_training --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache
```

This will capture:
+
- Source code + uncommitted changes
- Installed packages
- (Hyper)parameters

@@ -94,7 +95,7 @@ There's even more we can do with all of this information, like hyperparameter opti

## 🔗 Dataset Version Management

-Versioning your data separately from your code is generally a good idea and makes it easy to aqcuire the latest version too. This repository supports supplying a dataset version ID and it will make sure to get the data if it's not there yet. Next to that, this workflow also saves the used dataset ID as part of the task parameters, so you will always know for sure which data was used in which experiment!
+Versioning your data separately from your code is generally a good idea and makes it easy to acquire the latest version too. This repository supports supplying a dataset version ID, and it will make sure to get the data if it's not there yet. Next to that, this workflow also saves the used dataset ID as part of the task parameters, so you will always know for sure which data was used in which experiment!

![ClearML Dataset Interface](https://github.com/thepycoder/clearml_screenshots/raw/main/clearml_data.gif)

@@ -112,6 +113,7 @@ The YOLOv5 repository supports a number of different datasets by using yaml file
 |_ LICENSE
 |_ README.txt
 ```
+
 But this can be any dataset you wish. Feel free to use your own, as long as you keep to this folder structure.

 Next, ⚠️**copy the corresponding yaml file to the root of the dataset folder**⚠️. This yaml file contains the information ClearML will need to properly use the dataset. You can make this yourself too, of course, just follow the structure of the example yamls.
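To make that yaml requirement concrete, here is a minimal illustrative config placed at the dataset root. All values below are placeholders, not a real dataset; the keys match the ones the next hunk lists (`path`, `train`, `test`, `val`, `nc`, `names`):

```yaml
# data.yaml at the dataset root -- illustrative values only
path: .                  # dataset root (the folder this file lives in)
train: images/train      # train images, relative to path
val: images/val          # validation images, relative to path
test:                    # optional test images
nc: 3                    # number of classes
names: ['cat', 'dog', 'bird']  # one name per class id
```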
@@ -132,13 +134,15 @@ Basically we need the following keys: `path`, `train`, `test`, `val`, `nc`, `nam ### Upload Your Dataset -To get this dataset into ClearML as a versionned dataset, go to the dataset root folder and run the following command: +To get this dataset into ClearML as a versioned dataset, go to the dataset root folder and run the following command: + ```bash cd coco128 clearml-data sync --project YOLOv5 --name coco128 --folder . ``` The command `clearml-data sync` is actually a shorthand command. You could also run these commands one after the other: + ```bash # Optionally add --parent if you want to base # this version on another dataset version, so no duplicate files are uploaded! @@ -177,7 +181,7 @@ python utils/loggers/clearml/hpo.py ## 🤯 Remote Execution (advanced) -Running HPO locally is really handy, but what if we want to run our experiments on a remote machine instead? Maybe you have access to a very powerful GPU machine on-site or you have some budget to use cloud GPUs. +Running HPO locally is really handy, but what if we want to run our experiments on a remote machine instead? Maybe you have access to a very powerful GPU machine on-site, or you have some budget to use cloud GPUs. This is where the ClearML Agent comes into play. Check out what the agent can do here: - [YouTube video](https://youtu.be/MX3BrXnaULs) @@ -186,6 +190,7 @@ This is where the ClearML Agent comes into play. Check out what the agent can do In short: every experiment tracked by the experiment manager contains enough information to reproduce it on a different machine (installed packages, uncommitted changes etc.). So a ClearML agent does just that: it listens to a queue for incoming tasks and when it finds one, it recreates the environment and runs it while still reporting scalars, plots etc. to the experiment manager. You can turn any machine (a cloud VM, a local GPU machine, your own laptop ... ) into a ClearML agent by simply running: + ```bash clearml-agent daemon --queue [--docker] ``` @@ -194,11 +199,11 @@ clearml-agent daemon --queue [--docker] With our agent running, we can give it some work. Remember from the HPO section that we can clone a task and edit the hyperparameters? We can do that from the interface too! -🪄 Clone the experiment by right clicking it +🪄 Clone the experiment by right-clicking it 🎯 Edit the hyperparameters to what you wish them to be -⏳ Enqueue the task to any of the queues by right clicking it +⏳ Enqueue the task to any of the queues by right-clicking it ![Enqueue a task from the UI](https://github.com/thepycoder/clearml_screenshots/raw/main/enqueue.gif) @@ -206,7 +211,8 @@ With our agent running, we can give it some work. Remember from the HPO section Now you can clone a task like we explained above, or simply mark your current script by adding `task.execute_remotely()` and on execution it will be put into a queue, for the agent to start working on! -To run the YOLOv5 training script remotely, all you have to do is add this line to the training.py script after the clearml logger has been instatiated: +To run the YOLOv5 training script remotely, all you have to do is add this line to the training.py script after the clearml logger has been instantiated: + ```python # ... 
# Loggers
@@ -214,16 +220,17 @@ data_dict = None
 if RANK in {-1, 0}:
     loggers = Loggers(save_dir, weights, opt, hyp, LOGGER)  # loggers instance
     if loggers.clearml:
-        loggers.clearml.task.execute_remotely(queue='my_queue')  # <------ ADD THIS LINE
+        loggers.clearml.task.execute_remotely(queue="my_queue")  # <------ ADD THIS LINE
         # Data_dict is either None if the user did not choose a ClearML dataset, or is filled in by ClearML
         data_dict = loggers.clearml.data_dict
 # ...
 ```
+
 When running the training script after this change, Python will run the script up until that line, after which it will package the code and send it to the queue instead!

 ### Autoscaling workers

-ClearML comes with autoscalers too! This tool will automatically spin up new remote machines in the cloud of your choice (AWS, GCP, Azure) and turn them into ClearML agents for you whenever there are experiments detected in the queue. Once the tasks are processed, the autoscaler will automatically shut down the remote machines and you stop paying!
+ClearML comes with autoscalers too! This tool will automatically spin up new remote machines in the cloud of your choice (AWS, GCP, Azure) and turn them into ClearML agents for you whenever there are experiments detected in the queue. Once the tasks are processed, the autoscaler will automatically shut down the remote machines, and you stop paying!

 Check out the autoscalers getting started video below.

diff --git a/utils/loggers/comet/README.md b/utils/loggers/comet/README.md
index 8a361e2b211d..47e6a45654b8 100644
--- a/utils/loggers/comet/README.md
+++ b/utils/loggers/comet/README.md
@@ -23,7 +23,7 @@ pip install comet_ml

 There are two ways to configure Comet with YOLOv5.

-You can either set your credentials through enviroment variables
+You can either set your credentials through environment variables

 **Environment Variables**

@@ -49,11 +49,12 @@ project_name= # This will default to 'yolov5'
 python train.py --img 640 --batch 16 --epochs 5 --data coco128.yaml --weights yolov5s.pt
 ```

-That's it! Comet will automatically log your hyperparameters, command line arguments, training and valiation metrics. You can visualize and analyze your runs in the Comet UI
+That's it! Comet will automatically log your hyperparameters, command line arguments, training and validation metrics. You can visualize and analyze your runs in the Comet UI

 yolo-ui

 # Try out an Example!
+
 Check out an example of a [completed run here](https://www.comet.com/examples/comet-example-yolov5/a0e29e0e9b984e4a822db2a62d0cb357?experiment-tab=chart&showOutliers=true&smoothing=0&transformY=smoothing&xAxis=step&utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github)

 Or better yet, try it out yourself in this Colab Notebook

@@ -65,6 +66,7 @@ By default, Comet will log the following items

 ## Metrics
+
 - Box Loss, Object Loss, Classification Loss for the training and validation data
 - mAP_0.5, mAP_0.5:0.95 metrics for the validation data.
- Precision and Recall for the validation data

@@ -121,7 +123,6 @@ You can control the frequency of logged predictions and the associated images by

 Here is an [example project using the Panel](https://www.comet.com/examples/comet-example-yolov5?shareable=YcwMiJaZSXfcEXpGOHDD12vA1&utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github)

-
 ```shell
 python train.py \
   --img 640 \
@@ -192,6 +193,7 @@ If you would like to use a dataset from Comet Artifacts, set the `path` variable
 # contents of artifact.yaml file
 path: "comet:///:"
 ```
+
 Then pass this file to your training script in the following way

 ```shell
 python train.py \
@@ -221,7 +223,7 @@ python train.py \

 ## Hyperparameter Search with the Comet Optimizer

-YOLOv5 is also integrated with Comet's Optimizer, making is simple to visualie hyperparameter sweeps in the Comet UI.
+YOLOv5 is also integrated with Comet's Optimizer, making it simple to visualize hyperparameter sweeps in the Comet UI.

 ### Configuring an Optimizer Sweep

diff --git a/utils/loggers/wandb/README.md b/utils/loggers/wandb/README.md
deleted file mode 100644
index d78324b4c8e9..000000000000
--- a/utils/loggers/wandb/README.md
+++ /dev/null
@@ -1,162 +0,0 @@
-📚 This guide explains how to use **Weights & Biases** (W&B) with YOLOv5 🚀. UPDATED 29 September 2021.
-
-- [About Weights & Biases](#about-weights-&-biases)
-- [First-Time Setup](#first-time-setup)
-- [Viewing runs](#viewing-runs)
-- [Disabling wandb](#disabling-wandb)
-- [Advanced Usage: Dataset Versioning and Evaluation](#advanced-usage)
-- [Reports: Share your work with the world!](#reports)
-
-## About Weights & Biases
-
-Think of [W&B](https://wandb.ai/site?utm_campaign=repo_yolo_wandbtutorial) like GitHub for machine learning models. With a few lines of code, save everything you need to debug, compare and reproduce your models — architecture, hyperparameters, git commits, model weights, GPU usage, and even datasets and predictions.
-
-Used by top researchers including teams at OpenAI, Lyft, Github, and MILA, W&B is part of the new standard of best practices for machine learning. How W&B can help you optimize your machine learning workflows:
-
-- [Debug](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#Free-2) model performance in real time
-- [GPU usage](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#System-4) visualized automatically
-- [Custom charts](https://wandb.ai/wandb/customizable-charts/reports/Powerful-Custom-Charts-To-Debug-Model-Peformance--VmlldzoyNzY4ODI) for powerful, extensible visualization
-- [Share insights](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#Share-8) interactively with collaborators
-- [Optimize hyperparameters](https://docs.wandb.com/sweeps) efficiently
-- [Track](https://docs.wandb.com/artifacts) datasets, pipelines, and production models
-
-## First-Time Setup
-
- Toggle Details -When you first train, W&B will prompt you to create a new account and will generate an **API key** for you. If you are an existing user you can retrieve your key from https://wandb.ai/authorize. This key is used to tell W&B where to log your data. You only need to supply your key once, and then it is remembered on the same device. - -W&B will create a cloud **project** (default is 'YOLOv5') for your training runs, and each new training run will be provided a unique run **name** within that project as project/name. You can also manually set your project and run name as: - -```shell -$ python train.py --project ... --name ... -``` - -YOLOv5 notebook example: Open In Colab Open In Kaggle -Screen Shot 2021-09-29 at 10 23 13 PM - -
- -## Viewing Runs - -
- Toggle Details -Run information streams from your environment to the W&B cloud console as you train. This allows you to monitor and even cancel runs in realtime . All important information is logged: - -- Training & Validation losses -- Metrics: Precision, Recall, mAP@0.5, mAP@0.5:0.95 -- Learning Rate over time -- A bounding box debugging panel, showing the training progress over time -- GPU: Type, **GPU Utilization**, power, temperature, **CUDA memory usage** -- System: Disk I/0, CPU utilization, RAM memory usage -- Your trained model as W&B Artifact -- Environment: OS and Python types, Git repository and state, **training command** - -

Weights & Biases dashboard

-
- -## Disabling wandb - -- training after running `wandb disabled` inside that directory creates no wandb run - ![Screenshot (84)](https://user-images.githubusercontent.com/15766192/143441777-c780bdd7-7cb4-4404-9559-b4316030a985.png) - -- To enable wandb again, run `wandb online` - ![Screenshot (85)](https://user-images.githubusercontent.com/15766192/143441866-7191b2cb-22f0-4e0f-ae64-2dc47dc13078.png) - -## Advanced Usage - -You can leverage W&B artifacts and Tables integration to easily visualize and manage your datasets, models and training evaluations. Here are some quick examples to get you started. - -
-

1: Train and Log Evaluation simultaneousy

- This is an extension of the previous section, but it'll also training after uploading the dataset. This also evaluation Table - Evaluation table compares your predictions and ground truths across the validation set for each epoch. It uses the references to the already uploaded datasets, - so no images will be uploaded from your system more than once. -
- Usage - Code $ python train.py --upload_data val - -![Screenshot from 2021-11-21 17-40-06](https://user-images.githubusercontent.com/15766192/142761183-c1696d8c-3f38-45ab-991a-bb0dfd98ae7d.png) - -
- -

2. Visualize and Version Datasets

- Log, visualize, dynamically query, and understand your data with W&B Tables. You can use the following command to log your dataset as a W&B Table. This will generate a {dataset}_wandb.yaml file which can be used to train from dataset artifact. -
- Usage - Code $ python utils/logger/wandb/log_dataset.py --project ... --name ... --data .. - -![Screenshot (64)](https://user-images.githubusercontent.com/15766192/128486078-d8433890-98a3-4d12-8986-b6c0e3fc64b9.png) - -
- -

3: Train using dataset artifact

- When you upload a dataset as described in the first section, you get a new config file with an added `_wandb` to its name. This file contains the information that - can be used to train a model directly from the dataset artifact. This also logs evaluation -
- Usage - Code $ python train.py --data {data}_wandb.yaml - -![Screenshot (72)](https://user-images.githubusercontent.com/15766192/128979739-4cf63aeb-a76f-483f-8861-1c0100b938a5.png) - -
- -

4: Save model checkpoints as artifacts

- To enable saving and versioning checkpoints of your experiment, pass `--save_period n` with the base cammand, where `n` represents checkpoint interval. - You can also log both the dataset and model checkpoints simultaneously. If not passed, only the final model will be logged - -
- Usage - Code $ python train.py --save_period 1 - -![Screenshot (68)](https://user-images.githubusercontent.com/15766192/128726138-ec6c1f60-639d-437d-b4ee-3acd9de47ef3.png) - -
- -
- -

5: Resume runs from checkpoint artifacts.

-Any run can be resumed using artifacts if the --resume argument starts with wandb-artifact:// prefix followed by the run path, i.e, wandb-artifact://username/project/runid . This doesn't require the model checkpoint to be present on the local system. - -
- Usage - Code $ python train.py --resume wandb-artifact://{run_path} - -![Screenshot (70)](https://user-images.githubusercontent.com/15766192/128728988-4e84b355-6c87-41ae-a591-14aecf45343e.png) - -
- -

6: Resume runs from dataset artifact & checkpoint artifacts.

- Local dataset or model checkpoints are not required. This can be used to resume runs directly on a different device - The syntax is same as the previous section, but you'll need to lof both the dataset and model checkpoints as artifacts, i.e, set bot --upload_dataset or - train from _wandb.yaml file and set --save_period - -
- Usage - Code $ python train.py --resume wandb-artifact://{run_path} - -![Screenshot (70)](https://user-images.githubusercontent.com/15766192/128728988-4e84b355-6c87-41ae-a591-14aecf45343e.png) - -
- -
- -

Reports

-W&B Reports can be created from your saved runs for sharing online. Once a report is created you will receive a link you can use to publically share your results. Here is an example report created from the COCO128 tutorial trainings of all four YOLOv5 models ([link](https://wandb.ai/glenn-jocher/yolov5_tutorial/reports/YOLOv5-COCO128-Tutorial-Results--VmlldzozMDI5OTY)). - -Weights & Biases Reports - -## Environments - -YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled): - -- **Google Colab and Kaggle** notebooks with free GPU: Open In Colab Open In Kaggle -- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart) -- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart) -- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) Docker Pulls - -## Status - -![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg) - -If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), validation ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit. 
diff --git a/utils/loggers/wandb/log_dataset.py b/utils/loggers/wandb/log_dataset.py deleted file mode 100644 index 06e81fb69307..000000000000 --- a/utils/loggers/wandb/log_dataset.py +++ /dev/null @@ -1,27 +0,0 @@ -import argparse - -from wandb_utils import WandbLogger - -from utils.general import LOGGER - -WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' - - -def create_dataset_artifact(opt): - logger = WandbLogger(opt, None, job_type='Dataset Creation') # TODO: return value unused - if not logger.wandb: - LOGGER.info("install wandb using `pip install wandb` to log the dataset") - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path') - parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset') - parser.add_argument('--project', type=str, default='YOLOv5', help='name of W&B Project') - parser.add_argument('--entity', default=None, help='W&B entity') - parser.add_argument('--name', type=str, default='log dataset', help='name of W&B run') - - opt = parser.parse_args() - opt.resume = False # Explicitly disallow resume check for dataset upload job - - create_dataset_artifact(opt) diff --git a/utils/loggers/wandb/sweep.py b/utils/loggers/wandb/sweep.py deleted file mode 100644 index d49ea6f2778b..000000000000 --- a/utils/loggers/wandb/sweep.py +++ /dev/null @@ -1,41 +0,0 @@ -import sys -from pathlib import Path - -import wandb - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[3] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH - -from train import parse_opt, train -from utils.callbacks import Callbacks -from utils.general import increment_path -from utils.torch_utils import select_device - - -def sweep(): - wandb.init() - # Get hyp dict from sweep agent. Copy because train() modifies parameters which confused wandb. - hyp_dict = vars(wandb.config).get("_items").copy() - - # Workaround: get necessary opt args - opt = parse_opt(known=True) - opt.batch_size = hyp_dict.get("batch_size") - opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve)) - opt.epochs = hyp_dict.get("epochs") - opt.nosave = True - opt.data = hyp_dict.get("data") - opt.weights = str(opt.weights) - opt.cfg = str(opt.cfg) - opt.data = str(opt.data) - opt.hyp = str(opt.hyp) - opt.project = str(opt.project) - device = select_device(opt.device, batch_size=opt.batch_size) - - # train - train(hyp_dict, opt, device, callbacks=Callbacks()) - - -if __name__ == "__main__": - sweep() diff --git a/utils/loggers/wandb/sweep.yaml b/utils/loggers/wandb/sweep.yaml deleted file mode 100644 index 688b1ea0285f..000000000000 --- a/utils/loggers/wandb/sweep.yaml +++ /dev/null @@ -1,143 +0,0 @@ -# Hyperparameters for training -# To set range- -# Provide min and max values as: -# parameter: -# -# min: scalar -# max: scalar -# OR -# -# Set a specific list of search space- -# parameter: -# values: [scalar1, scalar2, scalar3...] 
-# -# You can use grid, bayesian and hyperopt search strategy -# For more info on configuring sweeps visit - https://docs.wandb.ai/guides/sweeps/configuration - -program: utils/loggers/wandb/sweep.py -method: random -metric: - name: metrics/mAP_0.5 - goal: maximize - -parameters: - # hyperparameters: set either min, max range or values list - data: - value: "data/coco128.yaml" - batch_size: - values: [64] - epochs: - values: [10] - - lr0: - distribution: uniform - min: 1e-5 - max: 1e-1 - lrf: - distribution: uniform - min: 0.01 - max: 1.0 - momentum: - distribution: uniform - min: 0.6 - max: 0.98 - weight_decay: - distribution: uniform - min: 0.0 - max: 0.001 - warmup_epochs: - distribution: uniform - min: 0.0 - max: 5.0 - warmup_momentum: - distribution: uniform - min: 0.0 - max: 0.95 - warmup_bias_lr: - distribution: uniform - min: 0.0 - max: 0.2 - box: - distribution: uniform - min: 0.02 - max: 0.2 - cls: - distribution: uniform - min: 0.2 - max: 4.0 - cls_pw: - distribution: uniform - min: 0.5 - max: 2.0 - obj: - distribution: uniform - min: 0.2 - max: 4.0 - obj_pw: - distribution: uniform - min: 0.5 - max: 2.0 - iou_t: - distribution: uniform - min: 0.1 - max: 0.7 - anchor_t: - distribution: uniform - min: 2.0 - max: 8.0 - fl_gamma: - distribution: uniform - min: 0.0 - max: 4.0 - hsv_h: - distribution: uniform - min: 0.0 - max: 0.1 - hsv_s: - distribution: uniform - min: 0.0 - max: 0.9 - hsv_v: - distribution: uniform - min: 0.0 - max: 0.9 - degrees: - distribution: uniform - min: 0.0 - max: 45.0 - translate: - distribution: uniform - min: 0.0 - max: 0.9 - scale: - distribution: uniform - min: 0.0 - max: 0.9 - shear: - distribution: uniform - min: 0.0 - max: 10.0 - perspective: - distribution: uniform - min: 0.0 - max: 0.001 - flipud: - distribution: uniform - min: 0.0 - max: 1.0 - fliplr: - distribution: uniform - min: 0.0 - max: 1.0 - mosaic: - distribution: uniform - min: 0.0 - max: 1.0 - mixup: - distribution: uniform - min: 0.0 - max: 1.0 - copy_paste: - distribution: uniform - min: 0.0 - max: 1.0 diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index 238f4edbf2a0..6bc2ec510d0a 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -1,110 +1,32 @@ -"""Utilities and tools for tracking runs with Weights & Biases.""" +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# WARNING ⚠️ wandb is deprecated and will be removed in future release. +# See supported integrations at https://github.com/ultralytics/yolov5#integrations import logging import os import sys from contextlib import contextmanager from pathlib import Path -from typing import Dict -import yaml -from tqdm import tqdm +from utils.general import LOGGER, colorstr FILE = Path(__file__).resolve() ROOT = FILE.parents[3] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH - -from utils.dataloaders import LoadImagesAndLabels, img2label_paths -from utils.general import LOGGER, check_dataset, check_file +RANK = int(os.getenv('RANK', -1)) +DEPRECATION_WARNING = f"{colorstr('wandb')}: WARNING ⚠️ wandb is deprecated and will be removed in a future release. " \ + f"See supported integrations at https://github.com/ultralytics/yolov5#integrations." 
try: import wandb assert hasattr(wandb, '__version__') # verify package import not local dir + LOGGER.warning(DEPRECATION_WARNING) except (ImportError, AssertionError): wandb = None -RANK = int(os.getenv('RANK', -1)) -WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' - - -def remove_prefix(from_string, prefix=WANDB_ARTIFACT_PREFIX): - return from_string[len(prefix):] - - -def check_wandb_config_file(data_config_file): - wandb_config = '_wandb.'.join(data_config_file.rsplit('.', 1)) # updated data.yaml path - if Path(wandb_config).is_file(): - return wandb_config - return data_config_file - - -def check_wandb_dataset(data_file): - is_trainset_wandb_artifact = False - is_valset_wandb_artifact = False - if isinstance(data_file, dict): - # In that case another dataset manager has already processed it and we don't have to - return data_file - if check_file(data_file) and data_file.endswith('.yaml'): - with open(data_file, errors='ignore') as f: - data_dict = yaml.safe_load(f) - is_trainset_wandb_artifact = isinstance(data_dict['train'], - str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX) - is_valset_wandb_artifact = isinstance(data_dict['val'], - str) and data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX) - if is_trainset_wandb_artifact or is_valset_wandb_artifact: - return data_dict - else: - return check_dataset(data_file) - - -def get_run_info(run_path): - run_path = Path(remove_prefix(run_path, WANDB_ARTIFACT_PREFIX)) - run_id = run_path.stem - project = run_path.parent.stem - entity = run_path.parent.parent.stem - model_artifact_name = 'run_' + run_id + '_model' - return entity, project, run_id, model_artifact_name - - -def check_wandb_resume(opt): - process_wandb_config_ddp_mode(opt) if RANK not in [-1, 0] else None - if isinstance(opt.resume, str): - if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): - if RANK not in [-1, 0]: # For resuming DDP runs - entity, project, run_id, model_artifact_name = get_run_info(opt.resume) - api = wandb.Api() - artifact = api.artifact(entity + '/' + project + '/' + model_artifact_name + ':latest') - modeldir = artifact.download() - opt.weights = str(Path(modeldir) / "last.pt") - return True - return None - - -def process_wandb_config_ddp_mode(opt): - with open(check_file(opt.data), errors='ignore') as f: - data_dict = yaml.safe_load(f) # data dict - train_dir, val_dir = None, None - if isinstance(data_dict['train'], str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX): - api = wandb.Api() - train_artifact = api.artifact(remove_prefix(data_dict['train']) + ':' + opt.artifact_alias) - train_dir = train_artifact.download() - train_path = Path(train_dir) / 'data/images/' - data_dict['train'] = str(train_path) - - if isinstance(data_dict['val'], str) and data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX): - api = wandb.Api() - val_artifact = api.artifact(remove_prefix(data_dict['val']) + ':' + opt.artifact_alias) - val_dir = val_artifact.download() - val_path = Path(val_dir) / 'data/images/' - data_dict['val'] = str(val_path) - if train_dir or val_dir: - ddp_data_path = str(Path(val_dir) / 'wandb_local_data.yaml') - with open(ddp_data_path, 'w') as f: - yaml.safe_dump(data_dict, f) - opt.data = ddp_data_path - class WandbLogger(): """Log training runs, datasets, models, and predictions to Weights & Biases. 
@@ -132,38 +54,16 @@ def __init__(self, opt, run_id=None, job_type='Training'): job_type (str) -- To set the job_type for this run """ - # Temporary-fix - if opt.upload_dataset: - opt.upload_dataset = False - # LOGGER.info("Uploading Dataset functionality is not being supported temporarily due to a bug.") - # Pre-training routine -- self.job_type = job_type - self.wandb, self.wandb_run = wandb, None if not wandb else wandb.run + self.wandb, self.wandb_run = wandb, wandb.run if wandb else None self.val_artifact, self.train_artifact = None, None self.train_artifact_path, self.val_artifact_path = None, None self.result_artifact = None self.val_table, self.result_table = None, None - self.bbox_media_panel_images = [] - self.val_table_path_map = None self.max_imgs_to_log = 16 - self.wandb_artifact_data_dict = None self.data_dict = None - # It's more elegant to stick to 1 wandb.init call, - # but useful config data is overwritten in the WandbLogger's wandb.init call - if isinstance(opt.resume, str): # checks resume from artifact - if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): - entity, project, run_id, model_artifact_name = get_run_info(opt.resume) - model_artifact_name = WANDB_ARTIFACT_PREFIX + model_artifact_name - assert wandb, 'install wandb to resume wandb runs' - # Resume wandb-artifact:// runs here| workaround for not overwriting wandb.config - self.wandb_run = wandb.init(id=run_id, - project=project, - entity=entity, - resume='allow', - allow_val_change=True) - opt.resume = model_artifact_name - elif self.wandb: + if self.wandb: self.wandb_run = wandb.init(config=opt, resume="allow", project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem, @@ -172,51 +72,15 @@ def __init__(self, opt, run_id=None, job_type='Training'): job_type=job_type, id=run_id, allow_val_change=True) if not wandb.run else wandb.run + if self.wandb_run: if self.job_type == 'Training': - if opt.upload_dataset: - if not opt.resume: - self.wandb_artifact_data_dict = self.check_and_upload_dataset(opt) - if isinstance(opt.data, dict): # This means another dataset manager has already processed the dataset info (e.g. ClearML) # and they will have stored the already processed dict in opt.data self.data_dict = opt.data - elif opt.resume: - # resume from artifact - if isinstance(opt.resume, str) and opt.resume.startswith(WANDB_ARTIFACT_PREFIX): - self.data_dict = dict(self.wandb_run.config.data_dict) - else: # local resume - self.data_dict = check_wandb_dataset(opt.data) - else: - self.data_dict = check_wandb_dataset(opt.data) - self.wandb_artifact_data_dict = self.wandb_artifact_data_dict or self.data_dict - - # write data_dict to config. useful for resuming from artifacts. Do this only when not resuming. - self.wandb_run.config.update({'data_dict': self.wandb_artifact_data_dict}, allow_val_change=True) self.setup_training(opt) - if self.job_type == 'Dataset Creation': - self.wandb_run.config.update({"upload_dataset": True}) - self.data_dict = self.check_and_upload_dataset(opt) - - def check_and_upload_dataset(self, opt): - """ - Check if the dataset format is compatible and upload it as W&B artifact - - arguments: - opt (namespace)-- Commandline arguments for current run - - returns: - Updated dataset info dictionary where local dataset paths are replaced by WAND_ARFACT_PREFIX links. 
- """ - assert wandb, 'Install wandb to upload dataset' - config_path = self.log_dataset_artifact(opt.data, opt.single_cls, - 'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem) - with open(config_path, errors='ignore') as f: - wandb_data_dict = yaml.safe_load(f) - return wandb_data_dict - def setup_training(self, opt): """ Setup the necessary processes for training YOLO models: @@ -231,81 +95,18 @@ def setup_training(self, opt): self.log_dict, self.current_epoch = {}, 0 self.bbox_interval = opt.bbox_interval if isinstance(opt.resume, str): - modeldir, _ = self.download_model_artifact(opt) - if modeldir: - self.weights = Path(modeldir) / "last.pt" + model_dir, _ = self.download_model_artifact(opt) + if model_dir: + self.weights = Path(model_dir) / "last.pt" config = self.wandb_run.config opt.weights, opt.save_period, opt.batch_size, opt.bbox_interval, opt.epochs, opt.hyp, opt.imgsz = str( - self.weights), config.save_period, config.batch_size, config.bbox_interval, config.epochs,\ + self.weights), config.save_period, config.batch_size, config.bbox_interval, config.epochs, \ config.hyp, config.imgsz - data_dict = self.data_dict - if self.val_artifact is None: # If --upload_dataset is set, use the existing artifact, don't download - self.train_artifact_path, self.train_artifact = self.download_dataset_artifact( - data_dict.get('train'), opt.artifact_alias) - self.val_artifact_path, self.val_artifact = self.download_dataset_artifact( - data_dict.get('val'), opt.artifact_alias) - if self.train_artifact_path is not None: - train_path = Path(self.train_artifact_path) / 'data/images/' - data_dict['train'] = str(train_path) - if self.val_artifact_path is not None: - val_path = Path(self.val_artifact_path) / 'data/images/' - data_dict['val'] = str(val_path) - - if self.val_artifact is not None: - self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") - columns = ["epoch", "id", "ground truth", "prediction"] - columns.extend(self.data_dict['names']) - self.result_table = wandb.Table(columns) - self.val_table = self.val_artifact.get("val") - if self.val_table_path_map is None: - self.map_val_table_path() if opt.bbox_interval == -1: self.bbox_interval = opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else 1 if opt.evolve or opt.noplots: self.bbox_interval = opt.bbox_interval = opt.epochs + 1 # disable bbox_interval - train_from_artifact = self.train_artifact_path is not None and self.val_artifact_path is not None - # Update the the data_dict to point to local artifacts dir - if train_from_artifact: - self.data_dict = data_dict - - def download_dataset_artifact(self, path, alias): - """ - download the model checkpoint artifact if the path starts with WANDB_ARTIFACT_PREFIX - - arguments: - path -- path of the dataset to be used for training - alias (str)-- alias of the artifact to be download/used for training - - returns: - (str, wandb.Artifact) -- path of the downladed dataset and it's corresponding artifact object if dataset - is found otherwise returns (None, None) - """ - if isinstance(path, str) and path.startswith(WANDB_ARTIFACT_PREFIX): - artifact_path = Path(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ":" + alias) - dataset_artifact = wandb.use_artifact(artifact_path.as_posix().replace("\\", "/")) - assert dataset_artifact is not None, "'Error: W&B dataset artifact doesn\'t exist'" - datadir = dataset_artifact.download() - return datadir, dataset_artifact - return None, None - - def download_model_artifact(self, opt): - """ - 
download the model checkpoint artifact if the resume path starts with WANDB_ARTIFACT_PREFIX - - arguments: - opt (namespace) -- Commandline arguments for this run - """ - if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): - model_artifact = wandb.use_artifact(remove_prefix(opt.resume, WANDB_ARTIFACT_PREFIX) + ":latest") - assert model_artifact is not None, 'Error: W&B model artifact doesn\'t exist' - modeldir = model_artifact.download() - # epochs_trained = model_artifact.metadata.get('epochs_trained') - total_epochs = model_artifact.metadata.get('total_epochs') - is_finished = total_epochs is None - assert not is_finished, 'training is finished, can only resume incomplete runs.' - return modeldir, model_artifact - return None, None def log_model(self, path, opt, epoch, fitness_score, best_model=False): """ @@ -332,190 +133,8 @@ def log_model(self, path, opt, epoch, fitness_score, best_model=False): aliases=['latest', 'last', 'epoch ' + str(self.current_epoch), 'best' if best_model else '']) LOGGER.info(f"Saving model artifact on epoch {epoch + 1}") - def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False): - """ - Log the dataset as W&B artifact and return the new data file with W&B links - - arguments: - data_file (str) -- the .yaml file with information about the dataset like - path, classes etc. - single_class (boolean) -- train multi-class data as single-class - project (str) -- project name. Used to construct the artifact path - overwrite_config (boolean) -- overwrites the data.yaml file if set to true otherwise creates a new - file with _wandb postfix. Eg -> data_wandb.yaml - - returns: - the new .yaml file with artifact links. it can be used to start training directly from artifacts - """ - upload_dataset = self.wandb_run.config.upload_dataset - log_val_only = isinstance(upload_dataset, str) and upload_dataset == 'val' - self.data_dict = check_dataset(data_file) # parse and check - data = dict(self.data_dict) - nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names']) - names = {k: v for k, v in enumerate(names)} # to index dictionary - - # log train set - if not log_val_only: - self.train_artifact = self.create_dataset_table(LoadImagesAndLabels(data['train'], rect=True, batch_size=1), - names, - name='train') if data.get('train') else None - if data.get('train'): - data['train'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'train') - - self.val_artifact = self.create_dataset_table( - LoadImagesAndLabels(data['val'], rect=True, batch_size=1), names, name='val') if data.get('val') else None - if data.get('val'): - data['val'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'val') - - path = Path(data_file) - # create a _wandb.yaml file with artifacts links if both train and test set are logged - if not log_val_only: - path = (path.stem if overwrite_config else path.stem + '_wandb') + '.yaml' # updated data.yaml path - path = ROOT / 'data' / path - data.pop('download', None) - data.pop('path', None) - with open(path, 'w') as f: - yaml.safe_dump(data, f) - LOGGER.info(f"Created dataset config file {path}") - - if self.job_type == 'Training': # builds correct artifact pipeline graph - if not log_val_only: - self.wandb_run.log_artifact( - self.train_artifact) # calling use_artifact downloads the dataset. NOT NEEDED! 
- self.wandb_run.use_artifact(self.val_artifact) - self.val_artifact.wait() - self.val_table = self.val_artifact.get('val') - self.map_val_table_path() - else: - self.wandb_run.log_artifact(self.train_artifact) - self.wandb_run.log_artifact(self.val_artifact) - return path - - def map_val_table_path(self): - """ - Map the validation dataset Table like name of file -> it's id in the W&B Table. - Useful for - referencing artifacts for evaluation. - """ - self.val_table_path_map = {} - LOGGER.info("Mapping dataset") - for i, data in enumerate(tqdm(self.val_table.data)): - self.val_table_path_map[data[3]] = data[0] - - def create_dataset_table(self, dataset: LoadImagesAndLabels, class_to_id: Dict[int, str], name: str = 'dataset'): - """ - Create and return W&B artifact containing W&B Table of the dataset. - - arguments: - dataset -- instance of LoadImagesAndLabels class used to iterate over the data to build Table - class_to_id -- hash map that maps class ids to labels - name -- name of the artifact - - returns: - dataset artifact to be logged or used - """ - # TODO: Explore multiprocessing to slpit this loop parallely| This is essential for speeding up the the logging - artifact = wandb.Artifact(name=name, type="dataset") - img_files = tqdm([dataset.path]) if isinstance(dataset.path, str) and Path(dataset.path).is_dir() else None - img_files = tqdm(dataset.im_files) if not img_files else img_files - for img_file in img_files: - if Path(img_file).is_dir(): - artifact.add_dir(img_file, name='data/images') - labels_path = 'labels'.join(dataset.path.rsplit('images', 1)) - artifact.add_dir(labels_path, name='data/labels') - else: - artifact.add_file(img_file, name='data/images/' + Path(img_file).name) - label_file = Path(img2label_paths([img_file])[0]) - artifact.add_file(str(label_file), name='data/labels/' + - label_file.name) if label_file.exists() else None - table = wandb.Table(columns=["id", "train_image", "Classes", "name"]) - class_set = wandb.Classes([{'id': id, 'name': name} for id, name in class_to_id.items()]) - for si, (img, labels, paths, shapes) in enumerate(tqdm(dataset)): - box_data, img_classes = [], {} - for cls, *xywh in labels[:, 1:].tolist(): - cls = int(cls) - box_data.append({ - "position": { - "middle": [xywh[0], xywh[1]], - "width": xywh[2], - "height": xywh[3]}, - "class_id": cls, - "box_caption": "%s" % (class_to_id[cls])}) - img_classes[cls] = class_to_id[cls] - boxes = {"ground_truth": {"box_data": box_data, "class_labels": class_to_id}} # inference-space - table.add_data(si, wandb.Image(paths, classes=class_set, boxes=boxes), list(img_classes.values()), - Path(paths).name) - artifact.add(table, name) - return artifact - - def log_training_progress(self, predn, path, names): - """ - Build evaluation Table. Uses reference from validation dataset table. 
- - arguments: - predn (list): list of predictions in the native space in the format - [xmin, ymin, xmax, ymax, confidence, class] - path (str): local path of the current evaluation image - names (dict(int, str)): hash map that maps class ids to labels - """ - class_set = wandb.Classes([{'id': id, 'name': name} for id, name in names.items()]) - box_data = [] - avg_conf_per_class = [0] * len(self.data_dict['names']) - pred_class_count = {} - for *xyxy, conf, cls in predn.tolist(): - if conf >= 0.25: - cls = int(cls) - box_data.append({ - "position": { - "minX": xyxy[0], - "minY": xyxy[1], - "maxX": xyxy[2], - "maxY": xyxy[3]}, - "class_id": cls, - "box_caption": f"{names[cls]} {conf:.3f}", - "scores": { - "class_score": conf}, - "domain": "pixel"}) - avg_conf_per_class[cls] += conf - - if cls in pred_class_count: - pred_class_count[cls] += 1 - else: - pred_class_count[cls] = 1 - - for pred_class in pred_class_count.keys(): - avg_conf_per_class[pred_class] = avg_conf_per_class[pred_class] / pred_class_count[pred_class] - - boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space - id = self.val_table_path_map[Path(path).name] - self.result_table.add_data(self.current_epoch, id, self.val_table.data[id][1], - wandb.Image(self.val_table.data[id][1], boxes=boxes, classes=class_set), - *avg_conf_per_class) - def val_one_image(self, pred, predn, path, names, im): - """ - Log validation data for one image. updates the result Table if validation dataset is uploaded and log bbox media panel - - arguments: - pred (list): list of scaled predictions in the format - [xmin, ymin, xmax, ymax, confidence, class] - predn (list): list of predictions in the native space - [xmin, ymin, xmax, ymax, confidence, class] - path (str): local path of the current evaluation image - """ - if self.val_table and self.result_table: # Log Table if Val dataset is uploaded as artifact - self.log_training_progress(predn, path, names) - - if len(self.bbox_media_panel_images) < self.max_imgs_to_log and self.current_epoch > 0: - if self.current_epoch % self.bbox_interval == 0: - box_data = [{ - "position": { - "minX": xyxy[0], - "minY": xyxy[1], - "maxX": xyxy[2], - "maxY": xyxy[3]}, - "class_id": int(cls), - "box_caption": f"{names[int(cls)]} {conf:.3f}", - "scores": { - "class_score": conf}, - "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()] - boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space - self.bbox_media_panel_images.append(wandb.Image(im, boxes=boxes, caption=path.name)) + pass def log(self, log_dict): """ @@ -528,7 +147,7 @@ def log(self, log_dict): for key, value in log_dict.items(): self.log_dict[key] = value - def end_epoch(self, best_result=False): + def end_epoch(self): """ commit the log_dict, model artifacts and Tables to W&B and flush the log_dict. 
@@ -537,8 +156,6 @@ def end_epoch(self, best_result=False): """ if self.wandb_run: with all_logging_disabled(): - if self.bbox_media_panel_images: - self.log_dict["BoundingBoxDebugger"] = self.bbox_media_panel_images try: wandb.log(self.log_dict) except BaseException as e: @@ -547,21 +164,7 @@ def end_epoch(self, best_result=False): ) self.wandb_run.finish() self.wandb_run = None - self.log_dict = {} - self.bbox_media_panel_images = [] - if self.result_artifact: - self.result_artifact.add(self.result_table, 'result') - wandb.log_artifact(self.result_artifact, - aliases=[ - 'latest', 'last', 'epoch ' + str(self.current_epoch), - ('best' if best_result else '')]) - - wandb.log({"evaluation": self.result_table}) - columns = ["epoch", "id", "ground truth", "prediction"] - columns.extend(self.data_dict['names']) - self.result_table = wandb.Table(columns) - self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") def finish_run(self): """ @@ -572,6 +175,7 @@ def finish_run(self): with all_logging_disabled(): wandb.log(self.log_dict) wandb.run.finish() + LOGGER.warning(DEPRECATION_WARNING) @contextmanager From b1a3126e5d9ffaddd2ae11362a0087c5541f08f1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 6 Feb 2023 22:12:26 +0400 Subject: [PATCH 1654/1976] Bump docker/build-push-action from 3 to 4 (#10911) * Bump docker/build-push-action from 3 to 4 Bumps [docker/build-push-action](https://github.com/docker/build-push-action) from 3 to 4. - [Release notes](https://github.com/docker/build-push-action/releases) - [Commits](https://github.com/docker/build-push-action/compare/v3...v4) --- updated-dependencies: - dependency-name: docker/build-push-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] * Docker (#10913) * Dockerfile standardizations and improvements * README fixes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/workflows/docker.yml | 6 +++--- README.md | 7 ------- utils/docker/Dockerfile | 2 +- utils/docker/Dockerfile-arm64 | 8 +++----- utils/docker/Dockerfile-cpu | 6 +++--- 5 files changed, 10 insertions(+), 19 deletions(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 1d0bd30b22cb..4f7fff00677c 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -29,7 +29,7 @@ jobs: password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Build and push arm64 image - uses: docker/build-push-action@v3 + uses: docker/build-push-action@v4 continue-on-error: true with: context: . @@ -39,7 +39,7 @@ jobs: tags: ultralytics/yolov5:latest-arm64 - name: Build and push CPU image - uses: docker/build-push-action@v3 + uses: docker/build-push-action@v4 continue-on-error: true with: context: . @@ -48,7 +48,7 @@ jobs: tags: ultralytics/yolov5:latest-cpu - name: Build and push GPU image - uses: docker/build-push-action@v3 + uses: docker/build-push-action@v4 continue-on-error: true with: context: . 
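For reference alongside the truncated workflow hunks above, a representative complete step using the bumped action might look like the sketch below. The tag and Dockerfile path here are illustrative, and only documented `docker/build-push-action` inputs (`context`, `file`, `push`, `tags`) are used:

```yaml
- name: Build and push GPU image
  uses: docker/build-push-action@v4
  continue-on-error: true
  with:
    context: .
    file: utils/docker/Dockerfile
    push: true
    tags: ultralytics/yolov5:latest
```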
diff --git a/README.md b/README.md index e836abf6d551..33468d0635ad 100644 --- a/README.md +++ b/README.md @@ -446,13 +446,6 @@ Get started in seconds with our verified environments. Click each icon below for
-##
App
- -Run YOLOv5 models on your iOS or Android device by downloading the [Ultralytics App](https://ultralytics.com/app_install)! - - -Ultralytics mobile app - ##
Contribute
We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible. Please see our [Contributing Guide](CONTRIBUTING.md) to get started, and fill out the [YOLOv5 Survey](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experiences. Thank you to all our contributors! diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index e18b2ac69678..b9448101b94c 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -28,7 +28,7 @@ RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app COPY requirements.txt . RUN python3 -m pip install --upgrade pip wheel RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook \ - coremltools onnx onnx-simplifier onnxruntime openvino-dev>=2022.3 + coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2022.3' # tensorflow tensorflowjs \ # Create working directory diff --git a/utils/docker/Dockerfile-arm64 b/utils/docker/Dockerfile-arm64 index eed1410793a1..aea764d3b86b 100644 --- a/utils/docker/Dockerfile-arm64 +++ b/utils/docker/Dockerfile-arm64 @@ -18,11 +18,9 @@ RUN apt install --no-install-recommends -y python3-pip git zip curl htop gcc lib # Install pip packages COPY requirements.txt . RUN python3 -m pip install --upgrade pip wheel -RUN pip install --no-cache -r requirements.txt ultralytics gsutil notebook \ - tensorflow-aarch64 - # tensorflowjs \ - # onnx onnx-simplifier onnxruntime \ - # coremltools openvino-dev \ +RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook \ + coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2022.3' + # tensorflow-aarch64 tensorflowjs \ # Create working directory RUN mkdir -p /usr/src/app diff --git a/utils/docker/Dockerfile-cpu b/utils/docker/Dockerfile-cpu index 558f81f00584..356c06df727d 100644 --- a/utils/docker/Dockerfile-cpu +++ b/utils/docker/Dockerfile-cpu @@ -18,9 +18,9 @@ RUN apt install --no-install-recommends -y python3-pip git zip curl htop libgl1- # Install pip packages COPY requirements.txt . 
RUN python3 -m pip install --upgrade pip wheel -RUN pip install --no-cache -r requirements.txt ultralytics albumentations gsutil notebook \ - coremltools onnx onnx-simplifier onnxruntime tensorflow-cpu tensorflowjs \ - # openvino-dev \ +RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook \ + coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2022.3' \ + # tensorflow tensorflowjs \ --extra-index-url https://download.pytorch.org/whl/cpu # Create working directory From 8b5a7d417929ac51ce27a1fb1264b01dab72d612 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 6 Feb 2023 22:41:03 +0400 Subject: [PATCH 1655/1976] Update Dockerfile (#10916) Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index b9448101b94c..c68b8dcdfd62 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -17,7 +17,7 @@ RUN apt install --no-install-recommends -y gcc git zip curl htop libgl1-mesa-glx # RUN alias python=python3 # Create working directory -RUN mkdir -p /usr/src/app +RUN rm -rf /usr/src/app && mkdir -p /usr/src/app WORKDIR /usr/src/app # Copy contents From 90f23519c854b96cf108a6179d214c54b3b5bda3 Mon Sep 17 00:00:00 2001 From: Izam Mohammed <106471909+izam-mohammed@users.noreply.github.com> Date: Tue, 7 Feb 2023 00:11:23 +0530 Subject: [PATCH 1656/1976] Improved the language in CONTRIBUTING.md (#10906) Signed-off-by: Izam Mohammed <106471909+izam-mohammed@users.noreply.github.com> Co-authored-by: Glenn Jocher --- CONTRIBUTING.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 7498f8995d40..71857faddb89 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -23,13 +23,13 @@ Select `requirements.txt` to update by clicking on it in GitHub. ### 2. Click 'Edit this file' -Button is in top-right corner. +The button is in the top-right corner.

PR_step2

### 3. Make Changes -Change `matplotlib` version from `3.2.2` to `3.3`. +Change the `matplotlib` version from `3.2.2` to `3.3`.

PR_step3

@@ -62,7 +62,7 @@ To allow your work to be integrated as seamlessly as possible, we advise you to: If you spot a problem with YOLOv5 please submit a Bug Report! For us to start investigating a possible problem we need to be able to reproduce it ourselves first. We've created a few -short guidelines below to help users provide what we need in order to get started. +short guidelines below to help users provide what we need to get started. When asking a question, people will be better able to provide help if you provide **code** that they can easily understand and use to **reproduce** the problem. This is referred to by community members as creating @@ -76,14 +76,14 @@ the problem should be: In addition to the above requirements, for [Ultralytics](https://ultralytics.com/) to provide assistance your code should be: -- ✅ **Current** – Verify that your code is up-to-date with current +- ✅ **Current** – Verify that your code is up-to-date with the current GitHub [master](https://github.com/ultralytics/yolov5/tree/master), and if necessary `git pull` or `git clone` a new copy to ensure your problem has not already been resolved by previous commits. - ✅ **Unmodified** – Your problem must be reproducible without any modifications to the codebase in this repository. [Ultralytics](https://ultralytics.com/) does not provide support for custom code ⚠️. If you believe your problem meets all of the above criteria, please close this issue and raise a new one using the 🐛 -**Bug Report** [template](https://github.com/ultralytics/yolov5/issues/new/choose) and providing +**Bug Report** [template](https://github.com/ultralytics/yolov5/issues/new/choose) and provide a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example) to help us better understand and diagnose your problem. From 9ba18266b2e0ae085d975a987eb68d98a87155ce Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 7 Feb 2023 01:58:47 +0400 Subject: [PATCH 1657/1976] Update Dockerfile (#10917) * Update Dockerfile Signed-off-by: Glenn Jocher * Update Dockerfile-arm64 Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile | 8 -------- utils/docker/Dockerfile-arm64 | 2 +- 2 files changed, 1 insertion(+), 9 deletions(-) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index c68b8dcdfd62..0349c50526e0 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -31,14 +31,6 @@ RUN pip install --no-cache -r requirements.txt albumentations comet gsutil noteb coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2022.3' # tensorflow tensorflowjs \ -# Create working directory -RUN mkdir -p /usr/src/app -WORKDIR /usr/src/app - -# Copy contents -# COPY . /usr/src/app (issues as not a .git directory) -RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app - # Set environment variables ENV OMP_NUM_THREADS=1 diff --git a/utils/docker/Dockerfile-arm64 b/utils/docker/Dockerfile-arm64 index aea764d3b86b..2b08f2baaf76 100644 --- a/utils/docker/Dockerfile-arm64 +++ b/utils/docker/Dockerfile-arm64 @@ -19,7 +19,7 @@ RUN apt install --no-install-recommends -y python3-pip git zip curl htop gcc lib COPY requirements.txt . 
RUN python3 -m pip install --upgrade pip wheel RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook \ - coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2022.3' + coremltools onnx onnxruntime 'openvino-dev>=2022.3' # tensorflow-aarch64 tensorflowjs \ # Create working directory From c3c8e71d7a58c8d07db5e015b5311a5fffda7f00 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 7 Feb 2023 02:15:54 +0400 Subject: [PATCH 1658/1976] Update Dockerfile-arm64 (#10918) Docker fixes --- utils/docker/Dockerfile-arm64 | 4 ++-- utils/docker/Dockerfile-cpu | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/utils/docker/Dockerfile-arm64 b/utils/docker/Dockerfile-arm64 index 2b08f2baaf76..0279dfb8c997 100644 --- a/utils/docker/Dockerfile-arm64 +++ b/utils/docker/Dockerfile-arm64 @@ -18,8 +18,8 @@ RUN apt install --no-install-recommends -y python3-pip git zip curl htop gcc lib # Install pip packages COPY requirements.txt . RUN python3 -m pip install --upgrade pip wheel -RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook \ - coremltools onnx onnxruntime 'openvino-dev>=2022.3' +RUN pip install --no-cache -r requirements.txt albumentations gsutil notebook \ + coremltools onnx onnxruntime # tensorflow-aarch64 tensorflowjs \ # Create working directory diff --git a/utils/docker/Dockerfile-cpu b/utils/docker/Dockerfile-cpu index 356c06df727d..19b2962d4cab 100644 --- a/utils/docker/Dockerfile-cpu +++ b/utils/docker/Dockerfile-cpu @@ -18,7 +18,7 @@ RUN apt install --no-install-recommends -y python3-pip git zip curl htop libgl1- # Install pip packages COPY requirements.txt . RUN python3 -m pip install --upgrade pip wheel -RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook \ +RUN pip install --no-cache -r requirements.txt albumentations gsutil notebook \ coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2022.3' \ # tensorflow tensorflowjs \ --extra-index-url https://download.pytorch.org/whl/cpu From ea8508a638affa3cb150542ed733fc3aa70be3c2 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 8 Feb 2023 11:27:08 +0400 Subject: [PATCH 1659/1976] [pre-commit.ci] pre-commit suggestions (#10919) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - https://github.com/pre-commit/mirrors-yapf → https://github.com/google/yapf - [github.com/PyCQA/flake8: 5.0.4 → 6.0.0](https://github.com/PyCQA/flake8/compare/5.0.4...6.0.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 83425ad6cf78..b188048e63a6 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -37,7 +37,7 @@ repos: # - id: isort # name: Sort imports - - repo: https://github.com/pre-commit/mirrors-yapf + - repo: https://github.com/google/yapf rev: v0.32.0 hooks: - id: yapf @@ -54,7 +54,7 @@ repos: # exclude: "README.md|README.zh-CN.md|CONTRIBUTING.md" - repo: https://github.com/PyCQA/flake8 - rev: 5.0.4 + rev: 6.0.0 hooks: - id: flake8 name: PEP8 From cec1b9bc923cdd235baa3b9b5c80e3700bc9b1dc Mon Sep 17 00:00:00 2001 From: Mahmoud Hegab Date: Tue, 7 Feb 2023 23:32:29 -0800 Subject: [PATCH 1660/1976] add the dropout_p parameter (#10805) * add the dropout_p parameter Signed-off-by: Mahmoud Hegab * [pre-commit.ci] auto 
fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Signed-off-by: Mahmoud Hegab Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- models/common.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/models/common.py b/models/common.py index 8b5ec1c786d8..71340688d2e0 100644 --- a/models/common.py +++ b/models/common.py @@ -846,12 +846,19 @@ def forward(self, x): class Classify(nn.Module): # YOLOv5 classification head, i.e. x(b,c1,20,20) to x(b,c2) - def __init__(self, c1, c2, k=1, s=1, p=None, g=1): # ch_in, ch_out, kernel, stride, padding, groups + def __init__(self, + c1, + c2, + k=1, + s=1, + p=None, + g=1, + dropout_p=0.0): # ch_in, ch_out, kernel, stride, padding, groups, dropout probability super().__init__() c_ = 1280 # efficientnet_b0 size self.conv = Conv(c1, c_, k, s, autopad(k, p), g) self.pool = nn.AdaptiveAvgPool2d(1) # to x(b,c_,1,1) - self.drop = nn.Dropout(p=0.0, inplace=True) + self.drop = nn.Dropout(p=dropout_p, inplace=True) self.linear = nn.Linear(c_, c2) # to x(b,c2) def forward(self, x): From a3c0fd05216a1fdb9f1ba0aff2e5421819b871ed Mon Sep 17 00:00:00 2001 From: Snyk bot Date: Thu, 9 Feb 2023 07:45:58 +0000 Subject: [PATCH 1661/1976] [Snyk] Fix for 2 vulnerabilities (#10931) * fix: requirements.txt to reduce vulnerabilities The following vulnerabilities are fixed by pinning transitive dependencies: - https://snyk.io/vuln/SNYK-PYTHON-SETUPTOOLS-3180412 - https://snyk.io/vuln/SNYK-PYTHON-WHEEL-3180413 * Update requirements.txt Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index c0e4a91d7dd1..ce205f43c5dd 100644 --- a/requirements.txt +++ b/requirements.txt @@ -39,6 +39,7 @@ seaborn>=0.11.0 # openvino-dev # OpenVINO export # Deploy ---------------------------------------------------------------------- +wheel>=0.38.0 # Snyk vulnerability fix # tritonclient[all]~=2.24.0 # Extras ---------------------------------------------------------------------- From 976fa99e5c1d7f5b49f8e7ae458ff3bf93459135 Mon Sep 17 00:00:00 2001 From: Snyk bot Date: Thu, 9 Feb 2023 07:52:22 +0000 Subject: [PATCH 1662/1976] [Snyk] Security upgrade gunicorn from 19.9.0 to 19.10.0 (#10933) fix: utils/google_app_engine/additional_requirements.txt to reduce vulnerabilities The following vulnerabilities are fixed by pinning transitive dependencies: - https://snyk.io/vuln/SNYK-PYTHON-GUNICORN-541164 --- utils/google_app_engine/additional_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/google_app_engine/additional_requirements.txt b/utils/google_app_engine/additional_requirements.txt index 42d7ffc0eed8..b6b496feaa7b 100644 --- a/utils/google_app_engine/additional_requirements.txt +++ b/utils/google_app_engine/additional_requirements.txt @@ -1,4 +1,4 @@ # add these requirements in your app on top of the existing ones pip==21.1 Flask==1.0.2 -gunicorn==19.9.0 +gunicorn==19.10.0 From a270b4f1252b65bf60f3996cf9ec9ac01ce3a466 Mon Sep 17 00:00:00 2001 From: Snyk bot Date: Thu, 9 Feb 2023 07:54:04 +0000 Subject: [PATCH 1663/1976] [Snyk] Security upgrade setuptools from 39.0.1 to 65.5.1 (#10934) * fix: requirements.txt to reduce vulnerabilities The following vulnerabilities are fixed by pinning transitive dependencies: - https://snyk.io/vuln/SNYK-PYTHON-SETUPTOOLS-3180412 * Update 
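The `dropout_p` change in PATCH 1660 above makes the classification head's dropout probability configurable instead of hard-coded to 0.0. A minimal runnable sketch of the updated head's structure — simplified from the diff; `ClassifySketch`, the plain 1×1 conv, and the example shapes are illustrative assumptions, not the exact `models/common.py` code:

```python
import torch
import torch.nn as nn


class ClassifySketch(nn.Module):
    # Simplified stand-in for the updated YOLOv5 Classify head (not the exact models/common.py code)
    def __init__(self, c1, c2, dropout_p=0.0):  # ch_in, ch_out, dropout probability
        super().__init__()
        c_ = 1280  # intermediate width, as in the real head
        self.conv = nn.Conv2d(c1, c_, kernel_size=1)  # stand-in for the Conv block in the diff
        self.pool = nn.AdaptiveAvgPool2d(1)  # x(b,c_,h,w) -> x(b,c_,1,1)
        self.drop = nn.Dropout(p=dropout_p)  # previously hard-coded as nn.Dropout(p=0.0)
        self.linear = nn.Linear(c_, c2)  # x(b,c_) -> x(b,c2)

    def forward(self, x):
        return self.linear(self.drop(self.pool(self.conv(x)).flatten(1)))


head = ClassifySketch(c1=512, c2=1000, dropout_p=0.25)  # dropout is now a per-model knob
print(head(torch.randn(2, 512, 20, 20)).shape)  # torch.Size([2, 1000])
```

The default of `dropout_p=0.0` preserves the previous behavior, so existing checkpoints are unaffected.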
requirements.txt Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index ce205f43c5dd..eee15ddf93c4 100644 --- a/requirements.txt +++ b/requirements.txt @@ -39,6 +39,7 @@ seaborn>=0.11.0 # openvino-dev # OpenVINO export # Deploy ---------------------------------------------------------------------- +setuptools>=65.5.1 # Snyk vulnerability fix wheel>=0.38.0 # Snyk vulnerability fix # tritonclient[all]~=2.24.0 From e326252ee4af03b4514f20262b719bf0a9468161 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 9 Feb 2023 16:57:18 +0400 Subject: [PATCH 1664/1976] Security improvements (#10942) * Security improvements * Security improvements * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/dataloaders.py | 2 +- utils/general.py | 11 ++++++----- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index cbb3114e94d8..02c2a79f5747 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -52,7 +52,7 @@ def get_hash(paths): # Returns a single hash value of a list of paths (files or dirs) size = sum(os.path.getsize(p) for p in paths if os.path.exists(p)) # sizes - h = hashlib.md5(str(size).encode()) # hash sizes + h = hashlib.sha256(str(size).encode()) # hash sizes h.update(''.join(paths).encode()) # hash paths return h.hexdigest() # return hash diff --git a/utils/general.py b/utils/general.py index 0bbcb6e7334c..63cc29bfb35d 100644 --- a/utils/general.py +++ b/utils/general.py @@ -14,6 +14,7 @@ import random import re import signal +import subprocess import sys import time import urllib @@ -551,7 +552,7 @@ def check_dataset(data, autodownload=True): r = None # success elif s.startswith('bash '): # bash script LOGGER.info(f'Running {s} ...') - r = os.system(s) + r = subprocess.run(s, shell=True) else: # python script r = exec(s, {'yaml': data}) # return None dt = f'({round(time.time() - t, 1)}s)' @@ -648,9 +649,9 @@ def download_one(url, dir): if is_zipfile(f): unzip_file(f, dir) # unzip elif is_tarfile(f): - os.system(f'tar xf {f} --directory {f.parent}') # unzip + subprocess.run(f'tar xf {f} --directory {f.parent}', shell=True) # unzip elif f.suffix == '.gz': - os.system(f'tar xfz {f} --directory {f.parent}') # unzip + subprocess.run(f'tar xfz {f} --directory {f.parent}', shell=True) # unzip if delete: f.unlink() # remove zip @@ -1022,7 +1023,7 @@ def print_mutation(keys, results, hyp, save_dir, bucket, prefix=colorstr('evolve if bucket: url = f'gs://{bucket}/evolve.csv' if gsutil_getsize(url) > (evolve_csv.stat().st_size if evolve_csv.exists() else 0): - os.system(f'gsutil cp {url} {save_dir}') # download evolve.csv if larger than local + subprocess.run(f'gsutil cp {url} {save_dir}', shell=True) # download evolve.csv if larger than local # Log to evolve.csv s = '' if evolve_csv.exists() else (('%20s,' * n % keys).rstrip(',') + '\n') # add header @@ -1046,7 +1047,7 @@ def print_mutation(keys, results, hyp, save_dir, bucket, prefix=colorstr('evolve for x in vals) + '\n\n') if bucket: - os.system(f'gsutil cp {evolve_csv} {evolve_yaml} gs://{bucket}') # upload + subprocess.run(f'gsutil cp {evolve_csv} {evolve_yaml} gs://{bucket}', shell=True) # upload def apply_classifier(x, model, img, im0): From 
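As context for the `get_hash()` change in PATCH 1664 above (MD5 → SHA-256): the function fingerprints a dataset cache by hashing the combined file sizes plus the path strings. A small sketch of the same idea — the function name and example paths are illustrative, not the exact `utils/dataloaders.py` code:

```python
import hashlib
import os


def get_hash_sketch(paths):
    # Hash total file size plus the concatenated path strings with SHA-256 (previously MD5)
    size = sum(os.path.getsize(p) for p in paths if os.path.exists(p))
    h = hashlib.sha256(str(size).encode())  # hash sizes
    h.update(''.join(paths).encode())  # hash paths
    return h.hexdigest()


print(get_hash_sketch(['requirements.txt', 'README.md']))  # 64-character hex digest
```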
61407c93cc0cbabcfbd6de51a3c8293b99219e2e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 9 Feb 2023 17:18:27 +0400 Subject: [PATCH 1665/1976] Security improvements for subprocess.run() (#10943) * Security improvements * Security improvements * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/general.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/utils/general.py b/utils/general.py index 63cc29bfb35d..4d5e94bc98f9 100644 --- a/utils/general.py +++ b/utils/general.py @@ -649,9 +649,9 @@ def download_one(url, dir): if is_zipfile(f): unzip_file(f, dir) # unzip elif is_tarfile(f): - subprocess.run(f'tar xf {f} --directory {f.parent}', shell=True) # unzip + subprocess.run(['tar', 'xf', f, '--directory', f.parent], check=True) # unzip elif f.suffix == '.gz': - subprocess.run(f'tar xfz {f} --directory {f.parent}', shell=True) # unzip + subprocess.run(['tar', 'xfz', f, '--directory', f.parent], check=True) # unzip if delete: f.unlink() # remove zip @@ -1023,7 +1023,7 @@ def print_mutation(keys, results, hyp, save_dir, bucket, prefix=colorstr('evolve if bucket: url = f'gs://{bucket}/evolve.csv' if gsutil_getsize(url) > (evolve_csv.stat().st_size if evolve_csv.exists() else 0): - subprocess.run(f'gsutil cp {url} {save_dir}', shell=True) # download evolve.csv if larger than local + subprocess.run(['gsutil', 'cp', f'{url}', f'{save_dir}']) # download evolve.csv if larger than local # Log to evolve.csv s = '' if evolve_csv.exists() else (('%20s,' * n % keys).rstrip(',') + '\n') # add header @@ -1047,7 +1047,7 @@ def print_mutation(keys, results, hyp, save_dir, bucket, prefix=colorstr('evolve for x in vals) + '\n\n') if bucket: - subprocess.run(f'gsutil cp {evolve_csv} {evolve_yaml} gs://{bucket}', shell=True) # upload + subprocess.run(['gsutil', 'cp', f'{evolve_csv}', f'{evolve_yaml}', f'gs://{bucket}']) # upload def apply_classifier(x, model, img, im0): From 238da321cb365533a99d36a1e768d1d4259b6766 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 9 Feb 2023 17:58:24 +0400 Subject: [PATCH 1666/1976] Security3 (#10944) * Security improvements * Security improvements * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- segment/train.py | 4 +++- segment/val.py | 3 ++- train.py | 4 +++- utils/downloads.py | 3 ++- utils/general.py | 3 +-- val.py | 3 ++- 6 files changed, 13 insertions(+), 7 deletions(-) diff --git a/segment/train.py b/segment/train.py index 3f32d2100a75..883c8b0a2b62 100644 --- a/segment/train.py +++ b/segment/train.py @@ -19,6 +19,7 @@ import math import os import random +import subprocess import sys import time from copy import deepcopy @@ -597,7 +598,8 @@ def main(opt, callbacks=Callbacks()): # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv' if opt.bucket: - os.system(f'gsutil cp gs://{opt.bucket}/evolve.csv {evolve_csv}') # download evolve.csv if exists + subprocess.run( + f'gsutil cp gs://{opt.bucket}/evolve.csv {evolve_csv}'.split()) # download evolve.csv if exists for _ in range(opt.evolve): # generations to evolve if evolve_csv.exists(): # if evolve.csv exists: select best hyps and mutate diff 
--git a/segment/val.py b/segment/val.py index 248d2bee9be1..8168b5407c1d 100644 --- a/segment/val.py +++ b/segment/val.py @@ -23,6 +23,7 @@ import argparse import json import os +import subprocess import sys from multiprocessing.pool import ThreadPool from pathlib import Path @@ -461,7 +462,7 @@ def main(opt): r, _, t = run(**vars(opt), plots=False) y.append(r + t) # results and times np.savetxt(f, y, fmt='%10.4g') # save - os.system('zip -r study.zip study_*.txt') + subprocess.run('zip -r study.zip study_*.txt'.split()) plot_val_study(x=x) # plot else: raise NotImplementedError(f'--task {opt.task} not in ("train", "val", "test", "speed", "study")') diff --git a/train.py b/train.py index 5d75f22b6335..db65f2c74c6c 100644 --- a/train.py +++ b/train.py @@ -19,6 +19,7 @@ import math import os import random +import subprocess import sys import time from copy import deepcopy @@ -571,7 +572,8 @@ def main(opt, callbacks=Callbacks()): # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv' if opt.bucket: - os.system(f'gsutil cp gs://{opt.bucket}/evolve.csv {evolve_csv}') # download evolve.csv if exists + subprocess.run( + f'gsutil cp gs://{opt.bucket}/evolve.csv {evolve_csv}'.split()) # download evolve.csv if exists for _ in range(opt.evolve): # generations to evolve if evolve_csv.exists(): # if evolve.csv exists: select best hyps and mutate diff --git a/utils/downloads.py b/utils/downloads.py index 72ea87340eb9..a3ff9274066e 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -50,7 +50,8 @@ def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''): if file.exists(): file.unlink() # remove partial downloads LOGGER.info(f'ERROR: {e}\nRe-attempting {url2 or url} to {file}...') - os.system(f"curl -# -L '{url2 or url}' -o '{file}' --retry 3 -C -") # curl download, retry and resume on fail + subprocess.run( + f"curl -# -L '{url2 or url}' -o '{file}' --retry 3 -C -".split()) # curl download, retry and resume on fail finally: if not file.exists() or file.stat().st_size < min_bytes: # check if file.exists(): diff --git a/utils/general.py b/utils/general.py index 4d5e94bc98f9..4e5c7147fd40 100644 --- a/utils/general.py +++ b/utils/general.py @@ -631,8 +631,7 @@ def download_one(url, dir): for i in range(retry + 1): if curl: s = 'sS' if threads > 1 else '' # silent - r = os.system( - f'curl -# -{s}L "{url}" -o "{f}" --retry 9 -C -') # curl download with retry, continue + r = subprocess.run(f'curl -# -{s}L "{url}" -o "{f}" --retry 9 -C -'.split()) success = r == 0 else: torch.hub.download_url_to_file(url, f, progress=threads == 1) # torch download diff --git a/val.py b/val.py index 599aa1afdd4a..62fa2c980988 100644 --- a/val.py +++ b/val.py @@ -22,6 +22,7 @@ import argparse import json import os +import subprocess import sys from pathlib import Path @@ -397,7 +398,7 @@ def main(opt): r, _, t = run(**vars(opt), plots=False) y.append(r + t) # results and times np.savetxt(f, y, fmt='%10.4g') # save - os.system('zip -r study.zip study_*.txt') + subprocess.run('zip -r study.zip study_*.txt'.split()) plot_val_study(x=x) # plot else: raise NotImplementedError(f'--task {opt.task} not in ("train", "val", "test", "speed", "study")') From 35d6d9f408e5f1e02e5edc8f4bd6976bcf3bff8b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 9 Feb 2023 20:32:58 +0400 Subject: [PATCH 1667/1976] Update Dockerfile-arm64 (#10945) * Update Dockerfile-arm64 Signed-off-by: Glenn Jocher * Update Dockerfile-cpu 
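The recurring pattern in PATCHES 1664–1666 above replaces `os.system(f'...')` with `subprocess.run()`, preferring list-form arguments so the shell never parses interpolated values. A hedged illustration of why the list form is safer — the hostile filename is a made-up example, not something from the repository:

```python
import subprocess

name = 'results.txt; echo pwned'  # attacker-controlled value, e.g. a bucket or file name

# List form: each element is passed to the program as a literal argument,
# so the ';' is just part of a (missing) filename and nothing extra runs.
r = subprocess.run(['ls', '-l', name], capture_output=True, text=True)
print(r.returncode)  # non-zero exit, reported via the return code

# Shell form (the pattern being removed): the shell would parse ';' and
# execute the second command.
# subprocess.run(f'ls -l {name}', shell=True)  # unsafe with untrusted input

# check=True, as used for the tar calls in PATCH 1665, raises CalledProcessError
# on a non-zero exit instead of failing silently:
# subprocess.run(['tar', 'xf', 'archive.tar', '--directory', 'out'], check=True)
```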
Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile-arm64 | 2 +- utils/docker/Dockerfile-cpu | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/docker/Dockerfile-arm64 b/utils/docker/Dockerfile-arm64 index 0279dfb8c997..b2e381f089d2 100644 --- a/utils/docker/Dockerfile-arm64 +++ b/utils/docker/Dockerfile-arm64 @@ -3,7 +3,7 @@ # Image is aarch64-compatible for Apple M1 and other ARM architectures i.e. Jetson Nano and Raspberry Pi # Start FROM Ubuntu image https://hub.docker.com/_/ubuntu -FROM arm64v8/ubuntu:20.04 +FROM arm64v8/ubuntu:latest # Downloads to user config dir ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ diff --git a/utils/docker/Dockerfile-cpu b/utils/docker/Dockerfile-cpu index 19b2962d4cab..dcc71924564b 100644 --- a/utils/docker/Dockerfile-cpu +++ b/utils/docker/Dockerfile-cpu @@ -3,7 +3,7 @@ # Image is CPU-optimized for ONNX, OpenVINO and PyTorch YOLOv5 deployments # Start FROM Ubuntu image https://hub.docker.com/_/ubuntu -FROM ubuntu:20.04 +FROM ubuntu:latest # Downloads to user config dir ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ From a895e98172a595252d1f8b5064de344b7ecafbec Mon Sep 17 00:00:00 2001 From: Snyk bot Date: Thu, 9 Feb 2023 20:21:38 +0000 Subject: [PATCH 1668/1976] [Snyk] Security upgrade ubuntu from latest to rolling (#10946) * fix: utils/docker/Dockerfile-cpu to reduce vulnerabilities The following vulnerabilities are fixed with an upgrade: - https://snyk.io/vuln/SNYK-UBUNTU2204-OPENSSL-3314744 - https://snyk.io/vuln/SNYK-UBUNTU2204-OPENSSL-3314768 - https://snyk.io/vuln/SNYK-UBUNTU2204-OPENSSL-3314792 - https://snyk.io/vuln/SNYK-UBUNTU2204-OPENSSL-3314816 - https://snyk.io/vuln/SNYK-UBUNTU2204-OPENSSL-3314840 * Update Dockerfile-arm64 Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- utils/docker/Dockerfile-arm64 | 2 +- utils/docker/Dockerfile-cpu | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/docker/Dockerfile-arm64 b/utils/docker/Dockerfile-arm64 index b2e381f089d2..7023c6a4bb1f 100644 --- a/utils/docker/Dockerfile-arm64 +++ b/utils/docker/Dockerfile-arm64 @@ -3,7 +3,7 @@ # Image is aarch64-compatible for Apple M1 and other ARM architectures i.e. 
Jetson Nano and Raspberry Pi # Start FROM Ubuntu image https://hub.docker.com/_/ubuntu -FROM arm64v8/ubuntu:latest +FROM arm64v8/ubuntu:rolling # Downloads to user config dir ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ diff --git a/utils/docker/Dockerfile-cpu b/utils/docker/Dockerfile-cpu index dcc71924564b..06bad9a3790d 100644 --- a/utils/docker/Dockerfile-cpu +++ b/utils/docker/Dockerfile-cpu @@ -3,7 +3,7 @@ # Image is CPU-optimized for ONNX, OpenVINO and PyTorch YOLOv5 deployments # Start FROM Ubuntu image https://hub.docker.com/_/ubuntu -FROM ubuntu:latest +FROM ubuntu:rolling # Downloads to user config dir ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ From fd38767ea84453107ec3a19893fb2dd4e5034216 Mon Sep 17 00:00:00 2001 From: Ayush Chaurasia Date: Fri, 10 Feb 2023 20:00:40 +0530 Subject: [PATCH 1669/1976] Update README and greetings with YOLOv8 info (#10735) * update * update * update * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Ayush Chaurasia * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update greetings.yml Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update greetings.yml Signed-off-by: Glenn Jocher * Update greetings.yml Signed-off-by: Glenn Jocher * Update greetings.yml Signed-off-by: Glenn Jocher * Update README * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update README * Update README * Update README with YOLOv8 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update README with YOLOv8 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update README with YOLOv8 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update README with YOLOv8 * Update README with YOLOv8 --------- Signed-off-by: Glenn Jocher Signed-off-by: Ayush Chaurasia Co-authored-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/workflows/greetings.yml | 12 +- README.md | 234 ++++++++++++++++---------------- README.zh-CN.md | 46 +++---- 3 files changed, 148 insertions(+), 144 deletions(-) diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index 5e1589c340ed..8d780a23e2b5 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -25,11 +25,9 @@ jobs: issue-message: | 👋 Hello @${{ github.actor }}, thank you for your interest in YOLOv5 🚀! Please visit our ⭐️ [Tutorials](https://github.com/ultralytics/yolov5/wiki#tutorials) to get started, where you can find quickstart guides for simple tasks like [Custom Data Training](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data) all the way to advanced concepts like [Hyperparameter Evolution](https://github.com/ultralytics/yolov5/issues/607). - If this is a 🐛 Bug Report, please provide screenshots and **minimum viable code to reproduce your issue**, otherwise we can not help you. + If this is a 🐛 Bug Report, please provide a **minimum reproducible example** to help us debug it. 
- If this is a custom training ❓ Question, please provide as much information as possible, including dataset images, training logs, screenshots, and a public link to online [W&B logging](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data#visualize) if available. - - For business inquiries or professional support requests please visit https://ultralytics.com or email support@ultralytics.com. + If this is a custom training ❓ Question, please provide as much information as possible, including dataset image examples and training logs, and verify you are following our [Tips for Best Training Results](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results). ## Requirements @@ -55,3 +53,9 @@ jobs: If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on MacOS, Windows, and Ubuntu every 24 hours and on every commit. + ## YOLOv8 + + Ultralytics YOLOv8 🚀 is our new cutting-edge, state-of-the-art (SOTA) model released at [https://github.com/ultralytics/ultralytics](https://github.com/ultralytics/ultralytics). YOLOv8 is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of object detection, image segmentation and image classification tasks. See the [YOLOv8 Docs] for details and get started with: + ```bash + pip install ultralytics + ``` diff --git a/README.md b/README.md index 33468d0635ad..e00ec478a85b 100644 --- a/README.md +++ b/README.md @@ -45,105 +45,25 @@ To request an Enterprise License please complete the form at
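The bug-report guidance above centers on a minimum reproducible example. For YOLOv5 issues, that is typically a few self-contained lines built on the public PyTorch Hub entry point — a sketch, with the sample image URL being the one used throughout the YOLOv5 docs:

```python
import torch

# Smallest useful repro: load a pretrained model, run one public image, print results
model = torch.hub.load('ultralytics/yolov5', 'yolov5s')  # official hub entry point
results = model('https://ultralytics.com/images/zidane.jpg')
results.print()  # include this output, plus your torch/yolov5 versions, in the report
```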
-
-## Ultralytics Live Session
-
-⚡️ Stay tuned for [Ultralytics Live Session 4](https://www.youtube.com/watch?v=FXIbVnat2eU) ⚡️
-
-Over the past couple of years we found that 22% of you experience difficulty in deploying your vision AI models. To improve this step in the ML pipeline, we've partnered with [Neural Magic](https://bit.ly/yolov5-neuralmagic), whose DeepSparse tool takes advantage of sparsity and low-precision arithmetic within neural networks to offer exceptional performance on commodity hardware.
-
-Glenn will be joined by Michael Goin of Neural Magic on February 8th at 12 EST/18 CET to discuss how to achieve GPU-class performance for YOLOv5 on CPUs. Be sure to come prepared with any questions you have about the model deployment process!
-
-To join the webinar, visit our [YouTube Channel](https://www.youtube.com/@Ultralytics/streams) and turn on your notifications!
-
-## Segmentation ⭐ NEW
-
-Our new YOLOv5 [release v7.0](https://github.com/ultralytics/yolov5/releases/v7.0) instance segmentation models are the fastest and most accurate in the world, beating all current [SOTA benchmarks](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco). We've made them super simple to train, validate and deploy. See full details in our [Release Notes](https://github.com/ultralytics/yolov5/releases/v7.0) and visit our [YOLOv5 Segmentation Colab Notebook](https://github.com/ultralytics/yolov5/blob/master/segment/tutorial.ipynb) for quickstart tutorials.
-
-Segmentation Checkpoints
-
-We trained YOLOv5 segmentation models on COCO for 300 epochs at image size 640 using A100 GPUs. We exported all models to ONNX FP32 for CPU speed tests and to TensorRT FP16 for GPU speed tests. We ran all speed tests on Google [Colab Pro](https://colab.research.google.com/signup) notebooks for easy reproducibility.
+## YOLOv8 🚀 NEW
-| Model | size
(pixels) | mAP box 50-95 | mAP mask 50-95 | Train time 300 epochs A100 (hours) | Speed ONNX CPU (ms) | Speed TRT A100 (ms) | params (M) | FLOPs
@640 (B) | -| ------------------------------------------------------------------------------------------ | --------------------- | -------------------- | --------------------- | --------------------------------------------- | ------------------------------ | ------------------------------ | ------------------ | ---------------------- | -| [YOLOv5n-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-seg.pt) | 640 | 27.6 | 23.4 | 80:17 | **62.7** | **1.2** | **2.0** | **7.1** | -| [YOLOv5s-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt) | 640 | 37.6 | 31.7 | 88:16 | 173.3 | 1.4 | 7.6 | 26.4 | -| [YOLOv5m-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-seg.pt) | 640 | 45.0 | 37.1 | 108:36 | 427.0 | 2.2 | 22.0 | 70.8 | -| [YOLOv5l-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-seg.pt) | 640 | 49.0 | 39.9 | 66:43 (2x) | 857.4 | 2.9 | 47.9 | 147.7 | -| [YOLOv5x-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-seg.pt) | 640 | **50.7** | **41.4** | 62:56 (3x) | 1579.2 | 4.5 | 88.8 | 265.7 | +We are thrilled to announce the launch of Ultralytics YOLOv8 🚀, our NEW cutting-edge, state-of-the-art (SOTA) model +released at **[https://github.com/ultralytics/ultralytics](https://github.com/ultralytics/ultralytics)**. +YOLOv8 is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of +object detection, image segmentation and image classification tasks. -- All checkpoints are trained to 300 epochs with SGD optimizer with `lr0=0.01` and `weight_decay=5e-5` at image size 640 and all default settings.
Runs logged to https://wandb.ai/glenn-jocher/YOLOv5_v70_official
-- **Accuracy** values are for single-model single-scale on COCO dataset. Reproduce by `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt`
-- **Speed** averaged over 100 inference images using a [Colab Pro](https://colab.research.google.com/signup) A100 High-RAM instance. Values indicate inference speed only (NMS adds about 1ms per image). Reproduce by `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt --batch 1`
-- **Export** to ONNX at FP32 and TensorRT at FP16 done with `export.py`. Reproduce by `python export.py --weights yolov5s-seg.pt --include engine --device 0 --half`
-
-
- -
- Segmentation Usage Examples  Open In Colab +See the [YOLOv8 Docs](https://docs.ultralytics.com) for details and get started with: -### Train - -YOLOv5 segmentation training supports auto-download COCO128-seg segmentation dataset with `--data coco128-seg.yaml` argument and manual download of COCO-segments dataset with `bash data/scripts/get_coco.sh --train --val --segments` and then `python train.py --data coco.yaml`. - -```bash -# Single-GPU -python segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 - -# Multi-GPU DDP -python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --device 0,1,2,3 -``` - -### Val - -Validate YOLOv5s-seg mask mAP on COCO dataset: - -```bash -bash data/scripts/get_coco.sh --val --segments # download COCO val segments split (780MB, 5000 images) -python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # validate -``` - -### Predict - -Use pretrained YOLOv5m-seg.pt to predict bus.jpg: - -```bash -python segment/predict.py --weights yolov5m-seg.pt --data data/images/bus.jpg -``` - -```python -model = torch.hub.load( - "ultralytics/yolov5", "custom", "yolov5m-seg.pt" -) # load from PyTorch Hub (WARNING: inference not yet supported) -``` - -| ![zidane](https://user-images.githubusercontent.com/26833433/203113421-decef4c4-183d-4a0a-a6c2-6435b33bc5d3.jpg) | ![bus](https://user-images.githubusercontent.com/26833433/203113416-11fe0025-69f7-4874-a0a6-65d0bfe2999a.jpg) | -| ---------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | - -### Export - -Export YOLOv5s-seg model to ONNX and TensorRT: - -```bash -python export.py --weights yolov5s-seg.pt --include onnx engine --img 640 --device 0 +```commandline +pip install ultralytics ``` -
+
+ + +
##
Documentation
@@ -312,17 +232,17 @@ YOLOv5 has been designed to be super easy to get started and simple to learn. We | Model | size
(pixels) | mAP val 50-95 | mAP val 50 | Speed CPU b1 (ms) | Speed V100 b1 (ms) | Speed V100 b32 (ms) | params (M) | FLOPs
@640 (B) | | ----------------------------------------------------------------------------------------------- | --------------------- | -------------------- | ----------------- | ---------------------------- | ----------------------------- | ------------------------------ | ------------------ | ---------------------- | -| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** | -| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 | -| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 | -| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 | -| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 | +| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** | +| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 | +| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 | +| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 | +| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 | | | | | | | | | | | -| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 | -| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | -| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | -| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | -| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x6.pt)
+ [TTA] | 1280 1536 | 55.0 **55.8** | 72.7 **72.7** | 3136 - | 26.2 - | 19.4 - | 140.7 - | 209.8
- | +| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 | +| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | +| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | +| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | +| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x6.pt)
+ [TTA] | 1280 1536 | 55.0 **55.8** | 72.7 **72.7** | 3136 - | 26.2 - | 19.4 - | 140.7 - | 209.8
- |
Table Notes @@ -334,7 +254,87 @@ YOLOv5 has been designed to be super easy to get started and simple to learn. We
-##
Classification ⭐ NEW
+##
Segmentation
+ +Our new YOLOv5 [release v7.0](https://github.com/ultralytics/yolov5/releases/v7.0) instance segmentation models are the fastest and most accurate in the world, beating all current [SOTA benchmarks](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco). We've made them super simple to train, validate and deploy. See full details in our [Release Notes](https://github.com/ultralytics/yolov5/releases/v7.0) and visit our [YOLOv5 Segmentation Colab Notebook](https://github.com/ultralytics/yolov5/blob/master/segment/tutorial.ipynb) for quickstart tutorials. + +
+ Segmentation Checkpoints + +
+ + +
+ +We trained YOLOv5 segmentations models on COCO for 300 epochs at image size 640 using A100 GPUs. We exported all models to ONNX FP32 for CPU speed tests and to TensorRT FP16 for GPU speed tests. We ran all speed tests on Google [Colab Pro](https://colab.research.google.com/signup) notebooks for easy reproducibility. + +| Model | size
(pixels) | mAP box 50-95 | mAP mask 50-95 | Train time 300 epochs A100 (hours) | Speed ONNX CPU (ms) | Speed TRT A100 (ms) | params (M) | FLOPs
@640 (B) | +| ------------------------------------------------------------------------------------------ | --------------------- | -------------------- | --------------------- | --------------------------------------------- | ------------------------------ | ------------------------------ | ------------------ | ---------------------- | +| [YOLOv5n-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-seg.pt) | 640 | 27.6 | 23.4 | 80:17 | **62.7** | **1.2** | **2.0** | **7.1** | +| [YOLOv5s-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt) | 640 | 37.6 | 31.7 | 88:16 | 173.3 | 1.4 | 7.6 | 26.4 | +| [YOLOv5m-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-seg.pt) | 640 | 45.0 | 37.1 | 108:36 | 427.0 | 2.2 | 22.0 | 70.8 | +| [YOLOv5l-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-seg.pt) | 640 | 49.0 | 39.9 | 66:43 (2x) | 857.4 | 2.9 | 47.9 | 147.7 | +| [YOLOv5x-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-seg.pt) | 640 | **50.7** | **41.4** | 62:56 (3x) | 1579.2 | 4.5 | 88.8 | 265.7 | + +- All checkpoints are trained to 300 epochs with SGD optimizer with `lr0=0.01` and `weight_decay=5e-5` at image size 640 and all default settings.
Runs logged to https://wandb.ai/glenn-jocher/YOLOv5_v70_official
+- **Accuracy** values are for single-model single-scale on COCO dataset. Reproduce by `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt`
+- **Speed** averaged over 100 inference images using a [Colab Pro](https://colab.research.google.com/signup) A100 High-RAM instance. Values indicate inference speed only (NMS adds about 1ms per image). Reproduce by `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt --batch 1`
+- **Export** to ONNX at FP32 and TensorRT at FP16 done with `export.py`. Reproduce by `python export.py --weights yolov5s-seg.pt --include engine --device 0 --half`
+
+
+ +
+ Segmentation Usage Examples  Open In Colab + +### Train + +YOLOv5 segmentation training supports auto-download COCO128-seg segmentation dataset with `--data coco128-seg.yaml` argument and manual download of COCO-segments dataset with `bash data/scripts/get_coco.sh --train --val --segments` and then `python train.py --data coco.yaml`. + +```bash +# Single-GPU +python segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 + +# Multi-GPU DDP +python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --device 0,1,2,3 +``` + +### Val + +Validate YOLOv5s-seg mask mAP on COCO dataset: + +```bash +bash data/scripts/get_coco.sh --val --segments # download COCO val segments split (780MB, 5000 images) +python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # validate +``` + +### Predict + +Use pretrained YOLOv5m-seg.pt to predict bus.jpg: + +```bash +python segment/predict.py --weights yolov5m-seg.pt --data data/images/bus.jpg +``` + +```python +model = torch.hub.load( + "ultralytics/yolov5", "custom", "yolov5m-seg.pt" +) # load from PyTorch Hub (WARNING: inference not yet supported) +``` + +| ![zidane](https://user-images.githubusercontent.com/26833433/203113421-decef4c4-183d-4a0a-a6c2-6435b33bc5d3.jpg) | ![bus](https://user-images.githubusercontent.com/26833433/203113416-11fe0025-69f7-4874-a0a6-65d0bfe2999a.jpg) | +| ---------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | + +### Export + +Export YOLOv5s-seg model to ONNX and TensorRT: + +```bash +python export.py --weights yolov5s-seg.pt --include onnx engine --img 640 --device 0 +``` + +
+ +##
Classification
YOLOv5 [release v6.2](https://github.com/ultralytics/yolov5/releases) brings support for classification model training, validation and deployment! See full details in our [Release Notes](https://github.com/ultralytics/yolov5/releases/v6.2) and visit our [YOLOv5 Classification Colab Notebook](https://github.com/ultralytics/yolov5/blob/master/classify/tutorial.ipynb) for quickstart tutorials. @@ -347,21 +347,21 @@ We trained YOLOv5-cls classification models on ImageNet for 90 epochs using a 4x | Model | size
(pixels) | acc top1 | acc top5 | Training 90 epochs 4xA100 (hours) | Speed ONNX CPU (ms) | Speed TensorRT V100 (ms) | params (M) | FLOPs
@224 (B) | | -------------------------------------------------------------------------------------------------- | --------------------- | ---------------- | ---------------- | -------------------------------------------- | ------------------------------ | ----------------------------------- | ------------------ | ---------------------- | -| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** | -| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 | -| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 | -| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 | -| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 | +| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** | +| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 | +| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 | +| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 | +| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 | | | | | | | | | | | -| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 | -| [ResNet34](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 | -| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 | -| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 | +| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 | +| [ResNet34](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 | +| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 | +| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 | | | | | | | | | | | -| [EfficientNet_b0](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 | -| [EfficientNet_b1](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 | -| [EfficientNet_b2](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 | -| 
[EfficientNet_b3](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 | +| [EfficientNet_b0](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 | +| [EfficientNet_b1](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 | +| [EfficientNet_b2](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 | +| [EfficientNet_b3](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 |
Table Notes (click to expand) @@ -463,7 +463,7 @@ YOLOv5 is available under two different licenses: ##
Contact
-For YOLOv5 bugs and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues). For professional support please [Contact Us](https://ultralytics.com/contact). +For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues) or the [Ultralytics Community Forum](https://community.ultralytics.com/).
diff --git a/README.zh-CN.md b/README.zh-CN.md index b69d3921df99..bd38e8f457be 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -290,17 +290,17 @@ YOLOv5 超级容易上手,简单易学。我们优先考虑现实世界的结 | 模型 | 尺寸
(像素) | mAP val 50-95 | mAP val 50 | 推理速度 CPU b1 (ms) | 推理速度 V100 b1 (ms) | 速度 V100 b32 (ms) | 参数量 (M) | FLOPs
@640 (B) | | ---------------------------------------------------------------------------------------------- | --------------- | -------------------- | ----------------- | --------------------------- | ---------------------------- | --------------------------- | --------------- | ---------------------- | -| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** | -| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 | -| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 | -| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 | -| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 | +| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** | +| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 | +| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 | +| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 | +| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 | | | | | | | | | | | -| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 | -| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | -| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | -| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | -| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x6.pt)
+[TTA] | 1280 1536 | 55.0 **55.8** | 72.7 **72.7** | 3136 - | 26.2 - | 19.4 - | 140.7 - | 209.8
- | +| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 | +| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | +| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | +| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | +| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x6.pt)
+[TTA] | 1280 1536 | 55.0 **55.8** | 72.7 **72.7** | 3136 - | 26.2 - | 19.4 - | 140.7 - | 209.8
- |
笔记 @@ -325,21 +325,21 @@ YOLOv5 [release v6.2](https://github.com/ultralytics/yolov5/releases) 带来对 | 模型 | 尺寸
(像素) | acc top1 | acc top5 | 训练时长 90 epochs 4xA100(小时) | 推理速度 ONNX CPU (ms) | 推理速度 TensorRT V100 (ms) | 参数 (M) | FLOPs
@640 (B) | | -------------------------------------------------------------------------------------------------- | --------------- | ---------------- | ---------------- | ------------------------------------ | ----------------------------- | ---------------------------------- | -------------- | ---------------------- | -| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** | -| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 | -| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 | -| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 | -| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v6.2/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 | +| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** | +| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 | +| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 | +| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 | +| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 | | | | | | | | | | | -| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 | -| [Resnetzch](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 | -| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 | -| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v6.2/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 | +| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 | +| [Resnetzch](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 | +| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 | +| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 | | | | | | | | | | | -| [EfficientNet_b0](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 | -| [EfficientNet_b1](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 | -| [EfficientNet_b2](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 | -| 
[EfficientNet_b3](https://github.com/ultralytics/yolov5/releases/download/v6.2/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 | +| [EfficientNet_b0](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 | +| [EfficientNet_b1](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 | +| [EfficientNet_b2](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 | +| [EfficientNet_b3](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 |
Table Notes (点击以展开) From 80e54473905c08b1c4c771056a0f5c1a261736d8 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 10 Feb 2023 18:59:22 +0400 Subject: [PATCH 1670/1976] Dockerfile `openssl` security update (#10949) Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index 0349c50526e0..54927c03eb80 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -16,6 +16,10 @@ RUN TZ=Etc/UTC apt install -y tzdata RUN apt install --no-install-recommends -y gcc git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg # RUN alias python=python3 +# Security updates +# https://security.snyk.io/vuln/SNYK-UBUNTU1804-OPENSSL-3314796 +RUN apt install --no-install-recommends -y openssl + # Create working directory RUN rm -rf /usr/src/app && mkdir -p /usr/src/app WORKDIR /usr/src/app From d389840f66bb95c150d8c0e4d97759b07d21e821 Mon Sep 17 00:00:00 2001 From: David Strahm Date: Fri, 10 Feb 2023 16:11:08 +0100 Subject: [PATCH 1671/1976] Allow int8 quantization for export_tfjs (#10948) * Allow int8 quantization for export_tfjs --int8 param currently has no effect on export_tfjs. With this change, ` python export.py --weights ../path/to/best.pt --include tfjs --int8` will add the --quantize_uint8 param to the tensorflowjs_converter script, greatly reducing model size for web usage. Signed-off-by: David Strahm * Update Dockerfile Signed-off-by: Glenn Jocher --------- Signed-off-by: David Strahm Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- export.py | 8 +++++--- utils/docker/Dockerfile | 9 ++++----- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/export.py b/export.py index 9ca3441bc66a..1bf0532dde34 100644 --- a/export.py +++ b/export.py @@ -426,7 +426,7 @@ def export_edgetpu(file, prefix=colorstr('Edge TPU:')): @try_export -def export_tfjs(file, prefix=colorstr('TensorFlow.js:')): +def export_tfjs(file, int8, prefix=colorstr('TensorFlow.js:')): # YOLOv5 TensorFlow.js export check_requirements('tensorflowjs') import tensorflowjs as tfjs @@ -436,7 +436,9 @@ def export_tfjs(file, prefix=colorstr('TensorFlow.js:')): f_pb = file.with_suffix('.pb') # *.pb path f_json = f'{f}/model.json' # *.json path - cmd = f'tensorflowjs_converter --input_format=tf_frozen_model ' \ + int8_export = ' --quantize_uint8 ' if int8 else '' + + cmd = f'tensorflowjs_converter --input_format=tf_frozen_model {int8_export}' \ f'--output_node_names=Identity,Identity_1,Identity_2,Identity_3 {f_pb} {f}' subprocess.run(cmd.split()) @@ -588,7 +590,7 @@ def run( f[8], _ = export_edgetpu(file) add_tflite_metadata(f[8] or f[7], metadata, num_outputs=len(s_model.outputs)) if tfjs: - f[9], _ = export_tfjs(file) + f[9], _ = export_tfjs(file, int8) if paddle: # PaddlePaddle f[10], _ = export_paddle(model, im, file, metadata) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index 54927c03eb80..cfe7b0635c28 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -13,13 +13,12 @@ ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Aria ENV DEBIAN_FRONTEND noninteractive RUN apt update RUN TZ=Etc/UTC apt install -y tzdata -RUN apt install --no-install-recommends -y gcc git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg +RUN apt install --no-install-recommends -y gcc git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg \ + # Security updates + # https://security.snyk.io/vuln/SNYK-UBUNTU1804-OPENSSL-3314796 + 
openssl # RUN alias python=python3 -# Security updates -# https://security.snyk.io/vuln/SNYK-UBUNTU1804-OPENSSL-3314796 -RUN apt install --no-install-recommends -y openssl - # Create working directory RUN rm -rf /usr/src/app && mkdir -p /usr/src/app WORKDIR /usr/src/app From 5c3eba664e228d0416285e94954a8a42751bf98b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 10 Feb 2023 21:19:08 +0400 Subject: [PATCH 1672/1976] Update Dockerfile `apt upgrade openssl` (#10951) Update Dockerfile upgrade `openssl` Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index cfe7b0635c28..b5d2af9fb08e 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -13,12 +13,13 @@ ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Aria ENV DEBIAN_FRONTEND noninteractive RUN apt update RUN TZ=Etc/UTC apt install -y tzdata -RUN apt install --no-install-recommends -y gcc git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg \ - # Security updates - # https://security.snyk.io/vuln/SNYK-UBUNTU1804-OPENSSL-3314796 - openssl +RUN apt install --no-install-recommends -y gcc git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg # RUN alias python=python3 +# Security updates +# https://security.snyk.io/vuln/SNYK-UBUNTU1804-OPENSSL-3314796 +RUN apt upgrade --no-install-recommends -y openssl + # Create working directory RUN rm -rf /usr/src/app && mkdir -p /usr/src/app WORKDIR /usr/src/app From 416a132ceab4d0e2cd4857e8c1e02950c10d80d7 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 11 Feb 2023 03:07:13 +0400 Subject: [PATCH 1673/1976] Update README.md (#10952) * Update README.md Signed-off-by: Glenn Jocher * Update README.zh-CN.md Signed-off-by: Glenn Jocher * Update partner logo hosting * Update partner logo hosting --------- Signed-off-by: Glenn Jocher --- README.md | 11 ++++++----- README.zh-CN.md | 11 ++++++----- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index e00ec478a85b..25d12b289d09 100644 --- a/README.md +++ b/README.md @@ -185,16 +185,16 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - | Roboflow | ClearML ⭐ NEW | Comet ⭐ NEW | Neural Magic ⭐ NEW | @@ -452,7 +452,8 @@ We love your input! We want to make contributing to YOLOv5 as easy and transpare - + + ##
License
diff --git a/README.zh-CN.md b/README.zh-CN.md index bd38e8f457be..c581842c6556 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -243,16 +243,16 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - | Roboflow | ClearML ⭐ 新 | Comet ⭐ 新 | Neural Magic ⭐ 新 | @@ -436,7 +436,8 @@ python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --inclu - + + ##
License
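
The `--int8` option introduced in #10948 above works by threading a single extra CLI flag (`--quantize_uint8`) into the `tensorflowjs_converter` invocation. A minimal sketch of that pattern, with placeholder paths and assuming `tensorflowjs` is installed so the converter is on `PATH`:

```python
import subprocess


def export_tfjs_sketch(f_pb: str, out_dir: str, int8: bool) -> None:
    # Map the exporter's --int8 flag to the converter's --quantize_uint8 option
    quantize = '--quantize_uint8 ' if int8 else ''
    cmd = (f'tensorflowjs_converter --input_format=tf_frozen_model {quantize}'
           f'--output_node_names=Identity,Identity_1,Identity_2,Identity_3 {f_pb} {out_dir}')
    subprocess.run(cmd.split(), check=True)
```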
From 25c17370dd0bc6f6b42cc29592750cf3334797dd Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 11 Feb 2023 12:45:18 +0400 Subject: [PATCH 1674/1976] Update greetings.yml (#10955) * Update greetings.yml Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/workflows/greetings.yml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index 8d780a23e2b5..42a2463585a8 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -53,9 +53,13 @@ jobs: If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on MacOS, Windows, and Ubuntu every 24 hours and on every commit. - ## YOLOv8 + ## Introducing YOLOv8 🚀 - Ultralytics YOLOv8 🚀 is our new cutting-edge, state-of-the-art (SOTA) model released at [https://github.com/ultralytics/ultralytics](https://github.com/ultralytics/ultralytics). YOLOv8 is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of object detection, image segmentation and image classification tasks. See the [YOLOv8 Docs] for details and get started with: + We're excited to announce the launch of our latest state-of-the-art (SOTA) object detection model for 2023 - [YOLOv8](https://github.com/ultralytics/ultralytics) 🚀! + + Designed to be fast, accurate, and easy to use, YOLOv8 is an ideal choice for a wide range of object detection, image segmentation and image classification tasks. With YOLOv8, you'll be able to quickly and accurately detect objects in real-time, streamline your workflows, and achieve new levels of accuracy in your projects. + + Check out our [YOLOv8 Docs](https://docs.ultralytics.com/) for details and get started with: ```bash pip install ultralytics ``` From fa4bdbe14d33b3aa74e2eac5bdb940cc4b337198 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 11 Feb 2023 14:24:49 +0400 Subject: [PATCH 1675/1976] Update README.zh-CN.md (#10956) * Update README.zh-CN.md Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update README.md Signed-off-by: Glenn Jocher * Update README.zh-CN.md Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- README.md | 2 +- README.zh-CN.md | 181 +++++++++++++++++++++++++----------------------- 2 files changed, 97 insertions(+), 86 deletions(-) diff --git a/README.md b/README.md index 25d12b289d09..3a0e2fe1a188 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@

- +

[English](README.md) | [简体中文](README.zh-CN.md) diff --git a/README.zh-CN.md b/README.zh-CN.md index c581842c6556..c25dc0c3326a 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -1,7 +1,7 @@

- +

[英文](README.md)|[简体中文](README.zh-CN.md)
@@ -45,87 +45,23 @@ YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表实例分割模型 ⭐ 新
- - - -我们新的 YOLOv5 [release v7.0](https://github.com/ultralytics/yolov5/releases/v7.0) 实例分割模型是世界上最快和最准确的模型,击败所有当前 [SOTA 基准](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco)。我们使它非常易于训练、验证和部署。更多细节请查看 [发行说明](https://github.com/ultralytics/yolov5/releases/v7.0) 或访问我们的 [YOLOv5 分割 Colab 笔记本](https://github.com/ultralytics/yolov5/blob/master/segment/tutorial.ipynb) 以快速入门。 - -
- 实例分割模型列表 - -
- -我们使用 A100 GPU 在 COCO 上以 640 图像大小训练了 300 epochs 得到 YOLOv5 分割模型。我们将所有模型导出到 ONNX FP32 以进行 CPU 速度测试,并导出到 TensorRT FP16 以进行 GPU 速度测试。为了便于再现,我们在 Google [Colab Pro](https://colab.research.google.com/signup) 上进行了所有速度测试。 - -| 模型 | 尺寸
(像素) | mAPbox
50-95 | mAPmask
50-95 | 训练时长
300 epochs
A100 GPU(小时) | 推理速度
ONNX CPU
(ms) | 推理速度
TRT A100
(ms) | 参数量
(M) | FLOPs
@640 (B) | -| ------------------------------------------------------------------------------------------ | --------------- | -------------------- | --------------------- | --------------------------------------- | ----------------------------- | ----------------------------- | --------------- | ---------------------- | -| [YOLOv5n-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-seg.pt) | 640 | 27.6 | 23.4 | 80:17 | **62.7** | **1.2** | **2.0** | **7.1** | -| [YOLOv5s-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt) | 640 | 37.6 | 31.7 | 88:16 | 173.3 | 1.4 | 7.6 | 26.4 | -| [YOLOv5m-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-seg.pt) | 640 | 45.0 | 37.1 | 108:36 | 427.0 | 2.2 | 22.0 | 70.8 | -| [YOLOv5l-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-seg.pt) | 640 | 49.0 | 39.9 | 66:43 (2x) | 857.4 | 2.9 | 47.9 | 147.7 | -| [YOLOv5x-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-seg.pt) | 640 | **50.7** | **41.4** | 62:56 (3x) | 1579.2 | 4.5 | 88.8 | 265.7 | - -- 所有模型使用 SGD 优化器训练, 都使用 `lr0=0.01` 和 `weight_decay=5e-5` 参数, 图像大小为 640 。
训练 log 可以查看 https://wandb.ai/glenn-jocher/YOLOv5_v70_official -- **准确性**结果都在 COCO 数据集上,使用单模型单尺度测试得到。
复现命令 `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt` -- **推理速度**是使用 100 张图像推理时间进行平均得到,测试环境使用 [Colab Pro](https://colab.research.google.com/signup) 上 A100 高 RAM 实例。结果仅表示推理速度(NMS 每张图像增加约 1 毫秒)。
复现命令 `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt --batch 1` -- **模型转换**到 FP32 的 ONNX 和 FP16 的 TensorRT 脚本为 `export.py`.
运行命令 `python export.py --weights yolov5s-seg.pt --include engine --device 0 --half` - -
- -
- 分割模型使用示例  Open In Colab - -### 训练 - -YOLOv5分割训练支持自动下载 COCO128-seg 分割数据集,用户仅需在启动指令中包含 `--data coco128-seg.yaml` 参数。 若要手动下载,使用命令 `bash data/scripts/get_coco.sh --train --val --segments`, 在下载完毕后,使用命令 `python train.py --data coco.yaml` 开启训练。 - -```bash -# 单 GPU -python segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 +##
YOLOv8 🚀 NEW
-# 多 GPU, DDP 模式 -python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --device 0,1,2,3 -``` +We are thrilled to announce the launch of Ultralytics YOLOv8 🚀, our NEW cutting-edge, state-of-the-art (SOTA) model +released at **[https://github.com/ultralytics/ultralytics](https://github.com/ultralytics/ultralytics)**. +YOLOv8 is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of +object detection, image segmentation and image classification tasks. -### 验证 - -在 COCO 数据集上验证 YOLOv5s-seg mask mAP: +See the [YOLOv8 Docs](https://docs.ultralytics.com) for details and get started with: -```bash -bash data/scripts/get_coco.sh --val --segments # 下载 COCO val segments 数据集 (780MB, 5000 images) -python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # 验证 +```commandline +pip install ultralytics ``` -### 预测 - -使用预训练的 YOLOv5m-seg.pt 来预测 bus.jpg: - -```bash -python segment/predict.py --weights yolov5m-seg.pt --data data/images/bus.jpg -``` - -```python -model = torch.hub.load( - "ultralytics/yolov5", "custom", "yolov5m-seg.pt" -) # 从load from PyTorch Hub 加载模型 (WARNING: 推理暂未支持) -``` - -| ![zidane](https://user-images.githubusercontent.com/26833433/203113421-decef4c4-183d-4a0a-a6c2-6435b33bc5d3.jpg) | ![bus](https://user-images.githubusercontent.com/26833433/203113416-11fe0025-69f7-4874-a0a6-65d0bfe2999a.jpg) | -| ---------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | - -### 模型导出 - -将 YOLOv5s-seg 模型导出到 ONNX 和 TensorRT: - -```bash -python export.py --weights yolov5s-seg.pt --include onnx engine --img 640 --device 0 -``` - -
+
+ + +
##
文档
@@ -312,6 +248,88 @@ YOLOv5 超级容易上手,简单易学。我们优先考虑现实世界的结
+##
实例分割模型 ⭐ 新
+ +我们新的 YOLOv5 [release v7.0](https://github.com/ultralytics/yolov5/releases/v7.0) 实例分割模型是世界上最快和最准确的模型,击败所有当前 [SOTA 基准](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco)。我们使它非常易于训练、验证和部署。更多细节请查看 [发行说明](https://github.com/ultralytics/yolov5/releases/v7.0) 或访问我们的 [YOLOv5 分割 Colab 笔记本](https://github.com/ultralytics/yolov5/blob/master/segment/tutorial.ipynb) 以快速入门。 + +
+ 实例分割模型列表 + +
+ +
+ + +
+ +我们使用 A100 GPU 在 COCO 上以 640 图像大小训练了 300 epochs 得到 YOLOv5 分割模型。我们将所有模型导出到 ONNX FP32 以进行 CPU 速度测试,并导出到 TensorRT FP16 以进行 GPU 速度测试。为了便于再现,我们在 Google [Colab Pro](https://colab.research.google.com/signup) 上进行了所有速度测试。 + +| 模型 | 尺寸
(像素) | mAPbox
50-95 | mAPmask
50-95 | 训练时长
300 epochs
A100 GPU(小时) | 推理速度
ONNX CPU
(ms) | 推理速度
TRT A100
(ms) | 参数量
(M) | FLOPs
@640 (B) | +| ------------------------------------------------------------------------------------------ | --------------- | -------------------- | --------------------- | --------------------------------------- | ----------------------------- | ----------------------------- | --------------- | ---------------------- | +| [YOLOv5n-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-seg.pt) | 640 | 27.6 | 23.4 | 80:17 | **62.7** | **1.2** | **2.0** | **7.1** | +| [YOLOv5s-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt) | 640 | 37.6 | 31.7 | 88:16 | 173.3 | 1.4 | 7.6 | 26.4 | +| [YOLOv5m-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-seg.pt) | 640 | 45.0 | 37.1 | 108:36 | 427.0 | 2.2 | 22.0 | 70.8 | +| [YOLOv5l-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-seg.pt) | 640 | 49.0 | 39.9 | 66:43 (2x) | 857.4 | 2.9 | 47.9 | 147.7 | +| [YOLOv5x-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-seg.pt) | 640 | **50.7** | **41.4** | 62:56 (3x) | 1579.2 | 4.5 | 88.8 | 265.7 | + +- 所有模型使用 SGD 优化器训练, 都使用 `lr0=0.01` 和 `weight_decay=5e-5` 参数, 图像大小为 640 。
训练 log 可以查看 https://wandb.ai/glenn-jocher/YOLOv5_v70_official +- **准确性**结果都在 COCO 数据集上,使用单模型单尺度测试得到。
复现命令 `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt` +- **推理速度**是使用 100 张图像推理时间进行平均得到,测试环境使用 [Colab Pro](https://colab.research.google.com/signup) 上 A100 高 RAM 实例。结果仅表示推理速度(NMS 每张图像增加约 1 毫秒)。
复现命令 `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt --batch 1` +- **模型转换**到 FP32 的 ONNX 和 FP16 的 TensorRT 脚本为 `export.py`.
运行命令 `python export.py --weights yolov5s-seg.pt --include engine --device 0 --half` + +
+ +
+ 分割模型使用示例  Open In Colab + +### 训练 + +YOLOv5分割训练支持自动下载 COCO128-seg 分割数据集,用户仅需在启动指令中包含 `--data coco128-seg.yaml` 参数。 若要手动下载,使用命令 `bash data/scripts/get_coco.sh --train --val --segments`, 在下载完毕后,使用命令 `python train.py --data coco.yaml` 开启训练。 + +```bash +# 单 GPU +python segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 + +# 多 GPU, DDP 模式 +python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --device 0,1,2,3 +``` + +### 验证 + +在 COCO 数据集上验证 YOLOv5s-seg mask mAP: + +```bash +bash data/scripts/get_coco.sh --val --segments # 下载 COCO val segments 数据集 (780MB, 5000 images) +python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # 验证 +``` + +### 预测 + +使用预训练的 YOLOv5m-seg.pt 来预测 bus.jpg: + +```bash +python segment/predict.py --weights yolov5m-seg.pt --data data/images/bus.jpg +``` + +```python +model = torch.hub.load( + "ultralytics/yolov5", "custom", "yolov5m-seg.pt" +) # 从load from PyTorch Hub 加载模型 (WARNING: 推理暂未支持) +``` + +| ![zidane](https://user-images.githubusercontent.com/26833433/203113421-decef4c4-183d-4a0a-a6c2-6435b33bc5d3.jpg) | ![bus](https://user-images.githubusercontent.com/26833433/203113416-11fe0025-69f7-4874-a0a6-65d0bfe2999a.jpg) | +| ---------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | + +### 模型导出 + +将 YOLOv5s-seg 模型导出到 ONNX 和 TensorRT: + +```bash +python export.py --weights yolov5s-seg.pt --include onnx engine --img 640 --device 0 +``` + +
+ ##
分类网络 ⭐ 新
YOLOv5 [release v6.2](https://github.com/ultralytics/yolov5/releases) 带来对分类模型训练、验证和部署的支持!详情请查看 [发行说明](https://github.com/ultralytics/yolov5/releases/v6.2) 或访问我们的 [YOLOv5 分类 Colab 笔记本](https://github.com/ultralytics/yolov5/blob/master/classify/tutorial.ipynb) 以快速入门。 @@ -423,13 +441,6 @@ python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --inclu
-##
APP
- -通过下载 [Ultralytics APP](https://ultralytics.com/app_install) ,以在您的 iOS 或 Android 设备上运行 YOLOv5 模型! - - -Ultralytics mobile app - ##
贡献
我们喜欢您的意见或建议!我们希望尽可能简单和透明地为 YOLOv5 做出贡献。请看我们的 [投稿指南](CONTRIBUTING.md),并填写 [YOLOv5调查](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) 向我们发送您的体验反馈。感谢我们所有的贡献者! @@ -448,7 +459,7 @@ YOLOv5 在两种不同的 License 下可用: ##
联系我们
-若发现 YOLOv5 的 bug 或有功能需求,请访问 [GitHub 问题](https://github.com/ultralytics/yolov5/issues) 。如需专业支持,请 [联系我们](https://ultralytics.com/contact) 。
+请访问 [GitHub Issues](https://github.com/ultralytics/yolov5/issues) 或 [Ultralytics Community Forum](https://community.ultralytics.com) 以报告 YOLOv5 错误和请求功能。
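
The segmentation quickstart restored above loads a `-seg` checkpoint through PyTorch Hub. A cleaned-up sketch of that snippet (the docs above note that Hub inference for segmentation models was not yet supported at the time; loading requires network access to fetch the repo and weights):

```python
import torch

# Load a YOLOv5 segmentation checkpoint from PyTorch Hub
model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5m-seg.pt')
```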
From 1a2eb532cec4b5f0a4b295554b3c73ae9f7fff3a Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Mon, 13 Feb 2023 15:38:27 +0200 Subject: [PATCH 1676/1976] Fix return value check for subprocess.run (#10972) Subprocess.run does not return an integer. Regressed in #10944 --- utils/general.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/general.py b/utils/general.py index 4e5c7147fd40..01f0a3bddc7d 100644 --- a/utils/general.py +++ b/utils/general.py @@ -631,8 +631,8 @@ def download_one(url, dir): for i in range(retry + 1): if curl: s = 'sS' if threads > 1 else '' # silent - r = subprocess.run(f'curl -# -{s}L "{url}" -o "{f}" --retry 9 -C -'.split()) - success = r == 0 + proc = subprocess.run(f'curl -# -{s}L "{url}" -o "{f}" --retry 9 -C -'.split()) + success = proc.returncode == 0 else: torch.hub.download_url_to_file(url, f, progress=threads == 1) # torch download success = f.is_file() From a2de5c5bf61f1165ffeb4af51dc5b24e8d04bff6 Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Mon, 13 Feb 2023 16:00:31 +0200 Subject: [PATCH 1677/1976] Subprocess improvements (#10973) * Use list-form arguments for subprocess.run calls where possible Augments #10944 * Deduplicate curl code * Avoid eval() to parse integer --------- Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- classify/train.py | 2 +- export.py | 35 ++++++++++++++++++++++++++--------- segment/train.py | 8 ++++++-- segment/val.py | 2 +- train.py | 8 ++++++-- utils/downloads.py | 29 +++++++++++++++++++++++++---- utils/general.py | 6 ++---- val.py | 2 +- 8 files changed, 68 insertions(+), 24 deletions(-) diff --git a/classify/train.py b/classify/train.py index 4767be77bd61..8ae2fdd52828 100644 --- a/classify/train.py +++ b/classify/train.py @@ -78,7 +78,7 @@ def train(opt, device): LOGGER.info(f'\nDataset not found ⚠️, missing path {data_dir}, attempting download...') t = time.time() if str(data) == 'imagenet': - subprocess.run(f"bash {ROOT / 'data/scripts/get_imagenet.sh'}", shell=True, check=True) + subprocess.run(["bash", str(ROOT / 'data/scripts/get_imagenet.sh')], shell=True, check=True) else: url = f'https://github.com/ultralytics/yolov5/releases/download/v1.0/{data}.zip' download(url, dir=data_dir.parent) diff --git a/export.py b/export.py index 1bf0532dde34..2c9fb77d17be 100644 --- a/export.py +++ b/export.py @@ -194,8 +194,15 @@ def export_openvino(file, metadata, half, prefix=colorstr('OpenVINO:')): LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...') f = str(file).replace('.pt', f'_openvino_model{os.sep}') - cmd = f"mo --input_model {file.with_suffix('.onnx')} --output_dir {f} --data_type {'FP16' if half else 'FP32'}" - subprocess.run(cmd.split(), check=True, env=os.environ) # export + args = [ + "mo", + "--input_model", + str(file.with_suffix('.onnx')), + "--output_dir", + f, + "--data_type", + ("FP16" if half else "FP32"),] + subprocess.run(args, check=True, env=os.environ) # export yaml_save(Path(f) / file.with_suffix('.yaml').name, metadata) # add metadata.yaml return f, None @@ -420,8 +427,15 @@ def export_edgetpu(file, prefix=colorstr('Edge TPU:')): f = str(file).replace('.pt', '-int8_edgetpu.tflite') # Edge TPU model f_tfl = str(file).replace('.pt', '-int8.tflite') # TFLite model - cmd = f"edgetpu_compiler -s -d -k 10 --out_dir {file.parent} {f_tfl}" - subprocess.run(cmd.split(), check=True) + subprocess.run([ + 'edgetpu_compiler', + '-s', + '-d', + '-k', + '10', + '--out_dir', + str(file.parent), + f_tfl,], check=True) return f, None @@ -436,11 +450,14 @@ 
def export_tfjs(file, int8, prefix=colorstr('TensorFlow.js:')): f_pb = file.with_suffix('.pb') # *.pb path f_json = f'{f}/model.json' # *.json path - int8_export = ' --quantize_uint8 ' if int8 else '' - - cmd = f'tensorflowjs_converter --input_format=tf_frozen_model {int8_export}' \ - f'--output_node_names=Identity,Identity_1,Identity_2,Identity_3 {f_pb} {f}' - subprocess.run(cmd.split()) + args = [ + 'tensorflowjs_converter', + '--input_format=tf_frozen_model', + '--quantize_uint8' if int8 else '', + '--output_node_names=Identity,Identity_1,Identity_2,Identity_3', + str(f_pb), + str(f),] + subprocess.run([arg for arg in args if arg], check=True) json = Path(f_json).read_text() with open(f_json, 'w') as j: # sort JSON Identity_* in ascending order diff --git a/segment/train.py b/segment/train.py index 883c8b0a2b62..4914f9613a3d 100644 --- a/segment/train.py +++ b/segment/train.py @@ -598,8 +598,12 @@ def main(opt, callbacks=Callbacks()): # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv' if opt.bucket: - subprocess.run( - f'gsutil cp gs://{opt.bucket}/evolve.csv {evolve_csv}'.split()) # download evolve.csv if exists + # download evolve.csv if exists + subprocess.run([ + 'gsutil', + 'cp', + f'gs://{opt.bucket}/evolve.csv', + str(evolve_csv),]) for _ in range(opt.evolve): # generations to evolve if evolve_csv.exists(): # if evolve.csv exists: select best hyps and mutate diff --git a/segment/val.py b/segment/val.py index 8168b5407c1d..665b540a5490 100644 --- a/segment/val.py +++ b/segment/val.py @@ -462,7 +462,7 @@ def main(opt): r, _, t = run(**vars(opt), plots=False) y.append(r + t) # results and times np.savetxt(f, y, fmt='%10.4g') # save - subprocess.run('zip -r study.zip study_*.txt'.split()) + subprocess.run(['zip', '-r', 'study.zip', 'study_*.txt']) plot_val_study(x=x) # plot else: raise NotImplementedError(f'--task {opt.task} not in ("train", "val", "test", "speed", "study")') diff --git a/train.py b/train.py index db65f2c74c6c..ccda0a7fe2e3 100644 --- a/train.py +++ b/train.py @@ -572,8 +572,12 @@ def main(opt, callbacks=Callbacks()): # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv' if opt.bucket: - subprocess.run( - f'gsutil cp gs://{opt.bucket}/evolve.csv {evolve_csv}'.split()) # download evolve.csv if exists + # download evolve.csv if exists + subprocess.run([ + 'gsutil', + 'cp', + f'gs://{opt.bucket}/evolve.csv', + str(evolve_csv),]) for _ in range(opt.evolve): # generations to evolve if evolve_csv.exists(): # if evolve.csv exists: select best hyps and mutate diff --git a/utils/downloads.py b/utils/downloads.py index a3ff9274066e..2610f3c66aac 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -26,8 +26,10 @@ def is_url(url, check=True): def gsutil_getsize(url=''): # gs://bucket/file size https://cloud.google.com/storage/docs/gsutil/commands/du - s = subprocess.check_output(f'gsutil du {url}', shell=True).decode('utf-8') - return eval(s.split(' ')[0]) if len(s) else 0 # bytes + output = subprocess.check_output(['gsutil', 'du', url], shell=True, encoding='utf-8') + if output: + return int(output.split()[0]) + return 0 def url_getsize(url='https://ultralytics.com/images/bus.jpg'): @@ -36,6 +38,25 @@ def url_getsize(url='https://ultralytics.com/images/bus.jpg'): return int(response.headers.get('content-length', -1)) +def curl_download(url, filename, *, silent: 
bool = False) -> bool: + """ + Download a file from a url to a filename using curl. + """ + silent_option = 'sS' if silent else '' # silent + proc = subprocess.run([ + 'curl', + '-#', + f'-{silent_option}L', + url, + '--output', + filename, + '--retry', + '9', + '-C', + '-',]) + return proc.returncode == 0 + + def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''): # Attempts to download file from url or url2, checks and removes incomplete downloads < min_bytes from utils.general import LOGGER @@ -50,8 +71,8 @@ def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''): if file.exists(): file.unlink() # remove partial downloads LOGGER.info(f'ERROR: {e}\nRe-attempting {url2 or url} to {file}...') - subprocess.run( - f"curl -# -L '{url2 or url}' -o '{file}' --retry 3 -C -".split()) # curl download, retry and resume on fail + # curl download, retry and resume on fail + curl_download(url2 or url, file) finally: if not file.exists() or file.stat().st_size < min_bytes: # check if file.exists(): diff --git a/utils/general.py b/utils/general.py index 01f0a3bddc7d..a6af4f3216dd 100644 --- a/utils/general.py +++ b/utils/general.py @@ -38,7 +38,7 @@ import yaml from utils import TryExcept, emojis -from utils.downloads import gsutil_getsize +from utils.downloads import gsutil_getsize, curl_download from utils.metrics import box_iou, fitness FILE = Path(__file__).resolve() @@ -630,9 +630,7 @@ def download_one(url, dir): LOGGER.info(f'Downloading {url} to {f}...') for i in range(retry + 1): if curl: - s = 'sS' if threads > 1 else '' # silent - proc = subprocess.run(f'curl -# -{s}L "{url}" -o "{f}" --retry 9 -C -'.split()) - success = proc.returncode == 0 + success = curl_download(url, f, silent=(threads > 1)) else: torch.hub.download_url_to_file(url, f, progress=threads == 1) # torch download success = f.is_file() diff --git a/val.py b/val.py index 62fa2c980988..7829afb68b79 100644 --- a/val.py +++ b/val.py @@ -398,7 +398,7 @@ def main(opt): r, _, t = run(**vars(opt), plots=False) y.append(r + t) # results and times np.savetxt(f, y, fmt='%10.4g') # save - subprocess.run('zip -r study.zip study_*.txt'.split()) + subprocess.run(['zip', '-r', 'study.zip', 'study_*.txt']) plot_val_study(x=x) # plot else: raise NotImplementedError(f'--task {opt.task} not in ("train", "val", "test", "speed", "study")') From 4d28fec3b8b663fa8225634ca8eeb4446505527e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 13 Feb 2023 20:27:22 +0400 Subject: [PATCH 1678/1976] Update README.md (#10975) @pderrenger YOLOv5 HUB copy updates Signed-off-by: Glenn Jocher --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 3a0e2fe1a188..2a28ea11490a 100644 --- a/README.md +++ b/README.md @@ -203,7 +203,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - ##
Ultralytics HUB
-[Ultralytics HUB](https://bit.ly/ultralytics_hub) is our ⭐ **NEW** no-code solution to visualize datasets, train YOLOv5 🚀 models, and deploy to the real world in a seamless experience. Get started for **Free** now! +Experience seamless AI with [Ultralytics HUB](https://bit.ly/ultralytics_hub) ⭐, the all-in-one solution for data visualization, YOLOv5 🚀 model training and deployment, without any coding. Transform images into actionable insights and bring your AI visions to life with ease using our cutting-edge platform and user-friendly [Ultralytics App](https://ultralytics.com/app_install). Start your journey for **Free** now!
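
The subprocess refactor in #10973 above also centralizes all curl calls in a new `utils.downloads.curl_download()` helper that returns a success boolean. A usage sketch, assuming `curl` is on `PATH` and the YOLOv5 repository root is importable:

```python
from utils.downloads import curl_download

# The helper adds the retry/resume flags internally and reports success via the exit code
ok = curl_download('https://ultralytics.com/images/bus.jpg', 'bus.jpg', silent=False)
print('download ok' if ok else 'curl failed')
```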
From e7b60999ad88a40bfb84c539bed1e6ec11249af2 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 15 Feb 2023 20:28:20 +0400 Subject: [PATCH 1679/1976] Fix Comet link (#10990) @DN6 fixes YOLOv5 Comet link we chatted about Signed-off-by: Glenn Jocher --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 2a28ea11490a..16dfd9fca085 100644 --- a/README.md +++ b/README.md @@ -190,7 +190,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - - + From e4d836080f68dd14ae9becaa7b50c510ac1db54f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 15 Feb 2023 20:31:41 +0400 Subject: [PATCH 1680/1976] Update README.zh-CN.md (#10991) Signed-off-by: Glenn Jocher --- README.zh-CN.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.zh-CN.md b/README.zh-CN.md index c25dc0c3326a..17c046c8d98d 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -184,7 +184,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - - + @@ -193,7 +193,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - | Roboflow | ClearML ⭐ 新 | Comet ⭐ 新 | Neural Magic ⭐ 新 | | :--------------------------------------------------------------------------------: | :-------------------------------------------------------------------------: | :-------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------: | -| 将您的自定义数据集进行标注并直接导出到 YOLOv5 以进行训练 [Roboflow](https://roboflow.com/?ref=ultralytics) | 自动跟踪、可视化甚至远程训练 YOLOv5 [ClearML](https://cutt.ly/yolov5-readme-clearml)(开源!) | 永远免费,[Comet](https://bit.ly/yolov5-readme-comet)可让您保存 YOLOv5 模型、恢复训练以及交互式可视化和调试预测 | 使用 [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic),运行 YOLOv5 推理的速度最高可提高6倍 | +| 将您的自定义数据集进行标注并直接导出到 YOLOv5 以进行训练 [Roboflow](https://roboflow.com/?ref=ultralytics) | 自动跟踪、可视化甚至远程训练 YOLOv5 [ClearML](https://cutt.ly/yolov5-readme-clearml)(开源!) | 永远免费,[Comet](https://bit.ly/yolov5-readme-comet2)可让您保存 YOLOv5 模型、恢复训练以及交互式可视化和调试预测 | 使用 [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic),运行 YOLOv5 推理的速度最高可提高6倍 | ##
Ultralytics HUB
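
Restating the root cause fixed in #10972 above: `subprocess.run()` returns a `CompletedProcess` object, not an exit code, so a check like `r == 0` is always `False` and marks every call as failed. A self-contained illustration:

```python
import subprocess
import sys

proc = subprocess.run([sys.executable, '-c', 'print("hi")'])
assert isinstance(proc, subprocess.CompletedProcess)  # not an int
success = proc.returncode == 0  # the corrected check from #10972
print(success)
```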
From 4dd1caaf9af97ca56d7938a4baf3be8d0ea0a3ce Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 16 Feb 2023 21:07:55 +0400 Subject: [PATCH 1681/1976] Update README.md (#10992) * Update README.md Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- README.zh-CN.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.zh-CN.md b/README.zh-CN.md index 17c046c8d98d..800a670cfb4f 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -191,8 +191,8 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml -
-| Roboflow | ClearML ⭐ 新 | Comet ⭐ 新 | Neural Magic ⭐ 新 | -| :--------------------------------------------------------------------------------: | :-------------------------------------------------------------------------: | :-------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------: | +| Roboflow | ClearML ⭐ 新 | Comet ⭐ 新 | Neural Magic ⭐ 新 | +| :--------------------------------------------------------------------------------: | :-------------------------------------------------------------------------: | :--------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------: | | 将您的自定义数据集进行标注并直接导出到 YOLOv5 以进行训练 [Roboflow](https://roboflow.com/?ref=ultralytics) | 自动跟踪、可视化甚至远程训练 YOLOv5 [ClearML](https://cutt.ly/yolov5-readme-clearml)(开源!) | 永远免费,[Comet](https://bit.ly/yolov5-readme-comet2)可让您保存 YOLOv5 模型、恢复训练以及交互式可视化和调试预测 | 使用 [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic),运行 YOLOv5 推理的速度最高可提高6倍 | ##
Ultralytics HUB
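
One more detail from #10973 above: `gsutil_getsize()` now parses the `gsutil du` output with `int()` rather than `eval()`, so the returned text is never executed. A sketch of the parsing against an illustrative output line (the string below is made up, not real gsutil output):

```python
# Illustrative `gsutil du` output: "<bytes>  <object path>"
output = '126848  gs://bucket/yolov5s.pt'

size = int(output.split()[0]) if output else 0  # eval()-free parse per #10973
print(size)  # 126848
```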
From 6d283ec167b60a0160eb275323a9b13b563ff804 Mon Sep 17 00:00:00 2001 From: Snyk bot Date: Thu, 16 Feb 2023 17:08:13 +0000 Subject: [PATCH 1682/1976] [Snyk] Security upgrade werkzeug from 1.0.1 to 2.2.3 (#10995) * fix: utils/google_app_engine/additional_requirements.txt to reduce vulnerabilities The following vulnerabilities are fixed by pinning transitive dependencies: - https://snyk.io/vuln/SNYK-PYTHON-WERKZEUG-3319935 - https://snyk.io/vuln/SNYK-PYTHON-WERKZEUG-3319936 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- utils/google_app_engine/additional_requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/google_app_engine/additional_requirements.txt b/utils/google_app_engine/additional_requirements.txt index b6b496feaa7b..d5b76758c876 100644 --- a/utils/google_app_engine/additional_requirements.txt +++ b/utils/google_app_engine/additional_requirements.txt @@ -2,3 +2,4 @@ pip==21.1 Flask==1.0.2 gunicorn==19.10.0 +werkzeug>=2.2.3 # not directly required, pinned by Snyk to avoid a vulnerability From 226a5e43cbceff5de43a71c4fb3f3f7478a9bb03 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 16 Feb 2023 23:48:42 +0400 Subject: [PATCH 1683/1976] Update ci-testing.yml benchmarks to Python 3.10 (#10997) Signed-off-by: Glenn Jocher --- .github/workflows/ci-testing.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index f31bb6e6ce3c..f9c62d623042 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -18,7 +18,7 @@ jobs: fail-fast: false matrix: os: [ ubuntu-latest ] - python-version: [ '3.9' ] # requires python<=3.9 + python-version: [ '3.10' ] # requires python<=3.10 model: [ yolov5n ] steps: - uses: actions/checkout@v3 From 34e1bc8ee3cabc809bb3302b0cc6de4f6dcce10e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 17 Feb 2023 13:53:45 +0400 Subject: [PATCH 1684/1976] Update downloads.py (#11005) * Update downloads.py Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/downloads.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/utils/downloads.py b/utils/downloads.py index 2610f3c66aac..e739919540b4 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -120,11 +120,9 @@ def github_assets(repository, version='latest'): file.parent.mkdir(parents=True, exist_ok=True) # make parent dir (if required) if name in assets: - url3 = 'https://drive.google.com/drive/folders/1EFQTEUeXWSFww0luse2jB9M1QNZQGwNl' # backup gdrive mirror - safe_download( - file, - url=f'https://github.com/{repo}/releases/download/{tag}/{name}', - min_bytes=1E5, - error_msg=f'{file} missing, try downloading from https://github.com/{repo}/releases/{tag} or {url3}') + safe_download(file, + url=f'https://github.com/{repo}/releases/download/{tag}/{name}', + min_bytes=1E5, + error_msg=f'{file} missing, try downloading from https://github.com/{repo}/releases/{tag}') return str(file) From 7a972e86c4e5009830d5e6faacadfe6e1ed2efff Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 18 Feb 2023 01:06:24 +0100 Subject: [PATCH 1685/1976] Update 
.pre-commit-config.yaml (#11009) * Update .pre-commit-config.yaml Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update __init__.py Signed-off-by: Glenn Jocher * Update .pre-commit-config.yaml Signed-off-by: Glenn Jocher * Pre-commit updates * Pre-commit updates --------- Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 35 ++--- benchmarks.py | 2 +- classify/predict.py | 4 +- classify/train.py | 26 ++-- classify/tutorial.ipynb | 2 +- classify/val.py | 8 +- detect.py | 2 +- export.py | 26 ++-- models/common.py | 16 +- models/segment/yolov5m-seg.yaml | 2 +- models/segment/yolov5s-seg.yaml | 2 +- models/tf.py | 12 +- segment/predict.py | 2 +- segment/train.py | 14 +- segment/tutorial.ipynb | 2 +- segment/val.py | 16 +- train.py | 6 +- tutorial.ipynb | 2 +- utils/__init__.py | 2 +- utils/dataloaders.py | 34 ++--- utils/downloads.py | 2 +- utils/flask_rest_api/example_request.py | 8 +- utils/flask_rest_api/restapi.py | 22 +-- utils/general.py | 48 +++--- utils/loggers/__init__.py | 16 +- utils/loggers/clearml/clearml_utils.py | 6 +- utils/loggers/comet/__init__.py | 192 ++++++++++++------------ utils/loggers/comet/comet_utils.py | 42 +++--- utils/loggers/comet/hpo.py | 32 ++-- utils/loggers/wandb/wandb_utils.py | 10 +- utils/metrics.py | 10 +- utils/plots.py | 2 +- utils/segment/dataloaders.py | 32 ++-- utils/segment/loss.py | 12 +- utils/segment/metrics.py | 90 +++++------ utils/segment/plots.py | 20 +-- utils/torch_utils.py | 4 +- utils/triton.py | 14 +- val.py | 4 +- 39 files changed, 389 insertions(+), 392 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b188048e63a6..c5162378ab81 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,8 +1,5 @@ -# Define hooks for code formations -# Will be applied on any updated commit files if a user has installed and linked commit hook - -default_language_version: - python: python3.8 +# Ultralytics YOLO 🚀, GPL-3.0 license +# Pre-commit hooks. 
For more information see https://github.com/pre-commit/pre-commit-hooks/blob/main/README.md exclude: 'docs/' # Define bot property if installed via https://github.com/marketplace/pre-commit-ci @@ -16,13 +13,13 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.4.0 hooks: - # - id: end-of-file-fixer + - id: end-of-file-fixer - id: trailing-whitespace - id: check-case-conflict - id: check-yaml - - id: check-toml - - id: pretty-format-json - id: check-docstring-first + - id: double-quote-string-fixer + - id: detect-private-key - repo: https://github.com/asottile/pyupgrade rev: v3.3.1 @@ -31,11 +28,11 @@ repos: name: Upgrade code args: [--py37-plus] - # - repo: https://github.com/PyCQA/isort - # rev: 5.11.4 - # hooks: - # - id: isort - # name: Sort imports + - repo: https://github.com/PyCQA/isort + rev: 5.12.0 + hooks: + - id: isort + name: Sort imports - repo: https://github.com/google/yapf rev: v0.32.0 @@ -59,12 +56,12 @@ repos: - id: flake8 name: PEP8 - #- repo: https://github.com/codespell-project/codespell - # rev: v2.2.2 - # hooks: - # - id: codespell - # args: - # - --ignore-words-list=crate,nd + - repo: https://github.com/codespell-project/codespell + rev: v2.2.2 + hooks: + - id: codespell + args: + - --ignore-words-list=crate,nd,strack,dota #- repo: https://github.com/asottile/yesqa # rev: v1.4.0 diff --git a/benchmarks.py b/benchmarks.py index 03d7d693a936..09108b8a7cc4 100644 --- a/benchmarks.py +++ b/benchmarks.py @@ -164,6 +164,6 @@ def main(opt): test(**vars(opt)) if opt.test else run(**vars(opt)) -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git a/classify/predict.py b/classify/predict.py index 5a5edabda42c..5f0d40787b52 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -179,7 +179,7 @@ def run( vid_writer[i].write(im0) # Print time (inference-only) - LOGGER.info(f"{s}{dt[1].dt * 1E3:.1f}ms") + LOGGER.info(f'{s}{dt[1].dt * 1E3:.1f}ms') # Print results t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image @@ -221,6 +221,6 @@ def main(opt): run(**vars(opt)) -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git a/classify/train.py b/classify/train.py index 8ae2fdd52828..b752a3c1fe32 100644 --- a/classify/train.py +++ b/classify/train.py @@ -78,7 +78,7 @@ def train(opt, device): LOGGER.info(f'\nDataset not found ⚠️, missing path {data_dir}, attempting download...') t = time.time() if str(data) == 'imagenet': - subprocess.run(["bash", str(ROOT / 'data/scripts/get_imagenet.sh')], shell=True, check=True) + subprocess.run(['bash', str(ROOT / 'data/scripts/get_imagenet.sh')], shell=True, check=True) else: url = f'https://github.com/ultralytics/yolov5/releases/download/v1.0/{data}.zip' download(url, dir=data_dir.parent) @@ -220,11 +220,11 @@ def train(opt, device): # Log metrics = { - "train/loss": tloss, - f"{val}/loss": vloss, - "metrics/accuracy_top1": top1, - "metrics/accuracy_top5": top5, - "lr/0": optimizer.param_groups[0]['lr']} # learning rate + 'train/loss': tloss, + f'{val}/loss': vloss, + 'metrics/accuracy_top1': top1, + 'metrics/accuracy_top5': top5, + 'lr/0': optimizer.param_groups[0]['lr']} # learning rate logger.log_metrics(metrics, epoch) # Save model @@ -251,11 +251,11 @@ def train(opt, device): if RANK in {-1, 0} and final_epoch: LOGGER.info(f'\nTraining complete ({(time.time() - t0) / 3600:.3f} hours)' f"\nResults saved to {colorstr('bold', save_dir)}" - f"\nPredict: python classify/predict.py --weights {best} --source im.jpg" - f"\nValidate: 
python classify/val.py --weights {best} --data {data_dir}" - f"\nExport: python export.py --weights {best} --include onnx" + f'\nPredict: python classify/predict.py --weights {best} --source im.jpg' + f'\nValidate: python classify/val.py --weights {best} --data {data_dir}' + f'\nExport: python export.py --weights {best} --include onnx' f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{best}')" - f"\nVisualize: https://netron.app\n") + f'\nVisualize: https://netron.app\n') # Plot examples images, labels = (x[:25] for x in next(iter(testloader))) # first 25 images and labels @@ -263,7 +263,7 @@ def train(opt, device): file = imshow_cls(images, labels, pred, model.names, verbose=False, f=save_dir / 'test_images.jpg') # Log results - meta = {"epochs": epochs, "top1_acc": best_fitness, "date": datetime.now().isoformat()} + meta = {'epochs': epochs, 'top1_acc': best_fitness, 'date': datetime.now().isoformat()} logger.log_images(file, name='Test Examples (true-predicted)', epoch=epoch) logger.log_model(best, epochs, metadata=meta) @@ -310,7 +310,7 @@ def main(opt): assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' torch.cuda.set_device(LOCAL_RANK) device = torch.device('cuda', LOCAL_RANK) - dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") + dist.init_process_group(backend='nccl' if dist.is_nccl_available() else 'gloo') # Parameters opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok) # increment run @@ -328,6 +328,6 @@ def run(**kwargs): return opt -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git a/classify/tutorial.ipynb b/classify/tutorial.ipynb index cc18aa934039..58723608bdbe 100644 --- a/classify/tutorial.ipynb +++ b/classify/tutorial.ipynb @@ -1477,4 +1477,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} \ No newline at end of file +} diff --git a/classify/val.py b/classify/val.py index 03ba817d5ea2..4edd5a1f5e9e 100644 --- a/classify/val.py +++ b/classify/val.py @@ -100,7 +100,7 @@ def run( pred, targets, loss, dt = [], [], 0, (Profile(), Profile(), Profile()) n = len(dataloader) # number of batches action = 'validating' if dataloader.dataset.root.stem == 'val' else 'testing' - desc = f"{pbar.desc[:-36]}{action:>36}" if pbar else f"{action}" + desc = f'{pbar.desc[:-36]}{action:>36}' if pbar else f'{action}' bar = tqdm(dataloader, desc, n, not training, bar_format=TQDM_BAR_FORMAT, position=0) with torch.cuda.amp.autocast(enabled=device.type != 'cpu'): for images, labels in bar: @@ -123,14 +123,14 @@ def run( top1, top5 = acc.mean(0).tolist() if pbar: - pbar.desc = f"{pbar.desc[:-36]}{loss:>12.3g}{top1:>12.3g}{top5:>12.3g}" + pbar.desc = f'{pbar.desc[:-36]}{loss:>12.3g}{top1:>12.3g}{top5:>12.3g}' if verbose: # all classes LOGGER.info(f"{'Class':>24}{'Images':>12}{'top1_acc':>12}{'top5_acc':>12}") LOGGER.info(f"{'all':>24}{targets.shape[0]:>12}{top1:>12.3g}{top5:>12.3g}") for i, c in model.names.items(): acc_i = acc[targets == i] top1i, top5i = acc_i.mean(0).tolist() - LOGGER.info(f"{c:>24}{acc_i.shape[0]:>12}{top1i:>12.3g}{top5i:>12.3g}") + LOGGER.info(f'{c:>24}{acc_i.shape[0]:>12}{top1i:>12.3g}{top5i:>12.3g}') # Print results t = tuple(x.t / len(dataloader.dataset.samples) * 1E3 for x in dt) # speeds per image @@ -165,6 +165,6 @@ def main(opt): run(**vars(opt)) -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git a/detect.py b/detect.py index 2d13401f78bd..3f32d7a50d6b 100644 --- 
a/detect.py +++ b/detect.py @@ -256,6 +256,6 @@ def main(opt): run(**vars(opt)) -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git a/export.py b/export.py index 2c9fb77d17be..e8287704866a 100644 --- a/export.py +++ b/export.py @@ -120,7 +120,7 @@ def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:' f = file.with_suffix('.torchscript') ts = torch.jit.trace(model, im, strict=False) - d = {"shape": im.shape, "stride": int(max(model.stride)), "names": model.names} + d = {'shape': im.shape, 'stride': int(max(model.stride)), 'names': model.names} extra_files = {'config.txt': json.dumps(d)} # torch._C.ExtraFilesMap() if optimize: # https://pytorch.org/tutorials/recipes/mobile_interpreter.html optimize_for_mobile(ts)._save_for_lite_interpreter(str(f), _extra_files=extra_files) @@ -195,13 +195,13 @@ def export_openvino(file, metadata, half, prefix=colorstr('OpenVINO:')): f = str(file).replace('.pt', f'_openvino_model{os.sep}') args = [ - "mo", - "--input_model", + 'mo', + '--input_model', str(file.with_suffix('.onnx')), - "--output_dir", + '--output_dir', f, - "--data_type", - ("FP16" if half else "FP32"),] + '--data_type', + ('FP16' if half else 'FP32'),] subprocess.run(args, check=True, env=os.environ) # export yaml_save(Path(f) / file.with_suffix('.yaml').name, metadata) # add metadata.yaml return f, None @@ -237,7 +237,7 @@ def export_coreml(model, im, file, int8, half, prefix=colorstr('CoreML:')): if bits < 32: if MACOS: # quantization only supported on macOS with warnings.catch_warnings(): - warnings.filterwarnings("ignore", category=DeprecationWarning) # suppress numpy==1.20 float warning + warnings.filterwarnings('ignore', category=DeprecationWarning) # suppress numpy==1.20 float warning ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode) else: print(f'{prefix} quantization only supported on macOS, skipping...') @@ -293,7 +293,7 @@ def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose if dynamic: if im.shape[0] <= 1: - LOGGER.warning(f"{prefix} WARNING ⚠️ --dynamic model requires maximum --batch-size argument") + LOGGER.warning(f'{prefix} WARNING ⚠️ --dynamic model requires maximum --batch-size argument') profile = builder.create_optimization_profile() for inp in inputs: profile.set_shape(inp.name, (1, *im.shape[1:]), (max(1, im.shape[0] // 2), *im.shape[1:]), im.shape) @@ -403,7 +403,7 @@ def export_tflite(keras_model, im, file, int8, data, nms, agnostic_nms, prefix=c converter.target_spec.supported_ops.append(tf.lite.OpsSet.SELECT_TF_OPS) tflite_model = converter.convert() - open(f, "wb").write(tflite_model) + open(f, 'wb').write(tflite_model) return f, None @@ -618,14 +618,14 @@ def run( det &= not seg # segmentation models inherit from SegmentationModel(DetectionModel) dir = Path('segment' if seg else 'classify' if cls else '') h = '--half' if half else '' # --half FP16 inference arg - s = "# WARNING ⚠️ ClassificationModel not yet supported for PyTorch Hub AutoShape inference" if cls else \ - "# WARNING ⚠️ SegmentationModel not yet supported for PyTorch Hub AutoShape inference" if seg else '' + s = '# WARNING ⚠️ ClassificationModel not yet supported for PyTorch Hub AutoShape inference' if cls else \ + '# WARNING ⚠️ SegmentationModel not yet supported for PyTorch Hub AutoShape inference' if seg else '' LOGGER.info(f'\nExport complete ({time.time() - t:.1f}s)' f"\nResults saved to {colorstr('bold', file.parent.resolve())}" f"\nDetect: python {dir / 
('detect.py' if det else 'predict.py')} --weights {f[-1]} {h}" f"\nValidate: python {dir / 'val.py'} --weights {f[-1]} {h}" f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{f[-1]}') {s}" - f"\nVisualize: https://netron.app") + f'\nVisualize: https://netron.app') return f # return list of exported files/dirs @@ -667,6 +667,6 @@ def main(opt): run(**vars(opt)) -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git a/models/common.py b/models/common.py index 71340688d2e0..f416ddf25eb8 100644 --- a/models/common.py +++ b/models/common.py @@ -380,11 +380,11 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, w = next(Path(w).glob('*.xml')) # get *.xml file from *_openvino_model dir network = ie.read_model(model=w, weights=Path(w).with_suffix('.bin')) if network.get_parameters()[0].get_layout().empty: - network.get_parameters()[0].set_layout(Layout("NCHW")) + network.get_parameters()[0].set_layout(Layout('NCHW')) batch_dim = get_batch(network) if batch_dim.is_static: batch_size = batch_dim.get_length() - executable_network = ie.compile_model(network, device_name="CPU") # device_name="MYRIAD" for Intel NCS2 + executable_network = ie.compile_model(network, device_name='CPU') # device_name="MYRIAD" for Intel NCS2 stride, names = self._load_metadata(Path(w).with_suffix('.yaml')) # load metadata elif engine: # TensorRT LOGGER.info(f'Loading {w} for TensorRT inference...') @@ -431,7 +431,7 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, import tensorflow as tf def wrap_frozen_graph(gd, inputs, outputs): - x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=""), []) # wrapped + x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=''), []) # wrapped ge = x.graph.as_graph_element return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs)) @@ -445,7 +445,7 @@ def gd_outputs(gd): gd = tf.Graph().as_graph_def() # TF GraphDef with open(w, 'rb') as f: gd.ParseFromString(f.read()) - frozen_func = wrap_frozen_graph(gd, inputs="x:0", outputs=gd_outputs(gd)) + frozen_func = wrap_frozen_graph(gd, inputs='x:0', outputs=gd_outputs(gd)) elif tflite or edgetpu: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu from tflite_runtime.interpreter import Interpreter, load_delegate @@ -467,9 +467,9 @@ def gd_outputs(gd): output_details = interpreter.get_output_details() # outputs # load metadata with contextlib.suppress(zipfile.BadZipFile): - with zipfile.ZipFile(w, "r") as model: + with zipfile.ZipFile(w, 'r') as model: meta_file = model.namelist()[0] - meta = ast.literal_eval(model.read(meta_file).decode("utf-8")) + meta = ast.literal_eval(model.read(meta_file).decode('utf-8')) stride, names = int(meta['stride']), meta['names'] elif tfjs: # TF.js raise NotImplementedError('ERROR: YOLOv5 TF.js inference is not supported') @@ -491,7 +491,7 @@ def gd_outputs(gd): check_requirements('tritonclient[all]') from utils.triton import TritonRemoteModel model = TritonRemoteModel(url=w) - nhwc = model.runtime.startswith("tensorflow") + nhwc = model.runtime.startswith('tensorflow') else: raise NotImplementedError(f'ERROR: {w} is not a supported format') @@ -608,7 +608,7 @@ def _model_type(p='path/to/model.pt'): url = urlparse(p) # if url may be Triton inference server types = [s in Path(p).name for s in 
sf] types[8] &= not types[9] # tflite &= not edgetpu - triton = not any(types) and all([any(s in url.scheme for s in ["http", "grpc"]), url.netloc]) + triton = not any(types) and all([any(s in url.scheme for s in ['http', 'grpc']), url.netloc]) return types + [triton] @staticmethod diff --git a/models/segment/yolov5m-seg.yaml b/models/segment/yolov5m-seg.yaml index f73d1992ac19..07ec25ba264d 100644 --- a/models/segment/yolov5m-seg.yaml +++ b/models/segment/yolov5m-seg.yaml @@ -45,4 +45,4 @@ head: [-1, 3, C3, [1024, False]], # 23 (P5/32-large) [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Detect(P3, P4, P5) - ] \ No newline at end of file + ] diff --git a/models/segment/yolov5s-seg.yaml b/models/segment/yolov5s-seg.yaml index 7cbdb36b425c..a827814e1399 100644 --- a/models/segment/yolov5s-seg.yaml +++ b/models/segment/yolov5s-seg.yaml @@ -45,4 +45,4 @@ head: [-1, 3, C3, [1024, False]], # 23 (P5/32-large) [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Detect(P3, P4, P5) - ] \ No newline at end of file + ] diff --git a/models/tf.py b/models/tf.py index 3f3dc8dbe7e7..8290cf2e57f5 100644 --- a/models/tf.py +++ b/models/tf.py @@ -356,7 +356,7 @@ class TFUpsample(keras.layers.Layer): # TF version of torch.nn.Upsample() def __init__(self, size, scale_factor, mode, w=None): # warning: all arguments needed including 'w' super().__init__() - assert scale_factor % 2 == 0, "scale_factor must be multiple of 2" + assert scale_factor % 2 == 0, 'scale_factor must be multiple of 2' self.upsample = lambda x: tf.image.resize(x, (x.shape[1] * scale_factor, x.shape[2] * scale_factor), mode) # self.upsample = keras.layers.UpSampling2D(size=scale_factor, interpolation=mode) # with default arguments: align_corners=False, half_pixel_centers=False @@ -371,7 +371,7 @@ class TFConcat(keras.layers.Layer): # TF version of torch.concat() def __init__(self, dimension=1, w=None): super().__init__() - assert dimension == 1, "convert only NCHW to NHWC concat" + assert dimension == 1, 'convert only NCHW to NHWC concat' self.d = 3 def call(self, inputs): @@ -523,17 +523,17 @@ def _nms(x, topk_all=100, iou_thres=0.45, conf_thres=0.25): # agnostic NMS selected_boxes = tf.gather(boxes, selected_inds) padded_boxes = tf.pad(selected_boxes, paddings=[[0, topk_all - tf.shape(selected_boxes)[0]], [0, 0]], - mode="CONSTANT", + mode='CONSTANT', constant_values=0.0) selected_scores = tf.gather(scores_inp, selected_inds) padded_scores = tf.pad(selected_scores, paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]], - mode="CONSTANT", + mode='CONSTANT', constant_values=-1.0) selected_classes = tf.gather(class_inds, selected_inds) padded_classes = tf.pad(selected_classes, paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]], - mode="CONSTANT", + mode='CONSTANT', constant_values=-1.0) valid_detections = tf.shape(selected_inds)[0] return padded_boxes, padded_scores, padded_classes, valid_detections @@ -603,6 +603,6 @@ def main(opt): run(**vars(opt)) -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git a/segment/predict.py b/segment/predict.py index e9093baa1cc7..d82df89a85b0 100644 --- a/segment/predict.py +++ b/segment/predict.py @@ -279,6 +279,6 @@ def main(opt): run(**vars(opt)) -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git a/segment/train.py b/segment/train.py index 4914f9613a3d..2e71de131a8d 100644 --- a/segment/train.py +++ b/segment/train.py @@ -139,7 +139,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or 
hyp dictio # Batch size if RANK == -1 and batch_size == -1: # single-GPU only, estimate best batch size batch_size = check_train_batch_size(model, imgsz, amp) - logger.update_params({"batch_size": batch_size}) + logger.update_params({'batch_size': batch_size}) # loggers.on_params_update({"batch_size": batch_size}) # Optimizer @@ -341,10 +341,10 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio # Mosaic plots if plots: if ni < 3: - plot_images_and_masks(imgs, targets, masks, paths, save_dir / f"train_batch{ni}.jpg") + plot_images_and_masks(imgs, targets, masks, paths, save_dir / f'train_batch{ni}.jpg') if ni == 10: files = sorted(save_dir.glob('train*.jpg')) - logger.log_images(files, "Mosaics", epoch) + logger.log_images(files, 'Mosaics', epoch) # end batch ------------------------------------------------------------------------------------------------ # Scheduler @@ -454,8 +454,8 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))] files = [(save_dir / f) for f in files if (save_dir / f).exists()] # filter LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}") - logger.log_images(files, "Results", epoch + 1) - logger.log_images(sorted(save_dir.glob('val*.jpg')), "Validation", epoch + 1) + logger.log_images(files, 'Results', epoch + 1) + logger.log_images(sorted(save_dir.glob('val*.jpg')), 'Validation', epoch + 1) torch.cuda.empty_cache() return results @@ -548,7 +548,7 @@ def main(opt, callbacks=Callbacks()): assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' torch.cuda.set_device(LOCAL_RANK) device = torch.device('cuda', LOCAL_RANK) - dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") + dist.init_process_group(backend='nccl' if dist.is_nccl_available() else 'gloo') # Train if not opt.evolve: @@ -659,6 +659,6 @@ def run(**kwargs): return opt -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git a/segment/tutorial.ipynb b/segment/tutorial.ipynb index cb1af34d9f17..cb52045bcb25 100644 --- a/segment/tutorial.ipynb +++ b/segment/tutorial.ipynb @@ -591,4 +591,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} \ No newline at end of file +} diff --git a/segment/val.py b/segment/val.py index 665b540a5490..a7f95fe9b6fc 100644 --- a/segment/val.py +++ b/segment/val.py @@ -70,8 +70,8 @@ def save_one_json(predn, jdict, path, class_map, pred_masks): from pycocotools.mask import encode def single_encode(x): - rle = encode(np.asarray(x[:, :, None], order="F", dtype="uint8"))[0] - rle["counts"] = rle["counts"].decode("utf-8") + rle = encode(np.asarray(x[:, :, None], order='F', dtype='uint8'))[0] + rle['counts'] = rle['counts'].decode('utf-8') return rle image_id = int(path.stem) if path.stem.isnumeric() else path.stem @@ -105,7 +105,7 @@ def process_batch(detections, labels, iouv, pred_masks=None, gt_masks=None, over gt_masks = gt_masks.repeat(nl, 1, 1) # shape(1,640,640) -> (n,640,640) gt_masks = torch.where(gt_masks == index, 1.0, 0.0) if gt_masks.shape[1:] != pred_masks.shape[1:]: - gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode="bilinear", align_corners=False)[0] + gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode='bilinear', align_corners=False)[0] gt_masks = gt_masks.gt_(0.5) iou = mask_iou(gt_masks.view(gt_masks.shape[0], -1), pred_masks.view(pred_masks.shape[0], -1)) else: # boxes @@ 
-231,8 +231,8 @@ def run( if isinstance(names, (list, tuple)): # old format names = dict(enumerate(names)) class_map = coco80_to_coco91_class() if is_coco else list(range(1000)) - s = ('%22s' + '%11s' * 10) % ('Class', 'Images', 'Instances', 'Box(P', "R", "mAP50", "mAP50-95)", "Mask(P", "R", - "mAP50", "mAP50-95)") + s = ('%22s' + '%11s' * 10) % ('Class', 'Images', 'Instances', 'Box(P', 'R', 'mAP50', 'mAP50-95)', 'Mask(P', 'R', + 'mAP50', 'mAP50-95)') dt = Profile(), Profile(), Profile() metrics = Metrics() loss = torch.zeros(4, device=device) @@ -343,7 +343,7 @@ def run( # Print results pf = '%22s' + '%11i' * 2 + '%11.3g' * 8 # print format - LOGGER.info(pf % ("all", seen, nt.sum(), *metrics.mean_results())) + LOGGER.info(pf % ('all', seen, nt.sum(), *metrics.mean_results())) if nt.sum() == 0: LOGGER.warning(f'WARNING ⚠️ no labels found in {task} set, can not compute metrics without labels') @@ -369,7 +369,7 @@ def run( if save_json and len(jdict): w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights anno_json = str(Path('../datasets/coco/annotations/instances_val2017.json')) # annotations - pred_json = str(save_dir / f"{w}_predictions.json") # predictions + pred_json = str(save_dir / f'{w}_predictions.json') # predictions LOGGER.info(f'\nEvaluating pycocotools mAP... saving {pred_json}...') with open(pred_json, 'w') as f: json.dump(jdict, f) @@ -468,6 +468,6 @@ def main(opt): raise NotImplementedError(f'--task {opt.task} not in ("train", "val", "test", "speed", "study")') -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git a/train.py b/train.py index ccda0a7fe2e3..c4e3aac3561a 100644 --- a/train.py +++ b/train.py @@ -148,7 +148,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio # Batch size if RANK == -1 and batch_size == -1: # single-GPU only, estimate best batch size batch_size = check_train_batch_size(model, imgsz, amp) - loggers.on_params_update({"batch_size": batch_size}) + loggers.on_params_update({'batch_size': batch_size}) # Optimizer nbs = 64 # nominal batch size @@ -522,7 +522,7 @@ def main(opt, callbacks=Callbacks()): assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' torch.cuda.set_device(LOCAL_RANK) device = torch.device('cuda', LOCAL_RANK) - dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") + dist.init_process_group(backend='nccl' if dist.is_nccl_available() else 'gloo') # Train if not opt.evolve: @@ -635,6 +635,6 @@ def run(**kwargs): return opt -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) diff --git a/tutorial.ipynb b/tutorial.ipynb index c320d699a940..32af68b57945 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -973,4 +973,4 @@ "outputs": [] } ] -} \ No newline at end of file +} diff --git a/utils/__init__.py b/utils/__init__.py index 7bf3efe6b8c7..d158c5515a12 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -69,7 +69,7 @@ def notebook_init(verbose=True): if verbose: gb = 1 << 30 # bytes to GiB (1024 ** 3) ram = psutil.virtual_memory().total - total, used, free = shutil.disk_usage("/") + total, used, free = shutil.disk_usage('/') display.clear_output() s = f'({os.cpu_count()} CPUs, {ram / gb:.1f} GB RAM, {(total - free) / gb:.1f}/{total / gb:.1f} GB disk)' else: diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 02c2a79f5747..7687a2ba2665 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -89,7 
+89,7 @@ def exif_transpose(image): if method is not None: image = image.transpose(method) del exif[0x0112] - image.info["exif"] = exif.tobytes() + image.info['exif'] = exif.tobytes() return image @@ -212,11 +212,11 @@ def __init__(self, source, img_size=640, stride=32, auto=True, transforms=None): # Parse monitor shape monitor = self.sct.monitors[self.screen] - self.top = monitor["top"] if top is None else (monitor["top"] + top) - self.left = monitor["left"] if left is None else (monitor["left"] + left) - self.width = width or monitor["width"] - self.height = height or monitor["height"] - self.monitor = {"left": self.left, "top": self.top, "width": self.width, "height": self.height} + self.top = monitor['top'] if top is None else (monitor['top'] + top) + self.left = monitor['left'] if left is None else (monitor['left'] + left) + self.width = width or monitor['width'] + self.height = height or monitor['height'] + self.monitor = {'left': self.left, 'top': self.top, 'width': self.width, 'height': self.height} def __iter__(self): return self @@ -224,7 +224,7 @@ def __iter__(self): def __next__(self): # mss screen capture: get raw pixels from the screen as np array im0 = np.array(self.sct.grab(self.monitor))[:, :, :3] # [:, :, :3] BGRA to BGR - s = f"screen {self.screen} (LTWH): {self.left},{self.top},{self.width},{self.height}: " + s = f'screen {self.screen} (LTWH): {self.left},{self.top},{self.width},{self.height}: ' if self.transforms: im = self.transforms(im0) # transforms @@ -239,7 +239,7 @@ def __next__(self): class LoadImages: # YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4` def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None, vid_stride=1): - if isinstance(path, str) and Path(path).suffix == ".txt": # *.txt file with img/vid/dir on each line + if isinstance(path, str) and Path(path).suffix == '.txt': # *.txt file with img/vid/dir on each line path = Path(path).read_text().rsplit() files = [] for p in sorted(path) if isinstance(path, (list, tuple)) else [path]: @@ -358,7 +358,7 @@ def __init__(self, sources='file.streams', img_size=640, stride=32, auto=True, t # YouTube format i.e. 'https://www.youtube.com/watch?v=Zgi9g1ksQHc' or 'https://youtu.be/Zgi9g1ksQHc' check_requirements(('pafy', 'youtube_dl==2020.12.2')) import pafy - s = pafy.new(s).getbest(preftype="mp4").url # YouTube URL + s = pafy.new(s).getbest(preftype='mp4').url # YouTube URL s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam if s == 0: assert not is_colab(), '--source 0 webcam unsupported on Colab. Rerun command in a local environment.' @@ -373,7 +373,7 @@ def __init__(self, sources='file.streams', img_size=640, stride=32, auto=True, t _, self.imgs[i] = cap.read() # guarantee first frame self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True) - LOGGER.info(f"{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)") + LOGGER.info(f'{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)') self.threads[i].start() LOGGER.info('') # newline @@ -495,7 +495,7 @@ def __init__(self, # Display cache nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupt, total if exists and LOCAL_RANK in {-1, 0}: - d = f"Scanning {cache_path}... {nf} images, {nm + ne} backgrounds, {nc} corrupt" + d = f'Scanning {cache_path}... 
{nf} images, {nm + ne} backgrounds, {nc} corrupt' tqdm(None, desc=prefix + d, total=n, initial=n, bar_format=TQDM_BAR_FORMAT) # display cache results if cache['msgs']: LOGGER.info('\n'.join(cache['msgs'])) # display warnings @@ -598,8 +598,8 @@ def check_cache_ram(self, safety_margin=0.1, prefix=''): mem = psutil.virtual_memory() cache = mem_required * (1 + safety_margin) < mem.available # to cache or not to cache, that is the question if not cache: - LOGGER.info(f"{prefix}{mem_required / gb:.1f}GB RAM required, " - f"{mem.available / gb:.1f}/{mem.total / gb:.1f}GB available, " + LOGGER.info(f'{prefix}{mem_required / gb:.1f}GB RAM required, ' + f'{mem.available / gb:.1f}/{mem.total / gb:.1f}GB available, ' f"{'caching images ✅' if cache else 'not caching images ⚠️'}") return cache @@ -607,7 +607,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): # Cache dataset labels, check images and read shapes x = {} # dict nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages - desc = f"{prefix}Scanning {path.parent / path.stem}..." + desc = f'{prefix}Scanning {path.parent / path.stem}...' with Pool(NUM_THREADS) as pool: pbar = tqdm(pool.imap(verify_image_label, zip(self.im_files, self.label_files, repeat(prefix))), desc=desc, @@ -622,7 +622,7 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): x[im_file] = [lb, shape, segments] if msg: msgs.append(msg) - pbar.desc = f"{desc} {nf} images, {nm + ne} backgrounds, {nc} corrupt" + pbar.desc = f'{desc} {nf} images, {nm + ne} backgrounds, {nc} corrupt' pbar.close() if msgs: @@ -1063,7 +1063,7 @@ def __init__(self, path='coco128.yaml', autodownload=False): if zipped: data['path'] = data_dir except Exception as e: - raise Exception("error/HUB/dataset_stats/yaml_load") from e + raise Exception('error/HUB/dataset_stats/yaml_load') from e check_dataset(data, autodownload) # download dataset if missing self.hub_dir = Path(data['path'] + '-hub') @@ -1188,7 +1188,7 @@ def __getitem__(self, i): else: # read image im = cv2.imread(f) # BGR if self.album_transforms: - sample = self.album_transforms(image=cv2.cvtColor(im, cv2.COLOR_BGR2RGB))["image"] + sample = self.album_transforms(image=cv2.cvtColor(im, cv2.COLOR_BGR2RGB))['image'] else: sample = self.torch_transforms(im) return sample, j diff --git a/utils/downloads.py b/utils/downloads.py index e739919540b4..643b529fba3b 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -77,7 +77,7 @@ def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''): if not file.exists() or file.stat().st_size < min_bytes: # check if file.exists(): file.unlink() # remove partial downloads - LOGGER.info(f"ERROR: {assert_msg}\n{error_msg}") + LOGGER.info(f'ERROR: {assert_msg}\n{error_msg}') LOGGER.info('') diff --git a/utils/flask_rest_api/example_request.py b/utils/flask_rest_api/example_request.py index 773ad8932967..952e5dcb90fa 100644 --- a/utils/flask_rest_api/example_request.py +++ b/utils/flask_rest_api/example_request.py @@ -7,13 +7,13 @@ import requests -DETECTION_URL = "http://localhost:5000/v1/object-detection/yolov5s" -IMAGE = "zidane.jpg" +DETECTION_URL = 'http://localhost:5000/v1/object-detection/yolov5s' +IMAGE = 'zidane.jpg' # Read image -with open(IMAGE, "rb") as f: +with open(IMAGE, 'rb') as f: image_data = f.read() -response = requests.post(DETECTION_URL, files={"image": image_data}).json() +response = requests.post(DETECTION_URL, files={'image': image_data}).json() pprint.pprint(response) diff --git a/utils/flask_rest_api/restapi.py 
b/utils/flask_rest_api/restapi.py index 8482435c861e..9258b1a68860 100644 --- a/utils/flask_rest_api/restapi.py +++ b/utils/flask_rest_api/restapi.py @@ -13,36 +13,36 @@ app = Flask(__name__) models = {} -DETECTION_URL = "/v1/object-detection/" +DETECTION_URL = '/v1/object-detection/' -@app.route(DETECTION_URL, methods=["POST"]) +@app.route(DETECTION_URL, methods=['POST']) def predict(model): - if request.method != "POST": + if request.method != 'POST': return - if request.files.get("image"): + if request.files.get('image'): # Method 1 # with request.files["image"] as f: # im = Image.open(io.BytesIO(f.read())) # Method 2 - im_file = request.files["image"] + im_file = request.files['image'] im_bytes = im_file.read() im = Image.open(io.BytesIO(im_bytes)) if model in models: results = models[model](im, size=640) # reduce size=320 for faster inference - return results.pandas().xyxy[0].to_json(orient="records") + return results.pandas().xyxy[0].to_json(orient='records') -if __name__ == "__main__": - parser = argparse.ArgumentParser(description="Flask API exposing YOLOv5 model") - parser.add_argument("--port", default=5000, type=int, help="port number") +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Flask API exposing YOLOv5 model') + parser.add_argument('--port', default=5000, type=int, help='port number') parser.add_argument('--model', nargs='+', default=['yolov5s'], help='model(s) to run, i.e. --model yolov5n yolov5s') opt = parser.parse_args() for m in opt.model: - models[m] = torch.hub.load("ultralytics/yolov5", m, force_reload=True, skip_validation=True) + models[m] = torch.hub.load('ultralytics/yolov5', m, force_reload=True, skip_validation=True) - app.run(host="0.0.0.0", port=opt.port) # debug=True causes Restarting with stat + app.run(host='0.0.0.0', port=opt.port) # debug=True causes Restarting with stat diff --git a/utils/general.py b/utils/general.py index a6af4f3216dd..b6efe6bb8732 100644 --- a/utils/general.py +++ b/utils/general.py @@ -38,7 +38,7 @@ import yaml from utils import TryExcept, emojis -from utils.downloads import gsutil_getsize, curl_download +from utils.downloads import curl_download, gsutil_getsize from utils.metrics import box_iou, fitness FILE = Path(__file__).resolve() @@ -90,11 +90,11 @@ def is_kaggle(): def is_docker() -> bool: """Check if the process runs inside a docker container.""" - if Path("/.dockerenv").exists(): + if Path('/.dockerenv').exists(): return True try: # check if docker is in control groups - with open("/proc/self/cgroup") as file: - return any("docker" in line for line in file) + with open('/proc/self/cgroup') as file: + return any('docker' in line for line in file) except OSError: return False @@ -113,7 +113,7 @@ def is_writeable(dir, test=False): return False -LOGGING_NAME = "yolov5" +LOGGING_NAME = 'yolov5' def set_logging(name=LOGGING_NAME, verbose=True): @@ -121,21 +121,21 @@ def set_logging(name=LOGGING_NAME, verbose=True): rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings level = logging.INFO if verbose and rank in {-1, 0} else logging.ERROR logging.config.dictConfig({ - "version": 1, - "disable_existing_loggers": False, - "formatters": { + 'version': 1, + 'disable_existing_loggers': False, + 'formatters': { name: { - "format": "%(message)s"}}, - "handlers": { + 'format': '%(message)s'}}, + 'handlers': { name: { - "class": "logging.StreamHandler", - "formatter": name, - "level": level,}}, - "loggers": { + 'class': 'logging.StreamHandler', + 'formatter': name, + 'level': level,}}, + 
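        # Note (illustrative annotation): the handler and logger entries are keyed by
        # the same name ('yolov5'), so the module-level LOGGER defined after
        # set_logging(LOGGING_NAME) (per the 'run before defining LOGGER' comment
        # below) emits through this StreamHandler, while 'propagate': False keeps
        # records from also reaching the root logger and printing twice.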
'loggers': { name: { - "level": level, - "handlers": [name], - "propagate": False,}}}) + 'level': level, + 'handlers': [name], + 'propagate': False,}}}) set_logging(LOGGING_NAME) # run before defining LOGGER @@ -218,7 +218,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): def methods(instance): # Get class/instance methods - return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith("__")] + return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith('__')] def print_args(args: Optional[dict] = None, show_file=True, show_func=False): @@ -299,7 +299,7 @@ def check_online(): def run_once(): # Check once try: - socket.create_connection(("1.1.1.1", 443), 5) # check host accessibility + socket.create_connection(('1.1.1.1', 443), 5) # check host accessibility return True except OSError: return False @@ -386,7 +386,7 @@ def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), insta check_python() # check python version if isinstance(requirements, Path): # requirements.txt file file = requirements.resolve() - assert file.exists(), f"{prefix} {file} not found, check failed." + assert file.exists(), f'{prefix} {file} not found, check failed.' with file.open() as f: requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(f) if x.name not in exclude] elif isinstance(requirements, str): @@ -450,7 +450,7 @@ def check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''): for f in file if isinstance(file, (list, tuple)) else [file]: s = Path(f).suffix.lower() # file suffix if len(s): - assert s in suffix, f"{msg}{f} acceptable suffix is {suffix}" + assert s in suffix, f'{msg}{f} acceptable suffix is {suffix}' def check_yaml(file, suffix=('.yaml', '.yml')): @@ -556,8 +556,8 @@ def check_dataset(data, autodownload=True): else: # python script r = exec(s, {'yaml': data}) # return None dt = f'({round(time.time() - t, 1)}s)' - s = f"success ✅ {dt}, saved to {colorstr('bold', DATASETS_DIR)}" if r in (0, None) else f"failure {dt} ❌" - LOGGER.info(f"Dataset download {s}") + s = f"success ✅ {dt}, saved to {colorstr('bold', DATASETS_DIR)}" if r in (0, None) else f'failure {dt} ❌' + LOGGER.info(f'Dataset download {s}') check_font('Arial.ttf' if is_ascii(data['names']) else 'Arial.Unicode.ttf', progress=True) # download fonts return data # dictionary @@ -673,7 +673,7 @@ def make_divisible(x, divisor): def clean_str(s): # Cleans a string by replacing special characters with underscore _ - return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s) + return re.sub(pattern='[|@#!¡·$€%&()=?¿^*;:,¨´><+]', repl='_', string=s) def one_cycle(y1=0.0, y2=1.0, steps=100): diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 1e7f38e0d677..9de1f226233c 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -121,8 +121,8 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, # Comet if comet_ml and 'comet' in self.include: - if isinstance(self.opt.resume, str) and self.opt.resume.startswith("comet://"): - run_id = self.opt.resume.split("/")[-1] + if isinstance(self.opt.resume, str) and self.opt.resume.startswith('comet://'): + run_id = self.opt.resume.split('/')[-1] self.comet_logger = CometLogger(self.opt, self.hyp, run_id=run_id) else: @@ -158,7 +158,7 @@ def on_pretrain_routine_end(self, labels, names): plot_labels(labels, names, self.save_dir) paths = self.save_dir.glob('*labels*.jpg') # training labels if self.wandb: - self.wandb.log({"Labels": 
[wandb.Image(str(x), caption=x.name) for x in paths]}) + self.wandb.log({'Labels': [wandb.Image(str(x), caption=x.name) for x in paths]}) # if self.clearml: # pass # ClearML saves these images automatically using hooks if self.comet_logger: @@ -212,7 +212,7 @@ def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix) if self.wandb or self.clearml: files = sorted(self.save_dir.glob('val*.jpg')) if self.wandb: - self.wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in files]}) + self.wandb.log({'Validation': [wandb.Image(str(f), caption=f.name) for f in files]}) if self.clearml: self.clearml.log_debug_samples(files, title='Validation') @@ -279,7 +279,7 @@ def on_train_end(self, last, best, epoch, results): if self.wandb: self.wandb.log(dict(zip(self.keys[3:10], results))) - self.wandb.log({"Results": [wandb.Image(str(f), caption=f.name) for f in files]}) + self.wandb.log({'Results': [wandb.Image(str(f), caption=f.name) for f in files]}) # Calling wandb.log. TODO: Refactor this into WandbLogger.log_model if not self.opt.evolve: wandb.log_artifact(str(best if best.exists() else last), @@ -329,7 +329,7 @@ def __init__(self, opt, console_logger, include=('tb', 'wandb')): if wandb and 'wandb' in self.include: self.wandb = wandb.init(project=web_project_name(str(opt.project)), - name=None if opt.name == "exp" else opt.name, + name=None if opt.name == 'exp' else opt.name, config=opt) else: self.wandb = None @@ -370,12 +370,12 @@ def log_graph(self, model, imgsz=(640, 640)): def log_model(self, model_path, epoch=0, metadata={}): # Log model to all loggers if self.wandb: - art = wandb.Artifact(name=f"run_{wandb.run.id}_model", type="model", metadata=metadata) + art = wandb.Artifact(name=f'run_{wandb.run.id}_model', type='model', metadata=metadata) art.add_file(str(model_path)) wandb.log_artifact(art) def update_params(self, params): - # Update the paramters logged + # Update the parameters logged if self.wandb: wandb.run.config.update(params, allow_val_change=True) diff --git a/utils/loggers/clearml/clearml_utils.py b/utils/loggers/clearml/clearml_utils.py index 3457727a96a4..2764abe90da8 100644 --- a/utils/loggers/clearml/clearml_utils.py +++ b/utils/loggers/clearml/clearml_utils.py @@ -25,7 +25,7 @@ def construct_dataset(clearml_info_string): dataset_root_path = Path(dataset.get_local_copy()) # We'll search for the yaml file definition in the dataset - yaml_filenames = list(glob.glob(str(dataset_root_path / "*.yaml")) + glob.glob(str(dataset_root_path / "*.yml"))) + yaml_filenames = list(glob.glob(str(dataset_root_path / '*.yaml')) + glob.glob(str(dataset_root_path / '*.yml'))) if len(yaml_filenames) > 1: raise ValueError('More than one yaml file was found in the dataset root, cannot determine which one contains ' 'the dataset definition this way.') @@ -100,7 +100,7 @@ def __init__(self, opt, hyp): self.task.connect(opt, name='Args') # Make sure the code is easily remotely runnable by setting the docker image to use by the remote agent - self.task.set_base_docker("ultralytics/yolov5:latest", + self.task.set_base_docker('ultralytics/yolov5:latest', docker_arguments='--ipc=host -e="CLEARML_AGENT_SKIP_PYTHON_ENV_INSTALL=1"', docker_setup_bash_script='pip install clearml') @@ -150,7 +150,7 @@ def log_image_with_boxes(self, image_path, boxes, class_names, image, conf_thres class_name = class_names[int(class_nr)] confidence_percentage = round(float(conf) * 100, 2) - label = f"{class_name}: {confidence_percentage}%" + label = f'{class_name}: 
{confidence_percentage}%' if conf > conf_threshold: annotator.rectangle(box.cpu().numpy(), outline=color) diff --git a/utils/loggers/comet/__init__.py b/utils/loggers/comet/__init__.py index b0318f88d6a6..d4599841c9fc 100644 --- a/utils/loggers/comet/__init__.py +++ b/utils/loggers/comet/__init__.py @@ -17,7 +17,7 @@ # Project Configuration config = comet_ml.config.get_config() - COMET_PROJECT_NAME = config.get_string(os.getenv("COMET_PROJECT_NAME"), "comet.project_name", default="yolov5") + COMET_PROJECT_NAME = config.get_string(os.getenv('COMET_PROJECT_NAME'), 'comet.project_name', default='yolov5') except (ModuleNotFoundError, ImportError): comet_ml = None COMET_PROJECT_NAME = None @@ -31,32 +31,32 @@ from utils.general import check_dataset, scale_boxes, xywh2xyxy from utils.metrics import box_iou -COMET_PREFIX = "comet://" +COMET_PREFIX = 'comet://' -COMET_MODE = os.getenv("COMET_MODE", "online") +COMET_MODE = os.getenv('COMET_MODE', 'online') # Model Saving Settings -COMET_MODEL_NAME = os.getenv("COMET_MODEL_NAME", "yolov5") +COMET_MODEL_NAME = os.getenv('COMET_MODEL_NAME', 'yolov5') # Dataset Artifact Settings -COMET_UPLOAD_DATASET = os.getenv("COMET_UPLOAD_DATASET", "false").lower() == "true" +COMET_UPLOAD_DATASET = os.getenv('COMET_UPLOAD_DATASET', 'false').lower() == 'true' # Evaluation Settings -COMET_LOG_CONFUSION_MATRIX = os.getenv("COMET_LOG_CONFUSION_MATRIX", "true").lower() == "true" -COMET_LOG_PREDICTIONS = os.getenv("COMET_LOG_PREDICTIONS", "true").lower() == "true" -COMET_MAX_IMAGE_UPLOADS = int(os.getenv("COMET_MAX_IMAGE_UPLOADS", 100)) +COMET_LOG_CONFUSION_MATRIX = os.getenv('COMET_LOG_CONFUSION_MATRIX', 'true').lower() == 'true' +COMET_LOG_PREDICTIONS = os.getenv('COMET_LOG_PREDICTIONS', 'true').lower() == 'true' +COMET_MAX_IMAGE_UPLOADS = int(os.getenv('COMET_MAX_IMAGE_UPLOADS', 100)) # Confusion Matrix Settings -CONF_THRES = float(os.getenv("CONF_THRES", 0.001)) -IOU_THRES = float(os.getenv("IOU_THRES", 0.6)) +CONF_THRES = float(os.getenv('CONF_THRES', 0.001)) +IOU_THRES = float(os.getenv('IOU_THRES', 0.6)) # Batch Logging Settings -COMET_LOG_BATCH_METRICS = os.getenv("COMET_LOG_BATCH_METRICS", "false").lower() == "true" -COMET_BATCH_LOGGING_INTERVAL = os.getenv("COMET_BATCH_LOGGING_INTERVAL", 1) -COMET_PREDICTION_LOGGING_INTERVAL = os.getenv("COMET_PREDICTION_LOGGING_INTERVAL", 1) -COMET_LOG_PER_CLASS_METRICS = os.getenv("COMET_LOG_PER_CLASS_METRICS", "false").lower() == "true" +COMET_LOG_BATCH_METRICS = os.getenv('COMET_LOG_BATCH_METRICS', 'false').lower() == 'true' +COMET_BATCH_LOGGING_INTERVAL = os.getenv('COMET_BATCH_LOGGING_INTERVAL', 1) +COMET_PREDICTION_LOGGING_INTERVAL = os.getenv('COMET_PREDICTION_LOGGING_INTERVAL', 1) +COMET_LOG_PER_CLASS_METRICS = os.getenv('COMET_LOG_PER_CLASS_METRICS', 'false').lower() == 'true' -RANK = int(os.getenv("RANK", -1)) +RANK = int(os.getenv('RANK', -1)) to_pil = T.ToPILImage() @@ -66,7 +66,7 @@ class CometLogger: with Comet """ - def __init__(self, opt, hyp, run_id=None, job_type="Training", **experiment_kwargs) -> None: + def __init__(self, opt, hyp, run_id=None, job_type='Training', **experiment_kwargs) -> None: self.job_type = job_type self.opt = opt self.hyp = hyp @@ -87,52 +87,52 @@ def __init__(self, opt, hyp, run_id=None, job_type="Training", **experiment_kwar # Default parameters to pass to Experiment objects self.default_experiment_kwargs = { - "log_code": False, - "log_env_gpu": True, - "log_env_cpu": True, - "project_name": COMET_PROJECT_NAME,} + 'log_code': False, + 'log_env_gpu': True, + 'log_env_cpu': True, + 
'project_name': COMET_PROJECT_NAME,} self.default_experiment_kwargs.update(experiment_kwargs) self.experiment = self._get_experiment(self.comet_mode, run_id) self.data_dict = self.check_dataset(self.opt.data) - self.class_names = self.data_dict["names"] - self.num_classes = self.data_dict["nc"] + self.class_names = self.data_dict['names'] + self.num_classes = self.data_dict['nc'] self.logged_images_count = 0 self.max_images = COMET_MAX_IMAGE_UPLOADS if run_id is None: - self.experiment.log_other("Created from", "YOLOv5") + self.experiment.log_other('Created from', 'YOLOv5') if not isinstance(self.experiment, comet_ml.OfflineExperiment): - workspace, project_name, experiment_id = self.experiment.url.split("/")[-3:] + workspace, project_name, experiment_id = self.experiment.url.split('/')[-3:] self.experiment.log_other( - "Run Path", - f"{workspace}/{project_name}/{experiment_id}", + 'Run Path', + f'{workspace}/{project_name}/{experiment_id}', ) self.log_parameters(vars(opt)) self.log_parameters(self.opt.hyp) self.log_asset_data( self.opt.hyp, - name="hyperparameters.json", - metadata={"type": "hyp-config-file"}, + name='hyperparameters.json', + metadata={'type': 'hyp-config-file'}, ) self.log_asset( - f"{self.opt.save_dir}/opt.yaml", - metadata={"type": "opt-config-file"}, + f'{self.opt.save_dir}/opt.yaml', + metadata={'type': 'opt-config-file'}, ) self.comet_log_confusion_matrix = COMET_LOG_CONFUSION_MATRIX - if hasattr(self.opt, "conf_thres"): + if hasattr(self.opt, 'conf_thres'): self.conf_thres = self.opt.conf_thres else: self.conf_thres = CONF_THRES - if hasattr(self.opt, "iou_thres"): + if hasattr(self.opt, 'iou_thres'): self.iou_thres = self.opt.iou_thres else: self.iou_thres = IOU_THRES - self.log_parameters({"val_iou_threshold": self.iou_thres, "val_conf_threshold": self.conf_thres}) + self.log_parameters({'val_iou_threshold': self.iou_thres, 'val_conf_threshold': self.conf_thres}) self.comet_log_predictions = COMET_LOG_PREDICTIONS if self.opt.bbox_interval == -1: @@ -147,22 +147,22 @@ def __init__(self, opt, hyp, run_id=None, job_type="Training", **experiment_kwar self.comet_log_per_class_metrics = COMET_LOG_PER_CLASS_METRICS self.experiment.log_others({ - "comet_mode": COMET_MODE, - "comet_max_image_uploads": COMET_MAX_IMAGE_UPLOADS, - "comet_log_per_class_metrics": COMET_LOG_PER_CLASS_METRICS, - "comet_log_batch_metrics": COMET_LOG_BATCH_METRICS, - "comet_log_confusion_matrix": COMET_LOG_CONFUSION_MATRIX, - "comet_model_name": COMET_MODEL_NAME,}) + 'comet_mode': COMET_MODE, + 'comet_max_image_uploads': COMET_MAX_IMAGE_UPLOADS, + 'comet_log_per_class_metrics': COMET_LOG_PER_CLASS_METRICS, + 'comet_log_batch_metrics': COMET_LOG_BATCH_METRICS, + 'comet_log_confusion_matrix': COMET_LOG_CONFUSION_MATRIX, + 'comet_model_name': COMET_MODEL_NAME,}) # Check if running the Experiment with the Comet Optimizer - if hasattr(self.opt, "comet_optimizer_id"): - self.experiment.log_other("optimizer_id", self.opt.comet_optimizer_id) - self.experiment.log_other("optimizer_objective", self.opt.comet_optimizer_objective) - self.experiment.log_other("optimizer_metric", self.opt.comet_optimizer_metric) - self.experiment.log_other("optimizer_parameters", json.dumps(self.hyp)) + if hasattr(self.opt, 'comet_optimizer_id'): + self.experiment.log_other('optimizer_id', self.opt.comet_optimizer_id) + self.experiment.log_other('optimizer_objective', self.opt.comet_optimizer_objective) + self.experiment.log_other('optimizer_metric', self.opt.comet_optimizer_metric) + 
self.experiment.log_other('optimizer_parameters', json.dumps(self.hyp)) def _get_experiment(self, mode, experiment_id=None): - if mode == "offline": + if mode == 'offline': if experiment_id is not None: return comet_ml.ExistingOfflineExperiment( previous_experiment=experiment_id, @@ -182,11 +182,11 @@ def _get_experiment(self, mode, experiment_id=None): return comet_ml.Experiment(**self.default_experiment_kwargs) except ValueError: - logger.warning("COMET WARNING: " - "Comet credentials have not been set. " - "Comet will default to offline logging. " - "Please set your credentials to enable online logging.") - return self._get_experiment("offline", experiment_id) + logger.warning('COMET WARNING: ' + 'Comet credentials have not been set. ' + 'Comet will default to offline logging. ' + 'Please set your credentials to enable online logging.') + return self._get_experiment('offline', experiment_id) return @@ -210,12 +210,12 @@ def log_model(self, path, opt, epoch, fitness_score, best_model=False): return model_metadata = { - "fitness_score": fitness_score[-1], - "epochs_trained": epoch + 1, - "save_period": opt.save_period, - "total_epochs": opt.epochs,} + 'fitness_score': fitness_score[-1], + 'epochs_trained': epoch + 1, + 'save_period': opt.save_period, + 'total_epochs': opt.epochs,} - model_files = glob.glob(f"{path}/*.pt") + model_files = glob.glob(f'{path}/*.pt') for model_path in model_files: name = Path(model_path).name @@ -232,12 +232,12 @@ def check_dataset(self, data_file): data_config = yaml.safe_load(f) if data_config['path'].startswith(COMET_PREFIX): - path = data_config['path'].replace(COMET_PREFIX, "") + path = data_config['path'].replace(COMET_PREFIX, '') data_dict = self.download_dataset_artifact(path) return data_dict - self.log_asset(self.opt.data, metadata={"type": "data-config-file"}) + self.log_asset(self.opt.data, metadata={'type': 'data-config-file'}) return check_dataset(data_file) @@ -253,8 +253,8 @@ def log_predictions(self, image, labelsn, path, shape, predn): filtered_detections = detections[mask] filtered_labels = labelsn[mask] - image_id = path.split("/")[-1].split(".")[0] - image_name = f"{image_id}_curr_epoch_{self.experiment.curr_epoch}" + image_id = path.split('/')[-1].split('.')[0] + image_name = f'{image_id}_curr_epoch_{self.experiment.curr_epoch}' if image_name not in self.logged_image_names: native_scale_image = PIL.Image.open(path) self.log_image(native_scale_image, name=image_name) @@ -263,22 +263,22 @@ def log_predictions(self, image, labelsn, path, shape, predn): metadata = [] for cls, *xyxy in filtered_labels.tolist(): metadata.append({ - "label": f"{self.class_names[int(cls)]}-gt", - "score": 100, - "box": { - "x": xyxy[0], - "y": xyxy[1], - "x2": xyxy[2], - "y2": xyxy[3]},}) + 'label': f'{self.class_names[int(cls)]}-gt', + 'score': 100, + 'box': { + 'x': xyxy[0], + 'y': xyxy[1], + 'x2': xyxy[2], + 'y2': xyxy[3]},}) for *xyxy, conf, cls in filtered_detections.tolist(): metadata.append({ - "label": f"{self.class_names[int(cls)]}", - "score": conf * 100, - "box": { - "x": xyxy[0], - "y": xyxy[1], - "x2": xyxy[2], - "y2": xyxy[3]},}) + 'label': f'{self.class_names[int(cls)]}', + 'score': conf * 100, + 'box': { + 'x': xyxy[0], + 'y': xyxy[1], + 'x2': xyxy[2], + 'y2': xyxy[3]},}) self.metadata_dict[image_name] = metadata self.logged_images_count += 1 @@ -305,35 +305,35 @@ def preprocess_prediction(self, image, labels, shape, pred): return predn, labelsn def add_assets_to_artifact(self, artifact, path, asset_path, split): - img_paths = 
sorted(glob.glob(f"{asset_path}/*")) + img_paths = sorted(glob.glob(f'{asset_path}/*')) label_paths = img2label_paths(img_paths) for image_file, label_file in zip(img_paths, label_paths): image_logical_path, label_logical_path = map(lambda x: os.path.relpath(x, path), [image_file, label_file]) try: - artifact.add(image_file, logical_path=image_logical_path, metadata={"split": split}) - artifact.add(label_file, logical_path=label_logical_path, metadata={"split": split}) + artifact.add(image_file, logical_path=image_logical_path, metadata={'split': split}) + artifact.add(label_file, logical_path=label_logical_path, metadata={'split': split}) except ValueError as e: logger.error('COMET ERROR: Error adding file to Artifact. Skipping file.') - logger.error(f"COMET ERROR: {e}") + logger.error(f'COMET ERROR: {e}') continue return artifact def upload_dataset_artifact(self): - dataset_name = self.data_dict.get("dataset_name", "yolov5-dataset") - path = str((ROOT / Path(self.data_dict["path"])).resolve()) + dataset_name = self.data_dict.get('dataset_name', 'yolov5-dataset') + path = str((ROOT / Path(self.data_dict['path'])).resolve()) metadata = self.data_dict.copy() - for key in ["train", "val", "test"]: + for key in ['train', 'val', 'test']: split_path = metadata.get(key) if split_path is not None: - metadata[key] = split_path.replace(path, "") + metadata[key] = split_path.replace(path, '') - artifact = comet_ml.Artifact(name=dataset_name, artifact_type="dataset", metadata=metadata) + artifact = comet_ml.Artifact(name=dataset_name, artifact_type='dataset', metadata=metadata) for key in metadata.keys(): - if key in ["train", "val", "test"]: + if key in ['train', 'val', 'test']: if isinstance(self.upload_dataset, str) and (key != self.upload_dataset): continue @@ -352,13 +352,13 @@ def download_dataset_artifact(self, artifact_path): metadata = logged_artifact.metadata data_dict = metadata.copy() - data_dict["path"] = artifact_save_dir + data_dict['path'] = artifact_save_dir - metadata_names = metadata.get("names") + metadata_names = metadata.get('names') if type(metadata_names) == dict: - data_dict["names"] = {int(k): v for k, v in metadata.get("names").items()} + data_dict['names'] = {int(k): v for k, v in metadata.get('names').items()} elif type(metadata_names) == list: - data_dict["names"] = {int(k): v for k, v in zip(range(len(metadata_names)), metadata_names)} + data_dict['names'] = {int(k): v for k, v in zip(range(len(metadata_names)), metadata_names)} else: raise "Invalid 'names' field in dataset yaml file. 
Please use a list or dictionary" @@ -366,13 +366,13 @@ def download_dataset_artifact(self, artifact_path): return data_dict def update_data_paths(self, data_dict): - path = data_dict.get("path", "") + path = data_dict.get('path', '') - for split in ["train", "val", "test"]: + for split in ['train', 'val', 'test']: if data_dict.get(split): split_path = data_dict.get(split) - data_dict[split] = (f"{path}/{split_path}" if isinstance(split, str) else [ - f"{path}/{x}" for x in split_path]) + data_dict[split] = (f'{path}/{split_path}' if isinstance(split, str) else [ + f'{path}/{x}' for x in split_path]) return data_dict @@ -413,11 +413,11 @@ def on_train_batch_end(self, log_dict, step): def on_train_end(self, files, save_dir, last, best, epoch, results): if self.comet_log_predictions: curr_epoch = self.experiment.curr_epoch - self.experiment.log_asset_data(self.metadata_dict, "image-metadata.json", epoch=curr_epoch) + self.experiment.log_asset_data(self.metadata_dict, 'image-metadata.json', epoch=curr_epoch) for f in files: - self.log_asset(f, metadata={"epoch": epoch}) - self.log_asset(f"{save_dir}/results.csv", metadata={"epoch": epoch}) + self.log_asset(f, metadata={'epoch': epoch}) + self.log_asset(f'{save_dir}/results.csv', metadata={'epoch': epoch}) if not self.opt.evolve: model_path = str(best if best.exists() else last) @@ -481,7 +481,7 @@ def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix) if self.comet_log_confusion_matrix: epoch = self.experiment.curr_epoch class_names = list(self.class_names.values()) - class_names.append("background") + class_names.append('background') num_classes = len(class_names) self.experiment.log_confusion_matrix( @@ -491,7 +491,7 @@ def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix) epoch=epoch, column_label='Actual Category', row_label='Predicted Category', - file_name=f"confusion-matrix-epoch-{epoch}.json", + file_name=f'confusion-matrix-epoch-{epoch}.json', ) def on_fit_epoch_end(self, result, epoch): diff --git a/utils/loggers/comet/comet_utils.py b/utils/loggers/comet/comet_utils.py index 3cbd45156b57..27600761ad28 100644 --- a/utils/loggers/comet/comet_utils.py +++ b/utils/loggers/comet/comet_utils.py @@ -11,28 +11,28 @@ logger = logging.getLogger(__name__) -COMET_PREFIX = "comet://" -COMET_MODEL_NAME = os.getenv("COMET_MODEL_NAME", "yolov5") -COMET_DEFAULT_CHECKPOINT_FILENAME = os.getenv("COMET_DEFAULT_CHECKPOINT_FILENAME", "last.pt") +COMET_PREFIX = 'comet://' +COMET_MODEL_NAME = os.getenv('COMET_MODEL_NAME', 'yolov5') +COMET_DEFAULT_CHECKPOINT_FILENAME = os.getenv('COMET_DEFAULT_CHECKPOINT_FILENAME', 'last.pt') def download_model_checkpoint(opt, experiment): - model_dir = f"{opt.project}/{experiment.name}" + model_dir = f'{opt.project}/{experiment.name}' os.makedirs(model_dir, exist_ok=True) model_name = COMET_MODEL_NAME model_asset_list = experiment.get_model_asset_list(model_name) if len(model_asset_list) == 0: - logger.error(f"COMET ERROR: No checkpoints found for model name : {model_name}") + logger.error(f'COMET ERROR: No checkpoints found for model name : {model_name}') return model_asset_list = sorted( model_asset_list, - key=lambda x: x["step"], + key=lambda x: x['step'], reverse=True, ) - logged_checkpoint_map = {asset["fileName"]: asset["assetId"] for asset in model_asset_list} + logged_checkpoint_map = {asset['fileName']: asset['assetId'] for asset in model_asset_list} resource_url = urlparse(opt.weights) checkpoint_filename = resource_url.query @@ -44,22 +44,22 @@ def 
download_model_checkpoint(opt, experiment): checkpoint_filename = COMET_DEFAULT_CHECKPOINT_FILENAME if asset_id is None: - logger.error(f"COMET ERROR: Checkpoint {checkpoint_filename} not found in the given Experiment") + logger.error(f'COMET ERROR: Checkpoint {checkpoint_filename} not found in the given Experiment') return try: - logger.info(f"COMET INFO: Downloading checkpoint {checkpoint_filename}") + logger.info(f'COMET INFO: Downloading checkpoint {checkpoint_filename}') asset_filename = checkpoint_filename - model_binary = experiment.get_asset(asset_id, return_type="binary", stream=False) - model_download_path = f"{model_dir}/{asset_filename}" - with open(model_download_path, "wb") as f: + model_binary = experiment.get_asset(asset_id, return_type='binary', stream=False) + model_download_path = f'{model_dir}/{asset_filename}' + with open(model_download_path, 'wb') as f: f.write(model_binary) opt.weights = model_download_path except Exception as e: - logger.warning("COMET WARNING: Unable to download checkpoint from Comet") + logger.warning('COMET WARNING: Unable to download checkpoint from Comet') logger.exception(e) @@ -75,9 +75,9 @@ def set_opt_parameters(opt, experiment): resume_string = opt.resume for asset in asset_list: - if asset["fileName"] == "opt.yaml": - asset_id = asset["assetId"] - asset_binary = experiment.get_asset(asset_id, return_type="binary", stream=False) + if asset['fileName'] == 'opt.yaml': + asset_id = asset['assetId'] + asset_binary = experiment.get_asset(asset_id, return_type='binary', stream=False) opt_dict = yaml.safe_load(asset_binary) for key, value in opt_dict.items(): setattr(opt, key, value) @@ -85,11 +85,11 @@ def set_opt_parameters(opt, experiment): # Save hyperparameters to YAML file # Necessary to pass checks in training script - save_dir = f"{opt.project}/{experiment.name}" + save_dir = f'{opt.project}/{experiment.name}' os.makedirs(save_dir, exist_ok=True) - hyp_yaml_path = f"{save_dir}/hyp.yaml" - with open(hyp_yaml_path, "w") as f: + hyp_yaml_path = f'{save_dir}/hyp.yaml' + with open(hyp_yaml_path, 'w') as f: yaml.dump(opt.hyp, f) opt.hyp = hyp_yaml_path @@ -113,7 +113,7 @@ def check_comet_weights(opt): if opt.weights.startswith(COMET_PREFIX): api = comet_ml.API() resource = urlparse(opt.weights) - experiment_path = f"{resource.netloc}{resource.path}" + experiment_path = f'{resource.netloc}{resource.path}' experiment = api.get(experiment_path) download_model_checkpoint(opt, experiment) return True @@ -140,7 +140,7 @@ def check_comet_resume(opt): if opt.resume.startswith(COMET_PREFIX): api = comet_ml.API() resource = urlparse(opt.resume) - experiment_path = f"{resource.netloc}{resource.path}" + experiment_path = f'{resource.netloc}{resource.path}' experiment = api.get(experiment_path) set_opt_parameters(opt, experiment) download_model_checkpoint(opt, experiment) diff --git a/utils/loggers/comet/hpo.py b/utils/loggers/comet/hpo.py index 7dd5c92e8de1..fc49115c1358 100644 --- a/utils/loggers/comet/hpo.py +++ b/utils/loggers/comet/hpo.py @@ -21,7 +21,7 @@ # Project Configuration config = comet_ml.config.get_config() -COMET_PROJECT_NAME = config.get_string(os.getenv("COMET_PROJECT_NAME"), "comet.project_name", default="yolov5") +COMET_PROJECT_NAME = config.get_string(os.getenv('COMET_PROJECT_NAME'), 'comet.project_name', default='yolov5') def get_args(known=False): @@ -68,30 +68,30 @@ def get_args(known=False): parser.add_argument('--artifact_alias', type=str, default='latest', help='W&B: Version of dataset artifact to use') # Comet Arguments - 
parser.add_argument("--comet_optimizer_config", type=str, help="Comet: Path to a Comet Optimizer Config File.") - parser.add_argument("--comet_optimizer_id", type=str, help="Comet: ID of the Comet Optimizer sweep.") - parser.add_argument("--comet_optimizer_objective", type=str, help="Comet: Set to 'minimize' or 'maximize'.") - parser.add_argument("--comet_optimizer_metric", type=str, help="Comet: Metric to Optimize.") - parser.add_argument("--comet_optimizer_workers", + parser.add_argument('--comet_optimizer_config', type=str, help='Comet: Path to a Comet Optimizer Config File.') + parser.add_argument('--comet_optimizer_id', type=str, help='Comet: ID of the Comet Optimizer sweep.') + parser.add_argument('--comet_optimizer_objective', type=str, help="Comet: Set to 'minimize' or 'maximize'.") + parser.add_argument('--comet_optimizer_metric', type=str, help='Comet: Metric to Optimize.') + parser.add_argument('--comet_optimizer_workers', type=int, default=1, - help="Comet: Number of Parallel Workers to use with the Comet Optimizer.") + help='Comet: Number of Parallel Workers to use with the Comet Optimizer.') return parser.parse_known_args()[0] if known else parser.parse_args() def run(parameters, opt): - hyp_dict = {k: v for k, v in parameters.items() if k not in ["epochs", "batch_size"]} + hyp_dict = {k: v for k, v in parameters.items() if k not in ['epochs', 'batch_size']} opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve)) - opt.batch_size = parameters.get("batch_size") - opt.epochs = parameters.get("epochs") + opt.batch_size = parameters.get('batch_size') + opt.epochs = parameters.get('epochs') device = select_device(opt.device, batch_size=opt.batch_size) train(hyp_dict, opt, device, callbacks=Callbacks()) -if __name__ == "__main__": +if __name__ == '__main__': opt = get_args(known=True) opt.weights = str(opt.weights) @@ -99,7 +99,7 @@ def run(parameters, opt): opt.data = str(opt.data) opt.project = str(opt.project) - optimizer_id = os.getenv("COMET_OPTIMIZER_ID") + optimizer_id = os.getenv('COMET_OPTIMIZER_ID') if optimizer_id is None: with open(opt.comet_optimizer_config) as f: optimizer_config = json.load(f) @@ -110,9 +110,9 @@ def run(parameters, opt): opt.comet_optimizer_id = optimizer.id status = optimizer.status() - opt.comet_optimizer_objective = status["spec"]["objective"] - opt.comet_optimizer_metric = status["spec"]["metric"] + opt.comet_optimizer_objective = status['spec']['objective'] + opt.comet_optimizer_metric = status['spec']['metric'] - logger.info("COMET INFO: Starting Hyperparameter Sweep") + logger.info('COMET INFO: Starting Hyperparameter Sweep') for parameter in optimizer.get_parameters(): - run(parameter["parameters"], opt) + run(parameter['parameters'], opt) diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index 6bc2ec510d0a..c8ab38197381 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -17,7 +17,7 @@ sys.path.append(str(ROOT)) # add ROOT to PATH RANK = int(os.getenv('RANK', -1)) DEPRECATION_WARNING = f"{colorstr('wandb')}: WARNING ⚠️ wandb is deprecated and will be removed in a future release. " \ - f"See supported integrations at https://github.com/ultralytics/yolov5#integrations." + f'See supported integrations at https://github.com/ultralytics/yolov5#integrations.' 
try: import wandb @@ -65,7 +65,7 @@ def __init__(self, opt, run_id=None, job_type='Training'): self.data_dict = None if self.wandb: self.wandb_run = wandb.init(config=opt, - resume="allow", + resume='allow', project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem, entity=opt.entity, name=opt.name if opt.name != 'exp' else None, @@ -97,7 +97,7 @@ def setup_training(self, opt): if isinstance(opt.resume, str): model_dir, _ = self.download_model_artifact(opt) if model_dir: - self.weights = Path(model_dir) / "last.pt" + self.weights = Path(model_dir) / 'last.pt' config = self.wandb_run.config opt.weights, opt.save_period, opt.batch_size, opt.bbox_interval, opt.epochs, opt.hyp, opt.imgsz = str( self.weights), config.save_period, config.batch_size, config.bbox_interval, config.epochs, \ @@ -131,7 +131,7 @@ def log_model(self, path, opt, epoch, fitness_score, best_model=False): model_artifact.add_file(str(path / 'last.pt'), name='last.pt') wandb.log_artifact(model_artifact, aliases=['latest', 'last', 'epoch ' + str(self.current_epoch), 'best' if best_model else '']) - LOGGER.info(f"Saving model artifact on epoch {epoch + 1}") + LOGGER.info(f'Saving model artifact on epoch {epoch + 1}') def val_one_image(self, pred, predn, path, names, im): pass @@ -160,7 +160,7 @@ def end_epoch(self): wandb.log(self.log_dict) except BaseException as e: LOGGER.info( - f"An error occurred in wandb logger. The training will proceed without interruption. More info\n{e}" + f'An error occurred in wandb logger. The training will proceed without interruption. More info\n{e}' ) self.wandb_run.finish() self.wandb_run = None diff --git a/utils/metrics.py b/utils/metrics.py index 7fb077774384..95f364c23f34 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -28,7 +28,7 @@ def smooth(y, f=0.05): return np.convolve(yp, np.ones(nf) / nf, mode='valid') # y-smoothed -def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=(), eps=1e-16, prefix=""): +def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=(), eps=1e-16, prefix=''): """ Compute the average precision, given the recall and precision curves. Source: https://github.com/rafaelpadilla/Object-Detection-Metrics. 
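    Return note (inferred from callers such as utils/segment/metrics.py below,
    which slice the result with [2:]): the returned tuple is
    (tp, fp, p, r, f1, ap, unique_classes), so [2:] yields (p, r, f1, ap, ap_class).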
# Arguments @@ -194,14 +194,14 @@ def plot(self, normalize=True, save_dir='', names=()): nc, nn = self.nc, len(names) # number of classes, names sn.set(font_scale=1.0 if nc < 50 else 0.8) # for label size labels = (0 < nn < 99) and (nn == nc) # apply names to ticklabels - ticklabels = (names + ['background']) if labels else "auto" + ticklabels = (names + ['background']) if labels else 'auto' with warnings.catch_warnings(): warnings.simplefilter('ignore') # suppress empty matrix RuntimeWarning: All-NaN slice encountered sn.heatmap(array, ax=ax, annot=nc < 30, annot_kws={ - "size": 8}, + 'size': 8}, cmap='Blues', fmt='.2f', square=True, @@ -331,7 +331,7 @@ def plot_pr_curve(px, py, ap, save_dir=Path('pr_curve.png'), names=()): ax.set_ylabel('Precision') ax.set_xlim(0, 1) ax.set_ylim(0, 1) - ax.legend(bbox_to_anchor=(1.04, 1), loc="upper left") + ax.legend(bbox_to_anchor=(1.04, 1), loc='upper left') ax.set_title('Precision-Recall Curve') fig.savefig(save_dir, dpi=250) plt.close(fig) @@ -354,7 +354,7 @@ def plot_mc_curve(px, py, save_dir=Path('mc_curve.png'), names=(), xlabel='Confi ax.set_ylabel(ylabel) ax.set_xlim(0, 1) ax.set_ylim(0, 1) - ax.legend(bbox_to_anchor=(1.04, 1), loc="upper left") + ax.legend(bbox_to_anchor=(1.04, 1), loc='upper left') ax.set_title(f'{ylabel}-Confidence Curve') fig.savefig(save_dir, dpi=250) plt.close(fig) diff --git a/utils/plots.py b/utils/plots.py index f84aed9fb5c7..24c618c80b59 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -450,7 +450,7 @@ def imshow_cls(im, labels=None, pred=None, names=None, nmax=25, verbose=False, f plt.savefig(f, dpi=300, bbox_inches='tight') plt.close() if verbose: - LOGGER.info(f"Saving {f}") + LOGGER.info(f'Saving {f}') if labels is not None: LOGGER.info('True: ' + ' '.join(f'{names[i]:3s}' for i in labels[:nmax])) if pred is not None: diff --git a/utils/segment/dataloaders.py b/utils/segment/dataloaders.py index d66b36115e3f..097a5d5cb058 100644 --- a/utils/segment/dataloaders.py +++ b/utils/segment/dataloaders.py @@ -95,7 +95,7 @@ def __init__( stride=32, pad=0, min_items=0, - prefix="", + prefix='', downsample_ratio=1, overlap=False, ): @@ -116,7 +116,7 @@ def __getitem__(self, index): shapes = None # MixUp augmentation - if random.random() < hyp["mixup"]: + if random.random() < hyp['mixup']: img, labels, segments = mixup(img, labels, segments, *self.load_mosaic(random.randint(0, self.n - 1))) else: @@ -147,11 +147,11 @@ def __getitem__(self, index): img, labels, segments = random_perspective(img, labels, segments=segments, - degrees=hyp["degrees"], - translate=hyp["translate"], - scale=hyp["scale"], - shear=hyp["shear"], - perspective=hyp["perspective"]) + degrees=hyp['degrees'], + translate=hyp['translate'], + scale=hyp['scale'], + shear=hyp['shear'], + perspective=hyp['perspective']) nl = len(labels) # number of labels if nl: @@ -177,17 +177,17 @@ def __getitem__(self, index): nl = len(labels) # update after albumentations # HSV color-space - augment_hsv(img, hgain=hyp["hsv_h"], sgain=hyp["hsv_s"], vgain=hyp["hsv_v"]) + augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v']) # Flip up-down - if random.random() < hyp["flipud"]: + if random.random() < hyp['flipud']: img = np.flipud(img) if nl: labels[:, 2] = 1 - labels[:, 2] masks = torch.flip(masks, dims=[1]) # Flip left-right - if random.random() < hyp["fliplr"]: + if random.random() < hyp['fliplr']: img = np.fliplr(img) if nl: labels[:, 1] = 1 - labels[:, 1] @@ -251,15 +251,15 @@ def load_mosaic(self, index): # img4, labels4 = replicate(img4, labels4) 
# replicate # Augment - img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp["copy_paste"]) + img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp['copy_paste']) img4, labels4, segments4 = random_perspective(img4, labels4, segments4, - degrees=self.hyp["degrees"], - translate=self.hyp["translate"], - scale=self.hyp["scale"], - shear=self.hyp["shear"], - perspective=self.hyp["perspective"], + degrees=self.hyp['degrees'], + translate=self.hyp['translate'], + scale=self.hyp['scale'], + shear=self.hyp['shear'], + perspective=self.hyp['perspective'], border=self.mosaic_border) # border to remove return img4, labels4, segments4 diff --git a/utils/segment/loss.py b/utils/segment/loss.py index b45b2c27e0a0..2a8a4c680f6f 100644 --- a/utils/segment/loss.py +++ b/utils/segment/loss.py @@ -83,7 +83,7 @@ def __call__(self, preds, targets, masks): # predictions, targets, model # Mask regression if tuple(masks.shape[-2:]) != (mask_h, mask_w): # downsample - masks = F.interpolate(masks[None], (mask_h, mask_w), mode="nearest")[0] + masks = F.interpolate(masks[None], (mask_h, mask_w), mode='nearest')[0] marea = xywhn[i][:, 2:].prod(1) # mask width, height normalized mxyxy = xywh2xyxy(xywhn[i] * torch.tensor([mask_w, mask_h, mask_w, mask_h], device=self.device)) for bi in b.unique(): @@ -101,10 +101,10 @@ def __call__(self, preds, targets, masks): # predictions, targets, model if self.autobalance: self.balance = [x / self.balance[self.ssi] for x in self.balance] - lbox *= self.hyp["box"] - lobj *= self.hyp["obj"] - lcls *= self.hyp["cls"] - lseg *= self.hyp["box"] / bs + lbox *= self.hyp['box'] + lobj *= self.hyp['obj'] + lcls *= self.hyp['cls'] + lseg *= self.hyp['box'] / bs loss = lbox + lobj + lcls + lseg return loss * bs, torch.cat((lbox, lseg, lobj, lcls)).detach() @@ -112,7 +112,7 @@ def __call__(self, preds, targets, masks): # predictions, targets, model def single_mask_loss(self, gt_mask, pred, proto, xyxy, area): # Mask loss for one image pred_mask = (pred @ proto.view(self.nm, -1)).view(-1, *proto.shape[1:]) # (n,32) @ (32,80,80) -> (n,80,80) - loss = F.binary_cross_entropy_with_logits(pred_mask, gt_mask, reduction="none") + loss = F.binary_cross_entropy_with_logits(pred_mask, gt_mask, reduction='none') return (crop_mask(loss, xyxy).mean(dim=(1, 2)) / area).mean() def build_targets(self, p, targets): diff --git a/utils/segment/metrics.py b/utils/segment/metrics.py index b09ce23fb9e3..c9f137e38ead 100644 --- a/utils/segment/metrics.py +++ b/utils/segment/metrics.py @@ -21,7 +21,7 @@ def ap_per_class_box_and_mask( pred_cls, target_cls, plot=False, - save_dir=".", + save_dir='.', names=(), ): """ @@ -37,7 +37,7 @@ def ap_per_class_box_and_mask( plot=plot, save_dir=save_dir, names=names, - prefix="Box")[2:] + prefix='Box')[2:] results_masks = ap_per_class(tp_m, conf, pred_cls, @@ -45,21 +45,21 @@ def ap_per_class_box_and_mask( plot=plot, save_dir=save_dir, names=names, - prefix="Mask")[2:] + prefix='Mask')[2:] results = { - "boxes": { - "p": results_boxes[0], - "r": results_boxes[1], - "ap": results_boxes[3], - "f1": results_boxes[2], - "ap_class": results_boxes[4]}, - "masks": { - "p": results_masks[0], - "r": results_masks[1], - "ap": results_masks[3], - "f1": results_masks[2], - "ap_class": results_masks[4]}} + 'boxes': { + 'p': results_boxes[0], + 'r': results_boxes[1], + 'ap': results_boxes[3], + 'f1': results_boxes[2], + 'ap_class': results_boxes[4]}, + 'masks': { + 'p': results_masks[0], + 'r': results_masks[1], + 'ap': results_masks[3], + 'f1': 
results_masks[2], + 'ap_class': results_masks[4]}} return results @@ -159,8 +159,8 @@ def update(self, results): Args: results: Dict{'boxes': Dict{}, 'masks': Dict{}} """ - self.metric_box.update(list(results["boxes"].values())) - self.metric_mask.update(list(results["masks"].values())) + self.metric_box.update(list(results['boxes'].values())) + self.metric_mask.update(list(results['masks'].values())) def mean_results(self): return self.metric_box.mean_results() + self.metric_mask.mean_results() @@ -178,33 +178,33 @@ def ap_class_index(self): KEYS = [ - "train/box_loss", - "train/seg_loss", # train loss - "train/obj_loss", - "train/cls_loss", - "metrics/precision(B)", - "metrics/recall(B)", - "metrics/mAP_0.5(B)", - "metrics/mAP_0.5:0.95(B)", # metrics - "metrics/precision(M)", - "metrics/recall(M)", - "metrics/mAP_0.5(M)", - "metrics/mAP_0.5:0.95(M)", # metrics - "val/box_loss", - "val/seg_loss", # val loss - "val/obj_loss", - "val/cls_loss", - "x/lr0", - "x/lr1", - "x/lr2",] + 'train/box_loss', + 'train/seg_loss', # train loss + 'train/obj_loss', + 'train/cls_loss', + 'metrics/precision(B)', + 'metrics/recall(B)', + 'metrics/mAP_0.5(B)', + 'metrics/mAP_0.5:0.95(B)', # metrics + 'metrics/precision(M)', + 'metrics/recall(M)', + 'metrics/mAP_0.5(M)', + 'metrics/mAP_0.5:0.95(M)', # metrics + 'val/box_loss', + 'val/seg_loss', # val loss + 'val/obj_loss', + 'val/cls_loss', + 'x/lr0', + 'x/lr1', + 'x/lr2',] BEST_KEYS = [ - "best/epoch", - "best/precision(B)", - "best/recall(B)", - "best/mAP_0.5(B)", - "best/mAP_0.5:0.95(B)", - "best/precision(M)", - "best/recall(M)", - "best/mAP_0.5(M)", - "best/mAP_0.5:0.95(M)",] + 'best/epoch', + 'best/precision(B)', + 'best/recall(B)', + 'best/mAP_0.5(B)', + 'best/mAP_0.5:0.95(B)', + 'best/precision(M)', + 'best/recall(M)', + 'best/mAP_0.5(M)', + 'best/mAP_0.5:0.95(M)',] diff --git a/utils/segment/plots.py b/utils/segment/plots.py index 9b90900b3772..3ba097624fcd 100644 --- a/utils/segment/plots.py +++ b/utils/segment/plots.py @@ -108,13 +108,13 @@ def plot_images_and_masks(images, targets, masks, paths=None, fname='images.jpg' annotator.im.save(fname) # save -def plot_results_with_masks(file="path/to/results.csv", dir="", best=True): +def plot_results_with_masks(file='path/to/results.csv', dir='', best=True): # Plot training results.csv. Usage: from utils.plots import *; plot_results('path/to/results.csv') save_dir = Path(file).parent if file else Path(dir) fig, ax = plt.subplots(2, 8, figsize=(18, 6), tight_layout=True) ax = ax.ravel() - files = list(save_dir.glob("results*.csv")) - assert len(files), f"No results.csv files found in {save_dir.resolve()}, nothing to plot." + files = list(save_dir.glob('results*.csv')) + assert len(files), f'No results.csv files found in {save_dir.resolve()}, nothing to plot.' 
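    # Illustrative usage (the path below is an assumption, not part of the patch):
    #   from utils.segment.plots import plot_results_with_masks
    #   plot_results_with_masks(file='runs/train-seg/exp/results.csv', best=True)
    # With best=True each subplot stars the best-scoring epoch taken from the CSV;
    # with best=False it stars the last epoch instead.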
for f in files: try: data = pd.read_csv(f) @@ -125,19 +125,19 @@ def plot_results_with_masks(file="path/to/results.csv", dir="", best=True): for i, j in enumerate([1, 2, 3, 4, 5, 6, 9, 10, 13, 14, 15, 16, 7, 8, 11, 12]): y = data.values[:, j] # y[y == 0] = np.nan # don't show zero values - ax[i].plot(x, y, marker=".", label=f.stem, linewidth=2, markersize=2) + ax[i].plot(x, y, marker='.', label=f.stem, linewidth=2, markersize=2) if best: # best - ax[i].scatter(index, y[index], color="r", label=f"best:{index}", marker="*", linewidth=3) - ax[i].set_title(s[j] + f"\n{round(y[index], 5)}") + ax[i].scatter(index, y[index], color='r', label=f'best:{index}', marker='*', linewidth=3) + ax[i].set_title(s[j] + f'\n{round(y[index], 5)}') else: # last - ax[i].scatter(x[-1], y[-1], color="r", label="last", marker="*", linewidth=3) - ax[i].set_title(s[j] + f"\n{round(y[-1], 5)}") + ax[i].scatter(x[-1], y[-1], color='r', label='last', marker='*', linewidth=3) + ax[i].set_title(s[j] + f'\n{round(y[-1], 5)}') # if j in [8, 9, 10]: # share train and val loss y axes # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5]) except Exception as e: - print(f"Warning: Plotting error for {f}: {e}") + print(f'Warning: Plotting error for {f}: {e}') ax[1].legend() - fig.savefig(save_dir / "results.png", dpi=200) + fig.savefig(save_dir / 'results.png', dpi=200) plt.close() diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 77549b005ceb..5b67b3fa7a06 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -291,7 +291,7 @@ def model_info(model, verbose=False, imgsz=640): fs = '' name = Path(model.yaml_file).stem.replace('yolov5', 'YOLOv5') if hasattr(model, 'yaml_file') else 'Model' - LOGGER.info(f"{name} summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}") + LOGGER.info(f'{name} summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}') def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416) @@ -342,7 +342,7 @@ def smart_optimizer(model, name='Adam', lr=0.001, momentum=0.9, decay=1e-5): optimizer.add_param_group({'params': g[0], 'weight_decay': decay}) # add g0 with weight_decay optimizer.add_param_group({'params': g[1], 'weight_decay': 0.0}) # add g1 (BatchNorm2d weights) LOGGER.info(f"{colorstr('optimizer:')} {type(optimizer).__name__}(lr={lr}) with parameter groups " - f"{len(g[1])} weight(decay=0.0), {len(g[0])} weight(decay={decay}), {len(g[2])} bias") + f'{len(g[1])} weight(decay=0.0), {len(g[0])} weight(decay={decay}), {len(g[2])} bias') return optimizer diff --git a/utils/triton.py b/utils/triton.py index a94ef0ad197d..25928021477e 100644 --- a/utils/triton.py +++ b/utils/triton.py @@ -21,7 +21,7 @@ def __init__(self, url: str): """ parsed_url = urlparse(url) - if parsed_url.scheme == "grpc": + if parsed_url.scheme == 'grpc': from tritonclient.grpc import InferenceServerClient, InferInput self.client = InferenceServerClient(parsed_url.netloc) # Triton GRPC client @@ -31,7 +31,7 @@ def __init__(self, url: str): def create_input_placeholders() -> typing.List[InferInput]: return [ - InferInput(i['name'], [int(s) for s in i["shape"]], i['datatype']) for i in self.metadata['inputs']] + InferInput(i['name'], [int(s) for s in i['shape']], i['datatype']) for i in self.metadata['inputs']] else: from tritonclient.http import InferenceServerClient, InferInput @@ -43,14 +43,14 @@ def create_input_placeholders() -> typing.List[InferInput]: def create_input_placeholders() -> typing.List[InferInput]: return [ - InferInput(i['name'], 
[int(s) for s in i["shape"]], i['datatype']) for i in self.metadata['inputs']] + InferInput(i['name'], [int(s) for s in i['shape']], i['datatype']) for i in self.metadata['inputs']] self._create_input_placeholders_fn = create_input_placeholders @property def runtime(self): """Returns the model runtime""" - return self.metadata.get("backend", self.metadata.get("platform")) + return self.metadata.get('backend', self.metadata.get('platform')) def __call__(self, *args, **kwargs) -> typing.Union[torch.Tensor, typing.Tuple[torch.Tensor, ...]]: """ Invokes the model. Parameters can be provided via args or kwargs. @@ -68,14 +68,14 @@ def __call__(self, *args, **kwargs) -> typing.Union[torch.Tensor, typing.Tuple[t def _create_inputs(self, *args, **kwargs): args_len, kwargs_len = len(args), len(kwargs) if not args_len and not kwargs_len: - raise RuntimeError("No inputs provided.") + raise RuntimeError('No inputs provided.') if args_len and kwargs_len: - raise RuntimeError("Cannot specify args and kwargs at the same time") + raise RuntimeError('Cannot specify args and kwargs at the same time') placeholders = self._create_input_placeholders_fn() if args_len: if args_len != len(placeholders): - raise RuntimeError(f"Expected {len(placeholders)} inputs, got {args_len}.") + raise RuntimeError(f'Expected {len(placeholders)} inputs, got {args_len}.') for input, value in zip(placeholders, args): input.set_data_from_numpy(value.cpu().numpy()) else: diff --git a/val.py b/val.py index 7829afb68b79..d4073b42fe78 100644 --- a/val.py +++ b/val.py @@ -304,7 +304,7 @@ def run( if save_json and len(jdict): w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights anno_json = str(Path('../datasets/coco/annotations/instances_val2017.json')) # annotations - pred_json = str(save_dir / f"{w}_predictions.json") # predictions + pred_json = str(save_dir / f'{w}_predictions.json') # predictions LOGGER.info(f'\nEvaluating pycocotools mAP... 
saving {pred_json}...') with open(pred_json, 'w') as f: json.dump(jdict, f) @@ -404,6 +404,6 @@ def main(opt): raise NotImplementedError(f'--task {opt.task} not in ("train", "val", "test", "speed", "study")') -if __name__ == "__main__": +if __name__ == '__main__': opt = parse_opt() main(opt) From 4db6757ef9d43f49a780ff29deb06b28e96fbe84 Mon Sep 17 00:00:00 2001 From: imyhxy Date: Mon, 20 Feb 2023 18:23:13 +0800 Subject: [PATCH 1686/1976] Fixed access 'names' from a DistributedDataParallel module (#11023) --- classify/train.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/classify/train.py b/classify/train.py index b752a3c1fe32..ae2363ccf056 100644 --- a/classify/train.py +++ b/classify/train.py @@ -44,7 +44,7 @@ check_requirements, colorstr, download, increment_path, init_seeds, print_args, yaml_save) from utils.loggers import GenericLogger from utils.plots import imshow_cls -from utils.torch_utils import (ModelEMA, model_info, reshape_classifier_output, select_device, smart_DDP, +from utils.torch_utils import (ModelEMA, de_parallel, model_info, reshape_classifier_output, select_device, smart_DDP, smart_optimizer, smartCrossEntropyLoss, torch_distributed_zero_first) LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html @@ -260,7 +260,7 @@ def train(opt, device): # Plot examples images, labels = (x[:25] for x in next(iter(testloader))) # first 25 images and labels pred = torch.max(ema.ema(images.to(device)), 1)[1] - file = imshow_cls(images, labels, pred, model.names, verbose=False, f=save_dir / 'test_images.jpg') + file = imshow_cls(images, labels, pred, de_parallel(model).names, verbose=False, f=save_dir / 'test_images.jpg') # Log results meta = {'epochs': epochs, 'top1_acc': best_fitness, 'date': datetime.now().isoformat()} From feca55719bab7dad14284f77a096da387094dbde Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 24 Feb 2023 20:09:02 -0800 Subject: [PATCH 1687/1976] Update "YOLOv5 is out of date" msg (#11061) --- utils/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index b6efe6bb8732..b7e38b3a1a50 100644 --- a/utils/general.py +++ b/utils/general.py @@ -338,7 +338,7 @@ def check_git_status(repo='ultralytics/yolov5', branch='master'): n = int(check_output(f'git rev-list {local_branch}..{remote}/{branch} --count', shell=True)) # commits behind if n > 0: pull = 'git pull' if remote == 'origin' else f'git pull {remote} {branch}' - s += f"⚠️ YOLOv5 is out of date by {n} commit{'s' * (n > 1)}. Use `{pull}` or `git clone {url}` to update." + s += f"⚠️ YOLOv5 is out of date by {n} commit{'s' * (n > 1)}. Use '{pull}' or 'git clone {url}' to update." 
else: s += f'up to date with {url} ✅' LOGGER.info(s) From 6559d8fcebd1c6abe4f5e100cff82d8fdda3f232 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 24 Feb 2023 21:34:04 -0800 Subject: [PATCH 1688/1976] Update ci-testing.yml (#11062) * Update ci-testing.yml Signed-off-by: Glenn Jocher * Update ci-testing.yml Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher --- .github/workflows/ci-testing.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index f9c62d623042..83438094b6f6 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -79,10 +79,10 @@ jobs: - uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} - - name: Get cache dir - # https://github.com/actions/cache/blob/master/examples.md#multiple-oss-in-a-workflow + - name: Get cache dir # https://github.com/actions/cache/blob/master/examples.md#multiple-oss-in-a-workflow id: pip-cache - run: echo "::set-output name=dir::$(pip cache dir)" + run: echo "dir=$(pip cache dir)" >> $GITHUB_OUTPUT + shell: bash # for Windows compatibility - name: Cache pip uses: actions/cache@v3 with: From b8731d855fce77120bf6401f689fb0accd66c2a6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 25 Feb 2023 14:16:03 -0800 Subject: [PATCH 1689/1976] Update requirements.txt (#11065) Signed-off-by: Glenn Jocher --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index eee15ddf93c4..3e6e39d8cc07 100644 --- a/requirements.txt +++ b/requirements.txt @@ -40,7 +40,7 @@ seaborn>=0.11.0 # Deploy ---------------------------------------------------------------------- setuptools>=65.5.1 # Snyk vulnerability fix -wheel>=0.38.0 # Snyk vulnerability fix +wheel>=0.38.4 # Snyk vulnerability fix # tritonclient[all]~=2.24.0 # Extras ---------------------------------------------------------------------- From b005788f36fd329a840879fcfb5975bc5902ada8 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 25 Feb 2023 15:15:50 -0800 Subject: [PATCH 1690/1976] Update requirements.txt (#11067) Signed-off-by: Glenn Jocher --- requirements.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 3e6e39d8cc07..7aa4732d6d78 100644 --- a/requirements.txt +++ b/requirements.txt @@ -40,7 +40,6 @@ seaborn>=0.11.0 # Deploy ---------------------------------------------------------------------- setuptools>=65.5.1 # Snyk vulnerability fix -wheel>=0.38.4 # Snyk vulnerability fix # tritonclient[all]~=2.24.0 # Extras ---------------------------------------------------------------------- From 7dee52f94d28e09142717ffff95ee689982364d1 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 25 Feb 2023 15:58:59 -0800 Subject: [PATCH 1691/1976] Update requirements.txt (#11068) Signed-off-by: Glenn Jocher --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 7aa4732d6d78..d67c44c9d812 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,7 +2,7 @@ # Usage: pip install -r requirements.txt # Base ------------------------------------------------------------------------ -gitpython +gitpython>=3.1.30 ipython # interactive notebook matplotlib>=3.2.2 numpy>=1.18.5 From 3c0a6e664bc3847ab9cca3df66195de6acfeb012 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 25 Feb 2023 16:15:07 -0800 Subject: [PATCH 1692/1976] Security fixes for IPython (#11069) 
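This change makes IPython an optional dependency: every import of it now happens lazily inside the functions that need it, wrapped so that a missing package simply means 'not a notebook' instead of an ImportError at module import time. A minimal, self-contained sketch of the pattern (the helper name running_in_jupyter is illustrative; the function this patch actually adds is named is_jupyter):

import contextlib


def running_in_jupyter() -> bool:
    # Lazy import: IPython is only touched when the check actually runs, so
    # an environment without IPython falls through to False rather than
    # failing when this module is imported.
    with contextlib.suppress(Exception):
        from IPython import get_ipython
        return get_ipython() is not None  # an interactive shell is active
    return False


if __name__ == '__main__':
    print('Jupyter/IPython shell active:', running_in_jupyter())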
Signed-off-by: Glenn Jocher --- models/common.py | 9 ++++++--- requirements.txt | 2 +- utils/__init__.py | 6 ++++-- utils/general.py | 19 +++++++++++++------ 4 files changed, 24 insertions(+), 12 deletions(-) diff --git a/models/common.py b/models/common.py index f416ddf25eb8..aa8ae674eb47 100644 --- a/models/common.py +++ b/models/common.py @@ -21,14 +21,13 @@ import requests import torch import torch.nn as nn -from IPython.display import display from PIL import Image from torch.cuda import amp from utils import TryExcept from utils.dataloaders import exif_transpose, letterbox from utils.general import (LOGGER, ROOT, Profile, check_requirements, check_suffix, check_version, colorstr, - increment_path, is_notebook, make_divisible, non_max_suppression, scale_boxes, xywh2xyxy, + increment_path, is_jupyter, make_divisible, non_max_suppression, scale_boxes, xywh2xyxy, xyxy2xywh, yaml_load) from utils.plots import Annotator, colors, save_one_box from utils.torch_utils import copy_attr, smart_inference_mode @@ -767,7 +766,11 @@ def _run(self, pprint=False, show=False, save=False, crop=False, render=False, l im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np if show: - display(im) if is_notebook() else im.show(self.files[i]) + if is_jupyter(): + from IPython.display import display + display(im) + else: + im.show(self.files[i]) if save: f = self.files[i] im.save(save_dir / f) # save diff --git a/requirements.txt b/requirements.txt index d67c44c9d812..11cb9aaaf99e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,7 +3,6 @@ # Base ------------------------------------------------------------------------ gitpython>=3.1.30 -ipython # interactive notebook matplotlib>=3.2.2 numpy>=1.18.5 opencv-python>=4.1.1 @@ -43,6 +42,7 @@ setuptools>=65.5.1 # Snyk vulnerability fix # tritonclient[all]~=2.24.0 # Extras ---------------------------------------------------------------------- +# ipython # interactive notebook # mss # screenshots # albumentations>=1.0.3 # pycocotools>=2.0.6 # COCO mAP diff --git a/utils/__init__.py b/utils/__init__.py index d158c5515a12..5b9fcd517e03 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -60,17 +60,19 @@ def notebook_init(verbose=True): check_font() import psutil - from IPython import display # to display images and clear console output if is_colab(): shutil.rmtree('/content/sample_data', ignore_errors=True) # remove colab /sample_data directory # System info + display = None if verbose: gb = 1 << 30 # bytes to GiB (1024 ** 3) ram = psutil.virtual_memory().total total, used, free = shutil.disk_usage('/') - display.clear_output() + with contextlib.suppress(Exception): # clear display if ipython is installed + from IPython import display + display.clear_output() s = f'({os.cpu_count()} CPUs, {ram / gb:.1f} GB RAM, {(total - free) / gb:.1f}/{total / gb:.1f} GB disk)' else: s = '' diff --git a/utils/general.py b/utils/general.py index b7e38b3a1a50..74620460070e 100644 --- a/utils/general.py +++ b/utils/general.py @@ -29,7 +29,6 @@ from zipfile import ZipFile, is_zipfile import cv2 -import IPython import numpy as np import pandas as pd import pkg_resources as pkg @@ -77,10 +76,18 @@ def is_colab(): return 'google.colab' in sys.modules -def is_notebook(): - # Is environment a Jupyter notebook? 
Verified on Colab, Jupyterlab, Kaggle, Paperspace - ipython_type = str(type(IPython.get_ipython())) - return 'colab' in ipython_type or 'zmqshell' in ipython_type +def is_jupyter(): + """ + Check if the current script is running inside a Jupyter Notebook. + Verified on Colab, Jupyterlab, Kaggle, Paperspace. + + Returns: + bool: True if running inside a Jupyter Notebook, False otherwise. + """ + with contextlib.suppress(Exception): + from IPython import get_ipython + return get_ipython() is not None + return False def is_kaggle(): @@ -429,7 +436,7 @@ def check_img_size(imgsz, s=32, floor=0): def check_imshow(warn=False): # Check if environment supports image displays try: - assert not is_notebook() + assert not is_jupyter() assert not is_docker() cv2.imshow('test', np.zeros((1, 1, 3))) cv2.waitKey(1) From 5ca8e822c8e75cde1d613dea8bfa49009fdc3618 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 27 Feb 2023 10:55:05 -0800 Subject: [PATCH 1693/1976] Update export.py (#11077) Signed-off-by: Glenn Jocher --- export.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/export.py b/export.py index e8287704866a..e167b2088cb1 100644 --- a/export.py +++ b/export.py @@ -413,7 +413,7 @@ def export_edgetpu(file, prefix=colorstr('Edge TPU:')): cmd = 'edgetpu_compiler --version' help_url = 'https://coral.ai/docs/edgetpu/compiler/' assert platform.system() == 'Linux', f'export only supported on Linux. See {help_url}' - if subprocess.run(f'{cmd} >/dev/null', shell=True).returncode != 0: + if subprocess.run(f'{cmd} > /dev/null 2>&1', shell=True).returncode != 0: LOGGER.info(f'\n{prefix} export requires Edge TPU compiler. Attempting install from {help_url}') sudo = subprocess.run('sudo --version >/dev/null', shell=True).returncode == 0 # sudo installed on system for c in ( From 5c91daeaecaeca709b8b6d13bd571d068fdbd003 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 27 Feb 2023 19:55:23 -0800 Subject: [PATCH 1694/1976] Update ci-testing.yml (#11079) * Update ci-testing.yml Signed-off-by: Glenn Jocher * Update ci-testing.yml Signed-off-by: Glenn Jocher * Update ci-testing.yml Signed-off-by: Glenn Jocher * Update ci-testing.yml Signed-off-by: Glenn Jocher * Update ci-testing.yml Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher --- .github/workflows/ci-testing.yml | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 83438094b6f6..7c74fe6fe652 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -25,12 +25,16 @@ jobs: - uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} - #- name: Cache pip - # uses: actions/cache@v3 - # with: - # path: ~/.cache/pip - # key: ${{ runner.os }}-Benchmarks-${{ hashFiles('requirements.txt') }} - # restore-keys: ${{ runner.os }}-Benchmarks- + - name: Get cache dir # https://github.com/actions/cache/blob/master/examples.md#multiple-oss-in-a-workflow + id: pip-cache + run: echo "dir=$(pip cache dir)" >> $GITHUB_OUTPUT + shell: bash # for Windows compatibility + - name: Cache pip + uses: actions/cache@v3 + with: + path: ${{ steps.pip-cache.outputs.dir }} + key: ${{ runner.os }}-${{ matrix.python-version }}-pip-${{ hashFiles('requirements.txt') }} + restore-keys: ${{ runner.os }}-${{ matrix.python-version }}-pip- - name: Install requirements run: | python -m pip install --upgrade pip wheel From 85f6019e5af2641e33139e97415b7bd1dc72d779 Mon Sep 17 00:00:00 2001 From: Iker Lluvia 
Date: Mon, 6 Mar 2023 22:54:34 +0100 Subject: [PATCH 1695/1976] Rename evolve folder if default project name (#11108) Save logs to 'runs/evolve-seg' if default project name, 'runs/train-seg' Signed-off-by: Iker Lluvia --- segment/train.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/segment/train.py b/segment/train.py index 2e71de131a8d..c6ac2d5e23d2 100644 --- a/segment/train.py +++ b/segment/train.py @@ -530,8 +530,8 @@ def main(opt, callbacks=Callbacks()): check_file(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp), str(opt.weights), str(opt.project) # checks assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified' if opt.evolve: - if opt.project == str(ROOT / 'runs/train'): # if default project name, rename to runs/evolve - opt.project = str(ROOT / 'runs/evolve') + if opt.project == str(ROOT / 'runs/train-seg'): # if default project name, rename to runs/evolve-seg + opt.project = str(ROOT / 'runs/evolve-seg') opt.exist_ok, opt.resume = opt.resume, False # pass resume to exist_ok and disable resume if opt.name == 'cfg': opt.name = Path(opt.cfg).stem # use model.yaml as name From ea05d5cb6c0dc01ef254761f0b140ceab17f9fd3 Mon Sep 17 00:00:00 2001 From: Iker Lluvia Date: Thu, 9 Mar 2023 23:47:53 +0100 Subject: [PATCH 1696/1976] Correct mutation adding the missing parameters (#11109) * Correct mutation adding the missing parameters Correct mutation considering the higher number of segmentation parameters compared to object detection. Fixes #9730 Signed-off-by: Iker Lluvia * Use already defined segmentation keys from segment/metrics.py --------- Signed-off-by: Iker Lluvia Co-authored-by: Glenn Jocher --- segment/train.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/segment/train.py b/segment/train.py index c6ac2d5e23d2..8ed75ba63e7c 100644 --- a/segment/train.py +++ b/segment/train.py @@ -629,7 +629,7 @@ def main(opt, callbacks=Callbacks()): while all(v == 1): # mutate until a change occurs (prevent duplicates) v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0) for i, k in enumerate(hyp.keys()): # plt.hist(v.ravel(), 300) - hyp[k] = float(x[i + 7] * v[i]) # mutate + hyp[k] = float(x[i + 12] * v[i]) # mutate # Constrain to limits for k, v in meta.items(): @@ -641,7 +641,7 @@ def main(opt, callbacks=Callbacks()): results = train(hyp.copy(), opt, device, callbacks) callbacks = Callbacks() # Write mutation results - print_mutation(KEYS, results, hyp.copy(), save_dir, opt.bucket) + print_mutation(KEYS[4:16], results, hyp.copy(), save_dir, opt.bucket) # Plot results plot_evolve(evolve_csv) From 5543b89466d072a9f8f2e31f8257a1ccc7f588e9 Mon Sep 17 00:00:00 2001 From: Sheng Hu Date: Fri, 10 Mar 2023 06:55:02 +0800 Subject: [PATCH 1697/1976] Fix a visualization bug (#11134) Fix a visualization bug reported here: https://github.com/ultralytics/yolov5/issues/11133 Signed-off-by: Sheng Hu Co-authored-by: Glenn Jocher --- utils/segment/plots.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/segment/plots.py b/utils/segment/plots.py index 3ba097624fcd..1b22ec838ac9 100644 --- a/utils/segment/plots.py +++ b/utils/segment/plots.py @@ -54,7 +54,7 @@ def plot_images_and_masks(images, targets, masks, paths=None, fname='images.jpg' x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders if paths: - annotator.text((x + 5, y + 5 + h), text=Path(paths[i]).name[:40], txt_color=(220, 220, 
220)) # filenames + annotator.text((x + 5, y + 5), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames if len(targets) > 0: idx = targets[:, 0] == i ti = targets[idx] # image targets From 3e55763d45f9c5f8217e4dad5ba1e6c1f42e3bf8 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 12 Mar 2023 02:10:38 +0100 Subject: [PATCH 1698/1976] Update ci-testing.yml (#11154) Signed-off-by: Glenn Jocher --- .github/workflows/ci-testing.yml | 22 ++-------------------- 1 file changed, 2 insertions(+), 20 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 7c74fe6fe652..a6f47bb8811c 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -25,16 +25,7 @@ jobs: - uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} - - name: Get cache dir # https://github.com/actions/cache/blob/master/examples.md#multiple-oss-in-a-workflow - id: pip-cache - run: echo "dir=$(pip cache dir)" >> $GITHUB_OUTPUT - shell: bash # for Windows compatibility - - name: Cache pip - uses: actions/cache@v3 - with: - path: ${{ steps.pip-cache.outputs.dir }} - key: ${{ runner.os }}-${{ matrix.python-version }}-pip-${{ hashFiles('requirements.txt') }} - restore-keys: ${{ runner.os }}-${{ matrix.python-version }}-pip- + cache: 'pip' # caching pip dependencies - name: Install requirements run: | python -m pip install --upgrade pip wheel @@ -83,16 +74,7 @@ jobs: - uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} - - name: Get cache dir # https://github.com/actions/cache/blob/master/examples.md#multiple-oss-in-a-workflow - id: pip-cache - run: echo "dir=$(pip cache dir)" >> $GITHUB_OUTPUT - shell: bash # for Windows compatibility - - name: Cache pip - uses: actions/cache@v3 - with: - path: ${{ steps.pip-cache.outputs.dir }} - key: ${{ runner.os }}-${{ matrix.python-version }}-pip-${{ hashFiles('requirements.txt') }} - restore-keys: ${{ runner.os }}-${{ matrix.python-version }}-pip- + cache: 'pip' # caching pip dependencies - name: Install requirements run: | python -m pip install --upgrade pip wheel From ef7039ea1806669c4cd4e8eb5abd3182a66883f9 Mon Sep 17 00:00:00 2001 From: Jonas Heinle Date: Thu, 23 Mar 2023 11:28:31 +0100 Subject: [PATCH 1699/1976] Error in tensor shape of docstring (#11206) Corrected the tensor shape in the doc string. The incoming masks are stacked in dim=0 therefore the doc is wrong Signed-off-by: Jonas Heinle --- utils/segment/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/segment/general.py b/utils/segment/general.py index 9da894538665..f1b2f1dd120f 100644 --- a/utils/segment/general.py +++ b/utils/segment/general.py @@ -10,7 +10,7 @@ def crop_mask(masks, boxes): Vectorized by Chong (thanks Chong). 
 Args:
-    - masks should be a size [h, w, n] tensor of masks
+    - masks should be a size [n, h, w] tensor of masks
     - boxes should be a size [n, 4] tensor of bbox coords in relative point form
     """

From 78a90c9661a05e8e1b7cc52a0989e4895fd96981 Mon Sep 17 00:00:00 2001
From: Sheng Hu
Date: Thu, 23 Mar 2023 18:29:44 +0800
Subject: [PATCH 1700/1976] Remove duplicate assignment code (#11178)

Signed-off-by: Sheng Hu
---
 utils/segment/loss.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/utils/segment/loss.py b/utils/segment/loss.py
index 2a8a4c680f6f..caeff3cad586 100644
--- a/utils/segment/loss.py
+++ b/utils/segment/loss.py
@@ -16,7 +16,6 @@ def __init__(self, model, autobalance=False, overlap=False):
         self.overlap = overlap
         device = next(model.parameters()).device  # get model device
         h = model.hyp  # hyperparameters
-        self.device = device

         # Define criteria
         BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device))

From d223460f3a4b4151437b15ac83990cea4b0f42e2 Mon Sep 17 00:00:00 2001
From: imyhxy
Date: Fri, 24 Mar 2023 03:27:46 +0800
Subject: [PATCH 1701/1976] Fixed creation of empty directories when a mistaken path was entered (#11174)

Fixed creation of empty directories when a mistaken path was entered

Co-authored-by: Glenn Jocher
---
 utils/downloads.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/utils/downloads.py b/utils/downloads.py
index 643b529fba3b..88f523742b5b 100644
--- a/utils/downloads.py
+++ b/utils/downloads.py
@@ -118,8 +118,8 @@ def github_assets(repository, version='latest'):
         except Exception:
             tag = release

-    file.parent.mkdir(parents=True, exist_ok=True)  # make parent dir (if required)
     if name in assets:
+        file.parent.mkdir(parents=True, exist_ok=True)  # make parent dir (if required)
         safe_download(file,
                       url=f'https://github.com/{repo}/releases/download/{tag}/{name}',
                       min_bytes=1E5,

From 6dd17516c8610fca8a75b8c003866c1bbe921daa Mon Sep 17 00:00:00 2001
From: Eljas Hyyrynen
Date: Thu, 23 Mar 2023 21:29:11 +0200
Subject: [PATCH 1702/1976] dataloaders: fix class filtering for segmentation (#11171)

* dataloaders: fix class filtering for segmentation

  self.segments[i] and segment[j] are lists, so they cannot be indexed with booleans;
  self.segments is a tuple, so it has to be converted into a list first.

* [pre-commit.ci] auto fixes from pre-commit.com hooks

  for more information, see https://pre-commit.ci

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Glenn Jocher
---
 utils/dataloaders.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/utils/dataloaders.py b/utils/dataloaders.py
index 7687a2ba2665..28d5b7974cf8 100644
--- a/utils/dataloaders.py
+++ b/utils/dataloaders.py
@@ -531,13 +531,14 @@ def __init__(self,

         # Update labels
         include_class = []  # filter labels to include only these classes (optional)
+        self.segments = list(self.segments)
         include_class_array = np.array(include_class).reshape(1, -1)
         for i, (label, segment) in enumerate(zip(self.labels, self.segments)):
             if include_class:
                 j = (label[:, 0:1] == include_class_array).any(1)
                 self.labels[i] = label[j]
                 if segment:
-                    self.segments[i] = segment[j]
+                    self.segments[i] = [segment[idx] for idx, elem in enumerate(j) if elem]
             if single_cls:  # single-class training, merge all classes into 0
                 self.labels[i][:, 0] = 0

From 81f81d3e97e78bf69c88652b4717edcd037c2f4b Mon Sep 17 00:00:00 2001
From: imyhxy
Date: Fri, 24 Mar 2023 03:30:10 +0800
Subject: [PATCH 1703/1976] Fixed randrange warnings in Python>=3.10 (#11161)
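Context for this fix: random.randrange() used to convert integral float arguments (e.g. 320.0) to int automatically, but Python 3.10 deprecated that conversion and newer releases reject it, so the multi-scale size computation now casts its bounds to int before the call. A small sketch with illustrative values for imgsz and gs:

import random

imgsz, gs = 640, 32  # illustrative image size and grid stride

# Deprecated since Python 3.10 (float bounds passed to randrange):
#   sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs

# Fixed form: bounds are cast to int before the call
sz = random.randrange(int(imgsz * 0.5), int(imgsz * 1.5) + gs) // gs * gs
print(sz)  # a multiple of gs between 320 and 960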
Co-authored-by: Glenn Jocher --- segment/train.py | 2 +- train.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/segment/train.py b/segment/train.py index 8ed75ba63e7c..de5f7035e6b6 100644 --- a/segment/train.py +++ b/segment/train.py @@ -299,7 +299,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio # Multi-scale if opt.multi_scale: - sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size + sz = random.randrange(int(imgsz * 0.5), int(imgsz * 1.5) + gs) // gs * gs # size sf = sz / max(imgs.shape[2:]) # scale factor if sf != 1: ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple) diff --git a/train.py b/train.py index c4e3aac3561a..960f24c3ecc7 100644 --- a/train.py +++ b/train.py @@ -299,7 +299,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio # Multi-scale if opt.multi_scale: - sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size + sz = random.randrange(int(imgsz * 0.5), int(imgsz * 1.5) + gs) // gs * gs # size sf = sz / max(imgs.shape[2:]) # scale factor if sf != 1: ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple) From f095258e0cdda729e5d67a84ea4115921b3e7c3d Mon Sep 17 00:00:00 2001 From: Grzegorz K <2612193+grzegorzk@users.noreply.github.com> Date: Thu, 23 Mar 2023 20:34:03 +0100 Subject: [PATCH 1704/1976] Sync signature of cv2.imread and cv2.imwrite (#11209) Co-authored-by: Glenn Jocher --- utils/general.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/utils/general.py b/utils/general.py index 74620460070e..adb924257162 100644 --- a/utils/general.py +++ b/utils/general.py @@ -1119,13 +1119,13 @@ def increment_path(path, exist_ok=False, sep='', mkdir=False): imshow_ = cv2.imshow # copy to avoid recursion errors -def imread(path, flags=cv2.IMREAD_COLOR): - return cv2.imdecode(np.fromfile(path, np.uint8), flags) +def imread(filename, flags=cv2.IMREAD_COLOR): + return cv2.imdecode(np.fromfile(filename, np.uint8), flags) -def imwrite(path, im): +def imwrite(filename, img): try: - cv2.imencode(Path(path).suffix, im)[1].tofile(path) + cv2.imencode(Path(filename).suffix, img)[1].tofile(filename) return True except Exception: return False From 52e2fde1b3af9fbdbb4abb63d0ed40c1f0096a39 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 23 Mar 2023 20:35:13 +0100 Subject: [PATCH 1705/1976] Update social icons: add TikTok, remove Facebook (#11226) Add TikTok, remove Facebook Signed-off-by: Glenn Jocher --- README.md | 47 +++++++++++++++++++++-------------------------- 1 file changed, 21 insertions(+), 26 deletions(-) diff --git a/README.md b/README.md index 16dfd9fca085..7ddf47272962 100644 --- a/README.md +++ b/README.md @@ -23,27 +23,25 @@ YOLOv5 🚀 is the world's most loved vision AI, representing Ultralytics Licensing.
[removed social-icon rows: the old <a>/<img> HTML link lines of README.md, Facebook links among them, are not recoverable from this extract]
[added social-icon rows: the new <a>/<img> HTML link lines of README.md, TikTok links among them, are not recoverable from this extract]
@@ -477,14 +475,11 @@ For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https:/ - - - - - + + From b96f35ce75effc96f1a20efddd836fa17501b4f5 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 23 Mar 2023 20:39:57 +0100 Subject: [PATCH 1706/1976] Update README.zh-CN.md social icons, add TikTok and remove Facebook (#11227) Update README.zh-CN.md Signed-off-by: Glenn Jocher --- README.zh-CN.md | 46 ++++++++++++++++++++-------------------------- 1 file changed, 20 insertions(+), 26 deletions(-) diff --git a/README.zh-CN.md b/README.zh-CN.md index 800a670cfb4f..30d5ece9728e 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -22,27 +22,24 @@ YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表Ultralytics 许可.
[removed social-icon rows: the old <a>/<img> HTML link lines of README.zh-CN.md, Facebook links among them, are not recoverable from this extract]
[added social-icon rows: the new <a>/<img> HTML link lines of README.zh-CN.md, TikTok links among them, are not recoverable from this extract]
##
YOLOv8 🚀 NEW
@@ -472,14 +469,11 @@ YOLOv5 在两种不同的 License 下可用: - - - - - + + From b54fd0ac28b921756d8eaa66cdd12f1ba55833df Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 26 Mar 2023 20:27:43 +0200 Subject: [PATCH 1707/1976] FROM pytorch/pytorch:2.0.0-cuda11.7-cudnn8-runtime (#11246) Signed-off-by: Glenn Jocher --- utils/docker/Dockerfile | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index b5d2af9fb08e..811ad4a6c9cb 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -2,9 +2,8 @@ # Builds ultralytics/yolov5:latest image on DockerHub https://hub.docker.com/r/ultralytics/yolov5 # Image is CUDA-optimized for YOLOv5 single/multi-GPU training and inference -# Start FROM NVIDIA PyTorch image https://ngc.nvidia.com/catalog/containers/nvidia:pytorch -# FROM docker.io/pytorch/pytorch:latest -FROM pytorch/pytorch:latest +# Start FROM PyTorch image https://hub.docker.com/r/pytorch/pytorch +FROM pytorch/pytorch:2.0.0-cuda11.7-cudnn8-runtime # Downloads to user config dir ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ From 789551ffd1ff4c43f53129454e39e5a35d1ec905 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 27 Mar 2023 13:48:32 +0200 Subject: [PATCH 1708/1976] Bump actions/stale from 7 to 8 (#11249) Bumps [actions/stale](https://github.com/actions/stale) from 7 to 8. - [Release notes](https://github.com/actions/stale/releases) - [Changelog](https://github.com/actions/stale/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/stale/compare/v7...v8) --- updated-dependencies: - dependency-name: actions/stale dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/stale.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index b21e9c00e6c5..470dc6197b51 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -9,7 +9,7 @@ jobs: stale: runs-on: ubuntu-latest steps: - - uses: actions/stale@v7 + - uses: actions/stale@v8 with: repo-token: ${{ secrets.GITHUB_TOKEN }} stale-issue-message: | From 0c8de3fca4a702f8ff5c435e67f378d1fce70243 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 28 Mar 2023 02:38:50 +0200 Subject: [PATCH 1709/1976] Update YOLOv5 tutorials on docs.ultralytics.com (#11254) * Update README.md Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update README.md Signed-off-by: Glenn Jocher * Update README.zh-CN.md Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- README.md | 35 +++++++++++++++++------------------ README.zh-CN.md | 32 ++++++++++++++++---------------- 2 files changed, 33 insertions(+), 34 deletions(-) diff --git a/README.md b/README.md index 7ddf47272962..cb1540737a14 100644 --- a/README.md +++ b/README.md @@ -65,7 +65,7 @@ pip install ultralytics ##
Documentation
-See the [YOLOv5 Docs](https://docs.ultralytics.com) for full documentation on training, testing and deployment. See below for quickstart examples. +See the [YOLOv5 Docs](https://docs.ultralytics.com/yolov5) for full documentation on training, testing and deployment. See below for quickstart examples.
Install @@ -153,23 +153,22 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml -
Tutorials -- [Train Custom Data](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data)  🚀 RECOMMENDED -- [Tips for Best Training Results](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)  ☘️ - RECOMMENDED -- [Multi-GPU Training](https://github.com/ultralytics/yolov5/issues/475) -- [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36) 🌟 NEW -- [TFLite, ONNX, CoreML, TensorRT Export](https://github.com/ultralytics/yolov5/issues/251) 🚀 -- [NVIDIA Jetson Nano Deployment](https://github.com/ultralytics/yolov5/issues/9627) 🌟 NEW -- [Test-Time Augmentation (TTA)](https://github.com/ultralytics/yolov5/issues/303) -- [Model Ensembling](https://github.com/ultralytics/yolov5/issues/318) -- [Model Pruning/Sparsity](https://github.com/ultralytics/yolov5/issues/304) -- [Hyperparameter Evolution](https://github.com/ultralytics/yolov5/issues/607) -- [Transfer Learning with Frozen Layers](https://github.com/ultralytics/yolov5/issues/1314) -- [Architecture Summary](https://github.com/ultralytics/yolov5/issues/6998) 🌟 NEW -- [Roboflow for Datasets, Labeling, and Active Learning](https://github.com/ultralytics/yolov5/issues/4975)  🌟 NEW -- [ClearML Logging](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) 🌟 NEW -- [YOLOv5 with Neural Magic's Deepsparse](https://bit.ly/yolov5-neuralmagic) 🌟 NEW -- [Comet Logging](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet) 🌟 NEW +- [Train Custom Data](https://docs.ultralytics.com/yolov5/train_custom_data) 🚀 RECOMMENDED +- [Tips for Best Training Results](https://docs.ultralytics.com/yolov5/tips_for_best_training_results) ☘️ RECOMMENDED +- [Multi-GPU Training](https://docs.ultralytics.com/yolov5/multi_gpu_training) +- [PyTorch Hub](https://docs.ultralytics.com/yolov5/pytorch_hub) 🌟 NEW +- [TFLite, ONNX, CoreML, TensorRT Export](https://docs.ultralytics.com/yolov5/export) 🚀 +- [NVIDIA Jetson platform Deployment](https://docs.ultralytics.com/yolov5/jetson_nano) 🌟 NEW +- [Test-Time Augmentation (TTA)](https://docs.ultralytics.com/yolov5/tta) +- [Model Ensembling](https://docs.ultralytics.com/yolov5/ensemble) +- [Model Pruning/Sparsity](https://docs.ultralytics.com/yolov5/pruning_sparsity) +- [Hyperparameter Evolution](https://docs.ultralytics.com/yolov5/hyp_evolution) +- [Transfer Learning with Frozen Layers](https://docs.ultralytics.com/yolov5/transfer_learn_frozen) +- [Architecture Summary](https://docs.ultralytics.com/yolov5/architecture) 🌟 NEW +- [Roboflow for Datasets, Labeling, and Active Learning](https://docs.ultralytics.com/yolov5/roboflow) +- [ClearML Logging](https://docs.ultralytics.com/yolov5/clearml) 🌟 NEW +- [YOLOv5 with Neural Magic's Deepsparse](https://docs.ultralytics.com/yolov5/neural_magic) 🌟 NEW +- [Comet Logging](https://docs.ultralytics.com/yolov5/comet) 🌟 NEW
diff --git a/README.zh-CN.md b/README.zh-CN.md index 30d5ece9728e..9a819598be7e 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -147,22 +147,22 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml -
教程 -- [训练自定义数据](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data)🚀 推荐 -- [获得最佳训练结果的技巧](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)☘️ 推荐 -- [多 GPU 训练](https://github.com/ultralytics/yolov5/issues/475) -- [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36)🌟 新 -- [TFLite、ONNX、CoreML、TensorRT 导出](https://github.com/ultralytics/yolov5/issues/251)🚀 -- [NVIDIA Jetson Nano 部署](https://github.com/ultralytics/yolov5/issues/9627)🌟 新 -- [测试时数据增强 (TTA)](https://github.com/ultralytics/yolov5/issues/303) -- [模型集成](https://github.com/ultralytics/yolov5/issues/318) -- [模型修剪/稀疏度](https://github.com/ultralytics/yolov5/issues/304) -- [超参数进化](https://github.com/ultralytics/yolov5/issues/607) -- [使用冻结层进行迁移学习](https://github.com/ultralytics/yolov5/issues/1314) -- [架构总结](https://github.com/ultralytics/yolov5/issues/6998)🌟 新 -- [用于数据集、标签和主动学习的 Roboflow](https://github.com/ultralytics/yolov5/issues/4975)🌟 新 -- [ClearML 记录](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml)🌟 新 -- [Deci 平台](https://github.com/ultralytics/yolov5/wiki/Deci-Platform)🌟 新 -- [Comet Logging](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet)🌟 新 +- [训练自定义数据](https://docs.ultralytics.com/yolov5/train_custom_data) 🚀 推荐 +- [获得最佳训练结果的技巧](https://docs.ultralytics.com/yolov5/tips_for_best_training_results) ☘️ 推荐 +- [多 GPU 训练](https://docs.ultralytics.com/yolov5/multi_gpu_training) +- [PyTorch Hub](https://docs.ultralytics.com/yolov5/pytorch_hub) 🌟 新 +- [TFLite, ONNX, CoreML, TensorRT 导出](https://docs.ultralytics.com/yolov5/export) 🚀 +- [NVIDIA Jetson 平台部署](https://docs.ultralytics.com/yolov5/jetson_nano) 🌟 新 +- [测试时增强(TTA)](https://docs.ultralytics.com/yolov5/tta) +- [模型集成](https://docs.ultralytics.com/yolov5/ensemble) +- [模型剪枝/稀疏性](https://docs.ultralytics.com/yolov5/pruning_sparsity) +- [超参数进化](https://docs.ultralytics.com/yolov5/hyp_evolution) +- [冻结层的迁移学习](https://docs.ultralytics.com/yolov5/transfer_learn_frozen) +- [架构概述](https://docs.ultralytics.com/yolov5/architecture) 🌟 新 +- [Roboflow 用于数据集、标签和主动学习](https://docs.ultralytics.com/yolov5/roboflow) +- [ClearML 日志记录](https://docs.ultralytics.com/yolov5/clearml) 🌟 新 +- [YOLOv5 与 Neural Magic 的 Deepsparse](https://docs.ultralytics.com/yolov5/neural_magic) 🌟 新 +- [Comet 日志记录](https://docs.ultralytics.com/yolov5/comet) 🌟 新
From a82132c10bdc0463815a83884fdd85267fc58fdb Mon Sep 17 00:00:00 2001 From: Grzegorz K <2612193+grzegorzk@users.noreply.github.com> Date: Thu, 30 Mar 2023 11:39:11 +0200 Subject: [PATCH 1710/1976] Do not monkey-patch cv2 methods for unaware caller (#11210) --- utils/general.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index adb924257162..68c0736f65fa 100644 --- a/utils/general.py +++ b/utils/general.py @@ -1135,6 +1135,7 @@ def imshow(path, im): imshow_(path.encode('unicode_escape').decode(), im) -cv2.imread, cv2.imwrite, cv2.imshow = imread, imwrite, imshow # redefine +if Path(inspect.stack()[0].filename).parent.parent.as_posix() in inspect.stack()[-1].filename: + cv2.imread, cv2.imwrite, cv2.imshow = imread, imwrite, imshow # redefine # Variables ------------------------------------------------------------------------------------------------------------ From cca5e21995679c4fce32d67a69e2ec89fe131c0e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 1 Apr 2023 13:02:51 +0200 Subject: [PATCH 1711/1976] Update greetings.yml (#11287) * Update greeting * Cleanup README * Created using Colaboratory * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Created using Colaboratory * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/workflows/greetings.yml | 2 +- README.md | 2 +- README.zh-CN.md | 2 +- requirements.txt | 1 - tutorial.ipynb | 12 +++--------- 5 files changed, 6 insertions(+), 13 deletions(-) diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index 42a2463585a8..a4eca919a5b3 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -23,7 +23,7 @@ jobs: - ✅ Reduce changes to the absolute **minimum** required for your bug fix or feature addition. _"It is not daily increase but daily decrease, hack away the unessential. The closer to the source, the less wastage there is."_ — Bruce Lee issue-message: | - 👋 Hello @${{ github.actor }}, thank you for your interest in YOLOv5 🚀! Please visit our ⭐️ [Tutorials](https://github.com/ultralytics/yolov5/wiki#tutorials) to get started, where you can find quickstart guides for simple tasks like [Custom Data Training](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data) all the way to advanced concepts like [Hyperparameter Evolution](https://github.com/ultralytics/yolov5/issues/607). + 👋 Hello @${{ github.actor }}, thank you for your interest in YOLOv5 🚀! Please visit our ⭐️ [Tutorials](https://docs.ultralytics.com/yolov5/#tutorials) to get started, where you can find quickstart guides for simple tasks like [Custom Data Training](https://docs.ultralytics.com/yolov5/train_custom_data/) all the way to advanced concepts like [Hyperparameter Evolution](https://docs.ultralytics.com/yolov5/hyp_evolution/). If this is a 🐛 Bug Report, please provide a **minimum reproducible example** to help us debug it. 
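An aside on the guard that patch #11210 above added around the cv2 monkey-patching: the Unicode-safe imread/imwrite/imshow wrappers are now installed only when the top-level entry script itself lives inside the repository tree, so third-party code that merely imports a module from the repo keeps stock OpenCV behavior. A simplified, hypothetical sketch of the same technique (imread_unicode and the single-method patch are illustrative, not the project's exact code):

import inspect
from pathlib import Path

import cv2
import numpy as np


def imread_unicode(filename, flags=cv2.IMREAD_COLOR):
    # np.fromfile + cv2.imdecode handles paths with non-ASCII characters that
    # plain cv2.imread can mishandle on some platforms (notably Windows)
    return cv2.imdecode(np.fromfile(filename, np.uint8), flags)


# inspect.stack()[0] is this frame, inspect.stack()[-1] is the outermost one,
# i.e. the script that was actually launched; patch only when that script
# sits inside this file's directory tree.
package_root = Path(inspect.stack()[0].filename).parent.as_posix()
entry_script = inspect.stack()[-1].filename
if package_root in entry_script:
    cv2.imread = imread_unicode  # callers outside the tree are unaffected

The test is deliberately coarse, a plain substring match on the entry script's path, which keeps the guard dependency-free and essentially free at import time.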
diff --git a/README.md b/README.md index cb1540737a14..9c991abf0179 100644 --- a/README.md +++ b/README.md @@ -165,7 +165,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - - [Hyperparameter Evolution](https://docs.ultralytics.com/yolov5/hyp_evolution) - [Transfer Learning with Frozen Layers](https://docs.ultralytics.com/yolov5/transfer_learn_frozen) - [Architecture Summary](https://docs.ultralytics.com/yolov5/architecture) 🌟 NEW -- [Roboflow for Datasets, Labeling, and Active Learning](https://docs.ultralytics.com/yolov5/roboflow) +- [Roboflow for Datasets](https://docs.ultralytics.com/yolov5/roboflow) - [ClearML Logging](https://docs.ultralytics.com/yolov5/clearml) 🌟 NEW - [YOLOv5 with Neural Magic's Deepsparse](https://docs.ultralytics.com/yolov5/neural_magic) 🌟 NEW - [Comet Logging](https://docs.ultralytics.com/yolov5/comet) 🌟 NEW diff --git a/README.zh-CN.md b/README.zh-CN.md index 9a819598be7e..761e61634dfb 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -159,7 +159,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - - [超参数进化](https://docs.ultralytics.com/yolov5/hyp_evolution) - [冻结层的迁移学习](https://docs.ultralytics.com/yolov5/transfer_learn_frozen) - [架构概述](https://docs.ultralytics.com/yolov5/architecture) 🌟 新 -- [Roboflow 用于数据集、标签和主动学习](https://docs.ultralytics.com/yolov5/roboflow) +- [Roboflow](https://docs.ultralytics.com/yolov5/roboflow) - [ClearML 日志记录](https://docs.ultralytics.com/yolov5/clearml) 🌟 新 - [YOLOv5 与 Neural Magic 的 Deepsparse](https://docs.ultralytics.com/yolov5/neural_magic) 🌟 新 - [Comet 日志记录](https://docs.ultralytics.com/yolov5/comet) 🌟 新 diff --git a/requirements.txt b/requirements.txt index 11cb9aaaf99e..fc7193604607 100644 --- a/requirements.txt +++ b/requirements.txt @@ -46,5 +46,4 @@ setuptools>=65.5.1 # Snyk vulnerability fix # mss # screenshots # albumentations>=1.0.3 # pycocotools>=2.0.6 # COCO mAP -# roboflow # ultralytics # HUB https://hub.ultralytics.com diff --git a/tutorial.ipynb b/tutorial.ipynb index 32af68b57945..0d1f387cf040 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -632,19 +632,13 @@ "automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\n", "- **[Datasets](https://github.com/ultralytics/yolov5/tree/master/data)** available for autodownload include: [COCO](https://github.com/ultralytics/yolov5/blob/master/data/coco.yaml), [COCO128](https://github.com/ultralytics/yolov5/blob/master/data/coco128.yaml), [VOC](https://github.com/ultralytics/yolov5/blob/master/data/VOC.yaml), [Argoverse](https://github.com/ultralytics/yolov5/blob/master/data/Argoverse.yaml), [VisDrone](https://github.com/ultralytics/yolov5/blob/master/data/VisDrone.yaml), [GlobalWheat](https://github.com/ultralytics/yolov5/blob/master/data/GlobalWheat2020.yaml), [xView](https://github.com/ultralytics/yolov5/blob/master/data/xView.yaml), [Objects365](https://github.com/ultralytics/yolov5/blob/master/data/Objects365.yaml), [SKU-110K](https://github.com/ultralytics/yolov5/blob/master/data/SKU-110K.yaml).\n", "- **Training Results** are saved to `runs/train/` with incrementing run directories, i.e. `runs/train/exp2`, `runs/train/exp3` etc.\n", - "

\n", + "
\n", "\n", "A **Mosaic Dataloader** is used for training which combines 4 images into 1 mosaic.\n", "\n", - "## Train on Custom Data with Roboflow 🌟 NEW\n", - "\n", - "[Roboflow](https://roboflow.com/?ref=ultralytics) enables you to easily **organize, label, and prepare** a high quality dataset with your own custom data. Roboflow also makes it easy to establish an active learning pipeline, collaborate with your team on dataset improvement, and integrate directly into your model building workflow with the `roboflow` pip package.\n", - "\n", - "- Custom Training Example: [https://blog.roboflow.com/how-to-train-yolov5-on-a-custom-dataset/](https://blog.roboflow.com/how-to-train-yolov5-on-a-custom-dataset/?ref=ultralytics)\n", - "- Custom Training Notebook: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/roboflow-ai/yolov5-custom-training-tutorial/blob/main/yolov5-custom-training.ipynb)\n", - "
\n", + "## Label a dataset on Roboflow (optional)\n", "\n", - "

Label images lightning fast (including with model-assisted labeling)" + "[Roboflow](https://roboflow.com/?ref=ultralytics) enables you to easily **organize, label, and prepare** a high quality dataset with your own custom data. Roboflow also makes it easy to establish an active learning pipeline, collaborate with your team on dataset improvement, and integrate directly into your model building workflow with the `roboflow` pip package." ] }, { From 23c492321290266810e08fa5ee9a23fc9d6a571f Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 4 Apr 2023 13:12:53 +0200 Subject: [PATCH 1712/1976] [pre-commit.ci] pre-commit suggestions (#11293) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/codespell-project/codespell: v2.2.2 → v2.2.4](https://github.com/codespell-project/codespell/compare/v2.2.2...v2.2.4) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c5162378ab81..750ced690531 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -57,7 +57,7 @@ repos: name: PEP8 - repo: https://github.com/codespell-project/codespell - rev: v2.2.2 + rev: v2.2.4 hooks: - id: codespell args: From 9dd0ad30c0668cc467b4496f45de2a85a901830d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 8 Apr 2023 18:07:38 +0200 Subject: [PATCH 1713/1976] Update issue YAMLs (#11318) * Update issue YAMLs Signed-off-by: Glenn Jocher * Update config.yml Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher --- .github/ISSUE_TEMPLATE/config.yml | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index 4db7cefb2707..776e2b37cadb 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -1,8 +1,11 @@ blank_issues_enabled: true contact_links: + - name: 📄 Docs + url: https://docs.ultralytics.com/yolov5 + about: Full Ultralytics YOLOv5 Documentation - name: 💬 Forum url: https://community.ultralytics.com/ about: Ask on Ultralytics Community Forum - - name: Stack Overflow - url: https://stackoverflow.com/search?q=YOLOv5 - about: Ask on Stack Overflow with 'YOLOv5' tag + - name: 🎧 Discord + url: https://discord.gg/n6cFeSPZdD + about: Ask on Ultralytics Discord From 71244aed2ccc57646f3c944cf6a95f20a5a03088 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 9 Apr 2023 13:31:57 +0200 Subject: [PATCH 1714/1976] Update config.yml (#11321) Signed-off-by: Glenn Jocher --- .github/ISSUE_TEMPLATE/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index 776e2b37cadb..743feb957ff1 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -2,7 +2,7 @@ blank_issues_enabled: true contact_links: - name: 📄 Docs url: https://docs.ultralytics.com/yolov5 - about: Full Ultralytics YOLOv5 Documentation + about: View Ultralytics YOLOv5 Docs - name: 💬 Forum url: https://community.ultralytics.com/ about: Ask on Ultralytics Community Forum From 20a7368373b592d9832a10f79a7a527d1976e321 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 9 Apr 2023 16:18:24 +0200 Subject: [PATCH 1715/1976] Created using Colaboratory --- tutorial.ipynb | 633 
+++++++++++-------------------------------------- 1 file changed, 134 insertions(+), 499 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index 0d1f387cf040..8753a3205d90 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -4,360 +4,13 @@ "metadata": { "colab": { "name": "YOLOv5 Tutorial", - "provenance": [], - "toc_visible": true + "provenance": [] }, "kernelspec": { "name": "python3", "display_name": "Python 3" }, - "accelerator": "GPU", - "widgets": { - "application/vnd.jupyter.widget-state+json": { - "1f7df330663048998adcf8a45bc8f69b": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HBoxModel", - "model_module_version": "1.5.0", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HBoxModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HBoxView", - "box_style": "", - "children": [ - "IPY_MODEL_e896e6096dd244c59d7955e2035cd729", - "IPY_MODEL_a6ff238c29984b24bf6d0bd175c19430", - "IPY_MODEL_3c085ba3f3fd4c3c8a6bb41b41ce1479" - ], - "layout": "IPY_MODEL_16b0c8aa6e0f427e8a54d3791abb7504" - } - }, - "e896e6096dd244c59d7955e2035cd729": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HTMLModel", - "model_module_version": "1.5.0", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_c7b2dd0f78384cad8e400b282996cdf5", - "placeholder": "​", - "style": "IPY_MODEL_6a27e43b0e434edd82ee63f0a91036ca", - "value": "100%" - } - }, - "a6ff238c29984b24bf6d0bd175c19430": { - "model_module": "@jupyter-widgets/controls", - "model_name": "FloatProgressModel", - "model_module_version": "1.5.0", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "FloatProgressModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "ProgressView", - "bar_style": "success", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_cce0e6c0c4ec442cb47e65c674e02e92", - "max": 818322941, - "min": 0, - "orientation": "horizontal", - "style": "IPY_MODEL_c5b9f38e2f0d4f9aa97fe87265263743", - "value": 818322941 - } - }, - "3c085ba3f3fd4c3c8a6bb41b41ce1479": { - "model_module": "@jupyter-widgets/controls", - "model_name": "HTMLModel", - "model_module_version": "1.5.0", - "state": { - "_dom_classes": [], - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "HTMLModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/controls", - "_view_module_version": "1.5.0", - "_view_name": "HTMLView", - "description": "", - "description_tooltip": null, - "layout": "IPY_MODEL_df554fb955c7454696beac5a82889386", - "placeholder": "​", - "style": "IPY_MODEL_74e9112a87a242f4831b7d68c7da6333", - "value": " 780M/780M [00:05<00:00, 126MB/s]" - } - }, - "16b0c8aa6e0f427e8a54d3791abb7504": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "model_module_version": "1.2.0", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - 
"_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "c7b2dd0f78384cad8e400b282996cdf5": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "model_module_version": "1.2.0", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "6a27e43b0e434edd82ee63f0a91036ca": { - "model_module": "@jupyter-widgets/controls", - "model_name": "DescriptionStyleModel", - "model_module_version": "1.5.0", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "description_width": "" - } - }, - "cce0e6c0c4ec442cb47e65c674e02e92": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "model_module_version": "1.2.0", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - 
"justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "c5b9f38e2f0d4f9aa97fe87265263743": { - "model_module": "@jupyter-widgets/controls", - "model_name": "ProgressStyleModel", - "model_module_version": "1.5.0", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "ProgressStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "bar_color": null, - "description_width": "" - } - }, - "df554fb955c7454696beac5a82889386": { - "model_module": "@jupyter-widgets/base", - "model_name": "LayoutModel", - "model_module_version": "1.2.0", - "state": { - "_model_module": "@jupyter-widgets/base", - "_model_module_version": "1.2.0", - "_model_name": "LayoutModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "LayoutView", - "align_content": null, - "align_items": null, - "align_self": null, - "border": null, - "bottom": null, - "display": null, - "flex": null, - "flex_flow": null, - "grid_area": null, - "grid_auto_columns": null, - "grid_auto_flow": null, - "grid_auto_rows": null, - "grid_column": null, - "grid_gap": null, - "grid_row": null, - "grid_template_areas": null, - "grid_template_columns": null, - "grid_template_rows": null, - "height": null, - "justify_content": null, - "justify_items": null, - "left": null, - "margin": null, - "max_height": null, - "max_width": null, - "min_height": null, - "min_width": null, - "object_fit": null, - "object_position": null, - "order": null, - "overflow": null, - "overflow_x": null, - "overflow_y": null, - "padding": null, - "right": null, - "top": null, - "visibility": null, - "width": null - } - }, - "74e9112a87a242f4831b7d68c7da6333": { - "model_module": "@jupyter-widgets/controls", - "model_name": "DescriptionStyleModel", - "model_module_version": "1.5.0", - "state": { - "_model_module": "@jupyter-widgets/controls", - "_model_module_version": "1.5.0", - "_model_name": "DescriptionStyleModel", - "_view_count": null, - "_view_module": "@jupyter-widgets/base", - "_view_module_version": "1.2.0", - "_view_name": "StyleView", - "description_width": "" - } - } - } - } + "accelerator": "GPU" }, "cells": [ { @@ -378,7 +31,7 @@ " \"Open\n", "
\n", "\n", - "This YOLOv5 🚀 notebook by Ultralytics presents simple train, validate and predict examples to help start your AI adventure.
See GitHub for community support or contact us for professional support.\n", + "This YOLOv5 🚀 notebook by Ultralytics presents simple train, validate and predict examples to help start your AI adventure.
We hope that the resources in this notebook will help you get the most out of YOLOv5. Please browse the YOLOv5 Docs for details, raise an issue on GitHub for support, and join our Discord community for questions and discussions!\n", "\n", "" ] @@ -401,7 +54,7 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "f9f016ad-3dcf-4bd2-e1c3-d5b79efc6f32" + "outputId": "e8225db4-e61d-4640-8b1f-8bfce3331cea" }, "source": [ "!git clone https://github.com/ultralytics/yolov5 # clone\n", @@ -412,20 +65,20 @@ "import utils\n", "display = utils.notebook_init() # checks" ], - "execution_count": null, + "execution_count": 1, "outputs": [ { "output_type": "stream", "name": "stderr", "text": [ - "YOLOv5 🚀 v7.0-1-gb32f67f Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n" + "YOLOv5 🚀 v7.0-136-g71244ae Python-3.9.16 torch-2.0.0+cu118 CUDA:0 (Tesla T4, 15102MiB)\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ - "Setup complete ✅ (2 CPUs, 12.7 GB RAM, 22.6/78.2 GB disk)\n" + "Setup complete ✅ (2 CPUs, 12.7 GB RAM, 23.3/166.8 GB disk)\n" ] } ] @@ -459,29 +112,29 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "b4db5c49-f501-4505-cf0d-a1d35236c485" + "outputId": "284ef04b-1596-412f-88f6-948828dd2b49" }, "source": [ "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images\n", "# display.Image(filename='runs/detect/exp/zidane.jpg', width=600)" ], - "execution_count": null, + "execution_count": 13, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "\u001b[34m\u001b[1mdetect: \u001b[0mweights=['yolov5s.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/detect, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False, vid_stride=1\n", - "YOLOv5 🚀 v7.0-1-gb32f67f Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "YOLOv5 🚀 v7.0-136-g71244ae Python-3.9.16 torch-2.0.0+cu118 CUDA:0 (Tesla T4, 15102MiB)\n", "\n", "Downloading https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s.pt to yolov5s.pt...\n", - "100% 14.1M/14.1M [00:00<00:00, 116MB/s] \n", + "100% 14.1M/14.1M [00:00<00:00, 24.5MB/s]\n", "\n", "Fusing layers... 
\n", "YOLOv5s summary: 213 layers, 7225885 parameters, 0 gradients\n", - "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 17.0ms\n", - "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, 14.3ms\n", - "Speed: 0.5ms pre-process, 15.7ms inference, 18.6ms NMS per image at shape (1, 3, 640, 640)\n", + "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 41.5ms\n", + "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, 60.0ms\n", + "Speed: 0.5ms pre-process, 50.8ms inference, 37.7ms NMS per image at shape (1, 3, 640, 640)\n", "Results saved to \u001b[1mruns/detect/exp\u001b[0m\n" ] } @@ -512,44 +165,23 @@ "metadata": { "id": "WQPtK1QYVaD_", "colab": { - "base_uri": "https://localhost:8080/", - "height": 49, - "referenced_widgets": [ - "1f7df330663048998adcf8a45bc8f69b", - "e896e6096dd244c59d7955e2035cd729", - "a6ff238c29984b24bf6d0bd175c19430", - "3c085ba3f3fd4c3c8a6bb41b41ce1479", - "16b0c8aa6e0f427e8a54d3791abb7504", - "c7b2dd0f78384cad8e400b282996cdf5", - "6a27e43b0e434edd82ee63f0a91036ca", - "cce0e6c0c4ec442cb47e65c674e02e92", - "c5b9f38e2f0d4f9aa97fe87265263743", - "df554fb955c7454696beac5a82889386", - "74e9112a87a242f4831b7d68c7da6333" - ] + "base_uri": "https://localhost:8080/" }, - "outputId": "c7d0a0d2-abfb-44c3-d60d-f99d0e7aabad" + "outputId": "cf7d52f0-281c-4c96-a488-79f5908f8426" }, "source": [ "# Download COCO val\n", "torch.hub.download_url_to_file('https://ultralytics.com/assets/coco2017val.zip', 'tmp.zip') # download (780M - 5000 images)\n", "!unzip -q tmp.zip -d ../datasets && rm tmp.zip # unzip" ], - "execution_count": null, + "execution_count": 3, "outputs": [ { - "output_type": "display_data", - "data": { - "text/plain": [ - " 0%| | 0.00/780M [00:00

\n", + "

\n", "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", "

\n", "\n", @@ -645,14 +277,14 @@ "cell_type": "code", "source": [ "#@title Select YOLOv5 🚀 logger {run: 'auto'}\n", - "logger = 'ClearML' #@param ['ClearML', 'Comet', 'TensorBoard']\n", + "logger = 'Comet' #@param ['Comet', 'ClearML', 'TensorBoard']\n", "\n", - "if logger == 'ClearML':\n", - " %pip install -q clearml\n", - " import clearml; clearml.browser_login()\n", - "elif logger == 'Comet':\n", + "if logger == 'Comet':\n", " %pip install -q comet_ml\n", " import comet_ml; comet_ml.init()\n", + "elif logger == 'ClearML':\n", + " %pip install -q clearml\n", + " import clearml; clearml.browser_login()\n", "elif logger == 'TensorBoard':\n", " %load_ext tensorboard\n", " %tensorboard --logdir runs/train" @@ -670,21 +302,24 @@ "colab": { "base_uri": "https://localhost:8080/" }, - "outputId": "721b9028-767f-4a05-c964-692c245f7398" + "outputId": "bbeeea2b-04fc-4185-aa64-258690495b5a" }, "source": [ "# Train YOLOv5s on COCO128 for 3 epochs\n", "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache" ], - "execution_count": null, + "execution_count": 5, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ + "2023-04-09 14:11:38.063605: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n", + "To enable the following instructions: AVX2 AVX512F FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n", + "2023-04-09 14:11:39.026661: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n", "\u001b[34m\u001b[1mtrain: \u001b[0mweights=yolov5s.pt, cfg=, data=coco128.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=-1, artifact_alias=latest\n", "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 🚀 v7.0-1-gb32f67f Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "YOLOv5 🚀 v7.0-136-g71244ae Python-3.9.16 torch-2.0.0+cu118 CUDA:0 (Tesla T4, 15102MiB)\n", "\n", "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n", "\u001b[34m\u001b[1mClearML: \u001b[0mrun 'pip install clearml' to automatically track, visualize and remotely train YOLOv5 🚀 in ClearML\n", @@ -693,8 +328,8 @@ "\n", "Dataset not found ⚠️, missing paths ['/content/datasets/coco128/images/train2017']\n", "Downloading https://ultralytics.com/assets/coco128.zip to coco128.zip...\n", - "100% 6.66M/6.66M [00:00<00:00, 261MB/s]\n", - "Dataset download success ✅ (0.3s), saved to \u001b[1m/content/datasets\u001b[0m\n", + "100% 6.66M/6.66M [00:00<00:00, 75.6MB/s]\n", + "Dataset download success ✅ (0.6s), saved to 
\u001b[1m/content/datasets\u001b[0m\n", "\n", " from n params module arguments \n", " 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2] \n", @@ -728,11 +363,11 @@ "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n", "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 57 weight(decay=0.0), 60 weight(decay=0.0005), 60 bias\n", "\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /content/datasets/coco128/labels/train2017... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<00:00, 1911.57it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /content/datasets/coco128/labels/train2017... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<00:00, 1709.36it/s]\n", "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /content/datasets/coco128/labels/train2017.cache\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 229.69it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 264.35it/s]\n", "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco128/labels/train2017.cache... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00 Date: Sun, 9 Apr 2023 22:34:18 +0800 Subject: [PATCH 1716/1976] Update requirements.txt (#11294) * Update requirements.txt Signed-off-by: whx-s <127172288+whx-s@users.noreply.github.com> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Signed-off-by: whx-s <127172288+whx-s@users.noreply.github.com> Co-authored-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- requirements.txt | 2 +- tutorial.ipynb | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index fc7193604607..baf7a2757f1d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,7 +3,7 @@ # Base ------------------------------------------------------------------------ gitpython>=3.1.30 -matplotlib>=3.2.2 +matplotlib>=3.3 numpy>=1.18.5 opencv-python>=4.1.1 Pillow>=7.1.2 diff --git a/tutorial.ipynb b/tutorial.ipynb index 8753a3205d90..d2b54c9c60ef 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -602,4 +602,4 @@ "outputs": [] } ] -} \ No newline at end of file +} From f7c656278cec73d9dd987629c3486924fe612957 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 9 Apr 2023 17:51:25 +0200 Subject: [PATCH 1717/1976] Update general.py (#11322) * Update general.py Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/general.py | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/general.py b/utils/general.py index 68c0736f65fa..0e76792a08d5 100644 --- a/utils/general.py +++ b/utils/general.py @@ -58,6 +58,7 @@ cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader) os.environ['NUMEXPR_MAX_THREADS'] = str(NUM_THREADS) # NumExpr max threads os.environ['OMP_NUM_THREADS'] = '1' if platform.system() == 'darwin' else str(NUM_THREADS) # OpenMP (PyTorch and SciPy) +os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # suppress verbose TF compiler warnings in Colab def 
is_ascii(s=''): From 1db95338cf5091db8e3e67395e4487da0e1ee51d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 9 Apr 2023 18:15:53 +0200 Subject: [PATCH 1718/1976] Update PULL_REQUEST_TEMPLATE.md (#11323) * Update PULL_REQUEST_TEMPLATE.md Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update PULL_REQUEST_TEMPLATE.md Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/PULL_REQUEST_TEMPLATE.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index f25b017ace8b..51f9803a57a5 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -6,4 +6,8 @@ Thank you for submitting a YOLOv5 🚀 Pull Request! We want to make contributin - Provide before and after profiling/inference/training results to help us quantify the improvement your PR provides (if applicable). Please see our ✅ [Contributing Guide](https://github.com/ultralytics/yolov5/blob/master/CONTRIBUTING.md) for more details. + +Note that Copilot will summarize this PR below, do not modify the 'copilot:all' line. --> + +copilot:all From 34cf749958d2dd3ed1205f6bb07e0f20f6e2372d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 14 Apr 2023 14:36:16 +0200 Subject: [PATCH 1719/1976] Update LICENSE to AGPL-3.0 (#11359) * Update LICENSE to AGPL-3.0 This pull request updates the license of the YOLOv5 project from GNU General Public License v3.0 (GPL-3.0) to GNU Affero General Public License v3.0 (AGPL-3.0). We at Ultralytics have decided to make this change in order to better protect our intellectual property and ensure that any modifications made to the YOLOv5 source code will be shared back with the community when used over a network. AGPL-3.0 is very similar to GPL-3.0, but with an additional clause to address the use of software over a network. This change ensures that if someone modifies YOLOv5 and provides it as a service over a network (e.g., through a web application or API), they must also make the source code of their modified version available to users of the service. This update includes the following changes: - Replace the `LICENSE` file with the AGPL-3.0 license text - Update the license reference in the `README.md` file - Update the license headers in source code files We believe that this change will promote a more collaborative environment and help drive further innovation within the YOLOv5 community. Please review the changes and let us know if you have any questions or concerns. 
Signed-off-by: Glenn Jocher * Update headers to AGPL-3.0 --------- Signed-off-by: Glenn Jocher --- .github/workflows/ci-testing.yml | 2 +- .github/workflows/docker.yml | 2 +- .github/workflows/greetings.yml | 2 +- .github/workflows/stale.yml | 2 +- .github/workflows/translate-readme.yml | 2 +- .pre-commit-config.yaml | 2 +- CITATION.cff | 2 +- CONTRIBUTING.md | 2 +- LICENSE | 153 +++++++++++------------- README.md | 4 +- README.zh-CN.md | 4 +- benchmarks.py | 2 +- classify/predict.py | 2 +- classify/train.py | 2 +- classify/val.py | 2 +- data/Argoverse.yaml | 2 +- data/GlobalWheat2020.yaml | 2 +- data/ImageNet.yaml | 2 +- data/Objects365.yaml | 2 +- data/SKU-110K.yaml | 2 +- data/VOC.yaml | 2 +- data/VisDrone.yaml | 2 +- data/coco.yaml | 2 +- data/coco128-seg.yaml | 2 +- data/coco128.yaml | 2 +- data/hyps/hyp.Objects365.yaml | 2 +- data/hyps/hyp.VOC.yaml | 2 +- data/hyps/hyp.no-augmentation.yaml | 2 +- data/hyps/hyp.scratch-high.yaml | 2 +- data/hyps/hyp.scratch-low.yaml | 2 +- data/hyps/hyp.scratch-med.yaml | 2 +- data/scripts/download_weights.sh | 2 +- data/scripts/get_coco.sh | 2 +- data/scripts/get_coco128.sh | 2 +- data/scripts/get_imagenet.sh | 2 +- data/xView.yaml | 2 +- detect.py | 2 +- export.py | 2 +- hubconf.py | 2 +- models/common.py | 2 +- models/experimental.py | 2 +- models/hub/anchors.yaml | 2 +- models/hub/yolov3-spp.yaml | 2 +- models/hub/yolov3-tiny.yaml | 2 +- models/hub/yolov3.yaml | 2 +- models/hub/yolov5-bifpn.yaml | 2 +- models/hub/yolov5-fpn.yaml | 2 +- models/hub/yolov5-p2.yaml | 2 +- models/hub/yolov5-p34.yaml | 2 +- models/hub/yolov5-p6.yaml | 2 +- models/hub/yolov5-p7.yaml | 2 +- models/hub/yolov5-panet.yaml | 2 +- models/hub/yolov5l6.yaml | 2 +- models/hub/yolov5m6.yaml | 2 +- models/hub/yolov5n6.yaml | 2 +- models/hub/yolov5s-LeakyReLU.yaml | 2 +- models/hub/yolov5s-ghost.yaml | 2 +- models/hub/yolov5s-transformer.yaml | 2 +- models/hub/yolov5s6.yaml | 2 +- models/hub/yolov5x6.yaml | 2 +- models/segment/yolov5l-seg.yaml | 2 +- models/segment/yolov5m-seg.yaml | 2 +- models/segment/yolov5n-seg.yaml | 2 +- models/segment/yolov5s-seg.yaml | 2 +- models/segment/yolov5x-seg.yaml | 2 +- models/tf.py | 2 +- models/yolo.py | 2 +- models/yolov5l.yaml | 2 +- models/yolov5m.yaml | 2 +- models/yolov5n.yaml | 2 +- models/yolov5s.yaml | 2 +- models/yolov5x.yaml | 2 +- segment/predict.py | 2 +- segment/train.py | 2 +- segment/val.py | 2 +- train.py | 2 +- utils/__init__.py | 2 +- utils/activations.py | 2 +- utils/augmentations.py | 2 +- utils/autoanchor.py | 2 +- utils/autobatch.py | 2 +- utils/callbacks.py | 2 +- utils/dataloaders.py | 2 +- utils/docker/Dockerfile | 2 +- utils/docker/Dockerfile-arm64 | 2 +- utils/docker/Dockerfile-cpu | 2 +- utils/downloads.py | 2 +- utils/flask_rest_api/example_request.py | 2 +- utils/flask_rest_api/restapi.py | 2 +- utils/general.py | 2 +- utils/loggers/__init__.py | 2 +- utils/loggers/wandb/wandb_utils.py | 2 +- utils/loss.py | 2 +- utils/metrics.py | 2 +- utils/plots.py | 2 +- utils/segment/augmentations.py | 2 +- utils/segment/dataloaders.py | 2 +- utils/segment/metrics.py | 2 +- utils/torch_utils.py | 2 +- utils/triton.py | 2 +- val.py | 2 +- 101 files changed, 172 insertions(+), 185 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index a6f47bb8811c..bff95f654552 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # YOLOv5 Continuous Integration (CI) 
GitHub Actions tests name: YOLOv5 CI diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 4f7fff00677c..190b48875fa6 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Builds ultralytics/yolov5:latest images on DockerHub https://hub.docker.com/r/ultralytics/yolov5 name: Publish Docker Images diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index a4eca919a5b3..337a563803db 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license name: Greetings diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 470dc6197b51..734350441c61 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license name: Close stale issues on: diff --git a/.github/workflows/translate-readme.yml b/.github/workflows/translate-readme.yml index 2bb351ec7e81..d5e2be26f523 100644 --- a/.github/workflows/translate-readme.yml +++ b/.github/workflows/translate-readme.yml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # README translation action to translate README.md to Chinese as README.zh-CN.md on any change to README.md name: Translate README diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 750ced690531..8bd40484c522 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLO 🚀, GPL-3.0 license +# Ultralytics YOLO 🚀, AGPL-3.0 license # Pre-commit hooks. For more information see https://github.com/pre-commit/pre-commit-hooks/blob/main/README.md exclude: 'docs/' diff --git a/CITATION.cff b/CITATION.cff index 8e2cf1148b92..c277230d922f 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -10,5 +10,5 @@ preferred-citation: version: 7.0 doi: 10.5281/zenodo.3908559 date-released: 2020-5-29 - license: GPL-3.0 + license: AGPL-3.0 url: "https://github.com/ultralytics/yolov5" diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 71857faddb89..6e9ce5998d9f 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -90,4 +90,4 @@ understand and diagnose your problem. ## License By contributing, you agree that your contributions will be licensed under -the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/) +the [AGPL-3.0 license](https://choosealicense.com/licenses/agpl-3.0/) diff --git a/LICENSE b/LICENSE index 92b370f0e0e1..be3f7b28e564 100644 --- a/LICENSE +++ b/LICENSE @@ -1,23 +1,21 @@ -GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 - Copyright (C) 2007 Free Software Foundation, Inc. + Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble - The GNU General Public License is a free, copyleft license for -software and other kinds of works. + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. 
By contrast, -the GNU General Public License is intended to guarantee your freedom to +our General Public Licenses are intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free -software for all its users. We, the Free Software Foundation, use the -GNU General Public License for most of our software; it applies also to -any other work released this way by its authors. You can apply it to -your programs, too. +software for all its users. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you @@ -26,44 +24,34 @@ them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. - To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received. You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. - - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. - - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. - - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. This is fundamentally incompatible with the aim of -protecting users' freedom to change the software. The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. -States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. To prevent this, the GPL assures that -patents cannot be used to render the program non-free. + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. 
Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. The precise terms and conditions for copying, distribution and modification follow. @@ -72,7 +60,7 @@ modification follow. 0. Definitions. - "This License" refers to version 3 of the GNU General Public License. + "This License" refers to version 3 of the GNU Affero General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. @@ -549,35 +537,45 @@ to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. - 13. Use with the GNU Affero General Public License. + 13. Remote Network Interaction; Use with the GNU General Public License. + + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single +under version 3 of the GNU General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to +the GNU Affero General Public License from time to time. 
Such new versions +will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General +Program specifies that a certain numbered version of the GNU Affero General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published +GNU Affero General Public License, you may choose any version ever published by the Free Software Foundation. If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's +versions of the GNU Affero General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. @@ -635,40 +633,29 @@ the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by + it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. + GNU Affero General Public License for more details. - You should have received a copy of the GNU General Public License - along with this program. If not, see . + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . Also add information on how to contact you by electronic and paper mail. - If the program does terminal interaction, make it output a short -notice like this when it starts in an interactive mode: - - Copyright (C) - This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, your program's commands -might be different; for a GUI interface, you would use an "about box". + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU GPL, see -. - - The GNU General Public License does not permit incorporating your program -into proprietary programs. 
If your program is a subroutine library, you -may consider it more useful to permit linking proprietary applications with -the library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. But first, please read -. +For more information on this, and how to apply and follow the GNU AGPL, see +. diff --git a/README.md b/README.md index 9c991abf0179..f48a4dd73122 100644 --- a/README.md +++ b/README.md @@ -456,8 +456,8 @@ We love your input! We want to make contributing to YOLOv5 as easy and transpare YOLOv5 is available under two different licenses: -- **GPL-3.0 License**: See [LICENSE](https://github.com/ultralytics/yolov5/blob/master/LICENSE) file for details. -- **Enterprise License**: Provides greater flexibility for commercial product development without the open-source requirements of GPL-3.0. Typical use cases are embedding Ultralytics software and AI models in commercial products and applications. Request an Enterprise License at [Ultralytics Licensing](https://ultralytics.com/license). +- **AGPL-3.0 License**: See [LICENSE](https://github.com/ultralytics/yolov5/blob/master/LICENSE) file for details. +- **Enterprise License**: Provides greater flexibility for commercial product development without the open-source requirements of AGPL-3.0. Typical use cases are embedding Ultralytics software and AI models in commercial products and applications. Request an Enterprise License at [Ultralytics Licensing](https://ultralytics.com/license). ##
Contact
diff --git a/README.zh-CN.md b/README.zh-CN.md index 761e61634dfb..0a696e591d0d 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -451,8 +451,8 @@ python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --inclu YOLOv5 在两种不同的 License 下可用: -- **GPL-3.0 License**: 查看 [License](https://github.com/ultralytics/yolov5/blob/master/LICENSE) 文件的详细信息。 -- **企业License**:在没有 GPL-3.0 开源要求的情况下为商业产品开发提供更大的灵活性。典型用例是将 Ultralytics 软件和 AI 模型嵌入到商业产品和应用程序中。在以下位置申请企业许可证 [Ultralytics 许可](https://ultralytics.com/license) 。 +- **AGPL-3.0 License**: 查看 [License](https://github.com/ultralytics/yolov5/blob/master/LICENSE) 文件的详细信息。 +- **企业License**:在没有 AGPL-3.0 开源要求的情况下为商业产品开发提供更大的灵活性。典型用例是将 Ultralytics 软件和 AI 模型嵌入到商业产品和应用程序中。在以下位置申请企业许可证 [Ultralytics 许可](https://ultralytics.com/license) 。 ##
联系我们
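(The remainder of this commit repeats the same one-line header substitution, GPL-3.0 to AGPL-3.0, across the remaining source, config and script files. Purely as an illustration, not part of the patch, a bulk swap like that could be scripted as in the minimal sketch below; the repo-root path and the suffix list are assumptions for the sketch, not details taken from the commit.)

# bulk license-header swap -- hypothetical helper, not included in the commit
from pathlib import Path

OLD = 'YOLOv5 🚀 by Ultralytics, GPL-3.0 license'
NEW = 'YOLOv5 🚀 by Ultralytics, AGPL-3.0 license'
ROOT = Path('.')  # assumed: run from the yolov5 checkout root

for suffix in ('.py', '.yaml', '.sh'):  # file types touched by this commit
    for path in ROOT.rglob(f'*{suffix}'):
        text = path.read_text(encoding='utf-8')
        if OLD in text:  # rewrite only files that carry the old header
            path.write_text(text.replace(OLD, NEW), encoding='utf-8')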
diff --git a/benchmarks.py b/benchmarks.py index 09108b8a7cc4..fc3073965ab3 100644 --- a/benchmarks.py +++ b/benchmarks.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Run YOLOv5 benchmarks on all supported export formats diff --git a/classify/predict.py b/classify/predict.py index 5f0d40787b52..c1b6650d4bd0 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Run YOLOv5 classification inference on images, videos, directories, globs, YouTube, webcam, streams, etc. diff --git a/classify/train.py b/classify/train.py index ae2363ccf056..8b8327f173ef 100644 --- a/classify/train.py +++ b/classify/train.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Train a YOLOv5 classifier model on a classification dataset diff --git a/classify/val.py b/classify/val.py index 4edd5a1f5e9e..643489d64d36 100644 --- a/classify/val.py +++ b/classify/val.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Validate a trained YOLOv5 classification model on a classification dataset diff --git a/data/Argoverse.yaml b/data/Argoverse.yaml index 558151dc849e..8a65407a6333 100644 --- a/data/Argoverse.yaml +++ b/data/Argoverse.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ by Argo AI # Example usage: python train.py --data Argoverse.yaml # parent diff --git a/data/GlobalWheat2020.yaml b/data/GlobalWheat2020.yaml index 01812d031bc5..7b02ac95dd95 100644 --- a/data/GlobalWheat2020.yaml +++ b/data/GlobalWheat2020.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Global Wheat 2020 dataset http://www.global-wheat.com/ by University of Saskatchewan # Example usage: python train.py --data GlobalWheat2020.yaml # parent diff --git a/data/ImageNet.yaml b/data/ImageNet.yaml index 14f12950605f..5fdcb63f89a5 100644 --- a/data/ImageNet.yaml +++ b/data/ImageNet.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # ImageNet-1k dataset https://www.image-net.org/index.php by Stanford University # Simplified class names from https://github.com/anishathalye/imagenet-simple-labels # Example usage: python classify/train.py --data imagenet diff --git a/data/Objects365.yaml b/data/Objects365.yaml index 05b26a1f4796..bb2aa34cd4a4 100644 --- a/data/Objects365.yaml +++ b/data/Objects365.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Objects365 dataset https://www.objects365.org/ by Megvii # Example usage: python train.py --data Objects365.yaml # parent diff --git a/data/SKU-110K.yaml b/data/SKU-110K.yaml index edae7171c660..a943eecdeee6 100644 --- a/data/SKU-110K.yaml +++ b/data/SKU-110K.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # SKU-110K retail items dataset https://github.com/eg4000/SKU110K_CVPR19 by Trax Retail # Example usage: python train.py --data SKU-110K.yaml # parent diff --git a/data/VOC.yaml b/data/VOC.yaml index 27d38109c53a..104856f0c9c7 100644 --- a/data/VOC.yaml +++ b/data/VOC.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 
license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC by University of Oxford # Example usage: python train.py --data VOC.yaml # parent diff --git a/data/VisDrone.yaml b/data/VisDrone.yaml index a8bcf8e628ec..2a13904dc8dd 100644 --- a/data/VisDrone.yaml +++ b/data/VisDrone.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # VisDrone2019-DET dataset https://github.com/VisDrone/VisDrone-Dataset by Tianjin University # Example usage: python train.py --data VisDrone.yaml # parent diff --git a/data/coco.yaml b/data/coco.yaml index d64dfc7fed76..ea32cb6269a3 100644 --- a/data/coco.yaml +++ b/data/coco.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # COCO 2017 dataset http://cocodataset.org by Microsoft # Example usage: python train.py --data coco.yaml # parent diff --git a/data/coco128-seg.yaml b/data/coco128-seg.yaml index 5e81910cc456..0a2499c00a1a 100644 --- a/data/coco128-seg.yaml +++ b/data/coco128-seg.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # COCO128-seg dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics # Example usage: python train.py --data coco128.yaml # parent diff --git a/data/coco128.yaml b/data/coco128.yaml index 12556736a571..0cb53120be2c 100644 --- a/data/coco128.yaml +++ b/data/coco128.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics # Example usage: python train.py --data coco128.yaml # parent diff --git a/data/hyps/hyp.Objects365.yaml b/data/hyps/hyp.Objects365.yaml index 74971740f7c7..c4b6e8051d7b 100644 --- a/data/hyps/hyp.Objects365.yaml +++ b/data/hyps/hyp.Objects365.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Hyperparameters for Objects365 training # python train.py --weights yolov5m.pt --data Objects365.yaml --evolve # See Hyperparameter Evolution tutorial for details https://github.com/ultralytics/yolov5#tutorials diff --git a/data/hyps/hyp.VOC.yaml b/data/hyps/hyp.VOC.yaml index 0aa4e7d9f8f5..ce20dbbddbdb 100644 --- a/data/hyps/hyp.VOC.yaml +++ b/data/hyps/hyp.VOC.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Hyperparameters for VOC training # python train.py --batch 128 --weights yolov5m6.pt --data VOC.yaml --epochs 50 --img 512 --hyp hyp.scratch-med.yaml --evolve # See Hyperparameter Evolution tutorial for details https://github.com/ultralytics/yolov5#tutorials diff --git a/data/hyps/hyp.no-augmentation.yaml b/data/hyps/hyp.no-augmentation.yaml index 8fbd5b262afa..0ae796c16dc2 100644 --- a/data/hyps/hyp.no-augmentation.yaml +++ b/data/hyps/hyp.no-augmentation.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Hyperparameters when using Albumentations frameworks # python train.py --hyp hyp.no-augmentation.yaml # See https://github.com/ultralytics/yolov5/pull/3882 for YOLOv5 + Albumentations Usage examples diff --git a/data/hyps/hyp.scratch-high.yaml b/data/hyps/hyp.scratch-high.yaml index 123cc8407413..0a0f4ec21621 100644 --- a/data/hyps/hyp.scratch-high.yaml +++ b/data/hyps/hyp.scratch-high.yaml @@ -1,4 
+1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Hyperparameters for high-augmentation COCO training from scratch # python train.py --batch 32 --cfg yolov5m6.yaml --weights '' --data coco.yaml --img 1280 --epochs 300 # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials diff --git a/data/hyps/hyp.scratch-low.yaml b/data/hyps/hyp.scratch-low.yaml index b9ef1d55a3b6..9d722568f526 100644 --- a/data/hyps/hyp.scratch-low.yaml +++ b/data/hyps/hyp.scratch-low.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Hyperparameters for low-augmentation COCO training from scratch # python train.py --batch 64 --cfg yolov5n6.yaml --weights '' --data coco.yaml --img 640 --epochs 300 --linear # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials diff --git a/data/hyps/hyp.scratch-med.yaml b/data/hyps/hyp.scratch-med.yaml index d6867d7557ba..f6abb090bb04 100644 --- a/data/hyps/hyp.scratch-med.yaml +++ b/data/hyps/hyp.scratch-med.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Hyperparameters for medium-augmentation COCO training from scratch # python train.py --batch 32 --cfg yolov5m6.yaml --weights '' --data coco.yaml --img 1280 --epochs 300 # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials diff --git a/data/scripts/download_weights.sh b/data/scripts/download_weights.sh index 31e0a15569f2..e408959b32b2 100755 --- a/data/scripts/download_weights.sh +++ b/data/scripts/download_weights.sh @@ -1,5 +1,5 @@ #!/bin/bash -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Download latest models from https://github.com/ultralytics/yolov5/releases # Example usage: bash data/scripts/download_weights.sh # parent diff --git a/data/scripts/get_coco.sh b/data/scripts/get_coco.sh index 0d388b0a12a8..0bb276140b07 100755 --- a/data/scripts/get_coco.sh +++ b/data/scripts/get_coco.sh @@ -1,5 +1,5 @@ #!/bin/bash -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Download COCO 2017 dataset http://cocodataset.org # Example usage: bash data/scripts/get_coco.sh # parent diff --git a/data/scripts/get_coco128.sh b/data/scripts/get_coco128.sh index e7ddce89b115..2bfd6a2b32ed 100755 --- a/data/scripts/get_coco128.sh +++ b/data/scripts/get_coco128.sh @@ -1,5 +1,5 @@ #!/bin/bash -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Download COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) # Example usage: bash data/scripts/get_coco128.sh # parent diff --git a/data/scripts/get_imagenet.sh b/data/scripts/get_imagenet.sh index 6026d502e8f3..1df0fc7b66cc 100755 --- a/data/scripts/get_imagenet.sh +++ b/data/scripts/get_imagenet.sh @@ -1,5 +1,5 @@ #!/bin/bash -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Download ILSVRC2012 ImageNet dataset https://image-net.org # Example usage: bash data/scripts/get_imagenet.sh # parent diff --git a/data/xView.yaml b/data/xView.yaml index 770ab7870449..5e013ac9056d 100644 --- a/data/xView.yaml +++ b/data/xView.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # DIUx xView 2018 Challenge https://challenge.xviewdataset.org by U.S. 
National Geospatial-Intelligence Agency (NGA) # -------- DOWNLOAD DATA MANUALLY and jar xf val_images.zip to 'datasets/xView' before running train command! -------- # Example usage: python train.py --data xView.yaml diff --git a/detect.py b/detect.py index 3f32d7a50d6b..64d6f149a614 100644 --- a/detect.py +++ b/detect.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Run YOLOv5 detection inference on images, videos, directories, globs, YouTube, webcam, streams, etc. diff --git a/export.py b/export.py index e167b2088cb1..73f4bb8c0870 100644 --- a/export.py +++ b/export.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Export a YOLOv5 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit diff --git a/hubconf.py b/hubconf.py index 41af8e39d14d..9d820a54f290 100644 --- a/hubconf.py +++ b/hubconf.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5 diff --git a/models/common.py b/models/common.py index aa8ae674eb47..b1c24ad378dc 100644 --- a/models/common.py +++ b/models/common.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Common modules """ diff --git a/models/experimental.py b/models/experimental.py index 02d35b9ebd11..d60d1808da11 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Experimental modules """ diff --git a/models/hub/anchors.yaml b/models/hub/anchors.yaml index e4d7beb06e07..df2f668b022c 100644 --- a/models/hub/anchors.yaml +++ b/models/hub/anchors.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Default anchors for COCO data diff --git a/models/hub/yolov3-spp.yaml b/models/hub/yolov3-spp.yaml index c66982158ce8..4a71ed405277 100644 --- a/models/hub/yolov3-spp.yaml +++ b/models/hub/yolov3-spp.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov3-tiny.yaml b/models/hub/yolov3-tiny.yaml index b28b44315248..50b47e282df4 100644 --- a/models/hub/yolov3-tiny.yaml +++ b/models/hub/yolov3-tiny.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov3.yaml b/models/hub/yolov3.yaml index d1ef91290a8d..c5e21098f893 100644 --- a/models/hub/yolov3.yaml +++ b/models/hub/yolov3.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5-bifpn.yaml b/models/hub/yolov5-bifpn.yaml index 504815f5cfa0..9dbdd4ee0580 100644 --- a/models/hub/yolov5-bifpn.yaml +++ b/models/hub/yolov5-bifpn.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5-fpn.yaml b/models/hub/yolov5-fpn.yaml index a23e9c6fbf9f..2292eb1185a0 100644 --- a/models/hub/yolov5-fpn.yaml +++ b/models/hub/yolov5-fpn.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # 
Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5-p2.yaml b/models/hub/yolov5-p2.yaml index 554117dda59a..2c0ae44841cc 100644 --- a/models/hub/yolov5-p2.yaml +++ b/models/hub/yolov5-p2.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5-p34.yaml b/models/hub/yolov5-p34.yaml index dbf0f850083e..60ae3b4b6f30 100644 --- a/models/hub/yolov5-p34.yaml +++ b/models/hub/yolov5-p34.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5-p6.yaml b/models/hub/yolov5-p6.yaml index a17202f22044..a9e1b5f90c72 100644 --- a/models/hub/yolov5-p6.yaml +++ b/models/hub/yolov5-p6.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5-p7.yaml b/models/hub/yolov5-p7.yaml index edd7d13a34a6..a502412f0887 100644 --- a/models/hub/yolov5-p7.yaml +++ b/models/hub/yolov5-p7.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5-panet.yaml b/models/hub/yolov5-panet.yaml index ccfbf900691c..5595e2573823 100644 --- a/models/hub/yolov5-panet.yaml +++ b/models/hub/yolov5-panet.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5l6.yaml b/models/hub/yolov5l6.yaml index 632c2cb699e3..651dbb0251ae 100644 --- a/models/hub/yolov5l6.yaml +++ b/models/hub/yolov5l6.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5m6.yaml b/models/hub/yolov5m6.yaml index ecc53fd68ba6..059b12b46929 100644 --- a/models/hub/yolov5m6.yaml +++ b/models/hub/yolov5m6.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5n6.yaml b/models/hub/yolov5n6.yaml index 0c0c71d32551..5052e7cbfc8b 100644 --- a/models/hub/yolov5n6.yaml +++ b/models/hub/yolov5n6.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5s-LeakyReLU.yaml b/models/hub/yolov5s-LeakyReLU.yaml index 3a179bf3311c..0368a78dcbb4 100644 --- a/models/hub/yolov5s-LeakyReLU.yaml +++ b/models/hub/yolov5s-LeakyReLU.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5s-ghost.yaml b/models/hub/yolov5s-ghost.yaml index ff9519c3f1aa..ce5238fa5dfc 100644 --- a/models/hub/yolov5s-ghost.yaml +++ b/models/hub/yolov5s-ghost.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5s-transformer.yaml b/models/hub/yolov5s-transformer.yaml index 100d7c447527..f5267163453c 100644 --- a/models/hub/yolov5s-transformer.yaml +++ b/models/hub/yolov5s-transformer.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, 
AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5s6.yaml b/models/hub/yolov5s6.yaml index a28fb559482b..2f39b0379e74 100644 --- a/models/hub/yolov5s6.yaml +++ b/models/hub/yolov5s6.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5x6.yaml b/models/hub/yolov5x6.yaml index ba795c4aad31..e1edbcb8634c 100644 --- a/models/hub/yolov5x6.yaml +++ b/models/hub/yolov5x6.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/segment/yolov5l-seg.yaml b/models/segment/yolov5l-seg.yaml index 4782de11dd2d..71f80cc08054 100644 --- a/models/segment/yolov5l-seg.yaml +++ b/models/segment/yolov5l-seg.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/segment/yolov5m-seg.yaml b/models/segment/yolov5m-seg.yaml index 07ec25ba264d..2b8e1db2818a 100644 --- a/models/segment/yolov5m-seg.yaml +++ b/models/segment/yolov5m-seg.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/segment/yolov5n-seg.yaml b/models/segment/yolov5n-seg.yaml index c28225ab4a50..1f67f8e3dfb0 100644 --- a/models/segment/yolov5n-seg.yaml +++ b/models/segment/yolov5n-seg.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/segment/yolov5s-seg.yaml b/models/segment/yolov5s-seg.yaml index a827814e1399..2ff2524ca9b5 100644 --- a/models/segment/yolov5s-seg.yaml +++ b/models/segment/yolov5s-seg.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/segment/yolov5x-seg.yaml b/models/segment/yolov5x-seg.yaml index 5d0c4524a99c..589f65c76f95 100644 --- a/models/segment/yolov5x-seg.yaml +++ b/models/segment/yolov5x-seg.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/tf.py b/models/tf.py index 8290cf2e57f5..bc0a465d7edd 100644 --- a/models/tf.py +++ b/models/tf.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ TensorFlow, Keras and TFLite versions of YOLOv5 Authored by https://github.com/zldrobit in PR https://github.com/ultralytics/yolov5/pull/1127 diff --git a/models/yolo.py b/models/yolo.py index ed21c067ee93..18d2542bfb48 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ YOLO-specific modules diff --git a/models/yolov5l.yaml b/models/yolov5l.yaml index ce8a5de46a27..31362f876932 100644 --- a/models/yolov5l.yaml +++ b/models/yolov5l.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/yolov5m.yaml b/models/yolov5m.yaml index ad13ab370ff6..a76900c5a2e2 100644 --- a/models/yolov5m.yaml +++ b/models/yolov5m.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # 
Parameters nc: 80 # number of classes diff --git a/models/yolov5n.yaml b/models/yolov5n.yaml index 8a28a40d6e20..aba96cfc54f4 100644 --- a/models/yolov5n.yaml +++ b/models/yolov5n.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/yolov5s.yaml b/models/yolov5s.yaml index f35beabb1e1c..5d05364c4936 100644 --- a/models/yolov5s.yaml +++ b/models/yolov5s.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/yolov5x.yaml b/models/yolov5x.yaml index f617a027d8a2..4bdd93915da5 100644 --- a/models/yolov5x.yaml +++ b/models/yolov5x.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/segment/predict.py b/segment/predict.py index d82df89a85b0..4d4d6036358a 100644 --- a/segment/predict.py +++ b/segment/predict.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Run YOLOv5 segmentation inference on images, videos, directories, streams, etc. diff --git a/segment/train.py b/segment/train.py index de5f7035e6b6..7e600f77d571 100644 --- a/segment/train.py +++ b/segment/train.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Train a YOLOv5 segment model on a segment dataset Models and datasets download automatically from the latest YOLOv5 release. diff --git a/segment/val.py b/segment/val.py index a7f95fe9b6fc..c0575fd59a91 100644 --- a/segment/val.py +++ b/segment/val.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Validate a trained YOLOv5 segment model on a segment dataset diff --git a/train.py b/train.py index 960f24c3ecc7..7c403ee6d680 100644 --- a/train.py +++ b/train.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Train a YOLOv5 model on a custom dataset. Models and datasets download automatically from the latest YOLOv5 release. 
diff --git a/utils/__init__.py b/utils/__init__.py index 5b9fcd517e03..6c10857df079 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ utils/initialization """ diff --git a/utils/activations.py b/utils/activations.py index 084ce8c41230..e4d4bbde5ec8 100644 --- a/utils/activations.py +++ b/utils/activations.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Activation functions """ diff --git a/utils/augmentations.py b/utils/augmentations.py index 7ab75f17fb18..52e2e346e36e 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Image augmentation functions """ diff --git a/utils/autoanchor.py b/utils/autoanchor.py index bb5cf6e6965e..4c11ab3decec 100644 --- a/utils/autoanchor.py +++ b/utils/autoanchor.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ AutoAnchor utils """ diff --git a/utils/autobatch.py b/utils/autobatch.py index bdeb91c3d2bd..aa763b888462 100644 --- a/utils/autobatch.py +++ b/utils/autobatch.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Auto-batch utils """ diff --git a/utils/callbacks.py b/utils/callbacks.py index 166d8938322d..ccebba02bcaa 100644 --- a/utils/callbacks.py +++ b/utils/callbacks.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Callback utils """ diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 28d5b7974cf8..a5bd86d49d7e 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Dataloaders and dataset utils """ diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index 811ad4a6c9cb..ff657dea2bf2 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Builds ultralytics/yolov5:latest image on DockerHub https://hub.docker.com/r/ultralytics/yolov5 # Image is CUDA-optimized for YOLOv5 single/multi-GPU training and inference diff --git a/utils/docker/Dockerfile-arm64 b/utils/docker/Dockerfile-arm64 index 7023c6a4bb1f..556ae59a8700 100644 --- a/utils/docker/Dockerfile-arm64 +++ b/utils/docker/Dockerfile-arm64 @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Builds ultralytics/yolov5:latest-arm64 image on DockerHub https://hub.docker.com/r/ultralytics/yolov5 # Image is aarch64-compatible for Apple M1 and other ARM architectures i.e. 
Jetson Nano and Raspberry Pi diff --git a/utils/docker/Dockerfile-cpu b/utils/docker/Dockerfile-cpu index 06bad9a3790d..8e8c23bf952f 100644 --- a/utils/docker/Dockerfile-cpu +++ b/utils/docker/Dockerfile-cpu @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # Builds ultralytics/yolov5:latest-cpu image on DockerHub https://hub.docker.com/r/ultralytics/yolov5 # Image is CPU-optimized for ONNX, OpenVINO and PyTorch YOLOv5 deployments diff --git a/utils/downloads.py b/utils/downloads.py index 88f523742b5b..629537d5ade6 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Download utils """ diff --git a/utils/flask_rest_api/example_request.py b/utils/flask_rest_api/example_request.py index 952e5dcb90fa..256ad1319c82 100644 --- a/utils/flask_rest_api/example_request.py +++ b/utils/flask_rest_api/example_request.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Perform test request """ diff --git a/utils/flask_rest_api/restapi.py b/utils/flask_rest_api/restapi.py index 9258b1a68860..ae4756b276e4 100644 --- a/utils/flask_rest_api/restapi.py +++ b/utils/flask_rest_api/restapi.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Run a Flask REST API exposing one or more YOLOv5s models """ diff --git a/utils/general.py b/utils/general.py index 0e76792a08d5..053aeacd651d 100644 --- a/utils/general.py +++ b/utils/general.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ General utils """ diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 9de1f226233c..d428cdba6196 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Logging utils """ diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index c8ab38197381..4ea32b1d4c6e 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license # WARNING ⚠️ wandb is deprecated and will be removed in future release. 
# See supported integrations at https://github.com/ultralytics/yolov5#integrations diff --git a/utils/loss.py b/utils/loss.py index 9b9c3d9f8018..26cca8797315 100644 --- a/utils/loss.py +++ b/utils/loss.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Loss functions """ diff --git a/utils/metrics.py b/utils/metrics.py index 95f364c23f34..5646f40e9860 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Model validation metrics """ diff --git a/utils/plots.py b/utils/plots.py index 24c618c80b59..d1284b950269 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Plotting utils """ diff --git a/utils/segment/augmentations.py b/utils/segment/augmentations.py index 169addedf0f5..f8154b834869 100644 --- a/utils/segment/augmentations.py +++ b/utils/segment/augmentations.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Image augmentation functions """ diff --git a/utils/segment/dataloaders.py b/utils/segment/dataloaders.py index 097a5d5cb058..3ee826dba69c 100644 --- a/utils/segment/dataloaders.py +++ b/utils/segment/dataloaders.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Dataloaders """ diff --git a/utils/segment/metrics.py b/utils/segment/metrics.py index c9f137e38ead..6020fa062ba5 100644 --- a/utils/segment/metrics.py +++ b/utils/segment/metrics.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Model validation metrics """ diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 5b67b3fa7a06..d9e060ab99df 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ PyTorch utils """ diff --git a/utils/triton.py b/utils/triton.py index 25928021477e..b5153dad940d 100644 --- a/utils/triton.py +++ b/utils/triton.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Utils to interact with the Triton Inference Server """ diff --git a/val.py b/val.py index d4073b42fe78..3d01f1a5996d 100644 --- a/val.py +++ b/val.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Validate a trained YOLOv5 detection model on a detection dataset From 2118e3b88e16f24f600959886f9576fd873cc293 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 14 Apr 2023 14:37:02 +0200 Subject: [PATCH 1720/1976] Update check_requirements.py (#11358) Update general.py Signed-off-by: Glenn Jocher --- utils/general.py | 38 +++++++++++++++++++++++++++----------- 1 file changed, 27 insertions(+), 11 deletions(-) diff --git a/utils/general.py b/utils/general.py index 053aeacd651d..221191005754 100644 --- a/utils/general.py +++ b/utils/general.py @@ -388,10 +388,23 @@ def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=Fals @TryExcept() -def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True, cmds=''): - # Check installed dependencies meet YOLOv5 requirements (pass *.txt file or list of packages or single package str) +def check_requirements(requirements=ROOT.parent / 'requirements.txt', 
exclude=(), install=True, cmds=''): + """ + Check if installed dependencies meet YOLOv5 requirements and attempt to auto-update if needed. + + Args: + requirements (Union[Path, str, List[str]]): Path to a requirements.txt file, a single package requirement as a + string, or a list of package requirements as strings. + exclude (Tuple[str]): Tuple of package names to exclude from checking. + install (bool): If True, attempt to auto-update packages that don't meet requirements. + cmds (str): Additional commands to pass to the pip install command when auto-updating. + + Returns: + None + """ prefix = colorstr('red', 'bold', 'requirements:') check_python() # check python version + file = None if isinstance(requirements, Path): # requirements.txt file file = requirements.resolve() assert file.exists(), f'{prefix} {file} not found, check failed.' @@ -400,22 +413,25 @@ def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), insta elif isinstance(requirements, str): requirements = [requirements] - s = '' - n = 0 + s = '' # console string + n = 0 # number of packages updates for r in requirements: try: pkg.require(r) except (pkg.VersionConflict, pkg.DistributionNotFound): # exception if requirements not met - s += f'"{r}" ' - n += 1 + try: # attempt to import (slower but more accurate) + import importlib + importlib.import_module(next(pkg.parse_requirements(r)).name) + except ImportError: + s += f'"{r}" ' + n += 1 if s and install and AUTOINSTALL: # check environment variable - LOGGER.info(f"{prefix} YOLOv5 requirement{'s' * (n > 1)} {s}not found, attempting AutoUpdate...") + LOGGER.info(f"{prefix} YOLOv8 requirement{'s' * (n > 1)} {s}not found, attempting AutoUpdate...") try: - # assert check_online(), "AutoUpdate skipped (offline)" - LOGGER.info(check_output(f'pip install {s} {cmds}', shell=True).decode()) - source = file if 'file' in locals() else requirements - s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \ + assert check_online(), 'AutoUpdate skipped (offline)' + LOGGER.info(subprocess.check_output(f'pip install {s} {cmds}', shell=True).decode()) + s = f"{prefix} {n} package{'s' * (n > 1)} updated per {file or requirements}\n" \ f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n" LOGGER.info(s) except Exception as e: From 60e29e2d86e25c8678232652edcd920e35e836bd Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 14 Apr 2023 14:47:07 +0200 Subject: [PATCH 1721/1976] Update check_requirements() (#11360) Update general.py Signed-off-by: Glenn Jocher --- utils/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index 221191005754..3d7fd20c48d1 100644 --- a/utils/general.py +++ b/utils/general.py @@ -427,7 +427,7 @@ def check_requirements(requirements=ROOT.parent / 'requirements.txt', exclude=() n += 1 if s and install and AUTOINSTALL: # check environment variable - LOGGER.info(f"{prefix} YOLOv8 requirement{'s' * (n > 1)} {s}not found, attempting AutoUpdate...") + LOGGER.info(f"{prefix} YOLOv5 requirement{'s' * (n > 1)} {s}not found, attempting AutoUpdate...") try: assert check_online(), 'AutoUpdate skipped (offline)' LOGGER.info(subprocess.check_output(f'pip install {s} {cmds}', shell=True).decode()) From a66fa8314c7527043b010135aa9dd25c95900e8f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 14 Apr 2023 18:11:15 +0200 Subject: [PATCH 1722/1976] Add NMS to CoreML exports (#11361) * Add NMS to CoreML exports * [pre-commit.ci] auto fixes from 
pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- export.py | 150 +++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 148 insertions(+), 2 deletions(-) diff --git a/export.py b/export.py index 73f4bb8c0870..5f8e1c4821da 100644 --- a/export.py +++ b/export.py @@ -77,6 +77,25 @@ MACOS = platform.system() == 'Darwin' # macOS environment +class iOSModel(torch.nn.Module): + + def __init__(self, model, im): + super().__init__() + b, c, h, w = im.shape # batch, channel, height, width + self.model = model + self.nc = model.nc # number of classes + if w == h: + self.normalize = 1. / w + else: + self.normalize = torch.tensor([1. / w, 1. / h, 1. / w, 1. / h]) # broadcast (slower, smaller) + # np = model(im)[0].shape[1] # number of points + # self.normalize = torch.tensor([1. / w, 1. / h, 1. / w, 1. / h]).expand(np, 4) # explicit (faster, larger) + + def forward(self, x): + xywh, conf, cls = self.model(x)[0].squeeze().split((4, 1, self.nc), 1) + return cls * conf, xywh * self.normalize # confidence (3780, 80), coordinates (3780, 4) + + def export_formats(): # YOLOv5 export formats x = [ @@ -223,7 +242,7 @@ def export_paddle(model, im, file, metadata, prefix=colorstr('PaddlePaddle:')): @try_export -def export_coreml(model, im, file, int8, half, prefix=colorstr('CoreML:')): +def export_coreml(model, im, file, int8, half, nms, prefix=colorstr('CoreML:')): # YOLOv5 CoreML export check_requirements('coremltools') import coremltools as ct @@ -231,6 +250,8 @@ def export_coreml(model, im, file, int8, half, prefix=colorstr('CoreML:')): LOGGER.info(f'\n{prefix} starting export with coremltools {ct.__version__}...') f = file.with_suffix('.mlmodel') + if nms: + model = iOSModel(model, im) ts = torch.jit.trace(model, im, strict=False) # TorchScript model ct_model = ct.convert(ts, inputs=[ct.ImageType('image', shape=im.shape, scale=1 / 255, bias=[0, 0, 0])]) bits, mode = (8, 'kmeans_lut') if int8 else (16, 'linear') if half else (32, None) @@ -506,6 +527,129 @@ def add_tflite_metadata(file, metadata, num_outputs): tmp_file.unlink() +def pipeline_coreml(model, im, file, names, y, prefix=colorstr('CoreML Pipeline:')): + # YOLOv5 CoreML pipeline + import coremltools as ct + from PIL import Image + + print(f'{prefix} starting pipeline with coremltools {ct.__version__}...') + batch_size, ch, h, w = list(im.shape) # BCHW + t = time.time() + + # Output shapes + spec = model.get_spec() + out0, out1 = iter(spec.description.output) + if platform.system() == 'Darwin': + img = Image.new('RGB', (w, h)) # img(192 width, 320 height) + # img = torch.zeros((*opt.img_size, 3)).numpy() # img size(320,192,3) iDetection + out = model.predict({'image': img}) + out0_shape, out1_shape = out[out0.name].shape, out[out1.name].shape + else: # linux and windows can not run model.predict(), get sizes from pytorch output y + s = tuple(y[0].shape) + out0_shape, out1_shape = (s[1], s[2] - 5), (s[1], 4) # (3780, 80), (3780, 4) + + # Checks + nx, ny = spec.description.input[0].type.imageType.width, spec.description.input[0].type.imageType.height + na, nc = out0_shape + # na, nc = out0.type.multiArrayType.shape # number anchors, classes + assert len(names) == nc, f'{len(names)} names found for nc={nc}' # check + + # Define output shapes (missing) + out0.type.multiArrayType.shape[:] = out0_shape # (3780, 80) + out1.type.multiArrayType.shape[:] = out1_shape # (3780, 4) + # 
spec.neuralNetwork.preprocessing[0].featureName = '0' + + # Flexible input shapes + # from coremltools.models.neural_network import flexible_shape_utils + # s = [] # shapes + # s.append(flexible_shape_utils.NeuralNetworkImageSize(320, 192)) + # s.append(flexible_shape_utils.NeuralNetworkImageSize(640, 384)) # (height, width) + # flexible_shape_utils.add_enumerated_image_sizes(spec, feature_name='image', sizes=s) + # r = flexible_shape_utils.NeuralNetworkImageSizeRange() # shape ranges + # r.add_height_range((192, 640)) + # r.add_width_range((192, 640)) + # flexible_shape_utils.update_image_size_range(spec, feature_name='image', size_range=r) + + # Print + print(spec.description) + + # Model from spec + model = ct.models.MLModel(spec) + + # 3. Create NMS protobuf + nms_spec = ct.proto.Model_pb2.Model() + nms_spec.specificationVersion = 5 + for i in range(2): + decoder_output = model._spec.description.output[i].SerializeToString() + nms_spec.description.input.add() + nms_spec.description.input[i].ParseFromString(decoder_output) + nms_spec.description.output.add() + nms_spec.description.output[i].ParseFromString(decoder_output) + + nms_spec.description.output[0].name = 'confidence' + nms_spec.description.output[1].name = 'coordinates' + + output_sizes = [nc, 4] + for i in range(2): + ma_type = nms_spec.description.output[i].type.multiArrayType + ma_type.shapeRange.sizeRanges.add() + ma_type.shapeRange.sizeRanges[0].lowerBound = 0 + ma_type.shapeRange.sizeRanges[0].upperBound = -1 + ma_type.shapeRange.sizeRanges.add() + ma_type.shapeRange.sizeRanges[1].lowerBound = output_sizes[i] + ma_type.shapeRange.sizeRanges[1].upperBound = output_sizes[i] + del ma_type.shape[:] + + nms = nms_spec.nonMaximumSuppression + nms.confidenceInputFeatureName = out0.name # 1x507x80 + nms.coordinatesInputFeatureName = out1.name # 1x507x4 + nms.confidenceOutputFeatureName = 'confidence' + nms.coordinatesOutputFeatureName = 'coordinates' + nms.iouThresholdInputFeatureName = 'iouThreshold' + nms.confidenceThresholdInputFeatureName = 'confidenceThreshold' + nms.iouThreshold = 0.45 + nms.confidenceThreshold = 0.25 + nms.pickTop.perClass = True + nms.stringClassLabels.vector.extend(names.values()) + nms_model = ct.models.MLModel(nms_spec) + + # 4. 
Pipeline models together + pipeline = ct.models.pipeline.Pipeline(input_features=[('image', ct.models.datatypes.Array(3, ny, nx)), + ('iouThreshold', ct.models.datatypes.Double()), + ('confidenceThreshold', ct.models.datatypes.Double())], + output_features=['confidence', 'coordinates']) + pipeline.add_model(model) + pipeline.add_model(nms_model) + + # Correct datatypes + pipeline.spec.description.input[0].ParseFromString(model._spec.description.input[0].SerializeToString()) + pipeline.spec.description.output[0].ParseFromString(nms_model._spec.description.output[0].SerializeToString()) + pipeline.spec.description.output[1].ParseFromString(nms_model._spec.description.output[1].SerializeToString()) + + # Update metadata + pipeline.spec.specificationVersion = 5 + pipeline.spec.description.metadata.versionString = 'https://github.com/ultralytics/yolov5' + pipeline.spec.description.metadata.shortDescription = 'https://github.com/ultralytics/yolov5' + pipeline.spec.description.metadata.author = 'glenn.jocher@ultralytics.com' + pipeline.spec.description.metadata.license = 'https://github.com/ultralytics/yolov5/blob/master/LICENSE' + pipeline.spec.description.metadata.userDefined.update({ + 'classes': ','.join(names.values()), + 'iou_threshold': str(nms.iouThreshold), + 'confidence_threshold': str(nms.confidenceThreshold)}) + + # Save the model + f = file.with_suffix('.mlmodel') # filename + model = ct.models.MLModel(pipeline.spec) + model.input_description['image'] = 'Input image' + model.input_description['iouThreshold'] = f'(optional) IOU Threshold override (default: {nms.iouThreshold})' + model.input_description['confidenceThreshold'] = \ + f'(optional) Confidence Threshold override (default: {nms.confidenceThreshold})' + model.output_description['confidence'] = 'Boxes × Class confidence (see user-defined metadata "classes")' + model.output_description['coordinates'] = 'Boxes × [x, y, width, height] (relative to image size)' + model.save(f) # pipelined + print(f'{prefix} pipeline success ({time.time() - t:.2f}s), saved as {f} ({file_size(f):.1f} MB)') + + @smart_inference_mode() def run( data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' @@ -584,7 +728,9 @@ def run( if xml: # OpenVINO f[3], _ = export_openvino(file, metadata, half) if coreml: # CoreML - f[4], _ = export_coreml(model, im, file, int8, half) + f[4], ct_model = export_coreml(model, im, file, int8, half, nms) + if nms: + pipeline_coreml(ct_model, im, file, model.names, y) if any((saved_model, pb, tflite, edgetpu, tfjs)): # TensorFlow formats assert not tflite or not tfjs, 'TFLite and TF.js models must be exported separately, please pass only one type.' assert not isinstance(model, ClassificationModel), 'ClassificationModel export to TF formats not yet supported.' 
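
The pipeline assembled above exposes one image input plus two optional scalar inputs ('iouThreshold', 'confidenceThreshold') and returns 'confidence' and 'coordinates' with NMS already applied inside the model. A minimal usage sketch, assuming a macOS host (coremltools can only run predictions there), a `yolov5s.mlmodel` produced by an export run with the new NMS option, and an illustrative image path:

```python
# Minimal sketch: query the NMS-pipelined CoreML model produced by the patch above.
# Assumptions: macOS (model.predict is unsupported elsewhere), coremltools installed,
# 'yolov5s.mlmodel' exported with NMS enabled, and 'image.jpg' as a stand-in input.
import coremltools as ct
from PIL import Image

model = ct.models.MLModel('yolov5s.mlmodel')  # image -> detector -> NMS pipeline
img = Image.open('image.jpg').resize((640, 640))  # must match the export image size

# Both thresholds are optional inputs declared by the pipeline spec above
out = model.predict({'image': img, 'iouThreshold': 0.45, 'confidenceThreshold': 0.25})
print(out['confidence'].shape)   # (n, nc): per-class scores for the n kept boxes
print(out['coordinates'].shape)  # (n, 4): xywh relative to image size (see iOSModel.normalize)
```
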
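Stepping back to the check_requirements() rework in patch 1720 above: a requirement is now only counted as missing when both the pkg_resources metadata check and a direct import fail, which avoids false positives for packages installed without usable dist-info. A standalone sketch of that two-step pattern (the requirement strings here are illustrative):

```python
# Sketch of the double-check used by the updated check_requirements():
# metadata check first, then an import fallback (slower but more accurate).
import importlib

import pkg_resources as pkg

missing = []
for r in ['numpy>=1.18.5', 'tqdm>=4.64.0']:  # illustrative requirement strings
    try:
        pkg.require(r)  # raises on missing package or version conflict
    except (pkg.VersionConflict, pkg.DistributionNotFound):
        try:
            importlib.import_module(next(pkg.parse_requirements(r)).name)  # import fallback
        except ImportError:
            missing.append(r)

print(missing or 'all requirements satisfied')
```
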
From 94714fe6addff41e4984ff510f70cc415a131725 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 15 Apr 2023 00:51:17 +0200 Subject: [PATCH 1723/1976] Update requirements.txt to comment tensorboard (#11362) * Update requirements.txt comment tensorboard Signed-off-by: Glenn Jocher * Make tensorboard optional * Update __init__.py Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher --- requirements.txt | 2 +- utils/loggers/__init__.py | 6 +++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index baf7a2757f1d..e1d98a82ceac 100644 --- a/requirements.txt +++ b/requirements.txt @@ -18,7 +18,7 @@ tqdm>=4.64.0 # protobuf<=3.20.1 # https://github.com/ultralytics/yolov5/issues/8012 # Logging --------------------------------------------------------------------- -tensorboard>=2.4.1 +# tensorboard>=2.4.1 # clearml>=1.2.0 # comet diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index d428cdba6196..9b4c1d13b778 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -9,7 +9,6 @@ import pkg_resources as pkg import torch -from torch.utils.tensorboard import SummaryWriter from utils.general import LOGGER, colorstr, cv2 from utils.loggers.clearml.clearml_utils import ClearmlLogger @@ -20,6 +19,11 @@ LOGGERS = ('csv', 'tb', 'wandb', 'clearml', 'comet') # *.csv, TensorBoard, Weights & Biases, ClearML RANK = int(os.getenv('RANK', -1)) +try: + from torch.utils.tensorboard import SummaryWriter +except ImportError: + SummaryWriter = lambda *args: None # None = SummaryWriter(str) + try: import wandb From 4408d02e7aef15b6f7c524a7dd7ee8ac9715f715 Mon Sep 17 00:00:00 2001 From: abuod0 <97100904+abuod0@users.noreply.github.com> Date: Tue, 18 Apr 2023 18:21:35 +0800 Subject: [PATCH 1724/1976] Fixing onnx and Tensorflow versionsm for Jetson (#11377) Signed-off-by: abuod0 <97100904+abuod0@users.noreply.github.com> --- requirements.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index e1d98a82ceac..65924c9feec4 100644 --- a/requirements.txt +++ b/requirements.txt @@ -28,12 +28,12 @@ seaborn>=0.11.0 # Export ---------------------------------------------------------------------- # coremltools>=6.0 # CoreML export -# onnx>=1.12.0 # ONNX export +# onnx>=1.10.0 # ONNX export # onnx-simplifier>=0.4.1 # ONNX simplifier # nvidia-pyindex # TensorRT export # nvidia-tensorrt # TensorRT export # scikit-learn<=1.1.2 # CoreML quantization -# tensorflow>=2.4.1 # TF exports (-cpu, -aarch64, -macos) +# tensorflow>=2.4.0 # TF exports (-cpu, -aarch64, -macos) # tensorflowjs>=3.9.0 # TF.js export # openvino-dev # OpenVINO export From aa7c45c2cff71d498f42e9762c73cb9e5b0c699a Mon Sep 17 00:00:00 2001 From: Paula Derrenger <107626595+pderrenger@users.noreply.github.com> Date: Tue, 18 Apr 2023 12:22:14 +0200 Subject: [PATCH 1725/1976] Update Ultralytics HUB Copy with Mention of YOLOv8 (#11375) Signed-off-by: Paula Derrenger <107626595+pderrenger@users.noreply.github.com> Co-authored-by: Glenn Jocher --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index f48a4dd73122..e4258aa32592 100644 --- a/README.md +++ b/README.md @@ -200,7 +200,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - ##
Ultralytics HUB
-Experience seamless AI with [Ultralytics HUB](https://bit.ly/ultralytics_hub) ⭐, the all-in-one solution for data visualization, YOLOv5 🚀 model training and deployment, without any coding. Transform images into actionable insights and bring your AI visions to life with ease using our cutting-edge platform and user-friendly [Ultralytics App](https://ultralytics.com/app_install). Start your journey for **Free** now! +Experience seamless AI with [Ultralytics HUB](https://bit.ly/ultralytics_hub) ⭐, the all-in-one solution for data visualization, YOLOv5 and YOLOv8 🚀 model training and deployment, without any coding. Transform images into actionable insights and bring your AI visions to life with ease using our cutting-edge platform and user-friendly [Ultralytics App](https://ultralytics.com/app_install). Start your journey for **Free** now! From fb1e746b8a0a2219d767184c4982a5e4c9ed5067 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 21 Apr 2023 13:50:28 +0200 Subject: [PATCH 1726/1976] Docs update for new YOLOv5 URLs (#11409) * Update YOLOv5 Docs Tutorials * Update YOLOv5 Docs Tutorials * Update actions * Update actions * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update actions * Update actions * Update actions * Update actions * Update actions * Update actions --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/workflows/greetings.yml | 12 +++++------ .github/workflows/links.yml | 38 +++++++++++++++++++++++++++++++++ .github/workflows/stale.yml | 33 +++++++++++++++++----------- README.md | 36 +++++++++++++++---------------- README.zh-CN.md | 38 ++++++++++++++++----------------- utils/loggers/comet/README.md | 2 +- 6 files changed, 102 insertions(+), 57 deletions(-) create mode 100644 .github/workflows/links.yml diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index 337a563803db..3712ea9dec8a 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -23,11 +23,11 @@ jobs: - ✅ Reduce changes to the absolute **minimum** required for your bug fix or feature addition. _"It is not daily increase but daily decrease, hack away the unessential. The closer to the source, the less wastage there is."_ — Bruce Lee issue-message: | - 👋 Hello @${{ github.actor }}, thank you for your interest in YOLOv5 🚀! Please visit our ⭐️ [Tutorials](https://docs.ultralytics.com/yolov5/#tutorials) to get started, where you can find quickstart guides for simple tasks like [Custom Data Training](https://docs.ultralytics.com/yolov5/train_custom_data/) all the way to advanced concepts like [Hyperparameter Evolution](https://docs.ultralytics.com/yolov5/hyp_evolution/). + 👋 Hello @${{ github.actor }}, thank you for your interest in YOLOv5 🚀! Please visit our ⭐️ [Tutorials](https://docs.ultralytics.com/yolov5/) to get started, where you can find quickstart guides for simple tasks like [Custom Data Training](https://docs.ultralytics.com/yolov5/tutorials/train_custom_data/) all the way to advanced concepts like [Hyperparameter Evolution](https://docs.ultralytics.com/yolov5/tutorials/hyperparameter_evolution/). If this is a 🐛 Bug Report, please provide a **minimum reproducible example** to help us debug it. 
- If this is a custom training ❓ Question, please provide as much information as possible, including dataset image examples and training logs, and verify you are following our [Tips for Best Training Results](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results). + If this is a custom training ❓ Question, please provide as much information as possible, including dataset image examples and training logs, and verify you are following our [Tips for Best Training Results](https://docs.ultralytics.com/yolov5/tutorials/tips_for_best_training_results/). ## Requirements @@ -43,15 +43,15 @@ jobs: YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled): - **Notebooks** with free GPU: Run on Gradient Open In Colab Open In Kaggle - - **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart) - - **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart) - - **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) Docker Pulls + - **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/) + - **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://docs.ultralytics.com/aws_quickstart_tutorial/) + - **Docker Image**. See [Docker Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/docker_image_quickstart_tutorial/) Docker Pulls ## Status YOLOv5 CI - If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on MacOS, Windows, and Ubuntu every 24 hours and on every commit. + If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on macOS, Windows, and Ubuntu every 24 hours and on every commit. 
## Introducing YOLOv8 🚀 diff --git a/.github/workflows/links.yml b/.github/workflows/links.yml new file mode 100644 index 000000000000..b3e82df34947 --- /dev/null +++ b/.github/workflows/links.yml @@ -0,0 +1,38 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# YOLO Continuous Integration (CI) GitHub Actions tests + +name: Check Broken links + +on: + push: + branches: [master] + pull_request: + branches: [master] + workflow_dispatch: + schedule: + - cron: '0 0 * * *' # runs at 00:00 UTC every day + +jobs: + Links: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + + - name: Test Markdown and HTML links + uses: lycheeverse/lychee-action@v1.6.1 + with: + fail: true + # accept 429(Instagram, 'too many requests'), 999(LinkedIn, 'unknown status code'), Timeout(Twitter) + args: --accept 429,999 --exclude-loopback --exclude twitter.com --exclude-mail './**/*.md' './**/*.html' + env: + GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} + + - name: Test Markdown, HTML, YAML, Python and Notebook links + if: github.event_name == 'workflow_dispatch' + uses: lycheeverse/lychee-action@v1.6.1 + with: + fail: true + # accept 429(Instagram, 'too many requests'), 999(LinkedIn, 'unknown status code'), Timeout(Twitter) + args: --accept 429,999 --exclude-loopback --exclude twitter.com,url.com --exclude-mail './**/*.md' './**/*.html' './**/*.yml' './**/*.yaml' './**/*.py' './**/*.ipynb' + env: + GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 734350441c61..65c8f70798f1 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -12,26 +12,33 @@ jobs: - uses: actions/stale@v8 with: repo-token: ${{ secrets.GITHUB_TOKEN }} + stale-issue-message: | - 👋 Hello, this issue has been automatically marked as stale because it has not had recent activity. Please note it will be closed if no further activity occurs. + 👋 Hello there! We wanted to give you a friendly reminder that this issue has not had any recent activity and may be closed soon, but don't worry - you can always reopen it if needed. If you still have any questions or concerns, please feel free to let us know how we can help. - Access additional [YOLOv5](https://ultralytics.com/yolov5) 🚀 resources: - - **Wiki** – https://github.com/ultralytics/yolov5/wiki - - **Tutorials** – https://github.com/ultralytics/yolov5#tutorials - - **Docs** – https://docs.ultralytics.com + For additional resources and information, please see the links below: - Access additional [Ultralytics](https://ultralytics.com) ⚡ resources: - - **Ultralytics HUB** – https://ultralytics.com/hub - - **Vision API** – https://ultralytics.com/yolov5 - - **About Us** – https://ultralytics.com/about - - **Join Our Team** – https://ultralytics.com/work - - **Contact Us** – https://ultralytics.com/contact + - **Docs**: https://docs.ultralytics.com + - **HUB**: https://hub.ultralytics.com + - **Community**: https://community.ultralytics.com Feel free to inform us of any other **issues** you discover or **feature requests** that come to mind in the future. Pull Requests (PRs) are also always welcomed! - Thank you for your contributions to YOLOv5 🚀 and Vision AI ⭐! + Thank you for your contributions to YOLO 🚀 and Vision AI ⭐ + + stale-pr-message: | + 👋 Hello there! We wanted to let you know that we've decided to close this pull request due to inactivity. We appreciate the effort you put into contributing to our project, but unfortunately, not all contributions are suitable or aligned with our product roadmap. 
+ + We hope you understand our decision, and please don't let it discourage you from contributing to open source projects in the future. We value all of our community members and their contributions, and we encourage you to keep exploring new projects and ways to get involved. + + For additional resources and information, please see the links below: + + - **Docs**: https://docs.ultralytics.com + - **HUB**: https://hub.ultralytics.com + - **Community**: https://community.ultralytics.com + + Thank you for your contributions to YOLO 🚀 and Vision AI ⭐ - stale-pr-message: 'This pull request has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions YOLOv5 🚀 and Vision AI ⭐.' days-before-issue-stale: 30 days-before-issue-close: 10 days-before-pr-stale: 90 diff --git a/README.md b/README.md index e4258aa32592..7a43245a1f45 100644 --- a/README.md +++ b/README.md @@ -153,22 +153,22 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml -
Tutorials -- [Train Custom Data](https://docs.ultralytics.com/yolov5/train_custom_data) 🚀 RECOMMENDED -- [Tips for Best Training Results](https://docs.ultralytics.com/yolov5/tips_for_best_training_results) ☘️ RECOMMENDED -- [Multi-GPU Training](https://docs.ultralytics.com/yolov5/multi_gpu_training) -- [PyTorch Hub](https://docs.ultralytics.com/yolov5/pytorch_hub) 🌟 NEW -- [TFLite, ONNX, CoreML, TensorRT Export](https://docs.ultralytics.com/yolov5/export) 🚀 -- [NVIDIA Jetson platform Deployment](https://docs.ultralytics.com/yolov5/jetson_nano) 🌟 NEW -- [Test-Time Augmentation (TTA)](https://docs.ultralytics.com/yolov5/tta) -- [Model Ensembling](https://docs.ultralytics.com/yolov5/ensemble) -- [Model Pruning/Sparsity](https://docs.ultralytics.com/yolov5/pruning_sparsity) -- [Hyperparameter Evolution](https://docs.ultralytics.com/yolov5/hyp_evolution) -- [Transfer Learning with Frozen Layers](https://docs.ultralytics.com/yolov5/transfer_learn_frozen) -- [Architecture Summary](https://docs.ultralytics.com/yolov5/architecture) 🌟 NEW -- [Roboflow for Datasets](https://docs.ultralytics.com/yolov5/roboflow) -- [ClearML Logging](https://docs.ultralytics.com/yolov5/clearml) 🌟 NEW -- [YOLOv5 with Neural Magic's Deepsparse](https://docs.ultralytics.com/yolov5/neural_magic) 🌟 NEW -- [Comet Logging](https://docs.ultralytics.com/yolov5/comet) 🌟 NEW +- [Train Custom Data](https://docs.ultralytics.com/yolov5/tutorials/train_custom_data) 🚀 RECOMMENDED +- [Tips for Best Training Results](https://docs.ultralytics.com/yolov5/tutorials/tips_for_best_training_results) ☘️ +- [Multi-GPU Training](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training) +- [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading) 🌟 NEW +- [TFLite, ONNX, CoreML, TensorRT Export](https://docs.ultralytics.com/yolov5/tutorials/model_export) 🚀 +- [NVIDIA Jetson platform Deployment](https://docs.ultralytics.com/yolov5/tutorials/running_on_jetson_nano) 🌟 NEW +- [Test-Time Augmentation (TTA)](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation) +- [Model Ensembling](https://docs.ultralytics.com/yolov5/tutorials/model_ensembling) +- [Model Pruning/Sparsity](https://docs.ultralytics.com/yolov5/tutorials/model_pruning_and_sparsity) +- [Hyperparameter Evolution](https://docs.ultralytics.com/yolov5/tutorials/hyperparameter_evolution) +- [Transfer Learning with Frozen Layers](https://docs.ultralytics.com/yolov5/tutorials/transfer_learning_with_frozen_layers) +- [Architecture Summary](https://docs.ultralytics.com/yolov5/tutorials/architecture_description) 🌟 NEW +- [Roboflow for Datasets, Labeling, and Active Learning](https://docs.ultralytics.com/yolov5/tutorials/roboflow_datasets_integration) +- [ClearML Logging](https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration) 🌟 NEW +- [YOLOv5 with Neural Magic's Deepsparse](https://docs.ultralytics.com/yolov5/tutorials/neural_magic_pruning_quantization) 🌟 NEW +- [Comet Logging](https://docs.ultralytics.com/yolov5/tutorials/comet_logging_integration) 🌟 NEW
@@ -436,10 +436,10 @@ Get started in seconds with our verified environments. Click each icon below for - + - + diff --git a/README.zh-CN.md b/README.zh-CN.md index 0a696e591d0d..6396ecffbe79 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -147,22 +147,22 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml -
教程 -- [训练自定义数据](https://docs.ultralytics.com/yolov5/train_custom_data) 🚀 推荐 -- [获得最佳训练结果的技巧](https://docs.ultralytics.com/yolov5/tips_for_best_training_results) ☘️ 推荐 -- [多 GPU 训练](https://docs.ultralytics.com/yolov5/multi_gpu_training) -- [PyTorch Hub](https://docs.ultralytics.com/yolov5/pytorch_hub) 🌟 新 -- [TFLite, ONNX, CoreML, TensorRT 导出](https://docs.ultralytics.com/yolov5/export) 🚀 -- [NVIDIA Jetson 平台部署](https://docs.ultralytics.com/yolov5/jetson_nano) 🌟 新 -- [测试时增强(TTA)](https://docs.ultralytics.com/yolov5/tta) -- [模型集成](https://docs.ultralytics.com/yolov5/ensemble) -- [模型剪枝/稀疏性](https://docs.ultralytics.com/yolov5/pruning_sparsity) -- [超参数进化](https://docs.ultralytics.com/yolov5/hyp_evolution) -- [冻结层的迁移学习](https://docs.ultralytics.com/yolov5/transfer_learn_frozen) -- [架构概述](https://docs.ultralytics.com/yolov5/architecture) 🌟 新 -- [Roboflow](https://docs.ultralytics.com/yolov5/roboflow) -- [ClearML 日志记录](https://docs.ultralytics.com/yolov5/clearml) 🌟 新 -- [YOLOv5 与 Neural Magic 的 Deepsparse](https://docs.ultralytics.com/yolov5/neural_magic) 🌟 新 -- [Comet 日志记录](https://docs.ultralytics.com/yolov5/comet) 🌟 新 +- [训练自定义数据](https://docs.ultralytics.com/yolov5/tutorials/train_custom_data) 🚀 推荐 +- [获得最佳训练结果的技巧](https://docs.ultralytics.com/yolov5/tutorials/tips_for_best_training_results) ☘️ +- [多GPU训练](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training) +- [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading) 🌟 新 +- [TFLite,ONNX,CoreML,TensorRT导出](https://docs.ultralytics.com/yolov5/tutorials/model_export) 🚀 +- [NVIDIA Jetson平台部署](https://docs.ultralytics.com/yolov5/tutorials/running_on_jetson_nano) 🌟 新 +- [测试时增强 (TTA)](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation) +- [模型集成](https://docs.ultralytics.com/yolov5/tutorials/model_ensembling) +- [模型剪枝/稀疏](https://docs.ultralytics.com/yolov5/tutorials/model_pruning_and_sparsity) +- [超参数进化](https://docs.ultralytics.com/yolov5/tutorials/hyperparameter_evolution) +- [冻结层的迁移学习](https://docs.ultralytics.com/yolov5/tutorials/transfer_learning_with_frozen_layers) +- [架构概述](https://docs.ultralytics.com/yolov5/tutorials/architecture_description) 🌟 新 +- [Roboflow用于数据集、标注和主动学习](https://docs.ultralytics.com/yolov5/tutorials/roboflow_datasets_integration) +- [ClearML日志记录](https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration) 🌟 新 +- [使用Neural Magic的Deepsparse的YOLOv5](https://docs.ultralytics.com/yolov5/tutorials/neural_magic_pruning_quantization) 🌟 新 +- [Comet日志记录](https://docs.ultralytics.com/yolov5/tutorials/comet_logging_integration) 🌟 新
@@ -431,10 +431,10 @@ python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --inclu - + - + @@ -456,7 +456,7 @@ YOLOv5 在两种不同的 License 下可用: ##
联系我们
-请访问 [GitHub Issues](https://github.com/ultralytics/yolov5/issues) 或 [Ultralytics Community Forum](https://community.ultralytis.com) 以报告 YOLOv5 错误和请求功能。
+请访问 [GitHub Issues](https://github.com/ultralytics/yolov5/issues) 或 [Ultralytics Community Forum](https://community.ultralytics.com) 以报告 YOLOv5 错误和请求功能。
diff --git a/utils/loggers/comet/README.md b/utils/loggers/comet/README.md index 47e6a45654b8..aee8d16a336c 100644 --- a/utils/loggers/comet/README.md +++ b/utils/loggers/comet/README.md @@ -164,7 +164,7 @@ env COMET_LOG_PER_CLASS_METRICS=true python train.py \ If you would like to store your data using [Comet Artifacts](https://www.comet.com/docs/v2/guides/data-management/using-artifacts/#learn-more?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github), you can do so using the `upload_dataset` flag. -The dataset be organized in the way described in the [YOLOv5 documentation](https://docs.ultralytics.com/tutorials/train-custom-datasets/#3-organize-directories). The dataset config `yaml` file must follow the same format as that of the `coco128.yaml` file. +The dataset be organized in the way described in the [YOLOv5 documentation](https://docs.ultralytics.com/yolov5/tutorials/train_custom_data/). The dataset config `yaml` file must follow the same format as that of the `coco128.yaml` file. ```shell python train.py \ From ea191def0a9f04b03feb1a535c816c6107c0ff7b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 21 Apr 2023 22:11:03 +0200 Subject: [PATCH 1727/1976] Update greetings.yml (#11411) Signed-off-by: Glenn Jocher --- .github/workflows/greetings.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index 3712ea9dec8a..8aca12d3c370 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -44,7 +44,7 @@ jobs: - **Notebooks** with free GPU: Run on Gradient Open In Colab Open In Kaggle - **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/) - - **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://docs.ultralytics.com/aws_quickstart_tutorial/) + - **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/) - **Docker Image**. See [Docker Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/docker_image_quickstart_tutorial/) Docker Pulls ## Status From be61a64c47b9324ed2a0c0c1af007c06966da915 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 21 Apr 2023 23:47:29 +0200 Subject: [PATCH 1728/1976] Update links to https://docs.ultralytics.com (#11412) * Update links * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- README.md | 8 ++++---- README.zh-CN.md | 8 ++++---- classify/tutorial.ipynb | 10 +++++----- hubconf.py | 2 +- segment/train.py | 8 +++++--- segment/tutorial.ipynb | 10 +++++----- train.py | 8 +++++--- tutorial.ipynb | 10 +++++----- utils/dataloaders.py | 2 +- utils/loggers/__init__.py | 2 +- 10 files changed, 36 insertions(+), 32 deletions(-) diff --git a/README.md b/README.md index 7a43245a1f45..7bc7a6ce8ca4 100644 --- a/README.md +++ b/README.md @@ -85,7 +85,7 @@ pip install -r requirements.txt # install
Inference -YOLOv5 [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36) inference. [Models](https://github.com/ultralytics/yolov5/tree/master/models) download automatically from the latest +YOLOv5 [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading) inference. [Models](https://github.com/ultralytics/yolov5/tree/master/models) download automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). ```python @@ -134,7 +134,7 @@ The commands below reproduce YOLOv5 [COCO](https://github.com/ultralytics/yolov5 results. [Models](https://github.com/ultralytics/yolov5/tree/master/models) and [datasets](https://github.com/ultralytics/yolov5/tree/master/data) download automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). Training times for YOLOv5n/s/m/l/x are -1/2/4/6/8 days on a V100 GPU ([Multi-GPU](https://github.com/ultralytics/yolov5/issues/475) times faster). Use the +1/2/4/6/8 days on a V100 GPU ([Multi-GPU](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training) times faster). Use the largest `--batch-size` possible, or pass `--batch-size -1` for YOLOv5 [AutoBatch](https://github.com/ultralytics/yolov5/pull/5092). Batch sizes shown for V100-16GB. @@ -247,7 +247,7 @@ YOLOv5 has been designed to be super easy to get started and simple to learn. We - All checkpoints are trained to 300 epochs with default settings. Nano and Small models use [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml) hyps, all others use [hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml). - **mAPval** values are for single-model single-scale on [COCO val2017](http://cocodataset.org) dataset.
Reproduce by `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65`
- **Speed** averaged over COCO val images using a [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) instance. NMS times (~1 ms/img) not included.
Reproduce by `python val.py --data coco.yaml --img 640 --task speed --batch 1`
-- **TTA** [Test Time Augmentation](https://github.com/ultralytics/yolov5/issues/303) includes reflection and scale augmentations.
Reproduce by `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`
+- **TTA** [Test Time Augmentation](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation) includes reflection and scale augmentations.
Reproduce by `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`
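
The TTA footnote above maps to val.py's `--augment` flag; the same test-time augmentation is also available at inference through the PyTorch Hub model's `augment` argument. A short sketch (the sample image URL is illustrative):

```python
# Sketch: test-time augmentation (reflection and scale) at inference time,
# the programmatic counterpart of `python val.py ... --augment`.
import torch

model = torch.hub.load('ultralytics/yolov5', 'yolov5s')  # downloads weights on first use
results = model('https://ultralytics.com/images/zidane.jpg', augment=True)  # TTA forward pass
results.print()  # detections merged from the augmented inferences
```
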
@@ -484,4 +484,4 @@ For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https:/
-[tta]: https://github.com/ultralytics/yolov5/issues/303 +[tta]: https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation diff --git a/README.zh-CN.md b/README.zh-CN.md index 6396ecffbe79..c7733488bd02 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -80,7 +80,7 @@ pip install -r requirements.txt # install
推理 -使用 YOLOv5 [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36) 推理。最新 [模型](https://github.com/ultralytics/yolov5/tree/master/models) 将自动的从 +使用 YOLOv5 [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading) 推理。最新 [模型](https://github.com/ultralytics/yolov5/tree/master/models) 将自动的从 YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) 中下载。 ```python @@ -128,7 +128,7 @@ python detect.py --weights yolov5s.pt --source 0 # 下面的命令重现 YOLOv5 在 [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) 数据集上的结果。 最新的 [模型](https://github.com/ultralytics/yolov5/tree/master/models) 和 [数据集](https://github.com/ultralytics/yolov5/tree/master/data) 将自动的从 YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) 中下载。 -YOLOv5n/s/m/l/x 在 V100 GPU 的训练时间为 1/2/4/6/8 天( [多GPU](https://github.com/ultralytics/yolov5/issues/475) 训练速度更快)。 +YOLOv5n/s/m/l/x 在 V100 GPU 的训练时间为 1/2/4/6/8 天( [多GPU](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training) 训练速度更快)。 尽可能使用更大的 `--batch-size` ,或通过 `--batch-size -1` 实现 YOLOv5 [自动批处理](https://github.com/ultralytics/yolov5/pull/5092) 。下方显示的 batchsize 适用于 V100-16GB。 @@ -241,7 +241,7 @@ YOLOv5 超级容易上手,简单易学。我们优先考虑现实世界的结 - 所有模型都使用默认配置,训练 300 epochs。n和s模型使用 [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml) ,其他模型都使用 [hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml) 。 - \*\*mAPval\*\*在单模型单尺度上计算,数据集使用 [COCO val2017](http://cocodataset.org) 。
复现命令 `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65`
- **推理速度**在 COCO val 图像总体时间上进行平均得到，测试环境使用[AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/)实例。 NMS 时间 (大约 1 ms/img) 不包括在内。
复现命令 `python val.py --data coco.yaml --img 640 --task speed --batch 1`
-- **TTA** [测试时数据增强](https://github.com/ultralytics/yolov5/issues/303) 包括反射和尺度变换。
复现命令 `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`
+- **TTA** [测试时数据增强](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation) 包括反射和尺度变换。
复现命令 `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`
@@ -479,4 +479,4 @@ YOLOv5 在两种不同的 License 下可用: -[tta]: https://github.com/ultralytics/yolov5/issues/303 +[tta]: https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation diff --git a/classify/tutorial.ipynb b/classify/tutorial.ipynb index 58723608bdbe..ddf67c5519f5 100644 --- a/classify/tutorial.ipynb +++ b/classify/tutorial.ipynb @@ -1350,7 +1350,7 @@ "export COMET_API_KEY= # 2. paste API key\n", "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n", "```\n", - "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://www.comet.com/docs/v2/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab). Get started by trying out the Comet Colab Notebook:\n", + "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://docs.ultralytics.com/yolov5/tutorials/comet_logging_integration). If you'd like to learn more about Comet, head over to our [documentation](https://www.comet.com/docs/v2/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab). Get started by trying out the Comet Colab Notebook:\n", "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", "\n", "\n", @@ -1372,7 +1372,7 @@ "\n", "You'll get all the great expected features from an experiment manager: live updates, model upload, experiment comparison etc. but ClearML also tracks uncommitted changes and installed packages for example. Thanks to that ClearML Tasks (which is what we call experiments) are also reproducible on different machines! With only 1 extra line, we can schedule a YOLOv5 training task on a queue to be executed by any number of ClearML Agents (workers).\n", "\n", - "You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) for details!\n", + "You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration) for details!\n", "\n", "\n", "\"ClearML" @@ -1404,9 +1404,9 @@ "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n", "\n", "- **Notebooks** with free GPU: \"Run \"Open \"Open\n", - "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)\n", - "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)\n", - "- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) \"Docker\n" + "- **Google Cloud** Deep Learning VM. 
See [GCP Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/)\n", + "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/)\n", + "- **Docker Image**. See [Docker Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/docker_image_quickstart_tutorial/) \"Docker\n" ] }, { diff --git a/hubconf.py b/hubconf.py index 9d820a54f290..73caf06685da 100644 --- a/hubconf.py +++ b/hubconf.py @@ -73,7 +73,7 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo return model.to(device) except Exception as e: - help_url = 'https://github.com/ultralytics/yolov5/issues/36' + help_url = 'https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading' s = f'{e}. Cache may be out of date, try `force_reload=True` or see {help_url} for help.' raise Exception(s) from e diff --git a/segment/train.py b/segment/train.py index 7e600f77d571..073fc742005b 100644 --- a/segment/train.py +++ b/segment/train.py @@ -12,7 +12,7 @@ Models: https://github.com/ultralytics/yolov5/tree/master/models Datasets: https://github.com/ultralytics/yolov5/tree/master/data -Tutorial: https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data +Tutorial: https://docs.ultralytics.com/yolov5/tutorials/train_custom_data """ import argparse @@ -167,8 +167,10 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio # DP mode if cuda and RANK == -1 and torch.cuda.device_count() > 1: - LOGGER.warning('WARNING ⚠️ DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n' - 'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.') + LOGGER.warning( + 'WARNING ⚠️ DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n' + 'See Multi-GPU Tutorial at https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training to get started.' + ) model = torch.nn.DataParallel(model) # SyncBatchNorm diff --git a/segment/tutorial.ipynb b/segment/tutorial.ipynb index cb52045bcb25..6e5caf53b8ff 100644 --- a/segment/tutorial.ipynb +++ b/segment/tutorial.ipynb @@ -463,7 +463,7 @@ "export COMET_API_KEY= # 2. paste API key\n", "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n", "```\n", - "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://www.comet.com/docs/v2/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab). Get started by trying out the Comet Colab Notebook:\n", + "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://docs.ultralytics.com/yolov5/tutorials/comet_logging_integration). If you'd like to learn more about Comet, head over to our [documentation](https://www.comet.com/docs/v2/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab). 
Get started by trying out the Comet Colab Notebook:\n", "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", "\n", "\n", @@ -485,7 +485,7 @@ "\n", "You'll get all the great expected features from an experiment manager: live updates, model upload, experiment comparison etc. but ClearML also tracks uncommitted changes and installed packages for example. Thanks to that ClearML Tasks (which is what we call experiments) are also reproducible on different machines! With only 1 extra line, we can schedule a YOLOv5 training task on a queue to be executed by any number of ClearML Agents (workers).\n", "\n", - "You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) for details!\n", + "You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration) for details!\n", "\n", "\n", "\"ClearML" @@ -517,9 +517,9 @@ "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n", "\n", "- **Notebooks** with free GPU: \"Run \"Open \"Open\n", - "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)\n", - "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)\n", - "- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) \"Docker\n" + "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/)\n", + "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/)\n", + "- **Docker Image**. 
See [Docker Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/docker_image_quickstart_tutorial/) \"Docker\n" ] }, { diff --git a/train.py b/train.py index 7c403ee6d680..216da6399028 100644 --- a/train.py +++ b/train.py @@ -12,7 +12,7 @@ Models: https://github.com/ultralytics/yolov5/tree/master/models Datasets: https://github.com/ultralytics/yolov5/tree/master/data -Tutorial: https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data +Tutorial: https://docs.ultralytics.com/yolov5/tutorials/train_custom_data """ import argparse @@ -175,8 +175,10 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio # DP mode if cuda and RANK == -1 and torch.cuda.device_count() > 1: - LOGGER.warning('WARNING ⚠️ DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n' - 'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.') + LOGGER.warning( + 'WARNING ⚠️ DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n' + 'See Multi-GPU Tutorial at https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training to get started.' + ) model = torch.nn.DataParallel(model) # SyncBatchNorm diff --git a/tutorial.ipynb b/tutorial.ipynb index d2b54c9c60ef..be87068822af 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -498,7 +498,7 @@ "export COMET_API_KEY= # 2. paste API key\n", "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n", "```\n", - "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://www.comet.com/docs/v2/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab). Get started by trying out the Comet Colab Notebook:\n", + "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://docs.ultralytics.com/yolov5/tutorials/comet_logging_integration). If you'd like to learn more about Comet, head over to our [documentation](https://www.comet.com/docs/v2/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab). Get started by trying out the Comet Colab Notebook:\n", "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", "\n", "\n", @@ -520,7 +520,7 @@ "\n", "You'll get all the great expected features from an experiment manager: live updates, model upload, experiment comparison etc. but ClearML also tracks uncommitted changes and installed packages for example. Thanks to that ClearML Tasks (which is what we call experiments) are also reproducible on different machines! With only 1 extra line, we can schedule a YOLOv5 training task on a queue to be executed by any number of ClearML Agents (workers).\n", "\n", - "You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) for details!\n", + "You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. This will help you keep track of your data without adding extra hassle. 
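To illustrate the dataset-ID hand-off described above, a minimal sketch, assuming the `clearml://<dataset_id>` convention from the YOLOv5 ClearML integration; the ID below is a placeholder, not a real dataset:

```python
import train  # yolov5/train.py

# Passing a ClearML dataset ID in place of a local data YAML; the ClearML
# logger resolves and downloads the versioned dataset before training starts.
train.run(
    data='clearml://1234567890abcdef1234567890abcdef',  # hypothetical dataset ID
    weights='yolov5s.pt',
    imgsz=640,
    epochs=3,
)
```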
Explore the [ClearML Tutorial](https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration) for details!\n", "\n", "\n", "\"ClearML" @@ -555,9 +555,9 @@ "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n", "\n", "- **Notebooks** with free GPU: \"Run \"Open \"Open\n", - "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)\n", - "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)\n", - "- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) \"Docker\n" + "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/)\n", + "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/)\n", + "- **Docker Image**. See [Docker Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/docker_image_quickstart_tutorial/) \"Docker\n" ] }, { diff --git a/utils/dataloaders.py b/utils/dataloaders.py index a5bd86d49d7e..26201c3c78fc 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -36,7 +36,7 @@ from utils.torch_utils import torch_distributed_zero_first # Parameters -HELP_URL = 'See https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' +HELP_URL = 'See https://docs.ultralytics.com/yolov5/tutorials/train_custom_data' IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp', 'pfm' # include image suffixes VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 9b4c1d13b778..c7c283b728ac 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -118,7 +118,7 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, self.clearml = None prefix = colorstr('ClearML: ') LOGGER.warning(f'{prefix}WARNING ⚠️ ClearML is installed but not configured, skipping ClearML logging.' 
- f' See https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml#readme') + f' See https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration#readme') else: self.clearml = None From 3e14883825bb0f981e82aecb1da0e43e109fea68 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 23 Apr 2023 01:49:44 +0200 Subject: [PATCH 1729/1976] [Snyk] Security upgrade ubuntu from rolling to 22.10 (#11389) * fix: utils/docker/Dockerfile-cpu to reduce vulnerabilities The following vulnerabilities are fixed with an upgrade: - https://snyk.io/vuln/SNYK-UBUNTU2210-GNUTLS28-3319585 - https://snyk.io/vuln/SNYK-UBUNTU2210-SYSTEMD-3148007 - https://snyk.io/vuln/SNYK-UBUNTU2210-SYSTEMD-3180315 - https://snyk.io/vuln/SNYK-UBUNTU2210-TAR-3261142 * Update Dockerfile-arm64 Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher Co-authored-by: snyk-bot --- utils/docker/Dockerfile-arm64 | 2 +- utils/docker/Dockerfile-cpu | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/docker/Dockerfile-arm64 b/utils/docker/Dockerfile-arm64 index 556ae59a8700..7b5c610e5071 100644 --- a/utils/docker/Dockerfile-arm64 +++ b/utils/docker/Dockerfile-arm64 @@ -3,7 +3,7 @@ # Image is aarch64-compatible for Apple M1 and other ARM architectures, e.g. Jetson Nano and Raspberry Pi # Start FROM Ubuntu image https://hub.docker.com/_/ubuntu -FROM arm64v8/ubuntu:rolling +FROM arm64v8/ubuntu:22.10 # Downloads to user config dir ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ diff --git a/utils/docker/Dockerfile-cpu b/utils/docker/Dockerfile-cpu index 8e8c23bf952f..613bdffa4768 100644 --- a/utils/docker/Dockerfile-cpu +++ b/utils/docker/Dockerfile-cpu @@ -3,7 +3,7 @@ # Image is CPU-optimized for ONNX, OpenVINO and PyTorch YOLOv5 deployments # Start FROM Ubuntu image https://hub.docker.com/_/ubuntu -FROM ubuntu:rolling +FROM ubuntu:22.10 # Downloads to user config dir ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ From c35d9aedecdab9df481e7ab4059557c852f34f00 Mon Sep 17 00:00:00 2001 From: Shayan Mousavinia <45814390+ShAmoNiA@users.noreply.github.com> Date: Sun, 23 Apr 2023 19:34:17 +0330 Subject: [PATCH 1730/1976] Update hyp.no-augmentation.yaml (#11420) remove one zero in "hsv_s: 00" Signed-off-by: Shayan Mousavinia <45814390+ShAmoNiA@users.noreply.github.com> --- data/hyps/hyp.no-augmentation.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/hyps/hyp.no-augmentation.yaml b/data/hyps/hyp.no-augmentation.yaml index 0ae796c16dc2..8da18150538b 100644 --- a/data/hyps/hyp.no-augmentation.yaml +++ b/data/hyps/hyp.no-augmentation.yaml @@ -21,7 +21,7 @@ anchor_t: 4.0 # anchor-multiple threshold # these parameters are all zero since we want to use the albumentations framework fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5) hsv_h: 0 # image HSV-Hue augmentation (fraction) -hsv_s: 00 # image HSV-Saturation augmentation (fraction) +hsv_s: 0 # image HSV-Saturation augmentation (fraction) hsv_v: 0 # image HSV-Value augmentation (fraction) degrees: 0.0 # image rotation (+/- deg) translate: 0 # image translation (+/- fraction) From ff6a9ac842f3a09941ac3dca5355cfa896b5f5d7 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 23 Apr 2023 18:38:25 +0200 Subject: [PATCH 1731/1976] Add Slack notification of CI errors (#11421) * Update links.yml Signed-off-by: Glenn
Jocher * Update links * Update links --------- Signed-off-by: Glenn Jocher --- .github/workflows/ci-testing.yml | 16 ++++++++++++++++ .github/workflows/links.yml | 4 ++-- 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index bff95f654552..1ad6087921d6 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -45,6 +45,14 @@ jobs: python detect.py --weights ${{ matrix.model }}.onnx --img 320 python segment/predict.py --weights ${{ matrix.model }}-seg.onnx --img 320 python classify/predict.py --weights ${{ matrix.model }}-cls.onnx --img 224 + - name: Notify on failure + if: failure() && github.repository == 'ultralytics/yolov5' && (github.event_name == 'schedule' || github.event_name == 'push') + uses: slackapi/slack-github-action@v1.23.0 + with: + payload: | + {"text": " GitHub Actions error for ${{ github.workflow }} ❌\n\n\n*Repository:* https://github.com/${{ github.repository }}\n*Action:* https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\n*Author:* ${{ github.actor }}\n*Event:* ${{ github.event_name }}\n*Job Status:* ${{ job.status }}\n"} + env: + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_YOLO }} Tests: timeout-minutes: 60 @@ -151,3 +159,11 @@ jobs: for path in '$m', '$b': model = torch.hub.load('.', 'custom', path=path, source='local') EOF + - name: Notify on failure + if: failure() && github.repository == 'ultralytics/yolov5' && (github.event_name == 'schedule' || github.event_name == 'push') + uses: slackapi/slack-github-action@v1.23.0 + with: + payload: | + {"text": " GitHub Actions error for ${{ github.workflow }} ❌\n\n\n*Repository:* https://github.com/${{ github.repository }}\n*Action:* https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\n*Author:* ${{ github.actor }}\n*Event:* ${{ github.event_name }}\n*Job Status:* ${{ job.status }}\n"} + env: + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_YOLO }} diff --git a/.github/workflows/links.yml b/.github/workflows/links.yml index b3e82df34947..f64037201660 100644 --- a/.github/workflows/links.yml +++ b/.github/workflows/links.yml @@ -23,7 +23,7 @@ jobs: with: fail: true # accept 429(Instagram, 'too many requests'), 999(LinkedIn, 'unknown status code'), Timeout(Twitter) - args: --accept 429,999 --exclude-loopback --exclude twitter.com --exclude-mail './**/*.md' './**/*.html' + args: --accept 429,999 --exclude-loopback --exclude twitter.com --exclude-path '**/ci-testing.yaml' --exclude-mail './**/*.md' './**/*.html' env: GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} @@ -33,6 +33,6 @@ jobs: with: fail: true # accept 429(Instagram, 'too many requests'), 999(LinkedIn, 'unknown status code'), Timeout(Twitter) - args: --accept 429,999 --exclude-loopback --exclude twitter.com,url.com --exclude-mail './**/*.md' './**/*.html' './**/*.yml' './**/*.yaml' './**/*.py' './**/*.ipynb' + args: --accept 429,999 --exclude-loopback --exclude twitter.com,url.com --exclude-path '**/ci-testing.yaml' --exclude-mail './**/*.md' './**/*.html' './**/*.yml' './**/*.yaml' './**/*.py' './**/*.ipynb' env: GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} From f3ee5960671f7d48c2a71cf666a97318661192af Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 24 Apr 2023 17:14:31 +0200 Subject: [PATCH 1732/1976] Bump lycheeverse/lychee-action from 1.6.1 to 1.7.0 (#11427) Bumps [lycheeverse/lychee-action](https://github.com/lycheeverse/lychee-action) from 
1.6.1 to 1.7.0. - [Release notes](https://github.com/lycheeverse/lychee-action/releases) - [Commits](https://github.com/lycheeverse/lychee-action/compare/v1.6.1...v1.7.0) --- updated-dependencies: - dependency-name: lycheeverse/lychee-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/links.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/links.yml b/.github/workflows/links.yml index f64037201660..a5413318030f 100644 --- a/.github/workflows/links.yml +++ b/.github/workflows/links.yml @@ -19,7 +19,7 @@ jobs: - uses: actions/checkout@v3 - name: Test Markdown and HTML links - uses: lycheeverse/lychee-action@v1.6.1 + uses: lycheeverse/lychee-action@v1.7.0 with: fail: true # accept 429(Instagram, 'too many requests'), 999(LinkedIn, 'unknown status code'), Timeout(Twitter) @@ -29,7 +29,7 @@ jobs: - name: Test Markdown, HTML, YAML, Python and Notebook links if: github.event_name == 'workflow_dispatch' - uses: lycheeverse/lychee-action@v1.6.1 + uses: lycheeverse/lychee-action@v1.7.0 with: fail: true # accept 429(Instagram, 'too many requests'), 999(LinkedIn, 'unknown status code'), Timeout(Twitter) From 8ecc7276ecdd9c409b3dc8b9051142569009c6f4 Mon Sep 17 00:00:00 2001 From: Hongxiao Xiang <83959468+xhx787@users.noreply.github.com> Date: Tue, 25 Apr 2023 23:45:42 +0800 Subject: [PATCH 1733/1976] Update README.md and README.zh-CN.md (#11434) revise readme.md --- README.md | 4 ++-- README.zh-CN.md | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 7bc7a6ce8ca4..5326816ce52c 100644 --- a/README.md +++ b/README.md @@ -309,7 +309,7 @@ python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # val Use pretrained YOLOv5m-seg.pt to predict bus.jpg: ```bash -python segment/predict.py --weights yolov5m-seg.pt --data data/images/bus.jpg +python segment/predict.py --weights yolov5m-seg.pt --source data/images/bus.jpg ``` ```python @@ -400,7 +400,7 @@ python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --im Use pretrained YOLOv5s-cls.pt to predict bus.jpg: ```bash -python classify/predict.py --weights yolov5s-cls.pt --data data/images/bus.jpg +python classify/predict.py --weights yolov5s-cls.pt --source data/images/bus.jpg ``` ```python diff --git a/README.zh-CN.md b/README.zh-CN.md index c7733488bd02..913f817a3c14 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -305,7 +305,7 @@ python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # 验 使用预训练的 YOLOv5m-seg.pt 来预测 bus.jpg: ```bash -python segment/predict.py --weights yolov5m-seg.pt --data data/images/bus.jpg +python segment/predict.py --weights yolov5m-seg.pt --source data/images/bus.jpg ``` ```python @@ -395,7 +395,7 @@ python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --im 使用预训练的 YOLOv5s-cls.pt 来预测 bus.jpg: ```bash -python classify/predict.py --weights yolov5s-cls.pt --data data/images/bus.jpg +python classify/predict.py --weights yolov5s-cls.pt --source data/images/bus.jpg ``` ```python From 22a3cbb97df70fd40221e17738cba2d97e266fad Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 29 Apr 2023 13:50:49 +0200 Subject: [PATCH 1734/1976] Improve Slack notification (#11458) * Improve Slack notification * Improve Slack notification * Improve Slack notification --- .github/workflows/ci-testing.yml | 20 
+++++++++----------- .github/workflows/codeql-analysis.yml | 1 + .github/workflows/docker.yml | 1 + 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 1ad6087921d6..7de084fef06d 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -45,14 +45,6 @@ jobs: python detect.py --weights ${{ matrix.model }}.onnx --img 320 python segment/predict.py --weights ${{ matrix.model }}-seg.onnx --img 320 python classify/predict.py --weights ${{ matrix.model }}-cls.onnx --img 224 - - name: Notify on failure - if: failure() && github.repository == 'ultralytics/yolov5' && (github.event_name == 'schedule' || github.event_name == 'push') - uses: slackapi/slack-github-action@v1.23.0 - with: - payload: | - {"text": " GitHub Actions error for ${{ github.workflow }} ❌\n\n\n*Repository:* https://github.com/${{ github.repository }}\n*Action:* https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\n*Author:* ${{ github.actor }}\n*Event:* ${{ github.event_name }}\n*Job Status:* ${{ job.status }}\n"} - env: - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_YOLO }} Tests: timeout-minutes: 60 @@ -159,11 +151,17 @@ jobs: for path in '$m', '$b': model = torch.hub.load('.', 'custom', path=path, source='local') EOF - - name: Notify on failure - if: failure() && github.repository == 'ultralytics/yolov5' && (github.event_name == 'schedule' || github.event_name == 'push') + + Summary: + runs-on: ubuntu-latest + needs: [Benchmarks, Tests] # Add job names that you want to check for failure + if: always() # This ensures the job runs even if previous jobs fail + steps: + - name: Check for failure and notify + if: ${{ needs.Benchmarks.result == 'failure' || needs.Tests.result == 'failure' }} # Check if any of the jobs failed uses: slackapi/slack-github-action@v1.23.0 with: payload: | - {"text": " GitHub Actions error for ${{ github.workflow }} ❌\n\n\n*Repository:* https://github.com/${{ github.repository }}\n*Action:* https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\n*Author:* ${{ github.actor }}\n*Event:* ${{ github.event_name }}\n*Job Status:* ${{ job.status }}\n"} + {"text": " GitHub Actions error for ${{ github.workflow }} ❌\n\n\n*Repository:* https://github.com/${{ github.repository }}\n*Action:* https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\n*Author:* ${{ github.actor }}\n*Event:* ${{ github.event_name }}\n"} env: SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_YOLO }} diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index b6f751096d9a..05db12dabd1a 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -6,6 +6,7 @@ name: "CodeQL" on: schedule: - cron: '0 0 1 * *' # Runs at 00:00 UTC on the 1st of every month + workflow_dispatch: jobs: analyze: diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 190b48875fa6..13e79216fc20 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -6,6 +6,7 @@ name: Publish Docker Images on: push: branches: [ master ] + workflow_dispatch: jobs: docker: From 5178d415fb4b1346b953ca79a7d52b806b6d261b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 29 Apr 2023 17:56:10 +0200 Subject: [PATCH 1735/1976] Add discord links (#11459) * Add discord links * Update README.md Signed-off-by: Glenn Jocher * Add discord links --------- Signed-off-by: Glenn Jocher --- 
README.md | 12 ++++++++++-- README.zh-CN.md | 12 ++++++++++-- 2 files changed, 20 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 5326816ce52c..0cefb443b90a 100644 --- a/README.md +++ b/README.md @@ -20,7 +20,9 @@ YOLOv5 🚀 is the world's most loved vision AI, representing Ultralytics open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development. -To request an Enterprise License please complete the form at Ultralytics Licensing. +We hope that the resources here will help you get the most out of YOLOv5. Please browse the YOLOv5 Docs for details, raise an issue on GitHub for support, and join our Discord community for questions and discussions! + +To request an Enterprise License please complete the form at [Ultralytics Licensing](https://ultralytics.com/license). @@ -461,7 +466,7 @@ YOLOv5 is available under two different licenses: ##
Contact
-For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues) or the [Ultralytics Community Forum](https://community.ultralytics.com/). +For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues), and join our [Discord](https://discord.gg/n6cFeSPZdD) community for questions and discussions!
@@ -482,6 +487,9 @@ For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https:/ + + +
[tta]: https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation diff --git a/README.zh-CN.md b/README.zh-CN.md index 913f817a3c14..4b4d9500ae9a 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -19,7 +19,9 @@ YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表 Ultralytics 对未来视觉 AI 方法的开源研究,结合在数千小时的研究和开发中积累的经验教训和最佳实践。 -如果要申请企业许可证,请填写表格Ultralytics 许可. +我们希望这里的资源能帮助您充分利用 YOLOv5。请浏览 YOLOv5 文档 了解详细信息,在 GitHub 上提交问题以获得支持,并加入我们的 Discord 社区进行问题和讨论! + +如需申请企业许可,请在 [Ultralytics Licensing](https://ultralytics.com/license) 处填写表格 @@ -456,7 +461,7 @@ YOLOv5 在两种不同的 License 下可用: ##
联系我们
-请访问 [GitHub Issues](https://github.com/ultralytics/yolov5/issues) 或 [Ultralytics Community Forum](https://community.ultralytics.com) 以报告 YOLOv5 错误和请求功能。 +对于 YOLOv5 的错误报告和功能请求,请访问 [GitHub Issues](https://github.com/ultralytics/yolov5/issues),并加入我们的 [Discord](https://discord.gg/n6cFeSPZdD) 社区进行问题和讨论!
@@ -477,6 +482,9 @@ YOLOv5 在两种不同的 License 下可用: + + +
[tta]: https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation From 8211a033ef9abe51ac0885989e0807f87e215abf Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 29 Apr 2023 20:51:02 +0200 Subject: [PATCH 1736/1976] Add https://docs.ultralytics.com/help/contributing (#11460) * Add https://docs.ultralytics.com/help/contributing * Add https://docs.ultralytics.com/help/contributing --- .github/ISSUE_TEMPLATE/bug-report.yml | 2 +- .github/ISSUE_TEMPLATE/feature-request.yml | 2 +- .github/PULL_REQUEST_TEMPLATE.md | 2 +- README.md | 2 +- README.zh-CN.md | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml index fcb64138b088..b82e55a2ec2f 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.yml +++ b/.github/ISSUE_TEMPLATE/bug-report.yml @@ -80,6 +80,6 @@ body: label: Are you willing to submit a PR? description: > (Optional) We encourage you to submit a [Pull Request](https://github.com/ultralytics/yolov5/pulls) (PR) to help improve YOLOv5 for everyone, especially if you have a good understanding of how to implement a fix or feature. - See the YOLOv5 [Contributing Guide](https://github.com/ultralytics/yolov5/blob/master/CONTRIBUTING.md) to get started. + See the YOLOv5 [Contributing Guide](https://docs.ultralytics.com/help/contributing) to get started. options: - label: Yes I'd like to help by submitting a PR! diff --git a/.github/ISSUE_TEMPLATE/feature-request.yml b/.github/ISSUE_TEMPLATE/feature-request.yml index 68ef985186ef..1d3d53df217e 100644 --- a/.github/ISSUE_TEMPLATE/feature-request.yml +++ b/.github/ISSUE_TEMPLATE/feature-request.yml @@ -45,6 +45,6 @@ body: label: Are you willing to submit a PR? description: > (Optional) We encourage you to submit a [Pull Request](https://github.com/ultralytics/yolov5/pulls) (PR) to help improve YOLOv5 for everyone, especially if you have a good understanding of how to implement a fix or feature. - See the YOLOv5 [Contributing Guide](https://github.com/ultralytics/yolov5/blob/master/CONTRIBUTING.md) to get started. + See the YOLOv5 [Contributing Guide](https://docs.ultralytics.com/help/contributing) to get started. options: - label: Yes I'd like to help by submitting a PR! diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 51f9803a57a5..d96d5afd2836 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -5,7 +5,7 @@ Thank you for submitting a YOLOv5 🚀 Pull Request! We want to make contributin - Link this PR to a YOLOv5 [issue](https://github.com/ultralytics/yolov5/issues) to help us understand what bug fix or feature is being implemented. - Provide before and after profiling/inference/training results to help us quantify the improvement your PR provides (if applicable). -Please see our ✅ [Contributing Guide](https://github.com/ultralytics/yolov5/blob/master/CONTRIBUTING.md) for more details. +Please see our ✅ [Contributing Guide](https://docs.ultralytics.com/help/contributing) for more details. Note that Copilot will summarize this PR below, do not modify the 'copilot:all' line. --> diff --git a/README.md b/README.md index 0cefb443b90a..37f683343f53 100644 --- a/README.md +++ b/README.md @@ -450,7 +450,7 @@ Get started in seconds with our verified environments. Click each icon below for ##
Contribute
-We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible. Please see our [Contributing Guide](CONTRIBUTING.md) to get started, and fill out the [YOLOv5 Survey](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experiences. Thank you to all our contributors! +We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible. Please see our [Contributing Guide](https://docs.ultralytics.com/help/contributing/) to get started, and fill out the [YOLOv5 Survey](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experiences. Thank you to all our contributors! diff --git a/README.zh-CN.md b/README.zh-CN.md index 4b4d9500ae9a..da60d3fe0573 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -445,7 +445,7 @@ python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --inclu ##
贡献
-我们喜欢您的意见或建议!我们希望尽可能简单和透明地为 YOLOv5 做出贡献。请看我们的 [投稿指南](CONTRIBUTING.md),并填写 [YOLOv5调查](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) 向我们发送您的体验反馈。感谢我们所有的贡献者! +我们喜欢您的意见或建议!我们希望尽可能简单和透明地为 YOLOv5 做出贡献。请看我们的 [投稿指南](https://docs.ultralytics.com/help/contributing/),并填写 [YOLOv5调查](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) 向我们发送您的体验反馈。感谢我们所有的贡献者! From 1d65a5ac9069b3b60b97c3c805d1250f2993d5e2 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 1 May 2023 10:35:51 +0200 Subject: [PATCH 1737/1976] Add links to https://docs.ultralytics.com/help/ (#11462) --- .github/ISSUE_TEMPLATE/bug-report.yml | 2 +- CONTRIBUTING.md | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml index b82e55a2ec2f..04f9c76fde1f 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.yml +++ b/.github/ISSUE_TEMPLATE/bug-report.yml @@ -62,7 +62,7 @@ body: label: Minimal Reproducible Example description: > When asking a question, people will be better able to provide help if you provide code that they can easily understand and use to **reproduce** the problem. - This is referred to by community members as creating a [minimal reproducible example](https://stackoverflow.com/help/minimal-reproducible-example). + This is referred to by community members as creating a [minimal reproducible example](https://docs.ultralytics.com/help/minimum_reproducible_example/). placeholder: | ``` # Code to reproduce your issue here diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 6e9ce5998d9f..95d88b9830d6 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -66,7 +66,7 @@ short guidelines below to help users provide what we need to get started. When asking a question, people will be better able to provide help if you provide **code** that they can easily understand and use to **reproduce** the problem. This is referred to by community members as creating -a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example). Your code that reproduces +a [minimum reproducible example](https://docs.ultralytics.com/help/minimum_reproducible_example/). Your code that reproduces the problem should be: - ✅ **Minimal** – Use as little code as possible that still produces the same problem @@ -84,7 +84,7 @@ should be: If you believe your problem meets all of the above criteria, please close this issue and raise a new one using the 🐛 **Bug Report** [template](https://github.com/ultralytics/yolov5/issues/new/choose) and provide -a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example) to help us better +a [minimum reproducible example](https://docs.ultralytics.com/help/minimum_reproducible_example/) to help us better understand and diagnose your problem. 
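In practice, a minimum reproducible example for YOLOv5 can be only a few lines. A sketch using the public PyTorch Hub entry point (the sample image URL is the one used throughout the YOLOv5 docs):

```python
import torch

# Load a pretrained YOLOv5s model from PyTorch Hub (weights download on first use)
model = torch.hub.load('ultralytics/yolov5', 'yolov5s')

# Run inference on a hosted sample image and show the output in question
results = model('https://ultralytics.com/images/zidane.jpg')
results.print()
```

Anything a maintainer cannot run verbatim on a clean environment (local paths, private weights, custom datasets) should be swapped for public stand-ins like the above.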
## License From 867f7f0e22c268590dcf036167b3dc5e87f8d8a2 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 1 May 2023 14:33:31 +0200 Subject: [PATCH 1738/1976] Update links.yml (#11463) * Update links.yml Signed-off-by: Glenn Jocher * Update links.yml Signed-off-by: Glenn Jocher * Update ci-testing.yml Signed-off-by: Glenn Jocher * Update links.yml Signed-off-by: Glenn Jocher * Update links.yml Signed-off-by: Glenn Jocher * Update ci-testing.yml Signed-off-by: Glenn Jocher * updates --------- Signed-off-by: Glenn Jocher --- .github/workflows/ci-testing.yml | 2 +- .github/workflows/links.yml | 36 +++++++++++++++++++------------- 2 files changed, 22 insertions(+), 16 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 7de084fef06d..e71a4b8f16ac 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -158,7 +158,7 @@ jobs: if: always() # This ensures the job runs even if previous jobs fail steps: - name: Check for failure and notify - if: ${{ needs.Benchmarks.result == 'failure' || needs.Tests.result == 'failure' }} # Check if any of the jobs failed + if: (needs.Benchmarks.result == 'failure' || needs.Tests.result == 'failure') && github.repository == 'ultralytics/yolov5' && (github.event_name == 'schedule' || github.event_name == 'push') uses: slackapi/slack-github-action@v1.23.0 with: payload: | diff --git a/.github/workflows/links.yml b/.github/workflows/links.yml index a5413318030f..306689f46507 100644 --- a/.github/workflows/links.yml +++ b/.github/workflows/links.yml @@ -1,5 +1,6 @@ # Ultralytics YOLO 🚀, AGPL-3.0 license -# YOLO Continuous Integration (CI) GitHub Actions tests +# YOLO Continuous Integration (CI) GitHub Actions tests broken link checker +# Accept 429(Instagram, 'too many requests'), 999(LinkedIn, 'unknown status code'), Timeout(Twitter) name: Check Broken links @@ -18,21 +19,26 @@ jobs: steps: - uses: actions/checkout@v3 - - name: Test Markdown and HTML links - uses: lycheeverse/lychee-action@v1.7.0 + - name: Download and install lychee + run: | + LYCHEE_URL=$(curl -s https://api.github.com/repos/lycheeverse/lychee/releases/latest | grep "browser_download_url" | grep "x86_64-unknown-linux-gnu.tar.gz" | cut -d '"' -f 4) + curl -L $LYCHEE_URL -o lychee.tar.gz + tar xzf lychee.tar.gz + sudo mv lychee /usr/local/bin + + - name: Test Markdown and HTML links with retry + uses: nick-invision/retry@v2 with: - fail: true - # accept 429(Instagram, 'too many requests'), 999(LinkedIn, 'unknown status code'), Timeout(Twitter) - args: --accept 429,999 --exclude-loopback --exclude twitter.com --exclude-path '**/ci-testing.yaml' --exclude-mail './**/*.md' './**/*.html' - env: - GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} + timeout_minutes: 5 + retry_wait_seconds: 60 + max_attempts: 3 + command: lychee --accept 429,999 --exclude-loopback --exclude twitter.com --exclude-path '**/ci.yaml' --exclude-mail --github-token ${{ secrets.GITHUB_TOKEN }} './**/*.md' './**/*.html' - - name: Test Markdown, HTML, YAML, Python and Notebook links + - name: Test Markdown, HTML, YAML, Python and Notebook links with retry if: github.event_name == 'workflow_dispatch' - uses: lycheeverse/lychee-action@v1.7.0 + uses: nick-invision/retry@v2 with: - fail: true - # accept 429(Instagram, 'too many requests'), 999(LinkedIn, 'unknown status code'), Timeout(Twitter) - args: --accept 429,999 --exclude-loopback --exclude twitter.com,url.com --exclude-path '**/ci-testing.yaml' --exclude-mail './**/*.md' './**/*.html' './**/*.yml' 
'./**/*.yaml' './**/*.py' './**/*.ipynb' - env: - GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} + timeout_minutes: 5 + retry_wait_seconds: 60 + max_attempts: 3 + command: lychee --accept 429,999 --exclude-loopback --exclude twitter.com,url.com --exclude-path '**/ci.yaml' --exclude-mail --github-token ${{ secrets.GITHUB_TOKEN }} './**/*.md' './**/*.html' './**/*.yml' './**/*.yaml' './**/*.py' './**/*.ipynb' From cb93837f8d3b6da4f9b94836ac31ad5d57a09c09 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 2 May 2023 22:14:10 +0200 Subject: [PATCH 1739/1976] [pre-commit.ci] pre-commit suggestions (#11466) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit suggestions updates: - [github.com/asottile/pyupgrade: v3.3.1 → v3.3.2](https://github.com/asottile/pyupgrade/compare/v3.3.1...v3.3.2) - [github.com/google/yapf: v0.32.0 → v0.33.0](https://github.com/google/yapf/compare/v0.32.0...v0.33.0) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- models/common.py | 5 +++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 8bd40484c522..defb1372625e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -22,7 +22,7 @@ repos: - id: detect-private-key - repo: https://github.com/asottile/pyupgrade - rev: v3.3.1 + rev: v3.3.2 hooks: - id: pyupgrade name: Upgrade code @@ -35,7 +35,7 @@ repos: name: Sort imports - repo: https://github.com/google/yapf - rev: v0.32.0 + rev: v0.33.0 hooks: - id: yapf name: YAPF formatting diff --git a/models/common.py b/models/common.py index b1c24ad378dc..4ef03760c65d 100644 --- a/models/common.py +++ b/models/common.py @@ -353,8 +353,9 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, model.half() if fp16 else model.float() if extra_files['config.txt']: # load metadata dict d = json.loads(extra_files['config.txt'], - object_hook=lambda d: {int(k) if k.isdigit() else k: v - for k, v in d.items()}) + object_hook=lambda d: { + int(k) if k.isdigit() else k: v + for k, v in d.items()}) stride, names = int(d['stride']), d['names'] elif dnn: # ONNX OpenCV DNN LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...') From c3e4e94e944de3b41b3398e2f78e596384739339 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 2 May 2023 22:14:24 +0200 Subject: [PATCH 1740/1976] Bump flask from 1.0.2 to 2.3.2 in /utils/google_app_engine (#11465) Bumps [flask](https://github.com/pallets/flask) from 1.0.2 to 2.3.2. - [Release notes](https://github.com/pallets/flask/releases) - [Changelog](https://github.com/pallets/flask/blob/main/CHANGES.rst) - [Commits](https://github.com/pallets/flask/compare/1.0.2...2.3.2) --- updated-dependencies: - dependency-name: flask dependency-type: direct:production ... 
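The yapf change a few lines above only re-wraps the `object_hook` in models/common.py, but the hook is worth a standalone illustration: TorchScript `extra_files` metadata round-trips through JSON, which stringifies the integer class indices, and the hook restores them. The metadata string below is invented for the example:

```python
import json

meta = '{"stride": 32, "names": {"0": "person", "1": "bicycle"}}'  # sample only
d = json.loads(meta, object_hook=lambda d: {int(k) if k.isdigit() else k: v for k, v in d.items()})

assert d['stride'] == 32
assert d['names'][0] == 'person'  # integer key restored, not the string "0"
```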
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- utils/google_app_engine/additional_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/google_app_engine/additional_requirements.txt b/utils/google_app_engine/additional_requirements.txt index d5b76758c876..fce1511588e3 100644 --- a/utils/google_app_engine/additional_requirements.txt +++ b/utils/google_app_engine/additional_requirements.txt @@ -1,5 +1,5 @@ # add these requirements in your app on top of the existing ones pip==21.1 -Flask==1.0.2 +Flask==2.3.2 gunicorn==19.10.0 werkzeug>=2.2.3 # not directly required, pinned by Snyk to avoid a vulnerability From 016e046262b87a78dcbbab99e56169ef16141f4e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 8 May 2023 23:14:17 +0200 Subject: [PATCH 1741/1976] Set Comet as default logger for Segment, Classify notebooks (#11504) * Created using Colaboratory * Created using Colaboratory * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- classify/tutorial.ipynb | 13 +++++++------ segment/tutorial.ipynb | 13 +++++++------ 2 files changed, 14 insertions(+), 12 deletions(-) diff --git a/classify/tutorial.ipynb b/classify/tutorial.ipynb index ddf67c5519f5..75eebd8e1132 100644 --- a/classify/tutorial.ipynb +++ b/classify/tutorial.ipynb @@ -1255,16 +1255,17 @@ "outputs": [], "source": [ "#@title Select YOLOv5 🚀 logger {run: 'auto'}\n", - "logger = 'TensorBoard' #@param ['TensorBoard', 'Comet', 'ClearML']\n", + "logger = 'Comet' #@param ['Comet', 'ClearML', 'TensorBoard']\n", "\n", - "if logger == 'TensorBoard':\n", - " %load_ext tensorboard\n", - " %tensorboard --logdir runs/train\n", - "elif logger == 'Comet':\n", + "if logger == 'Comet':\n", " %pip install -q comet_ml\n", " import comet_ml; comet_ml.init()\n", "elif logger == 'ClearML':\n", - " import clearml; clearml.browser_login()" + " %pip install -q clearml\n", + " import clearml; clearml.browser_login()\n", + "elif logger == 'TensorBoard':\n", + " %load_ext tensorboard\n", + " %tensorboard --logdir runs/train" ] }, { diff --git a/segment/tutorial.ipynb b/segment/tutorial.ipynb index 6e5caf53b8ff..f2aee9e26b33 100644 --- a/segment/tutorial.ipynb +++ b/segment/tutorial.ipynb @@ -250,16 +250,17 @@ "outputs": [], "source": [ "#@title Select YOLOv5 🚀 logger {run: 'auto'}\n", - "logger = 'TensorBoard' #@param ['TensorBoard', 'Comet', 'ClearML']\n", + "logger = 'Comet' #@param ['Comet', 'ClearML', 'TensorBoard']\n", "\n", - "if logger == 'TensorBoard':\n", - " %load_ext tensorboard\n", - " %tensorboard --logdir runs/train-seg\n", - "elif logger == 'Comet':\n", + "if logger == 'Comet':\n", " %pip install -q comet_ml\n", " import comet_ml; comet_ml.init()\n", "elif logger == 'ClearML':\n", - " import clearml; clearml.browser_login()" + " %pip install -q clearml\n", + " import clearml; clearml.browser_login()\n", + "elif logger == 'TensorBoard':\n", + " %load_ext tensorboard\n", + " %tensorboard --logdir runs/train" ] }, { From b72efd2306e5b8cac4e1d1a78dcf384a8ba230ff Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 11 May 2023 18:50:06 +0200 Subject: [PATCH 1742/1976] Fix `check_requirements()` txt not found bug (#11518) Fix check_requirements() txt not found bug @AyushExel @kalenmike fixes "requirements.txt" not found warning. 
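To make the one-character default-path fix concrete, a small sketch of how the two defaults resolve (the checkout path is hypothetical):

```python
from pathlib import Path

root = Path('/workspace/yolov5')  # stands in for ROOT in utils/general.py

print(root.parent / 'requirements.txt')  # /workspace/requirements.txt (old default, usually absent)
print(root / 'requirements.txt')         # /workspace/yolov5/requirements.txt (fixed default)
```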
Signed-off-by: Glenn Jocher --- utils/general.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/general.py b/utils/general.py index 3d7fd20c48d1..42c6d8c8aab5 100644 --- a/utils/general.py +++ b/utils/general.py @@ -388,7 +388,7 @@ def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=Fals @TryExcept() -def check_requirements(requirements=ROOT.parent / 'requirements.txt', exclude=(), install=True, cmds=''): +def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True, cmds=''): """ Check if installed dependencies meet YOLOv5 requirements and attempt to auto-update if needed. From e382d4c37cd840337381e138ef71bb638d1bb16b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 11 May 2023 18:51:06 +0200 Subject: [PATCH 1743/1976] Update links.yml with 301,302 (#11517) * Update links.yml with 302 Signed-off-by: Glenn Jocher * Update links.yml Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update links.yml Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/workflows/links.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/links.yml b/.github/workflows/links.yml index 306689f46507..d3a599fd4a51 100644 --- a/.github/workflows/links.yml +++ b/.github/workflows/links.yml @@ -32,7 +32,7 @@ jobs: timeout_minutes: 5 retry_wait_seconds: 60 max_attempts: 3 - command: lychee --accept 429,999 --exclude-loopback --exclude twitter.com --exclude-path '**/ci.yaml' --exclude-mail --github-token ${{ secrets.GITHUB_TOKEN }} './**/*.md' './**/*.html' + command: lychee --accept 301,302,429,999 --exclude-loopback --exclude twitter.com --exclude-path '**/ci.yaml' --exclude-mail --github-token ${{ secrets.GITHUB_TOKEN }} './**/*.md' './**/*.html' - name: Test Markdown, HTML, YAML, Python and Notebook links with retry if: github.event_name == 'workflow_dispatch' @@ -41,4 +41,4 @@ jobs: timeout_minutes: 5 retry_wait_seconds: 60 max_attempts: 3 - command: lychee --accept 429,999 --exclude-loopback --exclude twitter.com,url.com --exclude-path '**/ci.yaml' --exclude-mail --github-token ${{ secrets.GITHUB_TOKEN }} './**/*.md' './**/*.html' './**/*.yml' './**/*.yaml' './**/*.py' './**/*.ipynb' + command: lychee --accept 301,302,429,999 --exclude-loopback --exclude twitter.com,url.com --exclude-path '**/ci.yaml' --exclude-mail --github-token ${{ secrets.GITHUB_TOKEN }} './**/*.md' './**/*.html' './**/*.yml' './**/*.yaml' './**/*.py' './**/*.ipynb' From 54e95159c78e389eb5dcd343e98b045a3b61f230 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 14 May 2023 02:35:28 +0200 Subject: [PATCH 1744/1976] Update links.yml (#11526) Signed-off-by: Glenn Jocher --- .github/workflows/links.yml | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/.github/workflows/links.yml b/.github/workflows/links.yml index d3a599fd4a51..cd65b961f7f9 100644 --- a/.github/workflows/links.yml +++ b/.github/workflows/links.yml @@ -5,10 +5,6 @@ name: Check Broken links on: - push: - branches: [master] - pull_request: - branches: [master] workflow_dispatch: schedule: - cron: '0 0 * * *' # runs at 00:00 UTC every day @@ -32,7 +28,7 @@ jobs: timeout_minutes: 5 retry_wait_seconds: 60 max_attempts: 3 - command: lychee --accept 301,302,429,999 --exclude-loopback --exclude twitter.com --exclude-path '**/ci.yaml' --exclude-mail 
--github-token ${{ secrets.GITHUB_TOKEN }} './**/*.md' './**/*.html' + command: lychee --accept 429,999 --exclude-loopback --exclude 'https?://(www\.)?(twitter\.com|instagram\.com)' --exclude-path '**/ci.yaml' --exclude-mail --github-token ${{ secrets.GITHUB_TOKEN }} './**/*.md' './**/*.html' - name: Test Markdown, HTML, YAML, Python and Notebook links with retry if: github.event_name == 'workflow_dispatch' @@ -41,4 +37,4 @@ jobs: timeout_minutes: 5 retry_wait_seconds: 60 max_attempts: 3 - command: lychee --accept 301,302,429,999 --exclude-loopback --exclude twitter.com,url.com --exclude-path '**/ci.yaml' --exclude-mail --github-token ${{ secrets.GITHUB_TOKEN }} './**/*.md' './**/*.html' './**/*.yml' './**/*.yaml' './**/*.py' './**/*.ipynb' + command: lychee --accept 429,999 --exclude-loopback --exclude 'https?://(www\.)?(twitter\.com|instagram\.com|url\.com)' --exclude-path '**/ci.yaml' --exclude-mail --github-token ${{ secrets.GITHUB_TOKEN }} './**/*.md' './**/*.html' './**/*.yml' './**/*.yaml' './**/*.py' './**/*.ipynb' From 5deff1471dede726f6399be43e7073ee7ed3a7d4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 14 May 2023 21:44:53 +0200 Subject: [PATCH 1745/1976] Add `ultralytics` pip package (#11529) * Add ultralytics pip package * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add requirements.txt locations * Update requirements.txt Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- classify/train.py | 2 +- requirements.txt | 2 +- segment/train.py | 2 +- train.py | 2 +- utils/general.py | 52 +---------------------------------------------- 5 files changed, 5 insertions(+), 55 deletions(-) diff --git a/classify/train.py b/classify/train.py index 8b8327f173ef..ecbea1d8c0de 100644 --- a/classify/train.py +++ b/classify/train.py @@ -300,7 +300,7 @@ def main(opt): if RANK in {-1, 0}: print_args(vars(opt)) check_git_status() - check_requirements() + check_requirements(ROOT / 'requirements.txt') # DDP mode device = select_device(opt.device, batch_size=opt.batch_size) diff --git a/requirements.txt b/requirements.txt index 65924c9feec4..ae0a21f003e8 100644 --- a/requirements.txt +++ b/requirements.txt @@ -15,6 +15,7 @@ thop>=0.1.1 # FLOPs computation torch>=1.7.0 # see https://pytorch.org/get-started/locally (recommended) torchvision>=0.8.1 tqdm>=4.64.0 +ultralytics>=8.0.100 # protobuf<=3.20.1 # https://github.com/ultralytics/yolov5/issues/8012 # Logging --------------------------------------------------------------------- @@ -46,4 +47,3 @@ setuptools>=65.5.1 # Snyk vulnerability fix # mss # screenshots # albumentations>=1.0.3 # pycocotools>=2.0.6 # COCO mAP -# ultralytics # HUB https://hub.ultralytics.com diff --git a/segment/train.py b/segment/train.py index 073fc742005b..5f1fa4a1e453 100644 --- a/segment/train.py +++ b/segment/train.py @@ -511,7 +511,7 @@ def main(opt, callbacks=Callbacks()): if RANK in {-1, 0}: print_args(vars(opt)) check_git_status() - check_requirements() + check_requirements(ROOT / 'requirements.txt') # Resume if opt.resume and not opt.evolve: # resume from specified or most recent last.pt diff --git a/train.py b/train.py index 216da6399028..48eeb09468fe 100644 --- a/train.py +++ b/train.py @@ -485,7 +485,7 @@ def main(opt, callbacks=Callbacks()): if RANK in {-1, 0}: print_args(vars(opt)) check_git_status() - check_requirements() + check_requirements(ROOT / 'requirements.txt') # Resume (from 
specified or most recent last.pt) if opt.resume and not check_comet_resume(opt) and not opt.evolve: diff --git a/utils/general.py b/utils/general.py index 42c6d8c8aab5..e95b07486619 100644 --- a/utils/general.py +++ b/utils/general.py @@ -35,6 +35,7 @@ import torch import torchvision import yaml +from ultralytics.yolo.utils.checks import check_requirements from utils import TryExcept, emojis from utils.downloads import curl_download, gsutil_getsize @@ -387,57 +388,6 @@ def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=Fals return result -@TryExcept() -def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True, cmds=''): - """ - Check if installed dependencies meet YOLOv5 requirements and attempt to auto-update if needed. - - Args: - requirements (Union[Path, str, List[str]]): Path to a requirements.txt file, a single package requirement as a - string, or a list of package requirements as strings. - exclude (Tuple[str]): Tuple of package names to exclude from checking. - install (bool): If True, attempt to auto-update packages that don't meet requirements. - cmds (str): Additional commands to pass to the pip install command when auto-updating. - - Returns: - None - """ - prefix = colorstr('red', 'bold', 'requirements:') - check_python() # check python version - file = None - if isinstance(requirements, Path): # requirements.txt file - file = requirements.resolve() - assert file.exists(), f'{prefix} {file} not found, check failed.' - with file.open() as f: - requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(f) if x.name not in exclude] - elif isinstance(requirements, str): - requirements = [requirements] - - s = '' # console string - n = 0 # number of packages updates - for r in requirements: - try: - pkg.require(r) - except (pkg.VersionConflict, pkg.DistributionNotFound): # exception if requirements not met - try: # attempt to import (slower but more accurate) - import importlib - importlib.import_module(next(pkg.parse_requirements(r)).name) - except ImportError: - s += f'"{r}" ' - n += 1 - - if s and install and AUTOINSTALL: # check environment variable - LOGGER.info(f"{prefix} YOLOv5 requirement{'s' * (n > 1)} {s}not found, attempting AutoUpdate...") - try: - assert check_online(), 'AutoUpdate skipped (offline)' - LOGGER.info(subprocess.check_output(f'pip install {s} {cmds}', shell=True).decode()) - s = f"{prefix} {n} package{'s' * (n > 1)} updated per {file or requirements}\n" \ - f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n" - LOGGER.info(s) - except Exception as e: - LOGGER.warning(f'{prefix} ❌ {e}') - - def check_img_size(imgsz, s=32, floor=0): # Verify image size is a multiple of stride s in each dimension if isinstance(imgsz, int): # integer i.e. 
img_size=640 From ec2b8538cdf1ec3a37f7f2ffff4d591f322cc410 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?F=C3=A1bio=20Franco=20Uechi?= <308613+fabito@users.noreply.github.com> Date: Tue, 16 May 2023 21:19:16 +1200 Subject: [PATCH 1746/1976] Fix fp16 (`--half`) support for `TritonRemoteModel` model type (#10787) * Fix fp16 (--half) support for TritonRemoteModel * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- models/common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/common.py b/models/common.py index 4ef03760c65d..16537703e730 100644 --- a/models/common.py +++ b/models/common.py @@ -333,7 +333,7 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, super().__init__() w = str(weights[0] if isinstance(weights, list) else weights) pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, triton = self._model_type(w) - fp16 &= pt or jit or onnx or engine # FP16 + fp16 &= pt or jit or onnx or engine or triton # FP16 nhwc = coreml or saved_model or pb or tflite or edgetpu # BHWC formats (vs torch BCWH) stride = 32 # default stride cuda = torch.cuda.is_available() and device.type != 'cpu' # use CUDA From eef637cd04096c6e8eee029491c32ca7dfbe0173 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 21 May 2023 00:18:12 +0200 Subject: [PATCH 1747/1976] Update `check_requirements()` ROOT (#11557) --- classify/predict.py | 2 +- classify/val.py | 2 +- data/Objects365.yaml | 2 +- detect.py | 2 +- hubconf.py | 4 ++-- segment/predict.py | 2 +- segment/val.py | 2 +- val.py | 2 +- 8 files changed, 9 insertions(+), 9 deletions(-) diff --git a/classify/predict.py b/classify/predict.py index c1b6650d4bd0..9b64810d4d63 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -217,7 +217,7 @@ def parse_opt(): def main(opt): - check_requirements(exclude=('tensorboard', 'thop')) + check_requirements(ROOT / 'requirements.txt', exclude=('tensorboard', 'thop')) run(**vars(opt)) diff --git a/classify/val.py b/classify/val.py index 643489d64d36..4b92e9f105db 100644 --- a/classify/val.py +++ b/classify/val.py @@ -161,7 +161,7 @@ def parse_opt(): def main(opt): - check_requirements(exclude=('tensorboard', 'thop')) + check_requirements(ROOT / 'requirements.txt', exclude=('tensorboard', 'thop')) run(**vars(opt)) diff --git a/data/Objects365.yaml b/data/Objects365.yaml index bb2aa34cd4a4..990f5a56b39e 100644 --- a/data/Objects365.yaml +++ b/data/Objects365.yaml @@ -388,7 +388,7 @@ download: | from utils.general import Path, check_requirements, download, np, xyxy2xywhn - check_requirements(('pycocotools>=2.0',)) + check_requirements('pycocotools>=2.0') from pycocotools.coco import COCO # Make Directories diff --git a/detect.py b/detect.py index 64d6f149a614..216c9dbd5880 100644 --- a/detect.py +++ b/detect.py @@ -252,7 +252,7 @@ def parse_opt(): def main(opt): - check_requirements(exclude=('tensorboard', 'thop')) + check_requirements(ROOT / 'requirements.txt', exclude=('tensorboard', 'thop')) run(**vars(opt)) diff --git a/hubconf.py b/hubconf.py index 73caf06685da..f0192698fbe3 100644 --- a/hubconf.py +++ b/hubconf.py @@ -34,12 +34,12 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo from models.experimental import attempt_load from models.yolo import ClassificationModel, DetectionModel, SegmentationModel from 
utils.downloads import attempt_download - from utils.general import LOGGER, check_requirements, intersect_dicts, logging + from utils.general import LOGGER, ROOT, check_requirements, intersect_dicts, logging from utils.torch_utils import select_device if not verbose: LOGGER.setLevel(logging.WARNING) - check_requirements(exclude=('opencv-python', 'tensorboard', 'thop')) + check_requirements(ROOT / 'requirements.txt', exclude=('opencv-python', 'tensorboard', 'thop')) name = Path(name) path = name.with_suffix('.pt') if name.suffix == '' and not name.is_dir() else name # checkpoint path try: diff --git a/segment/predict.py b/segment/predict.py index 4d4d6036358a..6a4d5eff3fc1 100644 --- a/segment/predict.py +++ b/segment/predict.py @@ -275,7 +275,7 @@ def parse_opt(): def main(opt): - check_requirements(exclude=('tensorboard', 'thop')) + check_requirements(ROOT / 'requirements.txt', exclude=('tensorboard', 'thop')) run(**vars(opt)) diff --git a/segment/val.py b/segment/val.py index c0575fd59a91..dc8081840e37 100644 --- a/segment/val.py +++ b/segment/val.py @@ -434,7 +434,7 @@ def parse_opt(): def main(opt): - check_requirements(requirements=ROOT / 'requirements.txt', exclude=('tensorboard', 'thop')) + check_requirements(ROOT / 'requirements.txt', exclude=('tensorboard', 'thop')) if opt.task in ('train', 'val', 'test'): # run normally if opt.conf_thres > 0.001: # https://github.com/ultralytics/yolov5/issues/1466 diff --git a/val.py b/val.py index 3d01f1a5996d..71268651d29b 100644 --- a/val.py +++ b/val.py @@ -370,7 +370,7 @@ def parse_opt(): def main(opt): - check_requirements(exclude=('tensorboard', 'thop')) + check_requirements(ROOT / 'requirements.txt', exclude=('tensorboard', 'thop')) if opt.task in ('train', 'val', 'test'): # run normally if opt.conf_thres > 0.001: # https://github.com/ultralytics/yolov5/issues/1466 From 4298c5dc3aa5c9a20f4e95e3a350903994e2e75e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 May 2023 14:09:35 +0200 Subject: [PATCH 1748/1976] Bump slackapi/slack-github-action from 1.23.0 to 1.24.0 (#11566) Bumps [slackapi/slack-github-action](https://github.com/slackapi/slack-github-action) from 1.23.0 to 1.24.0. - [Release notes](https://github.com/slackapi/slack-github-action/releases) - [Commits](https://github.com/slackapi/slack-github-action/compare/v1.23.0...v1.24.0) --- updated-dependencies: - dependency-name: slackapi/slack-github-action dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci-testing.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index e71a4b8f16ac..2bc33eb49eba 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -159,7 +159,7 @@ jobs: steps: - name: Check for failure and notify if: (needs.Benchmarks.result == 'failure' || needs.Tests.result == 'failure') && github.repository == 'ultralytics/yolov5' && (github.event_name == 'schedule' || github.event_name == 'push') - uses: slackapi/slack-github-action@v1.23.0 + uses: slackapi/slack-github-action@v1.24.0 with: payload: | {"text": " GitHub Actions error for ${{ github.workflow }} ❌\n\n\n*Repository:* https://github.com/${{ github.repository }}\n*Action:* https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\n*Author:* ${{ github.actor }}\n*Event:* ${{ github.event_name }}\n"} From 6e04b94fa9fb12ff66b2329660de8a5a8e5f1b1d Mon Sep 17 00:00:00 2001 From: Peter van Lunteren Date: Mon, 22 May 2023 14:12:10 +0200 Subject: [PATCH 1749/1976] add smoothing line to results.png to improve readability (#11536) Signed-off-by: Peter van Lunteren Co-authored-by: Glenn Jocher --- utils/plots.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/utils/plots.py b/utils/plots.py index d1284b950269..754fabb84e37 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -18,6 +18,7 @@ import seaborn as sn import torch from PIL import Image, ImageDraw, ImageFont +from scipy.ndimage.filters import gaussian_filter1d from utils import TryExcept, threaded from utils.general import (CONFIG_DIR, FONT, LOGGER, check_font, check_requirements, clip_boxes, increment_path, @@ -500,7 +501,8 @@ def plot_results(file='path/to/results.csv', dir=''): for i, j in enumerate([1, 2, 3, 4, 5, 8, 9, 10, 6, 7]): y = data.values[:, j].astype('float') # y[y == 0] = np.nan # don't show zero values - ax[i].plot(x, y, marker='.', label=f.stem, linewidth=2, markersize=8) + ax[i].plot(x, y, marker='.', label=f.stem, linewidth=2, markersize=8) # actual results + ax[i].plot(x, gaussian_filter1d(y, sigma=3), ':', label='smooth', linewidth=2) # smoothing line ax[i].set_title(s[j], fontsize=12) # if j in [8, 9, 10]: # share train and val loss y axes # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5]) From c3c130416323f3766d4abe95c2ff88bc9e2264dd Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 23 May 2023 12:12:30 +0200 Subject: [PATCH 1750/1976] Update LinkedIn URL (#11576) --- README.md | 4 ++-- README.zh-CN.md | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 37f683343f53..22b6260d3101 100644 --- a/README.md +++ b/README.md @@ -28,7 +28,7 @@ To request an Enterprise License please complete the form at [Ultralytics Licens - + @@ -473,7 +473,7 @@ For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https:/ - + diff --git a/README.zh-CN.md b/README.zh-CN.md index da60d3fe0573..db909b04268d 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -27,7 +27,7 @@ YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表 - + @@ -468,7 +468,7 @@ YOLOv5 在两种不同的 License 下可用: - + From 573334200866d5400325c0e8430c7154f7f23a59 Mon Sep 17 00:00:00 2001 From: Snyk bot Date: Tue, 30 May 2023 17:32:46 +0100 Subject: [PATCH 1751/1976] [Snyk] Security upgrade numpy from 1.21.3 to 1.22.2 (#11531) fix: requirements.txt to reduce vulnerabilities The 
following vulnerabilities are fixed by pinning transitive dependencies: - https://snyk.io/vuln/SNYK-PYTHON-NUMPY-2321964 - https://snyk.io/vuln/SNYK-PYTHON-NUMPY-2321966 - https://snyk.io/vuln/SNYK-PYTHON-NUMPY-2321970 From 5eb7f7ddc034b9ad07578a6f954c58170579ebc6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 31 May 2023 12:22:52 +0200 Subject: [PATCH 1752/1976] Update requirements.txt `ultralytics>=8.0.111` (#11630) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index ae0a21f003e8..069cafe7e14f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -15,7 +15,7 @@ thop>=0.1.1 # FLOPs computation torch>=1.7.0 # see https://pytorch.org/get-started/locally (recommended) torchvision>=0.8.1 tqdm>=4.64.0 -ultralytics>=8.0.100 +ultralytics>=8.0.111 # protobuf<=3.20.1 # https://github.com/ultralytics/yolov5/issues/8012 # Logging --------------------------------------------------------------------- From 5f11555e0796f9471185a16dc79860c236f656f8 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 1 Jun 2023 13:06:22 +0200 Subject: [PATCH 1753/1976] Update ci-testing.yml (#11642) Signed-off-by: Glenn Jocher --- .github/workflows/ci-testing.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 2bc33eb49eba..79de62c8caaf 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -158,7 +158,7 @@ jobs: if: always() # This ensures the job runs even if previous jobs fail steps: - name: Check for failure and notify - if: (needs.Benchmarks.result == 'failure' || needs.Tests.result == 'failure') && github.repository == 'ultralytics/yolov5' && (github.event_name == 'schedule' || github.event_name == 'push') + if: (needs.Benchmarks.result == 'failure' || needs.Tests.result == 'failure' || needs.Benchmarks.result == 'cancelled' || needs.Tests.result == 'cancelled') && github.repository == 'ultralytics/yolov5' && (github.event_name == 'schedule' || github.event_name == 'push') uses: slackapi/slack-github-action@v1.24.0 with: payload: | From 76ea9ed3a4d42fe835e172672132f13cf5286648 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 6 Jun 2023 00:29:58 +0200 Subject: [PATCH 1754/1976] [pre-commit.ci] pre-commit suggestions (#11661) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/asottile/pyupgrade: v3.3.2 → v3.4.0](https://github.com/asottile/pyupgrade/compare/v3.3.2...v3.4.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index defb1372625e..c6fd99767667 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -22,7 +22,7 @@ repos: - id: detect-private-key - repo: https://github.com/asottile/pyupgrade - rev: v3.3.2 + rev: v3.4.0 hooks: - id: pyupgrade name: Upgrade code From 89c3040e734e8a0185fb49c667184600bb827f25 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 6 Jun 2023 14:48:13 +0200 Subject: [PATCH 1755/1976] Fix OpenVINO export (#11666) * Fix OpenVINO export Resolves https://github.com/ultralytics/yolov5/issues/11645 Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update export.py 
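In short, this fix replaces the old `mo` command-line invocation with OpenVINO's Python conversion API. A standalone sketch of the new path (a minimal sketch, assuming `openvino-dev>=2022.3` and a previously exported ONNX file; the file names are placeholders):

```python
# Sketch: convert an ONNX export to OpenVINO IR, mirroring the patched export_openvino().
import openvino.runtime as ov
from openvino.tools import mo

ov_model = mo.convert_model('yolov5s.onnx', model_name='yolov5s', framework='onnx', compress_to_fp16=True)
ov.serialize(ov_model, 'yolov5s_openvino_model/yolov5s.xml')  # writes the .xml/.bin IR pair
```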
Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- export.py | 23 ++++++++++------------- 1 file changed, 10 insertions(+), 13 deletions(-) diff --git a/export.py b/export.py index 5f8e1c4821da..7754ff12f28a 100644 --- a/export.py +++ b/export.py @@ -207,21 +207,18 @@ def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr('ONNX @try_export def export_openvino(file, metadata, half, prefix=colorstr('OpenVINO:')): # YOLOv5 OpenVINO export - check_requirements('openvino-dev') # requires openvino-dev: https://pypi.org/project/openvino-dev/ - import openvino.inference_engine as ie + check_requirements('openvino-dev>=2022.3') # requires openvino-dev: https://pypi.org/project/openvino-dev/ + import openvino.runtime as ov # noqa + from openvino.tools import mo # noqa - LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...') - f = str(file).replace('.pt', f'_openvino_model{os.sep}') + LOGGER.info(f'\n{prefix} starting export with openvino {ov.__version__}...') + f = str(file).replace(file.suffix, f'_openvino_model{os.sep}') + f_onnx = file.with_suffix('.onnx') + f_ov = str(Path(f) / file.with_suffix('.xml').name) - args = [ - 'mo', - '--input_model', - str(file.with_suffix('.onnx')), - '--output_dir', - f, - '--data_type', - ('FP16' if half else 'FP32'),] - subprocess.run(args, check=True, env=os.environ) # export + ov_model = mo.convert_model(f_onnx, model_name=file.stem, framework='onnx', compress_to_fp16=half) # export + + ov.serialize(ov_model, f_ov) # save yaml_save(Path(f) / file.with_suffix('.yaml').name, metadata) # add metadata.yaml return f, None From a199480ba6bb527598df11abbc1d679ccda82670 Mon Sep 17 00:00:00 2001 From: wuhongsheng <664116298@qq.com> Date: Thu, 8 Jun 2023 05:28:01 +0800 Subject: [PATCH 1756/1976] Fix the bug that tensorRT batch_size does not take effect (#11672) * Fix the bug that tensorRT batch_size does not take effect Signed-off-by: wuhongsheng <664116298@qq.com> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Signed-off-by: wuhongsheng <664116298@qq.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- benchmarks.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/benchmarks.py b/benchmarks.py index fc3073965ab3..b590ff63cb01 100644 --- a/benchmarks.py +++ b/benchmarks.py @@ -76,7 +76,12 @@ def run( if f == '-': w = weights # PyTorch format else: - w = export.run(weights=weights, imgsz=[imgsz], include=[f], device=device, half=half)[-1] # all others + w = export.run(weights=weights, + imgsz=[imgsz], + include=[f], + batch_size=batch_size, + device=device, + half=half)[-1] # all others assert suffix in str(w), 'export failed' # Validate From 98acd111b110a60843291edcf95e708d73abfe5d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 15 Jun 2023 13:49:19 +0200 Subject: [PATCH 1757/1976] Update Comet integration (#11648) * Update Comet * Update Comet * Update Comet * Add default Experiment Name * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] 
<66853113+pre-commit-ci[bot]@users.noreply.github.com> --- segment/tutorial.ipynb | 2 +- train.py | 5 +++++ tutorial.ipynb | 2 +- utils/__init__.py | 2 +- utils/loggers/__init__.py | 12 ++++-------- utils/loggers/comet/__init__.py | 5 +++-- 6 files changed, 15 insertions(+), 13 deletions(-) diff --git a/segment/tutorial.ipynb b/segment/tutorial.ipynb index f2aee9e26b33..0ece0f60e4d6 100644 --- a/segment/tutorial.ipynb +++ b/segment/tutorial.ipynb @@ -63,7 +63,7 @@ "source": [ "!git clone https://github.com/ultralytics/yolov5 # clone\n", "%cd yolov5\n", - "%pip install -qr requirements.txt # install\n", + "%pip install -qr requirements.txt comet_ml # install\n", "\n", "import torch\n", "import utils\n", diff --git a/train.py b/train.py index 48eeb09468fe..b5044deb9b5c 100644 --- a/train.py +++ b/train.py @@ -26,6 +26,11 @@ from datetime import datetime from pathlib import Path +try: + import comet_ml # must be imported before torch (if installed) +except ImportError: + comet_ml = None + import numpy as np import torch import torch.distributed as dist diff --git a/tutorial.ipynb b/tutorial.ipynb index be87068822af..42c6bc632aec 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -59,7 +59,7 @@ "source": [ "!git clone https://github.com/ultralytics/yolov5 # clone\n", "%cd yolov5\n", - "%pip install -qr requirements.txt # install\n", + "%pip install -qr requirements.txt comet_ml # install\n", "\n", "import torch\n", "import utils\n", diff --git a/utils/__init__.py b/utils/__init__.py index 6c10857df079..bccac42711e6 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -54,7 +54,7 @@ def notebook_init(verbose=True): import os import shutil - from utils.general import check_font, check_requirements, is_colab + from utils.general import check_font, is_colab from utils.torch_utils import select_device # imports check_font() diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index c7c283b728ac..ba7d2790e613 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -46,15 +46,15 @@ clearml = None try: - if RANK not in [0, -1]: - comet_ml = None - else: + if RANK in {0, -1}: import comet_ml assert hasattr(comet_ml, '__version__') # verify package import not local dir from utils.loggers.comet import CometLogger -except (ModuleNotFoundError, ImportError, AssertionError): + else: + comet_ml = None +except (ImportError, AssertionError): comet_ml = None @@ -88,10 +88,6 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, self.csv = True # always log to csv # Messages - if not clearml: - prefix = colorstr('ClearML: ') - s = f"{prefix}run 'pip install clearml' to automatically track, visualize and remotely train YOLOv5 🚀 in ClearML" - self.logger.info(s) if not comet_ml: prefix = colorstr('Comet: ') s = f"{prefix}run 'pip install comet_ml' to automatically track and visualize YOLOv5 🚀 runs in Comet" diff --git a/utils/loggers/comet/__init__.py b/utils/loggers/comet/__init__.py index d4599841c9fc..aac06b11c8c0 100644 --- a/utils/loggers/comet/__init__.py +++ b/utils/loggers/comet/__init__.py @@ -18,7 +18,7 @@ # Project Configuration config = comet_ml.config.get_config() COMET_PROJECT_NAME = config.get_string(os.getenv('COMET_PROJECT_NAME'), 'comet.project_name', default='yolov5') -except (ModuleNotFoundError, ImportError): +except ImportError: comet_ml = None COMET_PROJECT_NAME = None @@ -82,7 +82,7 @@ def __init__(self, opt, hyp, run_id=None, job_type='Training', **experiment_kwar self.comet_log_batch_interval = COMET_BATCH_LOGGING_INTERVAL 
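        # note: COMET_BATCH_LOGGING_INTERVAL is read from the environment at module level (default 1), so batch-metric frequency is tunable without code changes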
# Dataset Artifact Settings - self.upload_dataset = self.opt.upload_dataset if self.opt.upload_dataset else COMET_UPLOAD_DATASET + self.upload_dataset = self.opt.upload_dataset or COMET_UPLOAD_DATASET self.resume = self.opt.resume # Default parameters to pass to Experiment objects @@ -93,6 +93,7 @@ def __init__(self, opt, hyp, run_id=None, job_type='Training', **experiment_kwar 'project_name': COMET_PROJECT_NAME,} self.default_experiment_kwargs.update(experiment_kwargs) self.experiment = self._get_experiment(self.comet_mode, run_id) + self.experiment.set_name(self.opt.name) self.data_dict = self.check_dataset(self.opt.data) self.class_names = self.data_dict['names'] From 9bb50b4ffee5fcdcfd381ac2b885d1303c767650 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 15 Jun 2023 13:52:17 +0200 Subject: [PATCH 1758/1976] Remove Python 3.7 from tests (#11708) Signed-off-by: Glenn Jocher --- .github/workflows/ci-testing.yml | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 79de62c8caaf..13f9787ed8c9 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -57,10 +57,7 @@ jobs: model: [ yolov5n ] include: - os: ubuntu-latest - python-version: '3.7' # '3.6.8' min - model: yolov5n - - os: ubuntu-latest - python-version: '3.8' + python-version: '3.8' # '3.6.8' min model: yolov5n - os: ubuntu-latest python-version: '3.9' From 3812a1a29f7874a370967eca1bd77a69820df88f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 15 Jun 2023 21:15:59 +0200 Subject: [PATCH 1759/1976] Update Discord invite URLs (#11713) --- .github/ISSUE_TEMPLATE/config.yml | 2 +- README.md | 8 ++++---- README.zh-CN.md | 8 ++++---- tutorial.ipynb | 2 +- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index 743feb957ff1..d95f385351fc 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -7,5 +7,5 @@ contact_links: url: https://community.ultralytics.com/ about: Ask on Ultralytics Community Forum - name: 🎧 Discord - url: https://discord.gg/n6cFeSPZdD + url: https://discord.gg/7aegy5d8 about: Ask on Ultralytics Discord diff --git a/README.md b/README.md index 22b6260d3101..4d2aa9b886c8 100644 --- a/README.md +++ b/README.md @@ -20,7 +20,7 @@ YOLOv5 🚀 is the world's most loved vision AI, representing Ultralytics open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development. -We hope that the resources here will help you get the most out of YOLOv5. Please browse the YOLOv5 Docs for details, raise an issue on GitHub for support, and join our Discord community for questions and discussions! +We hope that the resources here will help you get the most out of YOLOv5. Please browse the YOLOv5 Docs for details, raise an issue on GitHub for support, and join our Discord community for questions and discussions! To request an Enterprise License please complete the form at [Ultralytics Licensing](https://ultralytics.com/license). @@ -43,7 +43,7 @@ To request an Enterprise License please complete the form at [Ultralytics Licens - + @@ -466,7 +466,7 @@ YOLOv5 is available under two different licenses: ##
Contact
-For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues), and join our [Discord](https://discord.gg/n6cFeSPZdD) community for questions and discussions! +For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues), and join our [Discord](https://discord.gg/7aegy5d8) community for questions and discussions!
@@ -488,7 +488,7 @@ For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https:/ - +
diff --git a/README.zh-CN.md b/README.zh-CN.md index db909b04268d..5fe8e2d8b106 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -19,7 +19,7 @@ YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表 Ultralytics 对未来视觉 AI 方法的开源研究,结合在数千小时的研究和开发中积累的经验教训和最佳实践。 -我们希望这里的资源能帮助您充分利用 YOLOv5。请浏览 YOLOv5 文档 了解详细信息,在 GitHub 上提交问题以获得支持,并加入我们的 Discord 社区进行问题和讨论! +我们希望这里的资源能帮助您充分利用 YOLOv5。请浏览 YOLOv5 文档 了解详细信息,在 GitHub 上提交问题以获得支持,并加入我们的 Discord 社区进行问题和讨论! 如需申请企业许可,请在 [Ultralytics Licensing](https://ultralytics.com/license) 处填写表格 @@ -42,7 +42,7 @@ YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表 - + @@ -461,7 +461,7 @@ YOLOv5 在两种不同的 License 下可用: ##
联系我们
-对于 YOLOv5 的错误报告和功能请求,请访问 [GitHub Issues](https://github.com/ultralytics/yolov5/issues),并加入我们的 [Discord](https://discord.gg/n6cFeSPZdD) 社区进行问题和讨论! +对于 YOLOv5 的错误报告和功能请求,请访问 [GitHub Issues](https://github.com/ultralytics/yolov5/issues),并加入我们的 [Discord](https://discord.gg/7aegy5d8) 社区进行问题和讨论!
@@ -483,7 +483,7 @@ YOLOv5 在两种不同的 License 下可用: - +
diff --git a/tutorial.ipynb b/tutorial.ipynb index 42c6bc632aec..513bfdefcdeb 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -31,7 +31,7 @@ " \"Open\n", "
\n", "\n", - "This YOLOv5 🚀 notebook by Ultralytics presents simple train, validate and predict examples to help start your AI adventure.
We hope that the resources in this notebook will help you get the most out of YOLOv5. Please browse the YOLOv5 Docs for details, raise an issue on GitHub for support, and join our Discord community for questions and discussions!\n", + "This YOLOv5 🚀 notebook by Ultralytics presents simple train, validate and predict examples to help start your AI adventure.
We hope that the resources in this notebook will help you get the most out of YOLOv5. Please browse the YOLOv5 Docs for details, raise an issue on GitHub for support, and join our Discord community for questions and discussions!\n", "\n", "" ] From 7e2139256143e2ae8befc392379f63cf97c3c061 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=90=AA=E4=BA=9A=E5=A8=9C=E8=8A=BD=E8=A1=A3=E8=B4=B4?= =?UTF-8?q?=E8=B4=B4?= <39751846+kisaragychihaya@users.noreply.github.com> Date: Sun, 18 Jun 2023 02:50:10 +0800 Subject: [PATCH 1760/1976] Add OpenVINO NNCF Support (Using --int8 flag) (#11706) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add OpenVINO NNCF support * Add openvino to flag help text Using --int8 --data your_dataset.yaml to quant your ov model Signed-off-by: 琪亚娜芽衣贴贴 <39751846+kisaragychihaya@users.noreply.github.com> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update export.py Redundant Signed-off-by: Glenn Jocher --------- Signed-off-by: 琪亚娜芽衣贴贴 <39751846+kisaragychihaya@users.noreply.github.com> Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- export.py | 58 ++++++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 53 insertions(+), 5 deletions(-) diff --git a/export.py b/export.py index 7754ff12f28a..815a27cb0d43 100644 --- a/export.py +++ b/export.py @@ -205,7 +205,7 @@ def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr('ONNX @try_export -def export_openvino(file, metadata, half, prefix=colorstr('OpenVINO:')): +def export_openvino(file, metadata, half, int8, data, prefix=colorstr('OpenVINO:')): # YOLOv5 OpenVINO export check_requirements('openvino-dev>=2022.3') # requires openvino-dev: https://pypi.org/project/openvino-dev/ import openvino.runtime as ov # noqa @@ -215,8 +215,56 @@ def export_openvino(file, metadata, half, prefix=colorstr('OpenVINO:')): f = str(file).replace(file.suffix, f'_openvino_model{os.sep}') f_onnx = file.with_suffix('.onnx') f_ov = str(Path(f) / file.with_suffix('.xml').name) - - ov_model = mo.convert_model(f_onnx, model_name=file.stem, framework='onnx', compress_to_fp16=half) # export + if int8: + check_requirements('nncf') + import nncf + import numpy as np + from openvino.runtime import Core + + from utils.dataloaders import create_dataloader, letterbox + core = Core() + onnx_model = core.read_model(f_onnx) # export + + def prepare_input_tensor(image: np.ndarray): + input_tensor = image.astype(np.float32) # uint8 to fp16/32 + input_tensor /= 255.0 # 0 - 255 to 0.0 - 1.0 + + if input_tensor.ndim == 3: + input_tensor = np.expand_dims(input_tensor, 0) + return input_tensor + + def gen_dataloader(yaml_path, task='train', imgsz=640, workers=4): + data_yaml = check_yaml(yaml_path) + data = check_dataset(data_yaml) + dataloader = create_dataloader(data[task], + imgsz=imgsz, + batch_size=1, + stride=32, + pad=0.5, + single_cls=False, + rect=False, + workers=workers)[0] + return dataloader + + # noqa: F811 + + def transform_fn(data_item): + """ + Quantization transform function. Extracts and preprocess input data from dataloader item for quantization. 
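+        NNCF invokes this once for every sample drawn from the calibration dataloader built below.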
+ Parameters: + data_item: Tuple with data item produced by DataLoader during iteration + Returns: + input_tensor: Input data for quantization + """ + img = data_item[0].numpy() + input_tensor = prepare_input_tensor(img) + return input_tensor + + ds = gen_dataloader(data) + quantization_dataset = nncf.Dataset(ds, transform_fn) + ov_model = nncf.quantize(onnx_model, quantization_dataset, preset=nncf.QuantizationPreset.MIXED) + else: + ov_model = mo.convert_model(f_onnx, model_name=file.stem, framework='onnx', compress_to_fp16=half) # export ov.serialize(ov_model, f_ov) # save yaml_save(Path(f) / file.with_suffix('.yaml').name, metadata) # add metadata.yaml @@ -723,7 +771,7 @@ def run( if onnx or xml: # OpenVINO requires ONNX f[2], _ = export_onnx(model, im, file, opset, dynamic, simplify) if xml: # OpenVINO - f[3], _ = export_openvino(file, metadata, half) + f[3], _ = export_openvino(file, metadata, half, int8, data) if coreml: # CoreML f[4], ct_model = export_coreml(model, im, file, int8, half, nms) if nms: @@ -783,7 +831,7 @@ def parse_opt(known=False): parser.add_argument('--inplace', action='store_true', help='set YOLOv5 Detect() inplace=True') parser.add_argument('--keras', action='store_true', help='TF: use Keras') parser.add_argument('--optimize', action='store_true', help='TorchScript: optimize for mobile') - parser.add_argument('--int8', action='store_true', help='CoreML/TF INT8 quantization') + parser.add_argument('--int8', action='store_true', help='CoreML/TF/OpenVINO INT8 quantization') parser.add_argument('--dynamic', action='store_true', help='ONNX/TF/TensorRT: dynamic axes') parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model') parser.add_argument('--opset', type=int, default=17, help='ONNX: opset version') From 878d9c8d5b21253ee3a086b69a94fbbf55e56088 Mon Sep 17 00:00:00 2001 From: hackerrajeshkumar <120269593+hackerrajeshkumar@users.noreply.github.com> Date: Sun, 18 Jun 2023 00:21:50 +0530 Subject: [PATCH 1761/1976] Update export.py (#11638) * Update export.py Signed-off-by: hackerrajeshkumar <120269593+hackerrajeshkumar@users.noreply.github.com> * Update export.py Signed-off-by: Glenn Jocher --------- Signed-off-by: hackerrajeshkumar <120269593+hackerrajeshkumar@users.noreply.github.com> Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- export.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/export.py b/export.py index 815a27cb0d43..9399f0bbd221 100644 --- a/export.py +++ b/export.py @@ -581,7 +581,7 @@ def pipeline_coreml(model, im, file, names, y, prefix=colorstr('CoreML Pipeline: batch_size, ch, h, w = list(im.shape) # BCHW t = time.time() - # Output shapes + # YOLOv5 Output shapes spec = model.get_spec() out0, out1 = iter(spec.description.output) if platform.system() == 'Darwin': From f616dc5af217a7193d80b79e119d7a40798750ad Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 18 Jun 2023 15:44:56 +0200 Subject: [PATCH 1762/1976] Uninstall `wandb` from notebook environments (#11729) Uninstall W&B that are present in notebooks Resolves unwanted W&B install issues in https://www.kaggle.com/code/ultralytics/yolov8/comments#2306977 Signed-off-by: Glenn Jocher --- utils/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/__init__.py b/utils/__init__.py index bccac42711e6..4687889bfc56 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -61,6 +61,7 @@ def notebook_init(verbose=True): import psutil + os.system('pip uninstall -y wandb') if is_colab(): shutil.rmtree('/content/sample_data', 
ignore_errors=True) # remove colab /sample_data directory From 2334aa733872bc4bb3e1a1ba90e5fd319399596f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 18 Jun 2023 16:09:41 +0200 Subject: [PATCH 1763/1976] Uninstall `wandb` from notebook environments (#11730) * Uninstall `wandb` from notebook environments Due to undesired behavior in https://www.kaggle.com/code/ultralytics/yolov8/comments#2306977 Signed-off-by: Glenn Jocher * fix import * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/__init__.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/utils/__init__.py b/utils/__init__.py index 4687889bfc56..2a7652b52c1b 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -54,6 +54,8 @@ def notebook_init(verbose=True): import os import shutil + from ultralytics.yolo.utils.checks import check_requirements + from utils.general import check_font, is_colab from utils.torch_utils import select_device # imports @@ -61,7 +63,8 @@ def notebook_init(verbose=True): import psutil - os.system('pip uninstall -y wandb') + if check_requirements('wandb', install=False): + os.system('pip uninstall -y wandb') # eliminate unexpected account creation prompt with infinite hang if is_colab(): shutil.rmtree('/content/sample_data', ignore_errors=True) # remove colab /sample_data directory From 0acc5cf6e529265b32d4c66166118e3efcff8bdb Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 25 Jun 2023 18:04:34 +0200 Subject: [PATCH 1764/1976] Add PyPI badges to README (#11770) * Update README.md Signed-off-by: Glenn Jocher * Update README.zh-CN.md Signed-off-by: Glenn Jocher * Update README.zh-CN.md Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher --- README.md | 4 +++- README.zh-CN.md | 12 ++++++------ 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 4d2aa9b886c8..943f125fec14 100644 --- a/README.md +++ b/README.md @@ -59,7 +59,9 @@ object detection, image segmentation and image classification tasks. See the [YOLOv8 Docs](https://docs.ultralytics.com) for details and get started with: -```commandline +[![PyPI version](https://badge.fury.io/py/ultralytics.svg)](https://badge.fury.io/py/ultralytics) [![Downloads](https://static.pepy.tech/badge/ultralytics)](https://pepy.tech/project/ultralytics) + +```bash pip install ultralytics ``` diff --git a/README.zh-CN.md b/README.zh-CN.md index 5fe8e2d8b106..3a65d268a336 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -47,14 +47,14 @@ YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表YOLOv8 🚀 NEW +##
YOLOv8 🚀 新品
-We are thrilled to announce the launch of Ultralytics YOLOv8 🚀, our NEW cutting-edge, state-of-the-art (SOTA) model -released at **[https://github.com/ultralytics/ultralytics](https://github.com/ultralytics/ultralytics)**. -YOLOv8 is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of -object detection, image segmentation and image classification tasks. +我们很高兴宣布 Ultralytics YOLOv8 🚀 的发布,这是我们新推出的领先水平、最先进的(SOTA)模型,发布于 **[https://github.com/ultralytics/ultralytics](https://github.com/ultralytics/ultralytics)**。 +YOLOv8 旨在快速、准确且易于使用,使其成为广泛的物体检测、图像分割和图像分类任务的极佳选择。 -See the [YOLOv8 Docs](https://docs.ultralytics.com) for details and get started with: +请查看 [YOLOv8 文档](https://docs.ultralytics.com)了解详细信息,并开始使用: + +[![PyPI 版本](https://badge.fury.io/py/ultralytics.svg)](https://badge.fury.io/py/ultralytics) [![下载量](https://static.pepy.tech/badge/ultralytics)](https://pepy.tech/project/ultralytics) ```commandline pip install ultralytics From 0004c745daeebb0eb6455b90c3e8abb308e61320 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 26 Jun 2023 19:14:04 +0200 Subject: [PATCH 1765/1976] Update Discord invite URLs (#11774) Update Discord invite link --- .github/ISSUE_TEMPLATE/config.yml | 2 +- README.md | 8 ++++---- README.zh-CN.md | 8 ++++---- tutorial.ipynb | 2 +- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index d95f385351fc..947377c20a4f 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -7,5 +7,5 @@ contact_links: url: https://community.ultralytics.com/ about: Ask on Ultralytics Community Forum - name: 🎧 Discord - url: https://discord.gg/7aegy5d8 + url: https://discord.gg/bNc8wwx7Qy about: Ask on Ultralytics Discord diff --git a/README.md b/README.md index 943f125fec14..bcd7344cafe3 100644 --- a/README.md +++ b/README.md @@ -20,7 +20,7 @@ YOLOv5 🚀 is the world's most loved vision AI, representing
Ultralytics open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development. -We hope that the resources here will help you get the most out of YOLOv5. Please browse the YOLOv5 Docs for details, raise an issue on GitHub for support, and join our Discord community for questions and discussions! +We hope that the resources here will help you get the most out of YOLOv5. Please browse the YOLOv5 Docs for details, raise an issue on GitHub for support, and join our Discord community for questions and discussions! To request an Enterprise License please complete the form at [Ultralytics Licensing](https://ultralytics.com/license). @@ -43,7 +43,7 @@ To request an Enterprise License please complete the form at [Ultralytics Licens - + @@ -468,7 +468,7 @@ YOLOv5 is available under two different licenses: ##
Contact
-For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues), and join our [Discord](https://discord.gg/7aegy5d8) community for questions and discussions! +For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues), and join our [Discord](https://discord.gg/bNc8wwx7Qy) community for questions and discussions!
@@ -490,7 +490,7 @@ For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https:/ - +
diff --git a/README.zh-CN.md b/README.zh-CN.md index 3a65d268a336..37f29635f86c 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -19,7 +19,7 @@ YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表 Ultralytics 对未来视觉 AI 方法的开源研究,结合在数千小时的研究和开发中积累的经验教训和最佳实践。 -我们希望这里的资源能帮助您充分利用 YOLOv5。请浏览 YOLOv5 文档 了解详细信息,在 GitHub 上提交问题以获得支持,并加入我们的 Discord 社区进行问题和讨论! +我们希望这里的资源能帮助您充分利用 YOLOv5。请浏览 YOLOv5 文档 了解详细信息,在 GitHub 上提交问题以获得支持,并加入我们的 Discord 社区进行问题和讨论! 如需申请企业许可,请在 [Ultralytics Licensing](https://ultralytics.com/license) 处填写表格 @@ -42,7 +42,7 @@ YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表 - + @@ -461,7 +461,7 @@ YOLOv5 在两种不同的 License 下可用: ##
联系我们
-对于 YOLOv5 的错误报告和功能请求,请访问 [GitHub Issues](https://github.com/ultralytics/yolov5/issues),并加入我们的 [Discord](https://discord.gg/7aegy5d8) 社区进行问题和讨论! +对于 YOLOv5 的错误报告和功能请求,请访问 [GitHub Issues](https://github.com/ultralytics/yolov5/issues),并加入我们的 [Discord](https://discord.gg/bNc8wwx7Qy) 社区进行问题和讨论!
@@ -483,7 +483,7 @@ YOLOv5 在两种不同的 License 下可用: - +
diff --git a/tutorial.ipynb b/tutorial.ipynb index 513bfdefcdeb..baf8e5a9e325 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -31,7 +31,7 @@ " \"Open\n", "
\n", "\n", - "This YOLOv5 🚀 notebook by Ultralytics presents simple train, validate and predict examples to help start your AI adventure.
We hope that the resources in this notebook will help you get the most out of YOLOv5. Please browse the YOLOv5 Docs for details, raise an issue on GitHub for support, and join our Discord community for questions and discussions!\n", + "This YOLOv5 🚀 notebook by Ultralytics presents simple train, validate and predict examples to help start your AI adventure.
We hope that the resources in this notebook will help you get the most out of YOLOv5. Please browse the YOLOv5 Docs for details, raise an issue on GitHub for support, and join our Discord community for questions and discussions!\n", "\n", "" ] From d19439afeba525a351705d03c3ea1a6f0465ef8b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 3 Jul 2023 01:48:43 +0200 Subject: [PATCH 1766/1976] Fix PIL.ANTIALIAS deprecation (#11801) --- models/common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/common.py b/models/common.py index 16537703e730..a0ee085d05fc 100644 --- a/models/common.py +++ b/models/common.py @@ -541,7 +541,7 @@ def forward(self, im, augment=False, visualize=False): elif self.coreml: # CoreML im = im.cpu().numpy() im = Image.fromarray((im[0] * 255).astype('uint8')) - # im = im.resize((192, 320), Image.ANTIALIAS) + # im = im.resize((192, 320), Image.BILINEAR) y = self.model.predict({'image': im}) # coordinates are xywh normalized if 'confidence' in y: box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]]) # xyxy pixels From a453a455590becf0cb6db1892705b4bd4c4e5a4a Mon Sep 17 00:00:00 2001 From: tino926 Date: Mon, 3 Jul 2023 19:39:38 +0800 Subject: [PATCH 1767/1976] remove objects with iscrowd=True in Objects365 (#11788) Signed-off-by: tino926 --- data/Objects365.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/Objects365.yaml b/data/Objects365.yaml index 990f5a56b39e..d4045e2f852c 100644 --- a/data/Objects365.yaml +++ b/data/Objects365.yaml @@ -428,7 +428,7 @@ download: | path = Path(im["file_name"]) # image filename try: with open(labels / path.with_suffix('.txt').name, 'a') as file: - annIds = coco.getAnnIds(imgIds=im["id"], catIds=catIds, iscrowd=None) + annIds = coco.getAnnIds(imgIds=im["id"], catIds=catIds, iscrowd=False) for a in coco.loadAnns(annIds): x, y, w, h = a['bbox'] # bounding box in xywh (xy top-left corner) xyxy = np.array([x, y, x + w, y + h])[None] # pixels(1,4) From b1a180842deafd361aad2ae8db8d88cbbbbe211f Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 4 Jul 2023 10:10:11 +0200 Subject: [PATCH 1768/1976] [pre-commit.ci] pre-commit suggestions (#11805) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/asottile/pyupgrade: v3.4.0 → v3.8.0](https://github.com/asottile/pyupgrade/compare/v3.4.0...v3.8.0) - [github.com/google/yapf: v0.33.0 → v0.40.0](https://github.com/google/yapf/compare/v0.33.0...v0.40.0) - [github.com/codespell-project/codespell: v2.2.4 → v2.2.5](https://github.com/codespell-project/codespell/compare/v2.2.4...v2.2.5) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c6fd99767667..3c5081e709dc 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -22,7 +22,7 @@ repos: - id: detect-private-key - repo: https://github.com/asottile/pyupgrade - rev: v3.4.0 + rev: v3.8.0 hooks: - id: pyupgrade name: Upgrade code @@ -35,7 +35,7 @@ repos: name: Sort imports - repo: https://github.com/google/yapf - rev: v0.33.0 + rev: v0.40.0 hooks: - id: yapf name: YAPF formatting @@ -57,7 +57,7 @@ repos: name: PEP8 - repo: https://github.com/codespell-project/codespell - rev: v2.2.4 + rev: v2.2.5 hooks: - id: codespell args: From 0b3f751a6f1b3ae425bd86912d91bd58ed6af49a Mon 
Sep 17 00:00:00 2001 From: Sergiu Waxmann <47978446+sergiuwaxmann@users.noreply.github.com> Date: Tue, 4 Jul 2023 22:33:59 +0200 Subject: [PATCH 1769/1976] Update Discord link (#11804) docs: update discord link --- .github/ISSUE_TEMPLATE/config.yml | 2 +- README.md | 8 ++++---- README.zh-CN.md | 8 ++++---- tutorial.ipynb | 2 +- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index 947377c20a4f..264ab2687f16 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -7,5 +7,5 @@ contact_links: url: https://community.ultralytics.com/ about: Ask on Ultralytics Community Forum - name: 🎧 Discord - url: https://discord.gg/bNc8wwx7Qy + url: https://discord.gg/2wNGbc6g9X about: Ask on Ultralytics Discord diff --git a/README.md b/README.md index bcd7344cafe3..6ffc4bc14711 100644 --- a/README.md +++ b/README.md @@ -20,7 +20,7 @@ YOLOv5 🚀 is the world's most loved vision AI, representing Ultralytics open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development. -We hope that the resources here will help you get the most out of YOLOv5. Please browse the YOLOv5 Docs for details, raise an issue on GitHub for support, and join our Discord community for questions and discussions! +We hope that the resources here will help you get the most out of YOLOv5. Please browse the YOLOv5 Docs for details, raise an issue on GitHub for support, and join our Discord community for questions and discussions! To request an Enterprise License please complete the form at [Ultralytics Licensing](https://ultralytics.com/license). @@ -43,7 +43,7 @@ To request an Enterprise License please complete the form at [Ultralytics Licens - + @@ -468,7 +468,7 @@ YOLOv5 is available under two different licenses: ##
Contact
-For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues), and join our [Discord](https://discord.gg/bNc8wwx7Qy) community for questions and discussions! +For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues), and join our [Discord](https://discord.gg/2wNGbc6g9X) community for questions and discussions!
@@ -490,7 +490,7 @@ For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https:/ - +
diff --git a/README.zh-CN.md b/README.zh-CN.md index 37f29635f86c..cf2620667326 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -19,7 +19,7 @@ YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表 Ultralytics 对未来视觉 AI 方法的开源研究,结合在数千小时的研究和开发中积累的经验教训和最佳实践。 -我们希望这里的资源能帮助您充分利用 YOLOv5。请浏览 YOLOv5 文档 了解详细信息,在 GitHub 上提交问题以获得支持,并加入我们的 Discord 社区进行问题和讨论! +我们希望这里的资源能帮助您充分利用 YOLOv5。请浏览 YOLOv5 文档 了解详细信息,在 GitHub 上提交问题以获得支持,并加入我们的 Discord 社区进行问题和讨论! 如需申请企业许可,请在 [Ultralytics Licensing](https://ultralytics.com/license) 处填写表格 @@ -42,7 +42,7 @@ YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表 - + @@ -461,7 +461,7 @@ YOLOv5 在两种不同的 License 下可用: ##
联系我们
-对于 YOLOv5 的错误报告和功能请求,请访问 [GitHub Issues](https://github.com/ultralytics/yolov5/issues),并加入我们的 [Discord](https://discord.gg/bNc8wwx7Qy) 社区进行问题和讨论! +对于 YOLOv5 的错误报告和功能请求,请访问 [GitHub Issues](https://github.com/ultralytics/yolov5/issues),并加入我们的 [Discord](https://discord.gg/2wNGbc6g9X) 社区进行问题和讨论!
@@ -483,7 +483,7 @@ YOLOv5 在两种不同的 License 下可用: - +
diff --git a/tutorial.ipynb b/tutorial.ipynb index baf8e5a9e325..56c6ed9c3081 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -31,7 +31,7 @@ " \"Open\n", "
\n", "\n", - "This YOLOv5 🚀 notebook by Ultralytics presents simple train, validate and predict examples to help start your AI adventure.
We hope that the resources in this notebook will help you get the most out of YOLOv5. Please browse the YOLOv5 Docs for details, raise an issue on GitHub for support, and join our Discord community for questions and discussions!\n", + "This YOLOv5 🚀 notebook by Ultralytics presents simple train, validate and predict examples to help start your AI adventure.
We hope that the resources in this notebook will help you get the most out of YOLOv5. Please browse the YOLOv5 Docs for details, raise an issue on GitHub for support, and join our Discord community for questions and discussions!\n", "\n", "" ] From 459dd49c02afb6d252a1e20b2e56fd1c2dd1acd9 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 5 Jul 2023 00:42:57 +0200 Subject: [PATCH 1770/1976] Update setup.cfg (#11814) * Update setup.cfg Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update .pre-commit-config.yaml Signed-off-by: Glenn Jocher * Update export.py Signed-off-by: Glenn Jocher * Update downloads.py Signed-off-by: Glenn Jocher * Update yolo.py Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 16 ++++++++++------ export.py | 8 ++++---- models/tf.py | 4 ++-- models/yolo.py | 8 ++++---- segment/train.py | 2 +- setup.cfg | 14 ++++++++------ train.py | 2 +- utils/augmentations.py | 2 +- utils/callbacks.py | 2 +- utils/downloads.py | 3 +-- utils/general.py | 6 +++--- utils/loggers/comet/__init__.py | 12 ++++++------ utils/segment/metrics.py | 4 ++-- utils/torch_utils.py | 4 ++-- 14 files changed, 46 insertions(+), 41 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 3c5081e709dc..192a0ff3cba2 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: end-of-file-fixer - id: trailing-whitespace - id: check-case-conflict - - id: check-yaml + # - id: check-yaml - id: check-docstring-first - id: double-quote-string-fixer - id: detect-private-key @@ -26,7 +26,6 @@ repos: hooks: - id: pyupgrade name: Upgrade code - args: [--py37-plus] - repo: https://github.com/PyCQA/isort rev: 5.12.0 @@ -63,7 +62,12 @@ repos: args: - --ignore-words-list=crate,nd,strack,dota - #- repo: https://github.com/asottile/yesqa - # rev: v1.4.0 - # hooks: - # - id: yesqa +# - repo: https://github.com/asottile/yesqa +# rev: v1.4.0 +# hooks: +# - id: yesqa + +# - repo: https://github.com/asottile/dead +# rev: v1.5.0 +# hooks: +# - id: dead diff --git a/export.py b/export.py index 9399f0bbd221..5755774d2f54 100644 --- a/export.py +++ b/export.py @@ -110,7 +110,7 @@ def export_formats(): ['TensorFlow Lite', 'tflite', '.tflite', True, False], ['TensorFlow Edge TPU', 'edgetpu', '_edgetpu.tflite', False, False], ['TensorFlow.js', 'tfjs', '_web_model', False, False], - ['PaddlePaddle', 'paddle', '_paddle_model', True, True],] + ['PaddlePaddle', 'paddle', '_paddle_model', True, True], ] return pd.DataFrame(x, columns=['Format', 'Argument', 'Suffix', 'CPU', 'GPU']) @@ -221,7 +221,7 @@ def export_openvino(file, metadata, half, int8, data, prefix=colorstr('OpenVINO: import numpy as np from openvino.runtime import Core - from utils.dataloaders import create_dataloader, letterbox + from utils.dataloaders import create_dataloader core = Core() onnx_model = core.read_model(f_onnx) # export @@ -501,7 +501,7 @@ def export_edgetpu(file, prefix=colorstr('Edge TPU:')): '10', '--out_dir', str(file.parent), - f_tfl,], check=True) + f_tfl, ], check=True) return f, None @@ -522,7 +522,7 @@ def export_tfjs(file, int8, prefix=colorstr('TensorFlow.js:')): '--quantize_uint8' if int8 else '', '--output_node_names=Identity,Identity_1,Identity_2,Identity_3', str(f_pb), - str(f),] + str(f), ] subprocess.run([arg for arg in args if arg], check=True) json = 
Path(f_json).read_text() diff --git a/models/tf.py b/models/tf.py index bc0a465d7edd..62ba3ebf0782 100644 --- a/models/tf.py +++ b/models/tf.py @@ -310,7 +310,7 @@ def call(self, inputs): y = tf.concat([xy, wh, tf.sigmoid(y[..., 4:5 + self.nc]), y[..., 5 + self.nc:]], -1) z.append(tf.reshape(y, [-1, self.na * ny * nx, self.no])) - return tf.transpose(x, [0, 2, 1, 3]) if self.training else (tf.concat(z, 1),) + return tf.transpose(x, [0, 2, 1, 3]) if self.training else (tf.concat(z, 1), ) @staticmethod def _make_grid(nx=20, ny=20): @@ -486,7 +486,7 @@ def predict(self, iou_thres, conf_thres, clip_boxes=False) - return (nms,) + return (nms, ) return x # output [1,6300,85] = [xywh, conf, class0, class1, ...] # x = x[0] # [x(1,6300,85), ...] to x(6300,85) # xywh = x[..., :4] # x(6300,4) boxes diff --git a/models/yolo.py b/models/yolo.py index 18d2542bfb48..4f4d567bec73 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -21,8 +21,8 @@ if platform.system() != 'Windows': ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative -from models.common import * -from models.experimental import * +from models.common import * # noqa +from models.experimental import * # noqa from utils.autoanchor import check_anchor_order from utils.general import LOGGER, check_version, check_yaml, make_divisible, print_args from utils.plots import feature_visualization @@ -76,7 +76,7 @@ def forward(self, x): y = torch.cat((xy, wh, conf), 4) z.append(y.view(bs, self.na * nx * ny, self.no)) - return x if self.training else (torch.cat(z, 1),) if self.export else (torch.cat(z, 1), x) + return x if self.training else (torch.cat(z, 1), ) if self.export else (torch.cat(z, 1), x) def _make_grid(self, nx=20, ny=20, i=0, torch_1_10=check_version(torch.__version__, '1.10.0')): d = self.anchors[i].device @@ -126,7 +126,7 @@ def _forward_once(self, x, profile=False, visualize=False): def _profile_one_layer(self, m, x, dt): c = m == self.model[-1] # is final layer, copy input as inplace fix - o = thop.profile(m, inputs=(x.copy() if c else x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPs + o = thop.profile(m, inputs=(x.copy() if c else x, ), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPs t = time_sync() for _ in range(10): m(x.copy() if c else x) diff --git a/segment/train.py b/segment/train.py index 5f1fa4a1e453..2ae09c1cbf66 100644 --- a/segment/train.py +++ b/segment/train.py @@ -605,7 +605,7 @@ def main(opt, callbacks=Callbacks()): 'gsutil', 'cp', f'gs://{opt.bucket}/evolve.csv', - str(evolve_csv),]) + str(evolve_csv), ]) for _ in range(opt.evolve): # generations to evolve if evolve_csv.exists(): # if evolve.csv exists: select best hyps and mutate diff --git a/setup.cfg b/setup.cfg index d7c4cb3e1a4d..2cde6a494836 100644 --- a/setup.cfg +++ b/setup.cfg @@ -3,7 +3,7 @@ # Local usage: pip install pre-commit, pre-commit run --all-files [metadata] -license_file = LICENSE +license_files = LICENSE description_file = README.md [tool:pytest] @@ -25,17 +25,19 @@ verbose = 2 # https://pep8.readthedocs.io/en/latest/intro.html#error-codes format = pylint # see: https://www.flake8rules.com/ -ignore = E731,F405,E402,F401,W504,E127,E231,E501,F403 +ignore = E731,F405,E402,W504,E501 # E731: Do not assign a lambda expression, use a def # F405: name may be undefined, or defined from star imports: module # E402: module level import not at top of file - # F401: module imported but unused # W504: line break after binary operator - # E127: continuation line over-indented for visual indent - # E231: missing whitespace after ‘,’, ‘;’, or ‘:’ # 
E501: line too long + # removed: + # F401: module imported but unused + # E231: missing whitespace after ‘,’, ‘;’, or ‘:’ + # E127: continuation line over-indented for visual indent # F403: ‘from module import *’ used; unable to detect undefined names + [isort] # https://pycqa.github.io/isort/docs/configuration/options.html line_length = 120 @@ -48,7 +50,7 @@ spaces_before_comment = 2 COLUMN_LIMIT = 120 COALESCE_BRACKETS = True SPACES_AROUND_POWER_OPERATOR = True -SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET = False +SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET = True SPLIT_BEFORE_CLOSING_BRACKET = False SPLIT_BEFORE_FIRST_ARGUMENT = False # EACH_DICT_ENTRY_ON_SEPARATE_LINE = False diff --git a/train.py b/train.py index b5044deb9b5c..004c8eeda121 100644 --- a/train.py +++ b/train.py @@ -584,7 +584,7 @@ def main(opt, callbacks=Callbacks()): 'gsutil', 'cp', f'gs://{opt.bucket}/evolve.csv', - str(evolve_csv),]) + str(evolve_csv), ]) for _ in range(opt.evolve): # generations to evolve if evolve_csv.exists(): # if evolve.csv exists: select best hyps and mutate diff --git a/utils/augmentations.py b/utils/augmentations.py index 52e2e346e36e..1e609303e209 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -330,7 +330,7 @@ def classify_albumentations( if vflip > 0: T += [A.VerticalFlip(p=vflip)] if jitter > 0: - color_jitter = (float(jitter),) * 3 # repeat value for brightness, contrast, satuaration, 0 hue + color_jitter = (float(jitter), ) * 3 # repeat value for brightness, contrast, satuaration, 0 hue T += [A.ColorJitter(*color_jitter, 0)] else: # Use fixed crop for eval set (reproducibility) T = [A.SmallestMaxSize(max_size=size), A.CenterCrop(height=size, width=size)] diff --git a/utils/callbacks.py b/utils/callbacks.py index ccebba02bcaa..c90fa824cdb4 100644 --- a/utils/callbacks.py +++ b/utils/callbacks.py @@ -32,7 +32,7 @@ def __init__(self): 'on_model_save': [], 'on_train_end': [], 'on_params_update': [], - 'teardown': [],} + 'teardown': [], } self.stop_training = False # set True to interrupt training def register_action(self, hook, name='', callback=None): diff --git a/utils/downloads.py b/utils/downloads.py index 629537d5ade6..9298259d4ab1 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -4,7 +4,6 @@ """ import logging -import os import subprocess import urllib from pathlib import Path @@ -53,7 +52,7 @@ def curl_download(url, filename, *, silent: bool = False) -> bool: '--retry', '9', '-C', - '-',]) + '-', ]) return proc.returncode == 0 diff --git a/utils/general.py b/utils/general.py index e95b07486619..049e5f6be414 100644 --- a/utils/general.py +++ b/utils/general.py @@ -139,12 +139,12 @@ def set_logging(name=LOGGING_NAME, verbose=True): name: { 'class': 'logging.StreamHandler', 'formatter': name, - 'level': level,}}, + 'level': level, }}, 'loggers': { name: { 'level': level, 'handlers': [name], - 'propagate': False,}}}) + 'propagate': False, }}}) set_logging(LOGGING_NAME) # run before defining LOGGER @@ -416,7 +416,7 @@ def check_imshow(warn=False): return False -def check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''): +def check_suffix(file='yolov5s.pt', suffix=('.pt', ), msg=''): # Check file(s) for acceptable suffix if file and suffix: if isinstance(suffix, str): diff --git a/utils/loggers/comet/__init__.py b/utils/loggers/comet/__init__.py index aac06b11c8c0..839fcf907e8f 100644 --- a/utils/loggers/comet/__init__.py +++ b/utils/loggers/comet/__init__.py @@ -90,7 +90,7 @@ def __init__(self, opt, hyp, run_id=None, job_type='Training', 
**experiment_kwar 'log_code': False, 'log_env_gpu': True, 'log_env_cpu': True, - 'project_name': COMET_PROJECT_NAME,} + 'project_name': COMET_PROJECT_NAME, } self.default_experiment_kwargs.update(experiment_kwargs) self.experiment = self._get_experiment(self.comet_mode, run_id) self.experiment.set_name(self.opt.name) @@ -153,7 +153,7 @@ def __init__(self, opt, hyp, run_id=None, job_type='Training', **experiment_kwar 'comet_log_per_class_metrics': COMET_LOG_PER_CLASS_METRICS, 'comet_log_batch_metrics': COMET_LOG_BATCH_METRICS, 'comet_log_confusion_matrix': COMET_LOG_CONFUSION_MATRIX, - 'comet_model_name': COMET_MODEL_NAME,}) + 'comet_model_name': COMET_MODEL_NAME, }) # Check if running the Experiment with the Comet Optimizer if hasattr(self.opt, 'comet_optimizer_id'): @@ -170,7 +170,7 @@ def _get_experiment(self, mode, experiment_id=None): **self.default_experiment_kwargs, ) - return comet_ml.OfflineExperiment(**self.default_experiment_kwargs,) + return comet_ml.OfflineExperiment(**self.default_experiment_kwargs, ) else: try: @@ -214,7 +214,7 @@ def log_model(self, path, opt, epoch, fitness_score, best_model=False): 'fitness_score': fitness_score[-1], 'epochs_trained': epoch + 1, 'save_period': opt.save_period, - 'total_epochs': opt.epochs,} + 'total_epochs': opt.epochs, } model_files = glob.glob(f'{path}/*.pt') for model_path in model_files: @@ -270,7 +270,7 @@ def log_predictions(self, image, labelsn, path, shape, predn): 'x': xyxy[0], 'y': xyxy[1], 'x2': xyxy[2], - 'y2': xyxy[3]},}) + 'y2': xyxy[3]}, }) for *xyxy, conf, cls in filtered_detections.tolist(): metadata.append({ 'label': f'{self.class_names[int(cls)]}', @@ -279,7 +279,7 @@ def log_predictions(self, image, labelsn, path, shape, predn): 'x': xyxy[0], 'y': xyxy[1], 'x2': xyxy[2], - 'y2': xyxy[3]},}) + 'y2': xyxy[3]}, }) self.metadata_dict[image_name] = metadata self.logged_images_count += 1 diff --git a/utils/segment/metrics.py b/utils/segment/metrics.py index 6020fa062ba5..787961bee1bf 100644 --- a/utils/segment/metrics.py +++ b/utils/segment/metrics.py @@ -196,7 +196,7 @@ def ap_class_index(self): 'val/cls_loss', 'x/lr0', 'x/lr1', - 'x/lr2',] + 'x/lr2', ] BEST_KEYS = [ 'best/epoch', @@ -207,4 +207,4 @@ def ap_class_index(self): 'best/precision(M)', 'best/recall(M)', 'best/mAP_0.5(M)', - 'best/mAP_0.5:0.95(M)',] + 'best/mAP_0.5:0.95(M)', ] diff --git a/utils/torch_utils.py b/utils/torch_utils.py index d9e060ab99df..13a356f3238c 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -170,7 +170,7 @@ def profile(input, ops, n=10, device=None): m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m tf, tb, t = 0, 0, [0, 0, 0] # dt forward, backward try: - flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # GFLOPs + flops = thop.profile(m, inputs=(x, ), verbose=False)[0] / 1E9 * 2 # GFLOPs except Exception: flops = 0 @@ -284,7 +284,7 @@ def model_info(model, verbose=False, imgsz=640): p = next(model.parameters()) stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32 # max stride im = torch.empty((1, p.shape[1], stride, stride), device=p.device) # input image in BCHW format - flops = thop.profile(deepcopy(model), inputs=(im,), verbose=False)[0] / 1E9 * 2 # stride GFLOPs + flops = thop.profile(deepcopy(model), inputs=(im, ), verbose=False)[0] / 1E9 * 2 # stride GFLOPs imgsz = imgsz if isinstance(imgsz, list) else [imgsz, imgsz] # expand if int/float fs = f', {flops * imgsz[0] / stride * imgsz[1] / stride:.1f} GFLOPs' # 640x640 GFLOPs 
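        # thop reports multiply-accumulates, hence the * 2 above to express GFLOPs; fs then rescales from the stride-sized probe image to the requested imgsz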
except Exception: From 485da42273839d20ea6bdaf142fd02c1027aba61 Mon Sep 17 00:00:00 2001 From: Dhruv Nair Date: Thu, 6 Jul 2023 21:36:28 +0530 Subject: [PATCH 1771/1976] Comet updates (#11818) * update colab link * fix path check for datasets * apply yapf * run precommit hooks * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- utils/loggers/comet/README.md | 2 +- utils/loggers/comet/__init__.py | 28 +++++++++++++++++++--------- 2 files changed, 20 insertions(+), 10 deletions(-) diff --git a/utils/loggers/comet/README.md b/utils/loggers/comet/README.md index aee8d16a336c..3ad52b01b4e9 100644 --- a/utils/loggers/comet/README.md +++ b/utils/loggers/comet/README.md @@ -59,7 +59,7 @@ Check out an example of a [completed run here](https://www.comet.com/examples/co Or better yet, try it out yourself in this Colab Notebook -[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing) +[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/comet-ml/comet-examples/blob/master/integrations/model-training/yolov5/notebooks/Comet_and_YOLOv5.ipynb) # Log automatically diff --git a/utils/loggers/comet/__init__.py b/utils/loggers/comet/__init__.py index 839fcf907e8f..ad7fa5521c6d 100644 --- a/utils/loggers/comet/__init__.py +++ b/utils/loggers/comet/__init__.py @@ -42,7 +42,7 @@ COMET_UPLOAD_DATASET = os.getenv('COMET_UPLOAD_DATASET', 'false').lower() == 'true' # Evaluation Settings -COMET_LOG_CONFUSION_MATRIX = os.getenv('COMET_LOG_CONFUSION_MATRIX', 'true').lower() == 'true' +COMET_LOG_CONFUSION_MATRIX = (os.getenv('COMET_LOG_CONFUSION_MATRIX', 'true').lower() == 'true') COMET_LOG_PREDICTIONS = os.getenv('COMET_LOG_PREDICTIONS', 'true').lower() == 'true' COMET_MAX_IMAGE_UPLOADS = int(os.getenv('COMET_MAX_IMAGE_UPLOADS', 100)) @@ -51,10 +51,10 @@ IOU_THRES = float(os.getenv('IOU_THRES', 0.6)) # Batch Logging Settings -COMET_LOG_BATCH_METRICS = os.getenv('COMET_LOG_BATCH_METRICS', 'false').lower() == 'true' +COMET_LOG_BATCH_METRICS = (os.getenv('COMET_LOG_BATCH_METRICS', 'false').lower() == 'true') COMET_BATCH_LOGGING_INTERVAL = os.getenv('COMET_BATCH_LOGGING_INTERVAL', 1) COMET_PREDICTION_LOGGING_INTERVAL = os.getenv('COMET_PREDICTION_LOGGING_INTERVAL', 1) -COMET_LOG_PER_CLASS_METRICS = os.getenv('COMET_LOG_PER_CLASS_METRICS', 'false').lower() == 'true' +COMET_LOG_PER_CLASS_METRICS = (os.getenv('COMET_LOG_PER_CLASS_METRICS', 'false').lower() == 'true') RANK = int(os.getenv('RANK', -1)) @@ -137,7 +137,7 @@ def __init__(self, opt, hyp, run_id=None, job_type='Training', **experiment_kwar self.comet_log_predictions = COMET_LOG_PREDICTIONS if self.opt.bbox_interval == -1: - self.comet_log_prediction_interval = 1 if self.opt.epochs < 10 else self.opt.epochs // 10 + self.comet_log_prediction_interval = (1 if self.opt.epochs < 10 else self.opt.epochs // 10) else: self.comet_log_prediction_interval = self.opt.bbox_interval @@ -232,7 +232,8 @@ def check_dataset(self, data_file): with open(data_file) as f: data_config = yaml.safe_load(f) - if data_config['path'].startswith(COMET_PREFIX): + path = data_config.get('path') + if path and path.startswith(COMET_PREFIX): path = data_config['path'].replace(COMET_PREFIX, '') data_dict = self.download_dataset_artifact(path) @@ -313,8 +314,16 @@ def 
add_assets_to_artifact(self, artifact, path, asset_path, split): image_logical_path, label_logical_path = map(lambda x: os.path.relpath(x, path), [image_file, label_file]) try: - artifact.add(image_file, logical_path=image_logical_path, metadata={'split': split}) - artifact.add(label_file, logical_path=label_logical_path, metadata={'split': split}) + artifact.add( + image_file, + logical_path=image_logical_path, + metadata={'split': split}, + ) + artifact.add( + label_file, + logical_path=label_logical_path, + metadata={'split': split}, + ) except ValueError as e: logger.error('COMET ERROR: Error adding file to Artifact. Skipping file.') logger.error(f'COMET ERROR: {e}') @@ -476,8 +485,9 @@ def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix) 'f1': f1[i], 'true_positives': tp[i], 'false_positives': fp[i], - 'support': nt[c]}, - prefix=class_name) + 'support': nt[c], }, + prefix=class_name, + ) if self.comet_log_confusion_matrix: epoch = self.experiment.curr_epoch From 52d96222bbfa1b1b5a6d4f1086ae9d07b914879f Mon Sep 17 00:00:00 2001 From: malashinroman <30839152+malashinroman@users.noreply.github.com> Date: Sun, 23 Jul 2023 04:51:12 +0300 Subject: [PATCH 1772/1976] add non-fixed path option to coco's instances_val2017.json (#11860) add non-fixed path option for coco annotations --- val.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/val.py b/val.py index 71268651d29b..8da3ef7667aa 100644 --- a/val.py +++ b/val.py @@ -304,6 +304,8 @@ def run( if save_json and len(jdict): w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights anno_json = str(Path('../datasets/coco/annotations/instances_val2017.json')) # annotations + if not os.path.exists(anno_json): + anno_json = os.path.join(data['path'], 'annotations', 'instances_val2017.json') pred_json = str(save_dir / f'{w}_predictions.json') # predictions LOGGER.info(f'\nEvaluating pycocotools mAP... saving {pred_json}...') with open(pred_json, 'w') as f: From 7a70d09732f4098b4fcdec267f66ad39eb86c940 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 23 Jul 2023 03:58:51 +0200 Subject: [PATCH 1773/1976] Update README License and Contact sections (#11891) * Update README.md Signed-off-by: Glenn Jocher * Update README.zh-CN.md Signed-off-by: Glenn Jocher * Update README.zh-CN.md Signed-off-by: Glenn Jocher * Update README.zh-CN.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher --- README.md | 6 +++--- README.zh-CN.md | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index 6ffc4bc14711..002d5afcb432 100644 --- a/README.md +++ b/README.md @@ -461,10 +461,10 @@ ##
License
-YOLOv5 is available under two different licenses: +Ultralytics offers two licensing options to accommodate diverse use cases: -- **AGPL-3.0 License**: See [LICENSE](https://github.com/ultralytics/yolov5/blob/master/LICENSE) file for details. -- **Enterprise License**: Provides greater flexibility for commercial product development without the open-source requirements of AGPL-3.0. Typical use cases are embedding Ultralytics software and AI models in commercial products and applications. Request an Enterprise License at [Ultralytics Licensing](https://ultralytics.com/license). +- **AGPL-3.0 License**: This [OSI-approved](https://opensource.org/licenses/) open-source license is ideal for students and enthusiasts, promoting open collaboration and knowledge sharing. See the [LICENSE](https://github.com/ultralytics/yolov5/blob/master/LICENSE) file for more details. +- **Enterprise License**: Designed for commercial use, this license permits seamless integration of Ultralytics software and AI models into commercial goods and services, bypassing the open-source requirements of AGPL-3.0. If your scenario involves embedding our solutions into a commercial offering, reach out through [Ultralytics Licensing](https://ultralytics.com/license). ##
Contact
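Stepping back to the Comet changes in PATCH 1771 above: most of the `utils/loggers/comet/__init__.py` edits are yapf line-wrapping, but the `check_dataset()` change is behavioral — `data_config['path']` previously raised a `KeyError` when the data YAML had no `path` key, and the patch guards the lookup with `.get()`. A minimal sketch of that guard, where the dict is a hypothetical stand-in for the parsed YAML:

```python
# None-safe 'path' guard, as in PATCH 1771's check_dataset(); the dict
# below is a hypothetical stand-in for yaml.safe_load(f).
COMET_PREFIX = 'comet://'
data_config = {'path': 'comet://workspace/dataset-artifact:1.0.0'}

path = data_config.get('path')  # None instead of KeyError when 'path' is absent
if path and path.startswith(COMET_PREFIX):  # the unguarded startswith() is what used to fail
    artifact = path.replace(COMET_PREFIX, '')
    print(f'Comet artifact to download: {artifact}')
```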
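The val.py change in PATCH 1772 above reads the same way: try the hard-coded COCO location first, then fall back to an annotations file resolved against the dataset's own `path` key. A sketch, with `data` as a stand-in for the dict val.py loads from its data YAML:

```python
# PATCH 1772's two-step annotations lookup: the fixed COCO path first,
# then a fallback relative to the dataset's own 'path' key.
import os
from pathlib import Path

data = {'path': '/datasets/coco'}  # hypothetical stand-in for val.py's loaded data dict

anno_json = str(Path('../datasets/coco/annotations/instances_val2017.json'))
if not os.path.exists(anno_json):
    anno_json = os.path.join(data['path'], 'annotations', 'instances_val2017.json')
print(anno_json)
```

This keeps the default behavior for the standard directory layout while letting custom dataset roots supply their own `instances_val2017.json`.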
diff --git a/README.zh-CN.md b/README.zh-CN.md index cf2620667326..d9763992e980 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -452,16 +452,16 @@ python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --inclu -##
License
+##
许可证
-YOLOv5 在两种不同的 License 下可用: +Ultralytics 提供两种许可证选项以适应各种使用场景: -- **AGPL-3.0 License**: 查看 [License](https://github.com/ultralytics/yolov5/blob/master/LICENSE) 文件的详细信息。 -- **企业License**:在没有 AGPL-3.0 开源要求的情况下为商业产品开发提供更大的灵活性。典型用例是将 Ultralytics 软件和 AI 模型嵌入到商业产品和应用程序中。在以下位置申请企业许可证 [Ultralytics 许可](https://ultralytics.com/license) 。 +- **AGPL-3.0 许可证**:这个[OSI 批准](https://opensource.org/licenses/)的开源许可证非常适合学生和爱好者,可以推动开放的协作和知识分享。请查看[LICENSE](https://github.com/ultralytics/yolov5/blob/master/LICENSE) 文件以了解更多细节。 +- **企业许可证**:专为商业用途设计,该许可证允许将 Ultralytics 的软件和 AI 模型无缝集成到商业产品和服务中,从而绕过 AGPL-3.0 的开源要求。如果您的场景涉及将我们的解决方案嵌入到商业产品中,请通过 [Ultralytics Licensing](https://ultralytics.com/license)与我们联系。 -##
联系我们
+##
联系方式
-对于 YOLOv5 的错误报告和功能请求,请访问 [GitHub Issues](https://github.com/ultralytics/yolov5/issues),并加入我们的 [Discord](https://discord.gg/2wNGbc6g9X) 社区进行问题和讨论! +对于 Ultralytics 的错误报告和功能请求,请访问 [GitHub Issues](https://github.com/ultralytics/yolov5/issues),并加入我们的 [Discord](https://discord.gg/2wNGbc6g9X) 社区进行问题和讨论!
From acdf73b86066f9da0716eb5963627f31bd1d185c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 24 Jul 2023 16:34:42 +0200 Subject: [PATCH 1774/1976] Update Discord invite to https://ultralytics.com/discord (#11894) --- .github/ISSUE_TEMPLATE/config.yml | 2 +- README.md | 8 ++++---- README.zh-CN.md | 8 ++++---- tutorial.ipynb | 2 +- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index 264ab2687f16..37080927c0b9 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -7,5 +7,5 @@ contact_links: url: https://community.ultralytics.com/ about: Ask on Ultralytics Community Forum - name: 🎧 Discord - url: https://discord.gg/2wNGbc6g9X + url: https://ultralytics.com/discord about: Ask on Ultralytics Discord diff --git a/README.md b/README.md index 002d5afcb432..a27fe2474fd8 100644 --- a/README.md +++ b/README.md @@ -20,7 +20,7 @@ YOLOv5 🚀 is the world's most loved vision AI, representing Ultralytics open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development. -We hope that the resources here will help you get the most out of YOLOv5. Please browse the YOLOv5 Docs for details, raise an issue on GitHub for support, and join our Discord community for questions and discussions! +We hope that the resources here will help you get the most out of YOLOv5. Please browse the YOLOv5 Docs for details, raise an issue on GitHub for support, and join our Discord community for questions and discussions! To request an Enterprise License please complete the form at [Ultralytics Licensing](https://ultralytics.com/license). @@ -43,7 +43,7 @@ To request an Enterprise License please complete the form at [Ultralytics Licens - +
@@ -468,7 +468,7 @@ Ultralytics offers two licensing options to accommodate diverse use cases: ##
Contact
-For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues), and join our [Discord](https://discord.gg/2wNGbc6g9X) community for questions and discussions! +For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues), and join our [Discord](https://ultralytics.com/discord) community for questions and discussions!
@@ -490,7 +490,7 @@ For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https:/ - +
diff --git a/README.zh-CN.md b/README.zh-CN.md index d9763992e980..fe382af359a0 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -19,7 +19,7 @@ YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表 Ultralytics 对未来视觉 AI 方法的开源研究,结合在数千小时的研究和开发中积累的经验教训和最佳实践。 -我们希望这里的资源能帮助您充分利用 YOLOv5。请浏览 YOLOv5 文档 了解详细信息,在 GitHub 上提交问题以获得支持,并加入我们的 Discord 社区进行问题和讨论! +我们希望这里的资源能帮助您充分利用 YOLOv5。请浏览 YOLOv5 文档 了解详细信息,在 GitHub 上提交问题以获得支持,并加入我们的 Discord 社区进行问题和讨论! 如需申请企业许可,请在 [Ultralytics Licensing](https://ultralytics.com/license) 处填写表格 @@ -42,7 +42,7 @@ YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表 - + @@ -461,7 +461,7 @@ Ultralytics 提供两种许可证选项以适应各种使用场景: ##
联系方式
-对于 Ultralytics 的错误报告和功能请求,请访问 [GitHub Issues](https://github.com/ultralytics/yolov5/issues),并加入我们的 [Discord](https://discord.gg/2wNGbc6g9X) 社区进行问题和讨论! +对于 Ultralytics 的错误报告和功能请求,请访问 [GitHub Issues](https://github.com/ultralytics/yolov5/issues),并加入我们的 [Discord](https://ultralytics.com/discord) 社区进行问题和讨论!
@@ -483,7 +483,7 @@ Ultralytics 提供两种许可证选项以适应各种使用场景: - +
diff --git a/tutorial.ipynb b/tutorial.ipynb index 56c6ed9c3081..db29c800e908 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -31,7 +31,7 @@ " \"Open\n", "
\n", "\n", - "This YOLOv5 🚀 notebook by Ultralytics presents simple train, validate and predict examples to help start your AI adventure.
We hope that the resources in this notebook will help you get the most out of YOLOv5. Please browse the YOLOv5 Docs for details, raise an issue on GitHub for support, and join our Discord community for questions and discussions!\n", + "This YOLOv5 🚀 notebook by Ultralytics presents simple train, validate and predict examples to help start your AI adventure.
We hope that the resources in this notebook will help you get the most out of YOLOv5. Please browse the YOLOv5 Docs for details, raise an issue on GitHub for support, and join our Discord community for questions and discussions!\n", "\n", "" ] From d43d87666c1a5cc529fe132bb09a79ebd7af4a96 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 30 Jul 2023 13:38:55 +0200 Subject: [PATCH 1775/1976] Update requirements.txt to `ultralytics 8.0.145` (#11920) --- requirements.txt | 2 +- utils/__init__.py | 2 +- utils/general.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/requirements.txt b/requirements.txt index 069cafe7e14f..a49036b39d16 100644 --- a/requirements.txt +++ b/requirements.txt @@ -15,7 +15,7 @@ thop>=0.1.1 # FLOPs computation torch>=1.7.0 # see https://pytorch.org/get-started/locally (recommended) torchvision>=0.8.1 tqdm>=4.64.0 -ultralytics>=8.0.111 +ultralytics>=8.0.145 # protobuf<=3.20.1 # https://github.com/ultralytics/yolov5/issues/8012 # Logging --------------------------------------------------------------------- diff --git a/utils/__init__.py b/utils/__init__.py index 2a7652b52c1b..4c7379c87466 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -54,7 +54,7 @@ def notebook_init(verbose=True): import os import shutil - from ultralytics.yolo.utils.checks import check_requirements + from ultralytics.utils.checks import check_requirements from utils.general import check_font, is_colab from utils.torch_utils import select_device # imports diff --git a/utils/general.py b/utils/general.py index 049e5f6be414..8a265c741b91 100644 --- a/utils/general.py +++ b/utils/general.py @@ -35,7 +35,7 @@ import torch import torchvision import yaml -from ultralytics.yolo.utils.checks import check_requirements +from ultralytics.utils.checks import check_requirements from utils import TryExcept, emojis from utils.downloads import curl_download, gsutil_getsize From 34c21874f863d64c73478c5fc29cf4b2720d06e2 Mon Sep 17 00:00:00 2001 From: Adrian Boguszewski Date: Sun, 30 Jul 2023 14:21:04 +0200 Subject: [PATCH 1776/1976] Bringing OpenVINO updates from YOLOv8 to YOLOv5 (#11912) Bringing OpenVINO updates from yolov8 to volov5 Co-authored-by: Glenn Jocher --- export.py | 4 ++-- models/common.py | 16 ++++++++-------- requirements.txt | 2 +- utils/docker/Dockerfile | 2 +- utils/docker/Dockerfile-cpu | 2 +- 5 files changed, 13 insertions(+), 13 deletions(-) diff --git a/export.py b/export.py index 5755774d2f54..92d42472dfc4 100644 --- a/export.py +++ b/export.py @@ -207,7 +207,7 @@ def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr('ONNX @try_export def export_openvino(file, metadata, half, int8, data, prefix=colorstr('OpenVINO:')): # YOLOv5 OpenVINO export - check_requirements('openvino-dev>=2022.3') # requires openvino-dev: https://pypi.org/project/openvino-dev/ + check_requirements('openvino-dev>=2023.0') # requires openvino-dev: https://pypi.org/project/openvino-dev/ import openvino.runtime as ov # noqa from openvino.tools import mo # noqa @@ -216,7 +216,7 @@ def export_openvino(file, metadata, half, int8, data, prefix=colorstr('OpenVINO: f_onnx = file.with_suffix('.onnx') f_ov = str(Path(f) / file.with_suffix('.xml').name) if int8: - check_requirements('nncf') + check_requirements('nncf>=2.4.0') # requires at least version 2.4.0 to use the post-training quantization import nncf import numpy as np from openvino.runtime import Core diff --git a/models/common.py b/models/common.py index a0ee085d05fc..4ab1341e42cf 100644 --- a/models/common.py +++ 
b/models/common.py @@ -373,18 +373,18 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, stride, names = int(meta['stride']), eval(meta['names']) elif xml: # OpenVINO LOGGER.info(f'Loading {w} for OpenVINO inference...') - check_requirements('openvino') # requires openvino-dev: https://pypi.org/project/openvino-dev/ + check_requirements('openvino>=2023.0') # requires openvino-dev: https://pypi.org/project/openvino-dev/ from openvino.runtime import Core, Layout, get_batch - ie = Core() + core = Core() if not Path(w).is_file(): # if not *.xml w = next(Path(w).glob('*.xml')) # get *.xml file from *_openvino_model dir - network = ie.read_model(model=w, weights=Path(w).with_suffix('.bin')) - if network.get_parameters()[0].get_layout().empty: - network.get_parameters()[0].set_layout(Layout('NCHW')) - batch_dim = get_batch(network) + ov_model = core.read_model(model=w, weights=Path(w).with_suffix('.bin')) + if ov_model.get_parameters()[0].get_layout().empty: + ov_model.get_parameters()[0].set_layout(Layout('NCHW')) + batch_dim = get_batch(ov_model) if batch_dim.is_static: batch_size = batch_dim.get_length() - executable_network = ie.compile_model(network, device_name='CPU') # device_name="MYRIAD" for Intel NCS2 + ov_compiled_model = core.compile_model(ov_model, device_name='AUTO') # AUTO selects best available device stride, names = self._load_metadata(Path(w).with_suffix('.yaml')) # load metadata elif engine: # TensorRT LOGGER.info(f'Loading {w} for TensorRT inference...') @@ -524,7 +524,7 @@ def forward(self, im, augment=False, visualize=False): y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im}) elif self.xml: # OpenVINO im = im.cpu().numpy() # FP32 - y = list(self.executable_network([im]).values()) + y = list(self.ov_compiled_model(im).values()) elif self.engine: # TensorRT if self.dynamic and im.shape != self.bindings['images'].shape: i = self.model.get_binding_index('images') diff --git a/requirements.txt b/requirements.txt index a49036b39d16..0e68f8026dfa 100644 --- a/requirements.txt +++ b/requirements.txt @@ -36,7 +36,7 @@ seaborn>=0.11.0 # scikit-learn<=1.1.2 # CoreML quantization # tensorflow>=2.4.0 # TF exports (-cpu, -aarch64, -macos) # tensorflowjs>=3.9.0 # TF.js export -# openvino-dev # OpenVINO export +# openvino-dev>=2023.0 # OpenVINO export # Deploy ---------------------------------------------------------------------- setuptools>=65.5.1 # Snyk vulnerability fix diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index ff657dea2bf2..4346fc823c16 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -31,7 +31,7 @@ RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app COPY requirements.txt . RUN python3 -m pip install --upgrade pip wheel RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook \ - coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2022.3' + coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2023.0' # tensorflow tensorflowjs \ # Set environment variables diff --git a/utils/docker/Dockerfile-cpu b/utils/docker/Dockerfile-cpu index 613bdffa4768..91adf64d3a0a 100644 --- a/utils/docker/Dockerfile-cpu +++ b/utils/docker/Dockerfile-cpu @@ -19,7 +19,7 @@ RUN apt install --no-install-recommends -y python3-pip git zip curl htop libgl1- COPY requirements.txt . 
RUN python3 -m pip install --upgrade pip wheel RUN pip install --no-cache -r requirements.txt albumentations gsutil notebook \ - coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2022.3' \ + coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2023.0' \ # tensorflow tensorflowjs \ --extra-index-url https://download.pytorch.org/whl/cpu From 3f76b803dac04f17c6dec5b1d5b2b96d27aa7e9f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 1 Aug 2023 14:40:56 +0200 Subject: [PATCH 1777/1976] [Snyk] Security upgrade numpy from 1.21.3 to 1.22.2 (#11928) fix: requirements.txt to reduce vulnerabilities The following vulnerabilities are fixed by pinning transitive dependencies: - https://snyk.io/vuln/SNYK-PYTHON-NUMPY-2321964 - https://snyk.io/vuln/SNYK-PYTHON-NUMPY-2321966 - https://snyk.io/vuln/SNYK-PYTHON-NUMPY-2321970 Co-authored-by: snyk-bot --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 0e68f8026dfa..2c96974fe338 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,7 +4,7 @@ # Base ------------------------------------------------------------------------ gitpython>=3.1.30 matplotlib>=3.3 -numpy>=1.18.5 +numpy>=1.22.2 opencv-python>=4.1.1 Pillow>=7.1.2 psutil # system resources From 05e4c0543bca005bc6a9298c7804dec2d34867c2 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 1 Aug 2023 14:42:08 +0200 Subject: [PATCH 1778/1976] [Snyk] Security upgrade ubuntu from 22.10 to lunar-20230615 (#11927) * fix: utils/docker/Dockerfile-cpu to reduce vulnerabilities The following vulnerabilities are fixed with an upgrade: - https://snyk.io/vuln/SNYK-UBUNTU2210-SYSTEMD-5721329 - https://snyk.io/vuln/SNYK-UBUNTU2210-SYSTEMD-5721343 - https://snyk.io/vuln/SNYK-UBUNTU2210-SYSTEMD-5721343 - https://snyk.io/vuln/SNYK-UBUNTU2210-SYSTEMD-5721350 - https://snyk.io/vuln/SNYK-UBUNTU2210-SYSTEMD-5721350 * Update Dockerfile-cpu Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher Co-authored-by: snyk-bot --- utils/docker/Dockerfile-cpu | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/utils/docker/Dockerfile-cpu b/utils/docker/Dockerfile-cpu index 91adf64d3a0a..17316986c9d5 100644 --- a/utils/docker/Dockerfile-cpu +++ b/utils/docker/Dockerfile-cpu @@ -3,18 +3,20 @@ # Image is CPU-optimized for ONNX, OpenVINO and PyTorch YOLOv5 deployments # Start FROM Ubuntu image https://hub.docker.com/_/ubuntu -FROM ubuntu:22.10 +FROM ubuntu:lunar-20230615 # Downloads to user config dir ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ # Install linux packages -ENV DEBIAN_FRONTEND noninteractive -RUN apt update -RUN TZ=Etc/UTC apt install -y tzdata -RUN apt install --no-install-recommends -y python3-pip git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg +# g++ required to build 'tflite_support' and 'lap' packages, libusb-1.0-0 required for 'tflite_support' package +RUN apt update \ + && apt install --no-install-recommends -y python3-pip git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg g++ libusb-1.0-0 # RUN alias python=python3 +# Remove python3.11/EXTERNALLY-MANAGED or use 'pip install --break-system-packages' avoid 'externally-managed-environment' Ubuntu nightly error +RUN rm -rf /usr/lib/python3.11/EXTERNALLY-MANAGED + # Install pip packages COPY requirements.txt . RUN python3 -m pip install --upgrade pip wheel @@ -30,7 +32,6 @@ WORKDIR /usr/src/app # Copy contents # COPY . 
/usr/src/app (issues as not a .git directory) RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app -ENV DEBIAN_FRONTEND teletype # Usage Examples ------------------------------------------------------------------------------------------------------- From 2270f0d1e761d049d07aba159f637d44750353fc Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 1 Aug 2023 20:56:35 +0200 Subject: [PATCH 1779/1976] Import Annotator class from `ultralytics` package (#11930) * Import Annotator class from `ultralytics` package * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Import Annotator class from `ultralytics` package * Update requirements.txt Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- classify/predict.py | 5 +- detect.py | 3 +- models/common.py | 2 +- requirements.txt | 2 +- segment/predict.py | 3 +- utils/loggers/clearml/clearml_utils.py | 3 +- utils/plots.py | 124 +------------------------ utils/segment/plots.py | 2 +- 8 files changed, 15 insertions(+), 129 deletions(-) diff --git a/classify/predict.py b/classify/predict.py index 9b64810d4d63..1cbbc094a41e 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -43,12 +43,13 @@ sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative +from ultralytics.utils.plotting import Annotator + from models.common import DetectMultiBackend from utils.augmentations import classify_transforms from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, increment_path, print_args, strip_optimizer) -from utils.plots import Annotator from utils.torch_utils import select_device, smart_inference_mode @@ -144,7 +145,7 @@ def run( # Write results text = '\n'.join(f'{prob[j]:.2f} {names[j]}' for j in top5i) if save_img or view_img: # Add bbox to image - annotator.text((32, 32), text, txt_color=(255, 255, 255)) + annotator.text([32, 32], text, txt_color=(255, 255, 255)) if save_txt: # Write to file with open(f'{txt_path}.txt', 'a') as f: f.write(text + '\n') diff --git a/detect.py b/detect.py index 216c9dbd5880..8934a093f45e 100644 --- a/detect.py +++ b/detect.py @@ -42,11 +42,12 @@ sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative +from ultralytics.utils.plotting import Annotator, colors, save_one_box + from models.common import DetectMultiBackend from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer, xyxy2xywh) -from utils.plots import Annotator, colors, save_one_box from utils.torch_utils import select_device, smart_inference_mode diff --git a/models/common.py b/models/common.py index 4ab1341e42cf..4e03b9c7e47e 100644 --- a/models/common.py +++ b/models/common.py @@ -23,13 +23,13 @@ import torch.nn as nn from PIL import Image from torch.cuda import amp +from ultralytics.utils.plotting import Annotator, colors, save_one_box from utils import TryExcept from utils.dataloaders import exif_transpose, letterbox from utils.general import (LOGGER, ROOT, Profile, check_requirements, check_suffix, 
check_version, colorstr, increment_path, is_jupyter, make_divisible, non_max_suppression, scale_boxes, xywh2xyxy, xyxy2xywh, yaml_load) -from utils.plots import Annotator, colors, save_one_box from utils.torch_utils import copy_attr, smart_inference_mode diff --git a/requirements.txt b/requirements.txt index 2c96974fe338..2ba6c2a1cb88 100644 --- a/requirements.txt +++ b/requirements.txt @@ -15,7 +15,7 @@ thop>=0.1.1 # FLOPs computation torch>=1.7.0 # see https://pytorch.org/get-started/locally (recommended) torchvision>=0.8.1 tqdm>=4.64.0 -ultralytics>=8.0.145 +ultralytics>=8.0.146 # protobuf<=3.20.1 # https://github.com/ultralytics/yolov5/issues/8012 # Logging --------------------------------------------------------------------- diff --git a/segment/predict.py b/segment/predict.py index 6a4d5eff3fc1..77e8df79066a 100644 --- a/segment/predict.py +++ b/segment/predict.py @@ -42,12 +42,13 @@ sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative +from ultralytics.utils.plotting import Annotator, colors, save_one_box + from models.common import DetectMultiBackend from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, increment_path, non_max_suppression, print_args, scale_boxes, scale_segments, strip_optimizer) -from utils.plots import Annotator, colors, save_one_box from utils.segment.general import masks2segments, process_mask, process_mask_native from utils.torch_utils import select_device, smart_inference_mode diff --git a/utils/loggers/clearml/clearml_utils.py b/utils/loggers/clearml/clearml_utils.py index 2764abe90da8..4e999bfee5db 100644 --- a/utils/loggers/clearml/clearml_utils.py +++ b/utils/loggers/clearml/clearml_utils.py @@ -5,8 +5,7 @@ import numpy as np import yaml - -from utils.plots import Annotator, colors +from ultralytics.utils.plotting import Annotator, colors try: import clearml diff --git a/utils/plots.py b/utils/plots.py index 754fabb84e37..db6f94a6674d 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -8,7 +8,6 @@ import os from copy import copy from pathlib import Path -from urllib.error import URLError import cv2 import matplotlib @@ -17,14 +16,13 @@ import pandas as pd import seaborn as sn import torch -from PIL import Image, ImageDraw, ImageFont +from PIL import Image, ImageDraw from scipy.ndimage.filters import gaussian_filter1d +from ultralytics.utils.plotting import Annotator from utils import TryExcept, threaded -from utils.general import (CONFIG_DIR, FONT, LOGGER, check_font, check_requirements, clip_boxes, increment_path, - is_ascii, xywh2xyxy, xyxy2xywh) +from utils.general import LOGGER, clip_boxes, increment_path, xywh2xyxy, xyxy2xywh from utils.metrics import fitness -from utils.segment.general import scale_image # Settings RANK = int(os.getenv('RANK', -1)) @@ -53,120 +51,6 @@ def hex2rgb(h): # rgb order (PIL) colors = Colors() # create instance for 'from utils.plots import colors' -def check_pil_font(font=FONT, size=10): - # Return a PIL TrueType Font, downloading to CONFIG_DIR if necessary - font = Path(font) - font = font if font.exists() else (CONFIG_DIR / font.name) - try: - return ImageFont.truetype(str(font) if font.exists() else font.name, size) - except Exception: # download if missing - try: - check_font(font) - return ImageFont.truetype(str(font), size) - except TypeError: - check_requirements('Pillow>=8.4.0') # known issue 
https://github.com/ultralytics/yolov5/issues/5374 - except URLError: # not online - return ImageFont.load_default() - - -class Annotator: - # YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations - def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'): - assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.' - non_ascii = not is_ascii(example) # non-latin labels, i.e. asian, arabic, cyrillic - self.pil = pil or non_ascii - if self.pil: # use PIL - self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) - self.draw = ImageDraw.Draw(self.im) - self.font = check_pil_font(font='Arial.Unicode.ttf' if non_ascii else font, - size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12)) - else: # use cv2 - self.im = im - self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2) # line width - - def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)): - # Add one xyxy box to image with label - if self.pil or not is_ascii(label): - self.draw.rectangle(box, width=self.lw, outline=color) # box - if label: - w, h = self.font.getsize(label) # text width, height (WARNING: deprecated) in 9.2.0 - # _, _, w, h = self.font.getbbox(label) # text width, height (New) - outside = box[1] - h >= 0 # label fits outside box - self.draw.rectangle( - (box[0], box[1] - h if outside else box[1], box[0] + w + 1, - box[1] + 1 if outside else box[1] + h + 1), - fill=color, - ) - # self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls') # for PIL>8.0 - self.draw.text((box[0], box[1] - h if outside else box[1]), label, fill=txt_color, font=self.font) - else: # cv2 - p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3])) - cv2.rectangle(self.im, p1, p2, color, thickness=self.lw, lineType=cv2.LINE_AA) - if label: - tf = max(self.lw - 1, 1) # font thickness - w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[0] # text width, height - outside = p1[1] - h >= 3 - p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3 - cv2.rectangle(self.im, p1, p2, color, -1, cv2.LINE_AA) # filled - cv2.putText(self.im, - label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2), - 0, - self.lw / 3, - txt_color, - thickness=tf, - lineType=cv2.LINE_AA) - - def masks(self, masks, colors, im_gpu, alpha=0.5, retina_masks=False): - """Plot masks at once. 
- Args: - masks (tensor): predicted masks on cuda, shape: [n, h, w] - colors (List[List[Int]]): colors for predicted masks, [[r, g, b] * n] - im_gpu (tensor): img is in cuda, shape: [3, h, w], range: [0, 1] - alpha (float): mask transparency: 0.0 fully transparent, 1.0 opaque - """ - if self.pil: - # convert to numpy first - self.im = np.asarray(self.im).copy() - if len(masks) == 0: - self.im[:] = im_gpu.permute(1, 2, 0).contiguous().cpu().numpy() * 255 - colors = torch.tensor(colors, device=im_gpu.device, dtype=torch.float32) / 255.0 - colors = colors[:, None, None] # shape(n,1,1,3) - masks = masks.unsqueeze(3) # shape(n,h,w,1) - masks_color = masks * (colors * alpha) # shape(n,h,w,3) - - inv_alph_masks = (1 - masks * alpha).cumprod(0) # shape(n,h,w,1) - mcs = (masks_color * inv_alph_masks).sum(0) * 2 # mask color summand shape(n,h,w,3) - - im_gpu = im_gpu.flip(dims=[0]) # flip channel - im_gpu = im_gpu.permute(1, 2, 0).contiguous() # shape(h,w,3) - im_gpu = im_gpu * inv_alph_masks[-1] + mcs - im_mask = (im_gpu * 255).byte().cpu().numpy() - self.im[:] = im_mask if retina_masks else scale_image(im_gpu.shape, im_mask, self.im.shape) - if self.pil: - # convert im back to PIL and update draw - self.fromarray(self.im) - - def rectangle(self, xy, fill=None, outline=None, width=1): - # Add rectangle to image (PIL-only) - self.draw.rectangle(xy, fill, outline, width) - - def text(self, xy, text, txt_color=(255, 255, 255), anchor='top'): - # Add text to image (PIL-only) - if anchor == 'bottom': # start y from font bottom - w, h = self.font.getsize(text) # text width, height - xy[1] += 1 - h - self.draw.text(xy, text, fill=txt_color, font=self.font) - - def fromarray(self, im): - # Update self.im from a numpy array - self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) - self.draw = ImageDraw.Draw(self.im) - - def result(self): - # Return annotated image as array - return np.asarray(self.im) - - def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')): """ x: Features to be visualized @@ -266,7 +150,7 @@ def plot_images(images, targets, paths=None, fname='images.jpg', names=None): x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders if paths: - annotator.text((x + 5, y + 5), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames + annotator.text([x + 5, y + 5], text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames if len(targets) > 0: ti = targets[targets[:, 0] == i] # image targets boxes = xywh2xyxy(ti[:, 2:6]).T diff --git a/utils/segment/plots.py b/utils/segment/plots.py index 1b22ec838ac9..f9938cd1b06a 100644 --- a/utils/segment/plots.py +++ b/utils/segment/plots.py @@ -54,7 +54,7 @@ def plot_images_and_masks(images, targets, masks, paths=None, fname='images.jpg' x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders if paths: - annotator.text((x + 5, y + 5), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames + annotator.text([x + 5, y + 5], text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames if len(targets) > 0: idx = targets[:, 0] == i ti = targets[idx] # image targets From f9f023ab4aa82493b968e888aba16772d9e6bf82 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 1 Aug 2023 22:32:44 +0200 Subject: [PATCH 1780/1976] [Snyk] Security upgrade numpy from 1.21.3 to 1.22.2 (#11931) fix: requirements.txt 
to reduce vulnerabilities The following vulnerabilities are fixed by pinning transitive dependencies: - https://snyk.io/vuln/SNYK-PYTHON-NUMPY-2321964 - https://snyk.io/vuln/SNYK-PYTHON-NUMPY-2321966 - https://snyk.io/vuln/SNYK-PYTHON-NUMPY-2321970 Co-authored-by: snyk-bot From 08974153767a3ab28b8be38c6ae203230518377d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 3 Aug 2023 01:52:44 +0200 Subject: [PATCH 1781/1976] Update requirements.txt to `ultralytics>=8.0.147` (#11936) Signed-off-by: Glenn Jocher --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 2ba6c2a1cb88..ee9e7dbcfb80 100644 --- a/requirements.txt +++ b/requirements.txt @@ -15,7 +15,7 @@ thop>=0.1.1 # FLOPs computation torch>=1.7.0 # see https://pytorch.org/get-started/locally (recommended) torchvision>=0.8.1 tqdm>=4.64.0 -ultralytics>=8.0.146 +ultralytics>=8.0.147 # protobuf<=3.20.1 # https://github.com/ultralytics/yolov5/issues/8012 # Logging --------------------------------------------------------------------- From cffd24843c395bb7277def848beff4ff50d7a2fd Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 8 Aug 2023 01:52:45 +0200 Subject: [PATCH 1782/1976] Replace `type(1) == int` with `isinstance(1, int)` (#11951) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit suggestions updates: - [github.com/asottile/pyupgrade: v3.8.0 → v3.10.1](https://github.com/asottile/pyupgrade/compare/v3.8.0...v3.10.1) - [github.com/PyCQA/flake8: 6.0.0 → 6.1.0](https://github.com/PyCQA/flake8/compare/6.0.0...6.1.0) * Update __init__.py Signed-off-by: Glenn Jocher * Update __init__.py Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- .pre-commit-config.yaml | 4 ++-- utils/loggers/comet/__init__.py | 7 +++---- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 192a0ff3cba2..b8099b97890e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -22,7 +22,7 @@ repos: - id: detect-private-key - repo: https://github.com/asottile/pyupgrade - rev: v3.8.0 + rev: v3.10.1 hooks: - id: pyupgrade name: Upgrade code @@ -50,7 +50,7 @@ repos: # exclude: "README.md|README.zh-CN.md|CONTRIBUTING.md" - repo: https://github.com/PyCQA/flake8 - rev: 6.0.0 + rev: 6.1.0 hooks: - id: flake8 name: PEP8 diff --git a/utils/loggers/comet/__init__.py b/utils/loggers/comet/__init__.py index ad7fa5521c6d..c14a5f885696 100644 --- a/utils/loggers/comet/__init__.py +++ b/utils/loggers/comet/__init__.py @@ -365,15 +365,14 @@ def download_dataset_artifact(self, artifact_path): data_dict['path'] = artifact_save_dir metadata_names = metadata.get('names') - if type(metadata_names) == dict: + if isinstance(metadata_names, dict): data_dict['names'] = {int(k): v for k, v in metadata.get('names').items()} - elif type(metadata_names) == list: + elif isinstance(metadata_names, list): data_dict['names'] = {int(k): v for k, v in zip(range(len(metadata_names)), metadata_names)} else: raise "Invalid 'names' field in dataset yaml file. 
Please use a list or dictionary" - data_dict = self.update_data_paths(data_dict) - return data_dict + return self.update_data_paths(data_dict) def update_data_paths(self, data_dict): path = data_dict.get('path', '') From 38737bdafa7253b5de640bb0a661704522d5ba69 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 8 Aug 2023 01:56:02 +0200 Subject: [PATCH 1783/1976] Fix missing `ultralytics` package on `torch.hub.load()` (#11950) * Fix missing `ultralytics` package on `torch.hub.load()` https://github.com/ultralytics/yolov5/issues/11945 Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update common.py Signed-off-by: Glenn Jocher * Update common.py Signed-off-by: Glenn Jocher * Update general.py Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- models/common.py | 12 ++++++++++++ utils/general.py | 10 ++++++++++ 2 files changed, 22 insertions(+) diff --git a/models/common.py b/models/common.py index 4e03b9c7e47e..75cc4e97bbc7 100644 --- a/models/common.py +++ b/models/common.py @@ -23,6 +23,18 @@ import torch.nn as nn from PIL import Image from torch.cuda import amp + +# Import 'ultralytics' package or install if if missing +try: + import ultralytics + + assert hasattr(ultralytics, '__version__') # verify package is not directory +except (ImportError, AssertionError): + import os + + os.system('pip install -U ultralytics') + import ultralytics + from ultralytics.utils.plotting import Annotator, colors, save_one_box from utils import TryExcept diff --git a/utils/general.py b/utils/general.py index 8a265c741b91..017103752ef7 100644 --- a/utils/general.py +++ b/utils/general.py @@ -35,6 +35,16 @@ import torch import torchvision import yaml + +# Import 'ultralytics' package or install if if missing +try: + import ultralytics + + assert hasattr(ultralytics, '__version__') # verify package is not directory +except (ImportError, AssertionError): + os.system('pip install -U ultralytics') + import ultralytics + from ultralytics.utils.checks import check_requirements from utils import TryExcept, emojis From 934d5d1a8a1be20050496375e90f0630314a2ce3 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 8 Aug 2023 02:10:01 +0200 Subject: [PATCH 1784/1976] Created using Colaboratory --- tutorial.ipynb | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index db29c800e908..440e370ce724 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -65,7 +65,7 @@ "import utils\n", "display = utils.notebook_init() # checks" ], - "execution_count": 1, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -95,7 +95,7 @@ "\n", "```shell\n", "python detect.py --source 0 # webcam\n", - " img.jpg # image \n", + " img.jpg # image\n", " vid.mp4 # video\n", " screen # screenshot\n", " path/ # directory\n", @@ -118,7 +118,7 @@ "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images\n", "# display.Image(filename='runs/detect/exp/zidane.jpg', width=600)" ], - "execution_count": 13, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -174,7 +174,7 @@ "torch.hub.download_url_to_file('https://ultralytics.com/assets/coco2017val.zip', 'tmp.zip') # download (780M - 5000 images)\n", "!unzip -q tmp.zip -d ../datasets && rm tmp.zip # unzip" ], - "execution_count": 3, + "execution_count": null, "outputs": [ { "output_type": 
"stream", @@ -198,7 +198,7 @@ "# Validate YOLOv5s on COCO val\n", "!python val.py --weights yolov5s.pt --data coco.yaml --img 640 --half" ], - "execution_count": 4, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -308,7 +308,7 @@ "# Train YOLOv5s on COCO128 for 3 epochs\n", "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache" ], - "execution_count": 5, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -539,7 +539,7 @@ "\n", "Training results are automatically logged with [Tensorboard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) loggers to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc.\n", "\n", - "This directory contains train and val statistics, mosaics, labels, predictions and augmentated mosaics, as well as metrics and charts including precision-recall (PR) curves and confusion matrices. \n", + "This directory contains train and val statistics, mosaics, labels, predictions and augmentated mosaics, as well as metrics and charts including precision-recall (PR) curves and confusion matrices.\n", "\n", "\"Local\n" ] @@ -593,7 +593,7 @@ "# YOLOv5 PyTorch HUB Inference (DetectionModels only)\n", "import torch\n", "\n", - "model = torch.hub.load('ultralytics/yolov5', 'yolov5s', force_reload=True) # yolov5n - yolov5x6 or custom\n", + "model = torch.hub.load('ultralytics/yolov5', 'yolov5s', force_reload=True, trust_repo=True) # or yolov5n - yolov5x6 or custom\n", "im = 'https://ultralytics.com/images/zidane.jpg' # file, Path, PIL.Image, OpenCV, nparray, list\n", "results = model(im) # inference\n", "results.print() # or .show(), .save(), .crop(), .pandas(), etc." @@ -602,4 +602,4 @@ "outputs": [] } ] -} +} \ No newline at end of file From df48c205c5fc7be5af6b067da1f7cb3efb770d88 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 8 Aug 2023 02:13:55 +0200 Subject: [PATCH 1785/1976] Update notebooks `torch.hub.load()` examples (#11952) * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * Update tutorial.ipynb Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update tutorial.ipynb Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- classify/tutorial.ipynb | 2 +- segment/tutorial.ipynb | 2 +- tutorial.ipynb | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/classify/tutorial.ipynb b/classify/tutorial.ipynb index 75eebd8e1132..844da0c418f5 100644 --- a/classify/tutorial.ipynb +++ b/classify/tutorial.ipynb @@ -1445,7 +1445,7 @@ "# YOLOv5 PyTorch HUB Inference (DetectionModels only)\n", "import torch\n", "\n", - "model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # yolov5n - yolov5x6 or custom\n", + "model = torch.hub.load('ultralytics/yolov5', 'yolov5s', force_reload=True, trust_repo=True) # or yolov5n - yolov5x6 or custom\n", "im = 'https://ultralytics.com/images/zidane.jpg' # file, Path, PIL.Image, OpenCV, nparray, list\n", "results = model(im) # inference\n", "results.print() # or .show(), .save(), .crop(), .pandas(), etc." 
diff --git a/segment/tutorial.ipynb b/segment/tutorial.ipynb index 0ece0f60e4d6..cd215e7f3467 100644 --- a/segment/tutorial.ipynb +++ b/segment/tutorial.ipynb @@ -558,7 +558,7 @@ "# YOLOv5 PyTorch HUB Inference (DetectionModels only)\n", "import torch\n", "\n", - "model = torch.hub.load('ultralytics/yolov5', 'yolov5s-seg') # yolov5n - yolov5x6 or custom\n", + "model = torch.hub.load('ultralytics/yolov5', 'yolov5s-seg', force_reload=True, trust_repo=True) # or yolov5n - yolov5x6 or custom\n", "im = 'https://ultralytics.com/images/zidane.jpg' # file, Path, PIL.Image, OpenCV, nparray, list\n", "results = model(im) # inference\n", "results.print() # or .show(), .save(), .crop(), .pandas(), etc." diff --git a/tutorial.ipynb b/tutorial.ipynb index 440e370ce724..f666dbde7e8c 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -602,4 +602,4 @@ "outputs": [] } ] -} \ No newline at end of file +} From 1a779ee2b11f48d3ce51ac61f80a6453c8c4e8bc Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 11 Aug 2023 00:12:11 +0200 Subject: [PATCH 1786/1976] Bump `torch>=1.8.0` and `torchvision>=0.9.0` (#11970) --- .github/workflows/ci-testing.yml | 8 ++++---- .github/workflows/greetings.yml | 2 +- README.md | 4 ++-- README.zh-CN.md | 2 +- models/experimental.py | 4 ++-- requirements.txt | 4 ++-- 6 files changed, 12 insertions(+), 12 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 13f9787ed8c9..80ae42955605 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -63,9 +63,9 @@ jobs: python-version: '3.9' model: yolov5n - os: ubuntu-latest - python-version: '3.8' # torch 1.7.0 requires python >=3.6, <=3.8 + python-version: '3.8' # torch 1.8.0 requires python >=3.6, <=3.8 model: yolov5n - torch: '1.7.0' # min torch version CI https://pypi.org/project/torchvision/ + torch: '1.8.0' # min torch version CI https://pypi.org/project/torchvision/ steps: - uses: actions/checkout@v3 - uses: actions/setup-python@v4 @@ -75,8 +75,8 @@ jobs: - name: Install requirements run: | python -m pip install --upgrade pip wheel - if [ "${{ matrix.torch }}" == "1.7.0" ]; then - pip install -r requirements.txt torch==1.7.0 torchvision==0.8.1 --extra-index-url https://download.pytorch.org/whl/cpu + if [ "${{ matrix.torch }}" == "1.8.0" ]; then + pip install -r requirements.txt torch==1.8.0 torchvision==0.9.0 --extra-index-url https://download.pytorch.org/whl/cpu else pip install -r requirements.txt --extra-index-url https://download.pytorch.org/whl/cpu fi diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index 8aca12d3c370..2203b2c60a94 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -31,7 +31,7 @@ jobs: ## Requirements - [**Python>=3.7.0**](https://www.python.org/) with all [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) installed including [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/). To get started: + [**Python>=3.7.0**](https://www.python.org/) with all [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) installed including [**PyTorch>=1.8**](https://pytorch.org/get-started/locally/). 
To get started: ```bash git clone https://github.com/ultralytics/yolov5 # clone cd yolov5 diff --git a/README.md b/README.md index a27fe2474fd8..b9941b74ef34 100644 --- a/README.md +++ b/README.md @@ -78,8 +78,8 @@ See the [YOLOv5 Docs](https://docs.ultralytics.com/yolov5) for full documentatio Install Clone repo and install [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) in a -[**Python>=3.7.0**](https://www.python.org/) environment, including -[**PyTorch>=1.7**](https://pytorch.org/get-started/locally/). +[**Python>=3.8.0**](https://www.python.org/) environment, including +[**PyTorch>=1.8**](https://pytorch.org/get-started/locally/). ```bash git clone https://github.com/ultralytics/yolov5 # clone diff --git a/README.zh-CN.md b/README.zh-CN.md index fe382af359a0..49503c3c8b68 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -72,7 +72,7 @@ pip install ultralytics
安装 -克隆 repo,并要求在 [**Python>=3.7.0**](https://www.python.org/) 环境中安装 [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) ,且要求 [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/) 。 +克隆 repo,并要求在 [**Python>=3.7.0**](https://www.python.org/) 环境中安装 [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) ,且要求 [**PyTorch>=1.8**](https://pytorch.org/get-started/locally/) 。 ```bash git clone https://github.com/ultralytics/yolov5 # clone diff --git a/models/experimental.py b/models/experimental.py index d60d1808da11..11f75e2254b3 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -87,11 +87,11 @@ def attempt_load(weights, device=None, inplace=True, fuse=True): model.append(ckpt.fuse().eval() if fuse and hasattr(ckpt, 'fuse') else ckpt.eval()) # model in eval mode - # Module compatibility updates + # Module updates for m in model.modules(): t = type(m) if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model): - m.inplace = inplace # torch 1.7.0 compatibility + m.inplace = inplace if t is Detect and not isinstance(m.anchor_grid, list): delattr(m, 'anchor_grid') setattr(m, 'anchor_grid', [torch.zeros(1)] * m.nl) diff --git a/requirements.txt b/requirements.txt index ee9e7dbcfb80..33bb7dba2611 100644 --- a/requirements.txt +++ b/requirements.txt @@ -12,8 +12,8 @@ PyYAML>=5.3.1 requests>=2.23.0 scipy>=1.4.1 thop>=0.1.1 # FLOPs computation -torch>=1.7.0 # see https://pytorch.org/get-started/locally (recommended) -torchvision>=0.8.1 +torch>=1.8.0 # see https://pytorch.org/get-started/locally (recommended) +torchvision>=0.9.0 tqdm>=4.64.0 ultralytics>=8.0.147 # protobuf<=3.20.1 # https://github.com/ultralytics/yolov5/issues/8012 From 493981cd3b87112fe0e32fe20478b83b3ef33659 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 11 Aug 2023 00:53:13 +0200 Subject: [PATCH 1787/1976] Bump `Python>=3.8.0` (#11971) --- .github/workflows/greetings.yml | 2 +- README.zh-CN.md | 2 +- utils/general.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index 2203b2c60a94..3058d78b0a66 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -31,7 +31,7 @@ jobs: ## Requirements - [**Python>=3.7.0**](https://www.python.org/) with all [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) installed including [**PyTorch>=1.8**](https://pytorch.org/get-started/locally/). To get started: + [**Python>=3.8.0**](https://www.python.org/) with all [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) installed including [**PyTorch>=1.8**](https://pytorch.org/get-started/locally/). To get started: ```bash git clone https://github.com/ultralytics/yolov5 # clone cd yolov5 diff --git a/README.zh-CN.md b/README.zh-CN.md index 49503c3c8b68..d8b2a900bf96 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -72,7 +72,7 @@ pip install ultralytics
安装 -克隆 repo,并要求在 [**Python>=3.7.0**](https://www.python.org/) 环境中安装 [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) ,且要求 [**PyTorch>=1.8**](https://pytorch.org/get-started/locally/) 。 +克隆 repo,并要求在 [**Python>=3.8.0**](https://www.python.org/) 环境中安装 [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) ,且要求 [**PyTorch>=1.8**](https://pytorch.org/get-started/locally/) 。 ```bash git clone https://github.com/ultralytics/yolov5 # clone diff --git a/utils/general.py b/utils/general.py index 017103752ef7..135141e21436 100644 --- a/utils/general.py +++ b/utils/general.py @@ -381,7 +381,7 @@ def check_git_info(path='.'): return {'remote': None, 'branch': None, 'commit': None} -def check_python(minimum='3.7.0'): +def check_python(minimum='3.8.0'): # Check current python version vs. required python version check_version(platform.python_version(), minimum, name='Python ', hard=True) From dd104811c2e0419529aa7177ce160321e9694837 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 12 Aug 2023 11:40:24 +0200 Subject: [PATCH 1788/1976] Update links.yml (#11978) Signed-off-by: Glenn Jocher --- .github/workflows/links.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/links.yml b/.github/workflows/links.yml index cd65b961f7f9..98803c7e1a43 100644 --- a/.github/workflows/links.yml +++ b/.github/workflows/links.yml @@ -28,7 +28,7 @@ jobs: timeout_minutes: 5 retry_wait_seconds: 60 max_attempts: 3 - command: lychee --accept 429,999 --exclude-loopback --exclude 'https?://(www\.)?(twitter\.com|instagram\.com)' --exclude-path '**/ci.yaml' --exclude-mail --github-token ${{ secrets.GITHUB_TOKEN }} './**/*.md' './**/*.html' + command: lychee --accept 429,999 --exclude-loopback --exclude 'https?://(www\.)?(linkedin\.com|twitter\.com|instagram\.com)' --exclude-path '**/ci.yaml' --exclude-mail --github-token ${{ secrets.GITHUB_TOKEN }} './**/*.md' './**/*.html' - name: Test Markdown, HTML, YAML, Python and Notebook links with retry if: github.event_name == 'workflow_dispatch' @@ -37,4 +37,4 @@ jobs: timeout_minutes: 5 retry_wait_seconds: 60 max_attempts: 3 - command: lychee --accept 429,999 --exclude-loopback --exclude 'https?://(www\.)?(twitter\.com|instagram\.com|url\.com)' --exclude-path '**/ci.yaml' --exclude-mail --github-token ${{ secrets.GITHUB_TOKEN }} './**/*.md' './**/*.html' './**/*.yml' './**/*.yaml' './**/*.py' './**/*.ipynb' + command: lychee --accept 429,999 --exclude-loopback --exclude 'https?://(www\.)?(linkedin\.com|twitter\.com|instagram\.com|url\.com)' --exclude-path '**/ci.yaml' --exclude-mail --github-token ${{ secrets.GITHUB_TOKEN }} './**/*.md' './**/*.html' './**/*.yml' './**/*.yaml' './**/*.py' './**/*.ipynb' From 94e943e609f296fc2b0eddf32f3f9b28ad1da106 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 23 Aug 2023 20:55:36 +0200 Subject: [PATCH 1789/1976] Update links.yml to exclude Kaggle (#12022) --- .github/workflows/links.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/links.yml b/.github/workflows/links.yml index 98803c7e1a43..4bee94534b06 100644 --- a/.github/workflows/links.yml +++ b/.github/workflows/links.yml @@ -28,7 +28,7 @@ jobs: timeout_minutes: 5 retry_wait_seconds: 60 max_attempts: 3 - command: lychee --accept 429,999 --exclude-loopback --exclude 'https?://(www\.)?(linkedin\.com|twitter\.com|instagram\.com)' --exclude-path '**/ci.yaml' --exclude-mail --github-token ${{ secrets.GITHUB_TOKEN }} './**/*.md' 
'./**/*.html' + command: lychee --accept 429,999 --exclude-loopback --exclude 'https?://(www\.)?(linkedin\.com|twitter\.com|instagram\.com|kaggle\.com)' --exclude-path '**/ci.yaml' --exclude-mail --github-token ${{ secrets.GITHUB_TOKEN }} './**/*.md' './**/*.html' - name: Test Markdown, HTML, YAML, Python and Notebook links with retry if: github.event_name == 'workflow_dispatch' @@ -37,4 +37,4 @@ jobs: timeout_minutes: 5 retry_wait_seconds: 60 max_attempts: 3 - command: lychee --accept 429,999 --exclude-loopback --exclude 'https?://(www\.)?(linkedin\.com|twitter\.com|instagram\.com|url\.com)' --exclude-path '**/ci.yaml' --exclude-mail --github-token ${{ secrets.GITHUB_TOKEN }} './**/*.md' './**/*.html' './**/*.yml' './**/*.yaml' './**/*.py' './**/*.ipynb' + command: lychee --accept 429,999 --exclude-loopback --exclude 'https?://(www\.)?(linkedin\.com|twitter\.com|instagram\.com|kaggle\.com|url\.com)' --exclude-path '**/ci.yaml' --exclude-mail --github-token ${{ secrets.GITHUB_TOKEN }} './**/*.md' './**/*.html' './**/*.yml' './**/*.yaml' './**/*.py' './**/*.ipynb' From 9974d51f1b155b7145cceec8cd6b51691668ca96 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 27 Aug 2023 23:04:07 +0200 Subject: [PATCH 1790/1976] Update README.md (#12040) * Update README.md Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Signed-off-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> From 378ed7496799e0c12c1957900561258fbab44db3 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 31 Aug 2023 15:45:22 +0200 Subject: [PATCH 1791/1976] Docker COPY with checkout `fetch-depth: 0` (#12066) Docker COPY with checkout fetch-depth: 0 --- .github/workflows/docker.yml | 2 ++ utils/docker/Dockerfile | 3 +-- utils/docker/Dockerfile-arm64 | 3 +-- utils/docker/Dockerfile-cpu | 3 +-- 4 files changed, 5 insertions(+), 6 deletions(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 13e79216fc20..cbb1371b8ee9 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -16,6 +16,8 @@ jobs: steps: - name: Checkout repo uses: actions/checkout@v3 + with: + fetch-depth: 0 # copy full .git directory to access full git history in Docker images - name: Set up QEMU uses: docker/setup-qemu-action@v2 diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index 4346fc823c16..d1fc87f7c84c 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -24,8 +24,7 @@ RUN rm -rf /usr/src/app && mkdir -p /usr/src/app WORKDIR /usr/src/app # Copy contents -# COPY . /usr/src/app (issues as not a .git directory) -RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app +COPY . /usr/src/app # Install pip packages COPY requirements.txt . diff --git a/utils/docker/Dockerfile-arm64 b/utils/docker/Dockerfile-arm64 index 7b5c610e5071..802703e8d3f7 100644 --- a/utils/docker/Dockerfile-arm64 +++ b/utils/docker/Dockerfile-arm64 @@ -27,8 +27,7 @@ RUN mkdir -p /usr/src/app WORKDIR /usr/src/app # Copy contents -# COPY . /usr/src/app (issues as not a .git directory) -RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app +COPY . 
/usr/src/app ENV DEBIAN_FRONTEND teletype diff --git a/utils/docker/Dockerfile-cpu b/utils/docker/Dockerfile-cpu index 17316986c9d5..2d1363a91ee5 100644 --- a/utils/docker/Dockerfile-cpu +++ b/utils/docker/Dockerfile-cpu @@ -30,8 +30,7 @@ RUN mkdir -p /usr/src/app WORKDIR /usr/src/app # Copy contents -# COPY . /usr/src/app (issues as not a .git directory) -RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app +COPY . /usr/src/app # Usage Examples ------------------------------------------------------------------------------------------------------- From 8c30c583b0ab400ae497f488863c32654a3cf5c0 Mon Sep 17 00:00:00 2001 From: Akash A Desai <62583018+akashAD98@users.noreply.github.com> Date: Mon, 4 Sep 2023 16:22:33 +0530 Subject: [PATCH 1792/1976] Add new `--save-csv` argument to detect.py (#12042) * Update detect.py added support for saving result in csv,used for testing Signed-off-by: Akash A Desai <62583018+akashAD98@users.noreply.github.com> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update detect.py Signed-off-by: Akash A Desai <62583018+akashAD98@users.noreply.github.com> * Update detect.py changed save_in_csv to save_csv Signed-off-by: Akash A Desai <62583018+akashAD98@users.noreply.github.com> * Update detect.py Signed-off-by: Akash A Desai <62583018+akashAD98@users.noreply.github.com> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Signed-off-by: Akash A Desai <62583018+akashAD98@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- detect.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/detect.py b/detect.py index 8934a093f45e..e18a9883d627 100644 --- a/detect.py +++ b/detect.py @@ -29,6 +29,7 @@ """ import argparse +import csv import os import platform import sys @@ -63,6 +64,7 @@ def run( device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu view_img=False, # show results save_txt=False, # save results to *.txt + save_csv=False, # save results in CSV format save_conf=False, # save confidences in --save-txt labels save_crop=False, # save cropped prediction boxes nosave=False, # do not save images/videos @@ -135,6 +137,18 @@ def run( # Second-stage classifier (optional) # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s) + # Define the path for the CSV file + csv_path = save_dir / 'predictions.csv' + + # Create or append to the CSV file + def write_to_csv(image_name, prediction, confidence): + data = {'Image Name': image_name, 'Prediction': prediction, 'Confidence': confidence} + with open(csv_path, mode='a', newline='') as f: + writer = csv.DictWriter(f, fieldnames=data.keys()) + if not csv_path.is_file(): + writer.writeheader() + writer.writerow(data) + # Process predictions for i, det in enumerate(pred): # per image seen += 1 @@ -162,6 +176,14 @@ def run( # Write results for *xyxy, conf, cls in reversed(det): + c = int(cls) # integer class + label = names[c] if hide_conf else f'{names[c]}' + confidence = float(conf) + confidence_str = f'{confidence:.2f}' + + if save_csv: + write_to_csv(p.name, label, confidence_str) + if save_txt: # Write to file xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format @@ -229,6 +251,7 @@ def parse_opt(): parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') parser.add_argument('--view-img', action='store_true', help='show results') parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') + parser.add_argument('--save-csv', action='store_true', help='save results in CSV format') parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes') parser.add_argument('--nosave', action='store_true', help='do not save images/videos') From a6659d05051e01c8feca7ecb348c1cce7d67aaaa Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 5 Sep 2023 21:31:00 +0200 Subject: [PATCH 1793/1976] [pre-commit.ci] pre-commit suggestions (#12079) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/executablebooks/mdformat: 0.7.16 → 0.7.17](https://github.com/executablebooks/mdformat/compare/0.7.16...0.7.17) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b8099b97890e..ff2190614378 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -40,7 +40,7 @@ repos: name: YAPF formatting - repo: https://github.com/executablebooks/mdformat - rev: 0.7.16 + rev: 0.7.17 hooks: - id: mdformat name: MD formatting From ec690e42af3745c41c16b60f264a8be275dab69d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 10 Sep 2023 04:59:27 +0200 Subject: [PATCH 1794/1976] Update YouTube URL https://youtu.be/LNwODJXcvt4 (#12106) Update example YouTube URL --- README.md | 2 +- README.zh-CN.md | 2 +- classify/predict.py | 2 +- classify/tutorial.ipynb | 2 +- detect.py | 2 +- segment/predict.py | 2 +- segment/tutorial.ipynb | 2 +- tutorial.ipynb | 2 +- utils/dataloaders.py | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index b9941b74ef34..8903b95c0024 100644 --- a/README.md +++ b/README.md @@ -128,7 +128,7 @@ python detect.py --weights yolov5s.pt --source 0 # list.txt # list of images list.streams # list of streams 'path/*.jpg' # glob - 'https://youtu.be/Zgi9g1ksQHc' # YouTube + 'https://youtu.be/LNwODJXcvt4' # YouTube 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream ``` diff --git a/README.zh-CN.md b/README.zh-CN.md index d8b2a900bf96..d5fe368f287b 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -121,7 +121,7 @@ python detect.py --weights yolov5s.pt --source 0 # list.txt # list of images list.streams # list of streams 'path/*.jpg' # glob - 'https://youtu.be/Zgi9g1ksQHc' # YouTube + 'https://youtu.be/LNwODJXcvt4' # YouTube 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream ``` diff --git a/classify/predict.py b/classify/predict.py index 1cbbc094a41e..653c374f768f 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -11,7 +11,7 @@ list.txt # list of images list.streams # list of streams 'path/*.jpg' # glob - 'https://youtu.be/Zgi9g1ksQHc' # YouTube + 'https://youtu.be/LNwODJXcvt4' # YouTube 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream Usage - formats: diff --git a/classify/tutorial.ipynb b/classify/tutorial.ipynb index 844da0c418f5..f85715ca844e 100644 --- a/classify/tutorial.ipynb +++ b/classify/tutorial.ipynb @@ -87,7 +87,7 @@ " screen # screenshot\n", " path/ # directory\n", " 'path/*.jpg' # glob\n", - " 
'https://youtu.be/Zgi9g1ksQHc' # YouTube\n", + " 'https://youtu.be/LNwODJXcvt4' # YouTube\n", " 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream\n", "```" ] diff --git a/detect.py b/detect.py index e18a9883d627..fd9637138dd6 100644 --- a/detect.py +++ b/detect.py @@ -11,7 +11,7 @@ list.txt # list of images list.streams # list of streams 'path/*.jpg' # glob - 'https://youtu.be/Zgi9g1ksQHc' # YouTube + 'https://youtu.be/LNwODJXcvt4' # YouTube 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream Usage - formats: diff --git a/segment/predict.py b/segment/predict.py index 77e8df79066a..113bc472e637 100644 --- a/segment/predict.py +++ b/segment/predict.py @@ -11,7 +11,7 @@ list.txt # list of images list.streams # list of streams 'path/*.jpg' # glob - 'https://youtu.be/Zgi9g1ksQHc' # YouTube + 'https://youtu.be/LNwODJXcvt4' # YouTube 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream Usage - formats: diff --git a/segment/tutorial.ipynb b/segment/tutorial.ipynb index cd215e7f3467..0e6091869b7c 100644 --- a/segment/tutorial.ipynb +++ b/segment/tutorial.ipynb @@ -87,7 +87,7 @@ " screen # screenshot\n", " path/ # directory\n", " 'path/*.jpg' # glob\n", - " 'https://youtu.be/Zgi9g1ksQHc' # YouTube\n", + " 'https://youtu.be/LNwODJXcvt4' # YouTube\n", " 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream\n", "```" ] diff --git a/tutorial.ipynb b/tutorial.ipynb index f666dbde7e8c..d63117dbe0ed 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -100,7 +100,7 @@ " screen # screenshot\n", " path/ # directory\n", " 'path/*.jpg' # glob\n", - " 'https://youtu.be/Zgi9g1ksQHc' # YouTube\n", + " 'https://youtu.be/LNwODJXcvt4' # YouTube\n", " 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream\n", "```" ] diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 26201c3c78fc..1fbd0361ded4 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -355,7 +355,7 @@ def __init__(self, sources='file.streams', img_size=640, stride=32, auto=True, t # Start thread to read frames from video stream st = f'{i + 1}/{n}: {s}... ' if urlparse(s).hostname in ('www.youtube.com', 'youtube.com', 'youtu.be'): # if source is YouTube video - # YouTube format i.e. 'https://www.youtube.com/watch?v=Zgi9g1ksQHc' or 'https://youtu.be/Zgi9g1ksQHc' + # YouTube format i.e. 'https://www.youtube.com/watch?v=Zgi9g1ksQHc' or 'https://youtu.be/LNwODJXcvt4' check_requirements(('pafy', 'youtube_dl==2020.12.2')) import pafy s = pafy.new(s).getbest(preftype='mp4').url # YouTube URL From 8c45e51c74aa66c51300ceb58ef4f94f6f46e699 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Sep 2023 20:48:02 +0200 Subject: [PATCH 1795/1976] Bump actions/checkout from 3 to 4 (#12109) Bumps [actions/checkout](https://github.com/actions/checkout) from 3 to 4. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/v3...v4) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci-testing.yml | 4 ++-- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/docker.yml | 2 +- .github/workflows/links.yml | 2 +- .github/workflows/translate-readme.yml | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 80ae42955605..b0e974aa2a2b 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -21,7 +21,7 @@ jobs: python-version: [ '3.10' ] # requires python<=3.10 model: [ yolov5n ] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} @@ -67,7 +67,7 @@ jobs: model: yolov5n torch: '1.8.0' # min torch version CI https://pypi.org/project/torchvision/ steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 05db12dabd1a..53af304ee992 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -23,7 +23,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index cbb1371b8ee9..77a7c9720286 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -15,7 +15,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout repo - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 # copy full .git directory to access full git history in Docker images diff --git a/.github/workflows/links.yml b/.github/workflows/links.yml index 4bee94534b06..c2665fd3f237 100644 --- a/.github/workflows/links.yml +++ b/.github/workflows/links.yml @@ -13,7 +13,7 @@ jobs: Links: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Download and install lychee run: | diff --git a/.github/workflows/translate-readme.yml b/.github/workflows/translate-readme.yml index d5e2be26f523..7a4dd3f32265 100644 --- a/.github/workflows/translate-readme.yml +++ b/.github/workflows/translate-readme.yml @@ -14,7 +14,7 @@ jobs: Translate: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Setup Node.js uses: actions/setup-node@v3 with: From 9e97ac37f5e76ee535d06a919fd0e9209fae1d5a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 14 Sep 2023 01:32:48 +0200 Subject: [PATCH 1796/1976] [Snyk] Security upgrade numpy from 1.21.3 to 1.22.2 (#12118) fix: requirements.txt to reduce vulnerabilities The following vulnerabilities are fixed by pinning transitive dependencies: - https://snyk.io/vuln/SNYK-PYTHON-NUMPY-2321964 - https://snyk.io/vuln/SNYK-PYTHON-NUMPY-2321966 - https://snyk.io/vuln/SNYK-PYTHON-NUMPY-2321970 Co-authored-by: snyk-bot From 8a94907db168d47b33e51753ab6a20ee1ccd0b60 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Sep 2023 14:55:29 +0200 Subject: [PATCH 1797/1976] Bump docker/build-push-action from 4 to 5 (#12135) Bumps [docker/build-push-action](https://github.com/docker/build-push-action) from 4 to 5. 
- [Release notes](https://github.com/docker/build-push-action/releases) - [Commits](https://github.com/docker/build-push-action/compare/v4...v5) --- updated-dependencies: - dependency-name: docker/build-push-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/docker.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 77a7c9720286..3f6d9ed3978a 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -32,7 +32,7 @@ jobs: password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Build and push arm64 image - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v5 continue-on-error: true with: context: . @@ -42,7 +42,7 @@ jobs: tags: ultralytics/yolov5:latest-arm64 - name: Build and push CPU image - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v5 continue-on-error: true with: context: . @@ -51,7 +51,7 @@ jobs: tags: ultralytics/yolov5:latest-cpu - name: Build and push GPU image - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v5 continue-on-error: true with: context: . From e421dcf5ea5baa85273e3f80d537144806b4502a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Sep 2023 14:56:59 +0200 Subject: [PATCH 1798/1976] Bump docker/setup-buildx-action from 2 to 3 (#12136) Bumps [docker/setup-buildx-action](https://github.com/docker/setup-buildx-action) from 2 to 3. - [Release notes](https://github.com/docker/setup-buildx-action/releases) - [Commits](https://github.com/docker/setup-buildx-action/compare/v2...v3) --- updated-dependencies: - dependency-name: docker/setup-buildx-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- .github/workflows/docker.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 3f6d9ed3978a..bd44fa844fe7 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -23,7 +23,7 @@ jobs: uses: docker/setup-qemu-action@v2 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 - name: Login to Docker Hub uses: docker/login-action@v2 From 831c8e90c12750843e4c94b109e20379b97d5b37 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Sep 2023 14:58:39 +0200 Subject: [PATCH 1799/1976] Bump docker/login-action from 2 to 3 (#12137) Bumps [docker/login-action](https://github.com/docker/login-action) from 2 to 3. - [Release notes](https://github.com/docker/login-action/releases) - [Commits](https://github.com/docker/login-action/compare/v2...v3) --- updated-dependencies: - dependency-name: docker/login-action dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- .github/workflows/docker.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index bd44fa844fe7..6cf0a29b0b08 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -26,7 +26,7 @@ jobs: uses: docker/setup-buildx-action@v3 - name: Login to Docker Hub - uses: docker/login-action@v2 + uses: docker/login-action@v3 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} From 9b6357c88647f54b0e8c77ce276ffb2e926e3f74 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Sep 2023 15:13:34 +0200 Subject: [PATCH 1800/1976] Bump docker/setup-qemu-action from 2 to 3 (#12138) Bumps [docker/setup-qemu-action](https://github.com/docker/setup-qemu-action) from 2 to 3. - [Release notes](https://github.com/docker/setup-qemu-action/releases) - [Commits](https://github.com/docker/setup-qemu-action/compare/v2...v3) --- updated-dependencies: - dependency-name: docker/setup-qemu-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- .github/workflows/docker.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 6cf0a29b0b08..27672b407ef4 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -20,7 +20,7 @@ jobs: fetch-depth: 0 # copy full .git directory to access full git history in Docker images - name: Set up QEMU - uses: docker/setup-qemu-action@v2 + uses: docker/setup-qemu-action@v3 - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 From 005dcfa956fc5602b1364f452ade2d91037839a3 Mon Sep 17 00:00:00 2001 From: Lucid1ty <70336891+Lucid1ty@users.noreply.github.com> Date: Mon, 18 Sep 2023 21:15:56 +0800 Subject: [PATCH 1801/1976] Update YOLOv5 docs URL https://docs.ultralytics.com/yolov5 (#12132) Update YOLOv5 docs URL https://docs.ultralytics.com/yolov5/ Signed-off-by: Lucid1ty <70336891+Lucid1ty@users.noreply.github.com> Co-authored-by: Glenn Jocher --- README.zh-CN.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.zh-CN.md b/README.zh-CN.md index d5fe368f287b..0f1d4ee1ef7f 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -19,7 +19,7 @@ YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表 Ultralytics 对未来视觉 AI 方法的开源研究,结合在数千小时的研究和开发中积累的经验教训和最佳实践。 -我们希望这里的资源能帮助您充分利用 YOLOv5。请浏览 YOLOv5 文档 了解详细信息,在 GitHub 上提交问题以获得支持,并加入我们的 Discord 社区进行问题和讨论! +我们希望这里的资源能帮助您充分利用 YOLOv5。请浏览 YOLOv5 文档 了解详细信息,在 GitHub 上提交问题以获得支持,并加入我们的 Discord 社区进行问题和讨论! 如需申请企业许可,请在 [Ultralytics Licensing](https://ultralytics.com/license) 处填写表格 @@ -67,7 +67,7 @@ pip install ultralytics ##
文档
-有关训练、测试和部署的完整文档见[YOLOv5 文档](https://docs.ultralytics.com)。请参阅下面的快速入门示例。 +有关训练、测试和部署的完整文档见[YOLOv5 文档](https://docs.ultralytics.com/yolov5/)。请参阅下面的快速入门示例。
安装 From 6262c7feb42cd181f165681b9aff428785c0ff7e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 21 Sep 2023 00:32:04 +0200 Subject: [PATCH 1802/1976] Update for https://yolovision.ultralytics.com #YV23 (#12155) * Update README.md Signed-off-by: Glenn Jocher * Update README.zh-CN.md Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher --- README.md | 4 ++++ README.zh-CN.md | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/README.md b/README.md index 8903b95c0024..a32acb3f3629 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,11 @@

+ + +

[English](README.md) | [简体中文](README.zh-CN.md) diff --git a/README.zh-CN.md b/README.zh-CN.md index 0f1d4ee1ef7f..f83f722aa626 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -1,7 +1,11 @@

+ + +

[英文](README.md)|[简体中文](README.zh-CN.md)
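A minimal usage sketch of the `predictions.csv` pattern from the `--save-csv` patch (#12042) above — written here as a hypothetical standalone script rather than detect.py itself, with placeholder row values. One detail worth noting: `open(..., mode='a')` creates the file, so the existence check must run before opening; in the patch as shown above, the `csv_path.is_file()` test runs after `open()`, which means the header row is never written. The sketch below reorders that check.

```python
# Minimal sketch of the predictions.csv append pattern behind --save-csv (#12042).
# Standalone illustration only: 'bus.jpg', 'person' and '0.92' are placeholder values.
import csv
from pathlib import Path

csv_path = Path('predictions.csv')


def write_to_csv(image_name, prediction, confidence):
    # Check existence *before* open(), because append mode creates the file;
    # checking afterwards is always True, so the header row would be skipped.
    write_header = not csv_path.is_file()
    data = {'Image Name': image_name, 'Prediction': prediction, 'Confidence': confidence}
    with open(csv_path, mode='a', newline='') as f:
        writer = csv.DictWriter(f, fieldnames=data.keys())
        if write_header:
            writer.writeheader()
        writer.writerow(data)


write_to_csv('bus.jpg', 'person', '0.92')  # one row per detection, e.g. from the detect.py loop
```

Within the series, the equivalent end-to-end invocation would be `python detect.py --weights yolov5s.pt --source data/images --save-csv`, which appends one row per detection to `predictions.csv` in the run's save directory.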
From bb9706e7d179fbf30a5eedd0b0008d1ec470f768 Mon Sep 17 00:00:00 2001
From: Luis Filipe Araujo de Souza <58831491+Doquey@users.noreply.github.com>
Date: Fri, 29 Sep 2023 14:23:05 -0300
Subject: [PATCH 1803/1976] ONNX export Path to str() (#12177)

* Update export.py

Signed-off-by: Luis Filipe Araujo de Souza <58831491+Doquey@users.noreply.github.com>

* Update export.py

Signed-off-by: Luis Filipe Araujo de Souza <58831491+Doquey@users.noreply.github.com>

* Update export.py

Transformed the f variable into a string in the ONNX export. This bug made it impossible to export any models to .onnx, since the type hint would not accept the user's input as specified in the function's documentation.

Signed-off-by: Luis Filipe Araujo de Souza <58831491+Doquey@users.noreply.github.com>

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

---------

Signed-off-by: Luis Filipe Araujo de Souza <58831491+Doquey@users.noreply.github.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 export.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/export.py b/export.py
index 92d42472dfc4..71e4eb94d1c4 100644
--- a/export.py
+++ b/export.py
@@ -155,7 +155,7 @@ def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr('ONNX
     import onnx

     LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...')
-    f = file.with_suffix('.onnx')
+    f = str(file.with_suffix('.onnx'))

     output_names = ['output0', 'output1'] if isinstance(model, SegmentationModel) else ['output0']
     if dynamic:

From dd9e3382c9af9697fb071d26f1fd1698e9be3e04 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Tue, 3 Oct 2023 15:46:59 +0200
Subject: [PATCH 1804/1976] [pre-commit.ci] pre-commit suggestions (#12189)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

updates:
- [github.com/asottile/pyupgrade: v3.10.1 → v3.14.0](https://github.com/asottile/pyupgrade/compare/v3.10.1...v3.14.0)
- [github.com/google/yapf: v0.40.0 → v0.40.2](https://github.com/google/yapf/compare/v0.40.0...v0.40.2)
- [github.com/codespell-project/codespell: v2.2.5 → v2.2.6](https://github.com/codespell-project/codespell/compare/v2.2.5...v2.2.6)

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 .pre-commit-config.yaml | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index ff2190614378..39ab266f70f3 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -22,7 +22,7 @@ repos:
       - id: detect-private-key

   - repo: https://github.com/asottile/pyupgrade
-    rev: v3.10.1
+    rev: v3.14.0
     hooks:
       - id: pyupgrade
         name: Upgrade code
@@ -34,7 +34,7 @@ repos:
         name: Sort imports

   - repo: https://github.com/google/yapf
-    rev: v0.40.0
+    rev: v0.40.2
     hooks:
       - id: yapf
         name: YAPF formatting
@@ -56,7 +56,7 @@ repos:
         name: PEP8

   - repo: https://github.com/codespell-project/codespell
-    rev: v2.2.5
+    rev: v2.2.6
     hooks:
       - id: codespell
         args:

From e4df1ec5bab52601d5de6d62d428dfd03ab53be1 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Sat, 14 Oct 2023 19:13:57 +0200
Subject: [PATCH 1805/1976] [Snyk] Security upgrade ubuntu from lunar-20230615 to 23.04 (#12232)

* fix: utils/docker/Dockerfile-cpu to reduce vulnerabilities

The following vulnerabilities are fixed with an upgrade:
- https://snyk.io/vuln/SNYK-UBUNTU2304-GLIBC-5901400
-
https://snyk.io/vuln/SNYK-UBUNTU2304-GLIBC-5901400 - https://snyk.io/vuln/SNYK-UBUNTU2304-GLIBC-5927364 - https://snyk.io/vuln/SNYK-UBUNTU2304-GLIBC-5927364 - https://snyk.io/vuln/SNYK-UBUNTU2304-XZUTILS-5854648 * Update Dockerfile-cpu Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher Co-authored-by: snyk-bot --- utils/docker/Dockerfile-cpu | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/docker/Dockerfile-cpu b/utils/docker/Dockerfile-cpu index 2d1363a91ee5..e1696d0aebd8 100644 --- a/utils/docker/Dockerfile-cpu +++ b/utils/docker/Dockerfile-cpu @@ -3,7 +3,7 @@ # Image is CPU-optimized for ONNX, OpenVINO and PyTorch YOLOv5 deployments # Start FROM Ubuntu image https://hub.docker.com/_/ubuntu -FROM ubuntu:lunar-20230615 +FROM ubuntu:23.04 # Downloads to user config dir ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ From 4d687c8c56e3ee4e6851e48c1c5089c731ef0fcd Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 21 Oct 2023 18:07:56 +0200 Subject: [PATCH 1806/1976] Exclude url.com and gstatic.com from links.yml (#12263) * Exclude url.com and gstatic.com from links.yml Signed-off-by: Glenn Jocher * Update links.yml Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher --- .github/workflows/links.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/links.yml b/.github/workflows/links.yml index c2665fd3f237..5d897da3c144 100644 --- a/.github/workflows/links.yml +++ b/.github/workflows/links.yml @@ -28,7 +28,7 @@ jobs: timeout_minutes: 5 retry_wait_seconds: 60 max_attempts: 3 - command: lychee --accept 429,999 --exclude-loopback --exclude 'https?://(www\.)?(linkedin\.com|twitter\.com|instagram\.com|kaggle\.com)' --exclude-path '**/ci.yaml' --exclude-mail --github-token ${{ secrets.GITHUB_TOKEN }} './**/*.md' './**/*.html' + command: lychee --accept 429,999 --exclude-loopback --exclude 'https?://(www\.)?(linkedin\.com|twitter\.com|instagram\.com|kaggle\.com|fonts\.gstatic\.com|url\.com)' --exclude-path '**/ci.yaml' --exclude-mail --github-token ${{ secrets.GITHUB_TOKEN }} './**/*.md' './**/*.html' - name: Test Markdown, HTML, YAML, Python and Notebook links with retry if: github.event_name == 'workflow_dispatch' @@ -37,4 +37,4 @@ jobs: timeout_minutes: 5 retry_wait_seconds: 60 max_attempts: 3 - command: lychee --accept 429,999 --exclude-loopback --exclude 'https?://(www\.)?(linkedin\.com|twitter\.com|instagram\.com|kaggle\.com|url\.com)' --exclude-path '**/ci.yaml' --exclude-mail --github-token ${{ secrets.GITHUB_TOKEN }} './**/*.md' './**/*.html' './**/*.yml' './**/*.yaml' './**/*.py' './**/*.ipynb' + command: lychee --accept 429,999 --exclude-loopback --exclude 'https?://(www\.)?(linkedin\.com|twitter\.com|instagram\.com|kaggle\.com|url\.com|fonts\.gstatic\.com|url\.com)' --exclude-path '**/ci.yaml' --exclude-mail --github-token ${{ secrets.GITHUB_TOKEN }} './**/*.md' './**/*.html' './**/*.yml' './**/*.yaml' './**/*.py' './**/*.ipynb' From 9236fdd5c272aee394b7172a6138e635585e01dd Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 26 Oct 2023 18:57:48 +0200 Subject: [PATCH 1807/1976] Fix discord social image URL (#12286) --- README.md | 4 ++-- README.zh-CN.md | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index a32acb3f3629..95b5e65c1ac3 100644 --- a/README.md +++ b/README.md @@ -48,7 +48,7 @@ To request an Enterprise License please complete the form at [Ultralytics Licens - +
@@ -495,7 +495,7 @@ For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https:/ - + [tta]: https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation diff --git a/README.zh-CN.md b/README.zh-CN.md index f83f722aa626..9b7c065b9745 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -47,7 +47,7 @@ YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表 - + @@ -488,7 +488,7 @@ Ultralytics 提供两种许可证选项以适应各种使用场景: - + [tta]: https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation From 53efd07fef2bd0138871b94ed20ad8923a20e5cd Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 26 Oct 2023 19:28:21 +0200 Subject: [PATCH 1808/1976] Update social media links (#12287) * Update social media links * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- README.md | 83 +++++++++++++++++------------------------------ README.zh-CN.md | 85 +++++++++++++++++++------------------------------ 2 files changed, 62 insertions(+), 106 deletions(-) diff --git a/README.md b/README.md index 95b5e65c1ac3..dc64fcd89f65 100644 --- a/README.md +++ b/README.md @@ -29,26 +29,19 @@ We hope that the resources here will help you get the most out of YOLOv5. Please To request an Enterprise License please complete the form at [Ultralytics Licensing](https://ultralytics.com/license).
- - - - - - - - - - - - - - - - - - - - + Ultralytics GitHub + + Ultralytics LinkedIn + + Ultralytics Twitter + + Ultralytics YouTube + + Ultralytics TikTok + + Ultralytics Instagram + + Ultralytics Discord
@@ -56,10 +49,7 @@ To request an Enterprise License please complete the form at [Ultralytics Licens ##
YOLOv8 🚀 NEW
-We are thrilled to announce the launch of Ultralytics YOLOv8 🚀, our NEW cutting-edge, state-of-the-art (SOTA) model -released at **[https://github.com/ultralytics/ultralytics](https://github.com/ultralytics/ultralytics)**. -YOLOv8 is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of -object detection, image segmentation and image classification tasks. +We are thrilled to announce the launch of Ultralytics YOLOv8 🚀, our NEW cutting-edge, state-of-the-art (SOTA) model released at **[https://github.com/ultralytics/ultralytics](https://github.com/ultralytics/ultralytics)**. YOLOv8 is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of object detection, image segmentation and image classification tasks. See the [YOLOv8 Docs](https://docs.ultralytics.com) for details and get started with: @@ -96,8 +86,7 @@ pip install -r requirements.txt # install
Inference -YOLOv5 [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading) inference. [Models](https://github.com/ultralytics/yolov5/tree/master/models) download automatically from the latest -YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). +YOLOv5 [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading) inference. [Models](https://github.com/ultralytics/yolov5/tree/master/models) download automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). ```python import torch @@ -120,8 +109,7 @@ results.print() # or .show(), .save(), .crop(), .pandas(), etc.
Inference with detect.py -`detect.py` runs inference on a variety of sources, downloading [models](https://github.com/ultralytics/yolov5/tree/master/models) automatically from -the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) and saving results to `runs/detect`. +`detect.py` runs inference on a variety of sources, downloading [models](https://github.com/ultralytics/yolov5/tree/master/models) automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) and saving results to `runs/detect`. ```bash python detect.py --weights yolov5s.pt --source 0 # webcam @@ -143,11 +131,7 @@ python detect.py --weights yolov5s.pt --source 0 # The commands below reproduce YOLOv5 [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) results. [Models](https://github.com/ultralytics/yolov5/tree/master/models) -and [datasets](https://github.com/ultralytics/yolov5/tree/master/data) download automatically from the latest -YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). Training times for YOLOv5n/s/m/l/x are -1/2/4/6/8 days on a V100 GPU ([Multi-GPU](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training) times faster). Use the -largest `--batch-size` possible, or pass `--batch-size -1` for -YOLOv5 [AutoBatch](https://github.com/ultralytics/yolov5/pull/5092). Batch sizes shown for V100-16GB. +and [datasets](https://github.com/ultralytics/yolov5/tree/master/data) download automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). Training times for YOLOv5n/s/m/l/x are 1/2/4/6/8 days on a V100 GPU ([Multi-GPU](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training) times faster). Use the largest `--batch-size` possible, or pass `--batch-size -1` for YOLOv5 [AutoBatch](https://github.com/ultralytics/yolov5/pull/5092). Batch sizes shown for V100-16GB. ```bash python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml --batch-size 128 @@ -476,26 +460,19 @@ For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https:/
- - - - - - - - - - - - - - - - - - - - + Ultralytics GitHub + + Ultralytics LinkedIn + + Ultralytics Twitter + + Ultralytics YouTube + + Ultralytics TikTok + + Ultralytics Instagram + + Ultralytics Discord
[tta]: https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation diff --git a/README.zh-CN.md b/README.zh-CN.md index 9b7c065b9745..d9816c2d98ee 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -28,33 +28,25 @@ YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表 - - - - - - - - - - - - - - - - - - - - + Ultralytics GitHub + + Ultralytics LinkedIn + + Ultralytics Twitter + + Ultralytics YouTube + + Ultralytics TikTok + + Ultralytics Instagram + + Ultralytics Discord ##
YOLOv8 🚀 新品
-我们很高兴宣布 Ultralytics YOLOv8 🚀 的发布,这是我们新推出的领先水平、最先进的(SOTA)模型,发布于 **[https://github.com/ultralytics/ultralytics](https://github.com/ultralytics/ultralytics)**。 -YOLOv8 旨在快速、准确且易于使用,使其成为广泛的物体检测、图像分割和图像分类任务的极佳选择。 +我们很高兴宣布 Ultralytics YOLOv8 🚀 的发布,这是我们新推出的领先水平、最先进的(SOTA)模型,发布于 **[https://github.com/ultralytics/ultralytics](https://github.com/ultralytics/ultralytics)**。 YOLOv8 旨在快速、准确且易于使用,使其成为广泛的物体检测、图像分割和图像分类任务的极佳选择。 请查看 [YOLOv8 文档](https://docs.ultralytics.com)了解详细信息,并开始使用: @@ -89,8 +81,7 @@ pip install -r requirements.txt # install
推理 -使用 YOLOv5 [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading) 推理。最新 [模型](https://github.com/ultralytics/yolov5/tree/master/models) 将自动的从 -YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) 中下载。 +使用 YOLOv5 [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading) 推理。最新 [模型](https://github.com/ultralytics/yolov5/tree/master/models) 将自动的从 YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) 中下载。 ```python import torch @@ -113,8 +104,7 @@ results.print() # or .show(), .save(), .crop(), .pandas(), etc.
使用 detect.py 推理 -`detect.py` 在各种来源上运行推理, [模型](https://github.com/ultralytics/yolov5/tree/master/models) 自动从 -最新的YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) 中下载,并将结果保存到 `runs/detect` 。 +`detect.py` 在各种来源上运行推理, [模型](https://github.com/ultralytics/yolov5/tree/master/models) 自动从 最新的YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) 中下载,并将结果保存到 `runs/detect` 。 ```bash python detect.py --weights yolov5s.pt --source 0 # webcam @@ -134,12 +124,8 @@ python detect.py --weights yolov5s.pt --source 0 #
训练 -下面的命令重现 YOLOv5 在 [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) 数据集上的结果。 -最新的 [模型](https://github.com/ultralytics/yolov5/tree/master/models) 和 [数据集](https://github.com/ultralytics/yolov5/tree/master/data) -将自动的从 YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) 中下载。 -YOLOv5n/s/m/l/x 在 V100 GPU 的训练时间为 1/2/4/6/8 天( [多GPU](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training) 训练速度更快)。 -尽可能使用更大的 `--batch-size` ,或通过 `--batch-size -1` 实现 -YOLOv5 [自动批处理](https://github.com/ultralytics/yolov5/pull/5092) 。下方显示的 batchsize 适用于 V100-16GB。 +下面的命令重现 YOLOv5 在 [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) 数据集上的结果。 最新的 [模型](https://github.com/ultralytics/yolov5/tree/master/models) 和 [数据集](https://github.com/ultralytics/yolov5/tree/master/data) +将自动的从 YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) 中下载。 YOLOv5n/s/m/l/x 在 V100 GPU 的训练时间为 1/2/4/6/8 天( [多GPU](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training) 训练速度更快)。 尽可能使用更大的 `--batch-size` ,或通过 `--batch-size -1` 实现 YOLOv5 [自动批处理](https://github.com/ultralytics/yolov5/pull/5092) 。下方显示的 batchsize 适用于 V100-16GB。 ```bash python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml --batch-size 128 @@ -254,7 +240,7 @@ YOLOv5 超级容易上手,简单易学。我们优先考虑现实世界的结
-##
实例分割模型 ⭐ 新
+##
实例分割模型 ⭐ 新
我们新的 YOLOv5 [release v7.0](https://github.com/ultralytics/yolov5/releases/v7.0) 实例分割模型是世界上最快和最准确的模型,击败所有当前 [SOTA 基准](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco)。我们使它非常易于训练、验证和部署。更多细节请查看 [发行说明](https://github.com/ultralytics/yolov5/releases/v7.0) 或访问我们的 [YOLOv5 分割 Colab 笔记本](https://github.com/ultralytics/yolov5/blob/master/segment/tutorial.ipynb) 以快速入门。 @@ -469,26 +455,19 @@ Ultralytics 提供两种许可证选项以适应各种使用场景:
- - - - - - - - - - - - - - - - - - - - + Ultralytics GitHub + + Ultralytics LinkedIn + + Ultralytics Twitter + + Ultralytics YouTube + + Ultralytics TikTok + + Ultralytics Instagram + + Ultralytics Discord
[tta]: https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation From c2f131abbeed0f5f852f30d0ac18aa00e1bde1c5 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 29 Oct 2023 20:08:19 +0100 Subject: [PATCH 1809/1976] [Snyk] Security upgrade werkzeug from 2.2.3 to 3.0.1 (#12288) fix: utils/google_app_engine/additional_requirements.txt to reduce vulnerabilities The following vulnerabilities are fixed by pinning transitive dependencies: - https://snyk.io/vuln/SNYK-PYTHON-WERKZEUG-6035177 Co-authored-by: snyk-bot --- utils/google_app_engine/additional_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/google_app_engine/additional_requirements.txt b/utils/google_app_engine/additional_requirements.txt index fce1511588e3..a29141a633fd 100644 --- a/utils/google_app_engine/additional_requirements.txt +++ b/utils/google_app_engine/additional_requirements.txt @@ -2,4 +2,4 @@ pip==21.1 Flask==2.3.2 gunicorn==19.10.0 -werkzeug>=2.2.3 # not directly required, pinned by Snyk to avoid a vulnerability +werkzeug>=3.0.1 # not directly required, pinned by Snyk to avoid a vulnerability From 1c60c5353652ee7f08b8f80636aa690ab226176d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 2 Nov 2023 11:00:35 +0100 Subject: [PATCH 1810/1976] Bump actions/setup-node from 3 to 4 (#12295) Bumps [actions/setup-node](https://github.com/actions/setup-node) from 3 to 4. - [Release notes](https://github.com/actions/setup-node/releases) - [Commits](https://github.com/actions/setup-node/compare/v3...v4) --- updated-dependencies: - dependency-name: actions/setup-node dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/translate-readme.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/translate-readme.yml b/.github/workflows/translate-readme.yml index 7a4dd3f32265..bd12954c7fb5 100644 --- a/.github/workflows/translate-readme.yml +++ b/.github/workflows/translate-readme.yml @@ -16,7 +16,7 @@ jobs: steps: - uses: actions/checkout@v4 - name: Setup Node.js - uses: actions/setup-node@v3 + uses: actions/setup-node@v4 with: node-version: 16 # ISO Language Codes: https://cloud.google.com/translate/docs/languages From 61473c345eaf1a12308a8d8a567d9dc9eefed5f3 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 3 Nov 2023 16:02:55 +0100 Subject: [PATCH 1811/1976] Update links.yml (#12319) * Update links.yml Signed-off-by: Glenn Jocher * Update links.yml Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher --- .github/workflows/links.yml | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/.github/workflows/links.yml b/.github/workflows/links.yml index 5d897da3c144..05408fd6227b 100644 --- a/.github/workflows/links.yml +++ b/.github/workflows/links.yml @@ -1,6 +1,11 @@ # Ultralytics YOLO 🚀, AGPL-3.0 license -# YOLO Continuous Integration (CI) GitHub Actions tests broken link checker -# Accept 429(Instagram, 'too many requests'), 999(LinkedIn, 'unknown status code'), Timeout(Twitter) +# Continuous Integration (CI) GitHub Actions tests broken link checker using https://github.com/lycheeverse/lychee +# Ignores the following status codes to reduce false positives: +# - 403(OpenVINO, 'forbidden') +# - 429(Instagram, 'too many requests') +# - 500(Zenodo, 'cached') +# - 502(Zenodo, 'bad gateway') 
+# - 999(LinkedIn, 'unknown status code') name: Check Broken links @@ -28,7 +33,7 @@ jobs: timeout_minutes: 5 retry_wait_seconds: 60 max_attempts: 3 - command: lychee --accept 429,999 --exclude-loopback --exclude 'https?://(www\.)?(linkedin\.com|twitter\.com|instagram\.com|kaggle\.com|fonts\.gstatic\.com|url\.com)' --exclude-path '**/ci.yaml' --exclude-mail --github-token ${{ secrets.GITHUB_TOKEN }} './**/*.md' './**/*.html' + command: lychee --accept 403,429,500,502,999 --exclude-loopback --exclude 'https?://(www\.)?(linkedin\.com|twitter\.com|instagram\.com|kaggle\.com|fonts\.gstatic\.com|url\.com)' --exclude-path '**/ci.yaml' --exclude-mail --github-token ${{ secrets.GITHUB_TOKEN }} './**/*.md' './**/*.html' - name: Test Markdown, HTML, YAML, Python and Notebook links with retry if: github.event_name == 'workflow_dispatch' @@ -37,4 +42,4 @@ jobs: timeout_minutes: 5 retry_wait_seconds: 60 max_attempts: 3 - command: lychee --accept 429,999 --exclude-loopback --exclude 'https?://(www\.)?(linkedin\.com|twitter\.com|instagram\.com|kaggle\.com|url\.com|fonts\.gstatic\.com|url\.com)' --exclude-path '**/ci.yaml' --exclude-mail --github-token ${{ secrets.GITHUB_TOKEN }} './**/*.md' './**/*.html' './**/*.yml' './**/*.yaml' './**/*.py' './**/*.ipynb' + command: lychee --accept 429,999 --exclude-loopback --exclude 'https?://(www\.)?(linkedin\.com|twitter\.com|instagram\.com|kaggle\.com|fonts\.gstatic\.com|url\.com)' --exclude-path '**/ci.yaml' --exclude-mail --github-token ${{ secrets.GITHUB_TOKEN }} './**/*.md' './**/*.html' './**/*.yml' './**/*.yaml' './**/*.py' './**/*.ipynb' From fd39c2b1d89ed9bb3cdc9660f70179047ea46e2e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 3 Nov 2023 16:03:52 +0100 Subject: [PATCH 1812/1976] Bump pip from 21.1 to 23.3 in /utils/google_app_engine (#12316) Bumps [pip](https://github.com/pypa/pip) from 21.1 to 23.3. - [Changelog](https://github.com/pypa/pip/blob/main/NEWS.rst) - [Commits](https://github.com/pypa/pip/compare/21.1...23.3) --- updated-dependencies: - dependency-name: pip dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- utils/google_app_engine/additional_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/google_app_engine/additional_requirements.txt b/utils/google_app_engine/additional_requirements.txt index a29141a633fd..c1a2af2c1145 100644 --- a/utils/google_app_engine/additional_requirements.txt +++ b/utils/google_app_engine/additional_requirements.txt @@ -1,5 +1,5 @@ # add these requirements in your app on top of the existing ones -pip==21.1 +pip==23.3 Flask==2.3.2 gunicorn==19.10.0 werkzeug>=3.0.1 # not directly required, pinned by Snyk to avoid a vulnerability From 6ca8f33796332f1b03018074412ca3cfe5cc9ba5 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 3 Nov 2023 18:59:36 +0100 Subject: [PATCH 1813/1976] [Snyk] Security upgrade ubuntu from 23.04 to mantic-20231011 (#12308) * fix: utils/docker/Dockerfile-cpu to reduce vulnerabilities The following vulnerabilities are fixed with an upgrade: - https://snyk.io/vuln/SNYK-UBUNTU2304-COREUTILS-5484945 - https://snyk.io/vuln/SNYK-UBUNTU2304-GLIBC-5919743 - https://snyk.io/vuln/SNYK-UBUNTU2304-GLIBC-5919743 - https://snyk.io/vuln/SNYK-UBUNTU2304-PROCPS-5816666 - https://snyk.io/vuln/SNYK-UBUNTU2304-XZUTILS-5854648 * Update apt install libgl1-mesa-glx to libgl1 --------- Co-authored-by: snyk-bot --- utils/docker/Dockerfile | 2 +- utils/docker/Dockerfile-arm64 | 2 +- utils/docker/Dockerfile-cpu | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/utils/docker/Dockerfile b/utils/docker/Dockerfile index d1fc87f7c84c..f4727162065a 100644 --- a/utils/docker/Dockerfile +++ b/utils/docker/Dockerfile @@ -12,7 +12,7 @@ ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Aria ENV DEBIAN_FRONTEND noninteractive RUN apt update RUN TZ=Etc/UTC apt install -y tzdata -RUN apt install --no-install-recommends -y gcc git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg +RUN apt install --no-install-recommends -y gcc git zip curl htop libgl1 libglib2.0-0 libpython3-dev gnupg # RUN alias python=python3 # Security updates diff --git a/utils/docker/Dockerfile-arm64 b/utils/docker/Dockerfile-arm64 index 802703e8d3f7..0de85bf8d609 100644 --- a/utils/docker/Dockerfile-arm64 +++ b/utils/docker/Dockerfile-arm64 @@ -12,7 +12,7 @@ ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Aria ENV DEBIAN_FRONTEND noninteractive RUN apt update RUN TZ=Etc/UTC apt install -y tzdata -RUN apt install --no-install-recommends -y python3-pip git zip curl htop gcc libgl1-mesa-glx libglib2.0-0 libpython3-dev +RUN apt install --no-install-recommends -y python3-pip git zip curl htop gcc libgl1 libglib2.0-0 libpython3-dev # RUN alias python=python3 # Install pip packages diff --git a/utils/docker/Dockerfile-cpu b/utils/docker/Dockerfile-cpu index e1696d0aebd8..c7ba1e321c89 100644 --- a/utils/docker/Dockerfile-cpu +++ b/utils/docker/Dockerfile-cpu @@ -3,7 +3,7 @@ # Image is CPU-optimized for ONNX, OpenVINO and PyTorch YOLOv5 deployments # Start FROM Ubuntu image https://hub.docker.com/_/ubuntu -FROM ubuntu:23.04 +FROM ubuntu:mantic-20231011 # Downloads to user config dir ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ @@ -11,7 +11,7 @@ ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Aria # Install linux packages # g++ required to build 'tflite_support' and 'lap' 
packages, libusb-1.0-0 required for 'tflite_support' package RUN apt update \ - && apt install --no-install-recommends -y python3-pip git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg g++ libusb-1.0-0 + && apt install --no-install-recommends -y python3-pip git zip curl htop libgl1 libglib2.0-0 libpython3-dev gnupg g++ libusb-1.0-0 # RUN alias python=python3 # Remove python3.11/EXTERNALLY-MANAGED or use 'pip install --break-system-packages' avoid 'externally-managed-environment' Ubuntu nightly error From 8566421831dfa55768b9102fad0a8e286b2ad5e1 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 4 Nov 2023 19:38:03 +0100 Subject: [PATCH 1814/1976] [Snyk] Security upgrade pillow from 9.5.0 to 10.0.1 (#12169) fix: requirements.txt to reduce vulnerabilities The following vulnerabilities are fixed by pinning transitive dependencies: - https://snyk.io/vuln/SNYK-PYTHON-PILLOW-5918878 Co-authored-by: snyk-bot --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 33bb7dba2611..63f4a9125415 100644 --- a/requirements.txt +++ b/requirements.txt @@ -6,7 +6,7 @@ gitpython>=3.1.30 matplotlib>=3.3 numpy>=1.22.2 opencv-python>=4.1.1 -Pillow>=7.1.2 +Pillow>=10.0.1 psutil # system resources PyYAML>=5.3.1 requests>=2.23.0 From 5fbddafef32adfd2c3aa6a264ffdc77221c5c254 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 4 Nov 2023 19:55:43 +0100 Subject: [PATCH 1815/1976] [Snyk] Security upgrade pillow from 9.5.0 to 10.0.0 (#12323) fix: requirements.txt to reduce vulnerabilities The following vulnerabilities are fixed by pinning transitive dependencies: - https://snyk.io/vuln/SNYK-PYTHON-PILLOW-6043904 Signed-off-by: Glenn Jocher Co-authored-by: snyk-bot --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 63f4a9125415..a61e8e26fdfe 100644 --- a/requirements.txt +++ b/requirements.txt @@ -6,7 +6,7 @@ gitpython>=3.1.30 matplotlib>=3.3 numpy>=1.22.2 opencv-python>=4.1.1 -Pillow>=10.0.1 +Pillow>=10.0.0 psutil # system resources PyYAML>=5.3.1 requests>=2.23.0 From d5d514e1f98a957dbdf62780c941d6b8b954d8a6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 4 Nov 2023 20:33:33 +0100 Subject: [PATCH 1816/1976] [Snyk] Fix for 5 vulnerabilities (#12325) fix: requirements.txt to reduce vulnerabilities The following vulnerabilities are fixed by pinning transitive dependencies: - https://snyk.io/vuln/SNYK-PYTHON-NUMPY-2321964 - https://snyk.io/vuln/SNYK-PYTHON-NUMPY-2321966 - https://snyk.io/vuln/SNYK-PYTHON-NUMPY-2321970 - https://snyk.io/vuln/SNYK-PYTHON-PILLOW-5918878 - https://snyk.io/vuln/SNYK-PYTHON-PILLOW-6043904 Co-authored-by: snyk-bot --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index a61e8e26fdfe..63f4a9125415 100644 --- a/requirements.txt +++ b/requirements.txt @@ -6,7 +6,7 @@ gitpython>=3.1.30 matplotlib>=3.3 numpy>=1.22.2 opencv-python>=4.1.1 -Pillow>=10.0.0 +Pillow>=10.0.1 psutil # system resources PyYAML>=5.3.1 requests>=2.23.0 From b56b7245d2c25cda3c6d2bf2c5eeb25005443397 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 6 Nov 2023 13:56:24 +0100 Subject: [PATCH 1817/1976] [Snyk] Security upgrade pillow from 9.5.0 to 10.0.1 (#12327) fix: requirements.txt to reduce vulnerabilities The following vulnerabilities are fixed by pinning transitive dependencies: - https://snyk.io/vuln/SNYK-PYTHON-PILLOW-5918878 Co-authored-by: snyk-bot From 
84ec8b586bd1c696f4bbf139e844a11d219f2711 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 6 Nov 2023 23:56:27 +0100 Subject: [PATCH 1818/1976] [pre-commit.ci] pre-commit suggestions (#12335) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/pre-commit/pre-commit-hooks: v4.4.0 → v4.5.0](https://github.com/pre-commit/pre-commit-hooks/compare/v4.4.0...v4.5.0) - [github.com/asottile/pyupgrade: v3.14.0 → v3.15.0](https://github.com/asottile/pyupgrade/compare/v3.14.0...v3.15.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 39ab266f70f3..ab272b2a1477 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -11,7 +11,7 @@ ci: repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.4.0 + rev: v4.5.0 hooks: - id: end-of-file-fixer - id: trailing-whitespace @@ -22,7 +22,7 @@ repos: - id: detect-private-key - repo: https://github.com/asottile/pyupgrade - rev: v3.14.0 + rev: v3.15.0 hooks: - id: pyupgrade name: Upgrade code From b6a65e1552b5e15614812e48a5614882fdd313ac Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 16 Nov 2023 21:13:59 +0100 Subject: [PATCH 1819/1976] [Snyk] Security upgrade pillow from 9.5.0 to 10.0.1 (#12334) fix: requirements.txt to reduce vulnerabilities The following vulnerabilities are fixed by pinning transitive dependencies: - https://snyk.io/vuln/SNYK-PYTHON-PILLOW-5918878 Co-authored-by: snyk-bot From b378d10f0c4efe247b73c21ba26d53b9b0d9797c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 19 Nov 2023 00:53:01 +0100 Subject: [PATCH 1820/1976] =?UTF-8?q?Add=20Hindi=20=E0=A4=B9=E0=A4=BF?= =?UTF-8?q?=E0=A4=A8=E0=A5=8D=E0=A4=A6=E0=A5=80=20and=20Arabic=20=D8=A7?= =?UTF-8?q?=D9=84=D8=B9=D8=B1=D8=A8=D9=8A=D8=A9=20Docs=20translations=20(#?= =?UTF-8?q?12394)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add Hindi हिन्दी and Arabic العربية Docs translations Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.zh-CN.md Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher --- README.md | 3 +-- README.zh-CN.md | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index dc64fcd89f65..c778a17258e9 100644 --- a/README.md +++ b/README.md @@ -8,8 +8,7 @@ -->

-[English](README.md) | [简体中文](README.zh-CN.md) -
+[中文](https://docs.ultralytics.com/zh/) | [한국어](https://docs.ultralytics.com/ko/) | [日本語](https://docs.ultralytics.com/ja/) | [Русский](https://docs.ultralytics.com/ru/) | [Deutsch](https://docs.ultralytics.com/de/) | [Français](https://docs.ultralytics.com/fr/) | [Español](https://docs.ultralytics.com/es/) | [Português](https://docs.ultralytics.com/pt/) | [हिन्दी](https://docs.ultralytics.com/hi/) | [العربية](https://docs.ultralytics.com/ar/)
YOLOv5 CI diff --git a/README.zh-CN.md b/README.zh-CN.md index d9816c2d98ee..1bd91d7ecddc 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -8,7 +8,7 @@ -->

-[英文](README.md)|[简体中文](README.zh-CN.md)
+[中文](https://docs.ultralytics.com/zh/) | [한국어](https://docs.ultralytics.com/ko/) | [日本語](https://docs.ultralytics.com/ja/) | [Русский](https://docs.ultralytics.com/ru/) | [Deutsch](https://docs.ultralytics.com/de/) | [Français](https://docs.ultralytics.com/fr/) | [Español](https://docs.ultralytics.com/es/) | [Português](https://docs.ultralytics.com/pt/) | [हिन्दी](https://docs.ultralytics.com/hi/) | [العربية](https://docs.ultralytics.com/ar/)
YOLOv5 CI From 7c54e5d23bf91ab544e2cc102bda273590baa109 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 20 Nov 2023 01:58:21 +0100 Subject: [PATCH 1821/1976] Update dependabot.yml (#12396) Signed-off-by: Glenn Jocher --- .github/dependabot.yml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index c1b3d5d514c3..2d4ae31873b8 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -1,3 +1,7 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# Dependabot for package version updates +# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates + version: 2 updates: - package-ecosystem: pip @@ -12,7 +16,7 @@ updates: - dependencies - package-ecosystem: github-actions - directory: "/" + directory: "/.github/workflows" schedule: interval: weekly time: "04:00" From 45147c0a70b7a32cbdf68f4bd07eefbd9eabe63c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 20 Nov 2023 20:50:46 +0100 Subject: [PATCH 1822/1976] Update ci-testing.yml (#12404) * Update ci-testing.yml Signed-off-by: Glenn Jocher * Delete .github/workflows/translate-readme.yml Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher --- .github/workflows/ci-testing.yml | 17 ++++------------- .github/workflows/translate-readme.yml | 26 -------------------------- 2 files changed, 4 insertions(+), 39 deletions(-) delete mode 100644 .github/workflows/translate-readme.yml diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index b0e974aa2a2b..b80a3a31c0ae 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -18,7 +18,7 @@ jobs: fail-fast: false matrix: os: [ ubuntu-latest ] - python-version: [ '3.10' ] # requires python<=3.10 + python-version: [ '3.11' ] # requires python<=3.10 model: [ yolov5n ] steps: - uses: actions/checkout@v4 @@ -30,8 +30,7 @@ jobs: run: | python -m pip install --upgrade pip wheel pip install -r requirements.txt coremltools openvino-dev tensorflow-cpu --extra-index-url https://download.pytorch.org/whl/cpu - python --version - pip --version + yolo checks pip list - name: Benchmark DetectionModel run: | @@ -53,7 +52,7 @@ jobs: fail-fast: false matrix: os: [ ubuntu-latest, windows-latest ] # macos-latest bug https://github.com/ultralytics/yolov5/pull/9049 - python-version: [ '3.10' ] + python-version: [ '3.11' ] model: [ yolov5n ] include: - os: ubuntu-latest @@ -83,15 +82,7 @@ jobs: shell: bash # for Windows compatibility - name: Check environment run: | - python -c "import utils; utils.notebook_init()" - echo "RUNNER_OS is ${{ runner.os }}" - echo "GITHUB_EVENT_NAME is ${{ github.event_name }}" - echo "GITHUB_WORKFLOW is ${{ github.workflow }}" - echo "GITHUB_ACTOR is ${{ github.actor }}" - echo "GITHUB_REPOSITORY is ${{ github.repository }}" - echo "GITHUB_REPOSITORY_OWNER is ${{ github.repository_owner }}" - python --version - pip --version + yolo checks pip list - name: Test detection shell: bash # for Windows compatibility diff --git a/.github/workflows/translate-readme.yml b/.github/workflows/translate-readme.yml deleted file mode 100644 index bd12954c7fb5..000000000000 --- a/.github/workflows/translate-readme.yml +++ /dev/null @@ -1,26 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license -# README translation action to translate README.md to Chinese as README.zh-CN.md on any change to README.md - -name: Translate README - -on: - push: - branches: - - translate_readme # replace with 'master' to enable action - paths: 
- - README.md - -jobs: - Translate: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - name: Setup Node.js - uses: actions/setup-node@v4 - with: - node-version: 16 - # ISO Language Codes: https://cloud.google.com/translate/docs/languages - - name: Adding README - Chinese Simplified - uses: dephraiim/translate-readme@main - with: - LANG: zh-CN From 3d8f00455955d5759b8facb85930cecfc70f03b9 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 20 Nov 2023 20:51:50 +0100 Subject: [PATCH 1823/1976] Created using Colaboratory (#12405) * Created using Colaboratory * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- tutorial.ipynb | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tutorial.ipynb b/tutorial.ipynb index d63117dbe0ed..1657c67965b0 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -24,12 +24,11 @@ " \n", " \n", "\n", + "[中文](https://docs.ultralytics.com/zh/) | [한국어](https://docs.ultralytics.com/ko/) | [日本語](https://docs.ultralytics.com/ja/) | [Русский](https://docs.ultralytics.com/ru/) | [Deutsch](https://docs.ultralytics.com/de/) | [Français](https://docs.ultralytics.com/fr/) | [Español](https://docs.ultralytics.com/es/) | [Português](https://docs.ultralytics.com/pt/) | [हिन्दी](https://docs.ultralytics.com/hi/) | [العربية](https://docs.ultralytics.com/ar/)\n", "\n", - "
\n", " \"Run\n", " \"Open\n", " \"Open\n", - "
\n", "\n", "This YOLOv5 🚀 notebook by Ultralytics presents simple train, validate and predict examples to help start your AI adventure.
We hope that the resources in this notebook will help you get the most out of YOLOv5. Please browse the YOLOv5 Docs for details, raise an issue on GitHub for support, and join our Discord community for questions and discussions!\n",
"\n",

From cc232e3e3557009bfaa0eb60fdda3787178e886d Mon Sep 17 00:00:00 2001
From: Troy
Date: Thu, 23 Nov 2023 20:12:05 +0800
Subject: [PATCH 1824/1976] Increase NCCL timeout to 3 hours (#12345)

* Increase NCCL timeout to 3 hours

When training on a large dataset using DDP, the scanning process will be very long, and it will raise an NCCL timeout error. Change the default timeout from 30 minutes to 3 hours, the same as Ultralytics YOLOv8 (https://github.com/ultralytics/ultralytics/pull/3343)

Signed-off-by: Troy

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

---------

Signed-off-by: Troy
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Glenn Jocher
---
 train.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/train.py b/train.py
index 004c8eeda121..4c3bec34835f 100644
--- a/train.py
+++ b/train.py
@@ -23,7 +23,7 @@
 import sys
 import time
 from copy import deepcopy
-from datetime import datetime
+from datetime import datetime, timedelta
 from pathlib import Path

 try:
@@ -529,7 +529,8 @@ def main(opt, callbacks=Callbacks()):
         assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command'
         torch.cuda.set_device(LOCAL_RANK)
         device = torch.device('cuda', LOCAL_RANK)
-        dist.init_process_group(backend='nccl' if dist.is_nccl_available() else 'gloo')
+        dist.init_process_group(backend='nccl' if dist.is_nccl_available() else 'gloo',
+                                timeout=timedelta(seconds=10800))

     # Train
     if not opt.evolve:

From 3f02fdee1d8f1a6cf18a24be3438096466367d9f Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Thu, 23 Nov 2023 13:26:13 +0100
Subject: [PATCH 1825/1976] [Snyk] Security upgrade pillow from 9.5.0 to 10.0.1 (#12390)

fix: requirements.txt to reduce vulnerabilities

The following vulnerabilities are fixed by pinning transitive dependencies:
- https://snyk.io/vuln/SNYK-PYTHON-PILLOW-5918878

Co-authored-by: snyk-bot

From 0873e5fa33ace2c2332061dbe8de8fcbd2af4a9f Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Mon, 11 Dec 2023 13:45:00 +0100
Subject: [PATCH 1826/1976] [Snyk] Security upgrade ubuntu from mantic-20231011 to 23.10 (#12460)

fix: utils/docker/Dockerfile-cpu to reduce vulnerabilities

The following vulnerabilities are fixed with an upgrade:
- https://snyk.io/vuln/SNYK-UBUNTU2310-GNUTLS28-6069047
- https://snyk.io/vuln/SNYK-UBUNTU2310-PERL-6085371
- https://snyk.io/vuln/SNYK-UBUNTU2310-PROCPS-5972730
- https://snyk.io/vuln/SNYK-UBUNTU2310-PROCPS-5972730
- https://snyk.io/vuln/SNYK-UBUNTU2310-TAR-6096092

Co-authored-by: snyk-bot
---
 utils/docker/Dockerfile-cpu | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/utils/docker/Dockerfile-cpu b/utils/docker/Dockerfile-cpu
index c7ba1e321c89..573ad3276e73 100644
--- a/utils/docker/Dockerfile-cpu
+++ b/utils/docker/Dockerfile-cpu
@@ -3,7 +3,7 @@
 # Image is CPU-optimized for ONNX, OpenVINO and PyTorch YOLOv5 deployments

 # Start FROM Ubuntu image https://hub.docker.com/_/ubuntu
-FROM ubuntu:mantic-20231011
+FROM ubuntu:23.10

 # Downloads to user config dir
 ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/

From f400bba7836c1dbc2771db251984f20009b5fa81 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]"
<49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Dec 2023 13:47:08 +0100 Subject: [PATCH 1827/1976] Bump actions/setup-python from 4 to 5 in /.github/workflows (#12493) * Bump actions/setup-python from 4 to 5 in /.github/workflows Bumps [actions/setup-python](https://github.com/actions/setup-python) from 4 to 5. - [Release notes](https://github.com/actions/setup-python/releases) - [Commits](https://github.com/actions/setup-python/compare/v4...v5) --- updated-dependencies: - dependency-name: actions/setup-python dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] * Bump actions/stale from 8 to 9 in /.github/workflows (#12492) Bumps [actions/stale](https://github.com/actions/stale) from 8 to 9. - [Release notes](https://github.com/actions/stale/releases) - [Changelog](https://github.com/actions/stale/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/stale/compare/v8...v9) --- updated-dependencies: - dependency-name: actions/stale dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- .github/workflows/ci-testing.yml | 4 ++-- .github/workflows/stale.yml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index b80a3a31c0ae..aed7530e36d4 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -22,7 +22,7 @@ jobs: model: [ yolov5n ] steps: - uses: actions/checkout@v4 - - uses: actions/setup-python@v4 + - uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} cache: 'pip' # caching pip dependencies @@ -67,7 +67,7 @@ jobs: torch: '1.8.0' # min torch version CI https://pypi.org/project/torchvision/ steps: - uses: actions/checkout@v4 - - uses: actions/setup-python@v4 + - uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} cache: 'pip' # caching pip dependencies diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 65c8f70798f1..e78f82b31dce 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -9,7 +9,7 @@ jobs: stale: runs-on: ubuntu-latest steps: - - uses: actions/stale@v8 + - uses: actions/stale@v9 with: repo-token: ${{ secrets.GITHUB_TOKEN }} From a04b49f1aa2aa25b246b6bd1d9e909924ee8019a Mon Sep 17 00:00:00 2001 From: Angelo Delli Santi Date: Sat, 16 Dec 2023 23:31:22 +0100 Subject: [PATCH 1828/1976] Limit tensorflow version and add checks (#12494) * Limit tensorflow version and add checks * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Moving check in export script * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- export.py | 5 +++++ requirements.txt | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/export.py b/export.py index 71e4eb94d1c4..5611ab95b1dc 100644 --- a/export.py +++ b/export.py @@ -397,6 +397,11 @@ def export_saved_model(model, from models.tf import TFModel LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') + if tf.__version__ > 
'2.13.1': + helper_url = 'https://github.com/ultralytics/yolov5/issues/12489' + LOGGER.info( + f'WARNING ⚠️ using TensorFlow {tf.__version__} > 2.13.1 might cause issues when exporting the model to TFLite {helper_url}' + ) # handling issue https://github.com/ultralytics/yolov5/issues/12489 f = str(file).replace('.pt', '_saved_model') batch_size, ch, *imgsz = list(im.shape) # BCHW diff --git a/requirements.txt b/requirements.txt index 63f4a9125415..115055b1629c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -34,7 +34,7 @@ seaborn>=0.11.0 # nvidia-pyindex # TensorRT export # nvidia-tensorrt # TensorRT export # scikit-learn<=1.1.2 # CoreML quantization -# tensorflow>=2.4.0 # TF exports (-cpu, -aarch64, -macos) +# tensorflow>=2.4.0,<=2.13.1 # TF exports (-cpu, -aarch64, -macos) # tensorflowjs>=3.9.0 # TF.js export # openvino-dev>=2023.0 # OpenVINO export From dafe39b07520d0ebb23b8335f1d8c23383792288 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Dec 2023 16:30:47 +0100 Subject: [PATCH 1829/1976] Bump github/codeql-action from 2 to 3 in /.github/workflows (#12521) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 2 to 3. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/v2...v3) --- updated-dependencies: - dependency-name: github/codeql-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql-analysis.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 53af304ee992..11db0dceca3b 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -27,7 +27,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@v2 + uses: github/codeql-action/init@v3 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -38,7 +38,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@v2 + uses: github/codeql-action/autobuild@v3 # ℹ️ Command-line programs to run using the OS shell.
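Note on the TensorFlow guard added in patch 1825 above: `tf.__version__ > '2.13.1'` compares version strings lexicographically, so for example '2.9.0' > '2.13.1' evaluates True as a string comparison. A minimal sketch of a semantic comparison instead, assuming the `packaging` module is available (it is not pinned by these patches):

    from packaging import version
    import tensorflow as tf

    # parse() orders releases numerically, so 2.9.0 correctly sorts below 2.13.1
    if version.parse(tf.__version__) > version.parse('2.13.1'):
        print(f'WARNING: TensorFlow {tf.__version__} may break TFLite export')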
# 📚 https://git.io/JvXDl @@ -52,4 +52,4 @@ jobs: # make release - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v2 + uses: github/codeql-action/analyze@v3 From f33d42df74cda293015975d3686cb8e62d15a957 Mon Sep 17 00:00:00 2001 From: Angelo Delli Santi Date: Mon, 18 Dec 2023 17:14:05 +0100 Subject: [PATCH 1830/1976] Parametrize multiple of number of channels in Conv (#12508) * Parametrize multiple of number of channels in Conv * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix issue when exporting Signed-off-by: Angelo Delli Santi * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Signed-off-by: Angelo Delli Santi Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- models/tf.py | 9 ++++++--- models/yolo.py | 9 ++++++--- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/models/tf.py b/models/tf.py index 62ba3ebf0782..17cca1e54fcf 100644 --- a/models/tf.py +++ b/models/tf.py @@ -380,9 +380,12 @@ def call(self, inputs): def parse_model(d, ch, model, imgsz): # model_dict, input_channels(3) LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}") - anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'] + anchors, nc, gd, gw, ch_mul = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'], d.get( + 'channel_multiple') na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors no = na * (nc + 5) # number of outputs = anchors * (classes + 5) + if not ch_mul: + ch_mul = 8 layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args @@ -399,7 +402,7 @@ def parse_model(d, ch, model, imgsz): # model_dict, input_channels(3) nn.Conv2d, Conv, DWConv, DWConvTranspose2d, Bottleneck, SPP, SPPF, MixConv2d, Focus, CrossConv, BottleneckCSP, C3, C3x]: c1, c2 = ch[f], args[0] - c2 = make_divisible(c2 * gw, 8) if c2 != no else c2 + c2 = make_divisible(c2 * gw, ch_mul) if c2 != no else c2 args = [c1, c2, *args[1:]] if m in [BottleneckCSP, C3, C3x]: @@ -414,7 +417,7 @@ def parse_model(d, ch, model, imgsz): # model_dict, input_channels(3) if isinstance(args[1], int): # number of anchors args[1] = [list(range(args[1] * 2))] * len(f) if m is Segment: - args[3] = make_divisible(args[3] * gw, 8) + args[3] = make_divisible(args[3] * gw, ch_mul) args.append(imgsz) else: c2 = ch[f] diff --git a/models/yolo.py b/models/yolo.py index 4f4d567bec73..9cd3cddc25c3 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -299,10 +299,13 @@ def _from_yaml(self, cfg): def parse_model(d, ch): # model_dict, input_channels(3) # Parse a YOLOv5 model.yaml dictionary LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}") - anchors, nc, gd, gw, act = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'], d.get('activation') + anchors, nc, gd, gw, act, ch_mul = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'], d.get( + 'activation'), d.get('channel_multiple') if act: Conv.default_act = eval(act) # redefine default activation, i.e. 
Conv.default_act = nn.SiLU() LOGGER.info(f"{colorstr('activation:')} {act}") # print + if not ch_mul: + ch_mul = 8 na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors no = na * (nc + 5) # number of outputs = anchors * (classes + 5) @@ -319,7 +322,7 @@ def parse_model(d, ch): # model_dict, input_channels(3) BottleneckCSP, C3, C3TR, C3SPP, C3Ghost, nn.ConvTranspose2d, DWConvTranspose2d, C3x}: c1, c2 = ch[f], args[0] if c2 != no: # if not output - c2 = make_divisible(c2 * gw, 8) + c2 = make_divisible(c2 * gw, ch_mul) args = [c1, c2, *args[1:]] if m in {BottleneckCSP, C3, C3TR, C3Ghost, C3x}: @@ -335,7 +338,7 @@ def parse_model(d, ch): # model_dict, input_channels(3) if isinstance(args[1], int): # number of anchors args[1] = [list(range(args[1] * 2))] * len(f) if m is Segment: - args[3] = make_divisible(args[3] * gw, 8) + args[3] = make_divisible(args[3] * gw, ch_mul) elif m is Contract: c2 = ch[f] * args[0] ** 2 elif m is Expand: From 63555c8e2230328585d09fdc50a6601822a70ded Mon Sep 17 00:00:00 2001 From: Angelo Delli Santi Date: Mon, 18 Dec 2023 17:14:59 +0100 Subject: [PATCH 1831/1976] Add option to quantize per-tensor (#12516) * Add option to quantize per-tensor * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- export.py | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/export.py b/export.py index 5611ab95b1dc..d550a85fd99f 100644 --- a/export.py +++ b/export.py @@ -448,7 +448,8 @@ def export_pb(keras_model, file, prefix=colorstr('TensorFlow GraphDef:')): @try_export -def export_tflite(keras_model, im, file, int8, data, nms, agnostic_nms, prefix=colorstr('TensorFlow Lite:')): +def export_tflite(keras_model, im, file, int8, per_tensor, data, nms, agnostic_nms, + prefix=colorstr('TensorFlow Lite:')): # YOLOv5 TensorFlow Lite export import tensorflow as tf @@ -469,6 +470,8 @@ def export_tflite(keras_model, im, file, int8, data, nms, agnostic_nms, prefix=c converter.inference_input_type = tf.uint8 # or tf.int8 converter.inference_output_type = tf.uint8 # or tf.int8 converter.experimental_new_quantizer = True + if per_tensor: + converter._experimental_disable_per_channel = True f = str(file).replace('.pt', '-int8.tflite') if nms or agnostic_nms: converter.target_spec.supported_ops.append(tf.lite.OpsSet.SELECT_TF_OPS) @@ -713,6 +716,7 @@ def run( keras=False, # use Keras optimize=False, # TorchScript: optimize for mobile int8=False, # CoreML/TF INT8 quantization + per_tensor=False, # TF per tensor quantization dynamic=False, # ONNX/TF/TensorRT: dynamic axes simplify=False, # ONNX: simplify model opset=12, # ONNX: opset version @@ -798,7 +802,14 @@ def run( if pb or tfjs: # pb prerequisite to tfjs f[6], _ = export_pb(s_model, file) if tflite or edgetpu: - f[7], _ = export_tflite(s_model, im, file, int8 or edgetpu, data=data, nms=nms, agnostic_nms=agnostic_nms) + f[7], _ = export_tflite(s_model, + im, + file, + int8 or edgetpu, + per_tensor, + data=data, + nms=nms, + agnostic_nms=agnostic_nms) if edgetpu: f[8], _ = export_edgetpu(file) add_tflite_metadata(f[8] or f[7], metadata, num_outputs=len(s_model.outputs)) @@ -837,6 +848,7 @@ def parse_opt(known=False): parser.add_argument('--keras', action='store_true', help='TF: use Keras') parser.add_argument('--optimize', action='store_true', help='TorchScript: optimize for mobile') 
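# Aside, not part of the patch above: a minimal sketch of how the per_tensor flag
# reaches a TFLite conversion. `keras_model` and `rep_data` are placeholder names,
# and `_experimental_disable_per_channel` is the private TensorFlow Lite converter
# attribute set in the diff above, so it may change between TF releases.
import tensorflow as tf

def convert_int8(keras_model, rep_data, per_tensor=True):
    converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)
    converter.optimizations = [tf.lite.Optimize.DEFAULT]  # enable quantization
    converter.representative_dataset = rep_data  # generator yielding calibration batches
    if per_tensor:
        converter._experimental_disable_per_channel = True  # one scale/zero-point per tensor, not per channel
    return converter.convert()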
parser.add_argument('--int8', action='store_true', help='CoreML/TF/OpenVINO INT8 quantization') + parser.add_argument('--per-tensor', action='store_true', help='TF per-tensor quantization') parser.add_argument('--dynamic', action='store_true', help='ONNX/TF/TensorRT: dynamic axes') parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model') parser.add_argument('--opset', type=int, default=17, help='ONNX: opset version') From ba63208025fb27df31f4f02265631f72bbbba6a5 Mon Sep 17 00:00:00 2001 From: Ryan <35791309+Gary55555@users.noreply.github.com> Date: Wed, 27 Dec 2023 06:58:32 +0800 Subject: [PATCH 1832/1976] Create `labels` dir on labels save (#12551) * Update val.py When saving predicted labels, create a folder named labels. Signed-off-by: Ryan <35791309+Gary55555@users.noreply.github.com> * Update val.py Signed-off-by: Glenn Jocher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Signed-off-by: Ryan <35791309+Gary55555@users.noreply.github.com> Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- val.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/val.py b/val.py index 8da3ef7667aa..b3d05f4305ce 100644 --- a/val.py +++ b/val.py @@ -72,7 +72,8 @@ def save_one_json(predn, jdict, path, class_map): def process_batch(detections, labels, iouv): """ - Return correct prediction matrix + Return correct prediction matrix. + Arguments: detections (array[N, 6]), x1, y1, x2, y2, conf, class labels (array[M, 5]), class, x1, y1, x2, y2 @@ -258,6 +259,7 @@ def run( # Save/log if save_txt: + (save_dir / 'labels').mkdir(parents=True, exist_ok=True) save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt') if save_json: save_one_json(predn, jdict, path, class_map) # append to COCO-JSON dictionary From 6970d88b8ace427de4ca1ab3a841097ccec6d234 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 1 Jan 2024 15:50:40 +0100 Subject: [PATCH 1833/1976] Update requirements.txt to `ultralytics 8.0.232` (#12565) Signed-off-by: Glenn Jocher --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 115055b1629c..eb5038f981c2 100644 --- a/requirements.txt +++ b/requirements.txt @@ -15,7 +15,7 @@ thop>=0.1.1 # FLOPs computation torch>=1.8.0 # see https://pytorch.org/get-started/locally (recommended) torchvision>=0.9.0 tqdm>=4.64.0 -ultralytics>=8.0.147 +ultralytics>=8.0.232 # protobuf<=3.20.1 # https://github.com/ultralytics/yolov5/issues/8012 # Logging --------------------------------------------------------------------- From 43c43d824486fa72d643400d43c960dc2e35b616 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 1 Jan 2024 18:54:18 +0100 Subject: [PATCH 1834/1976] Delete .github/PULL_REQUEST_TEMPLATE.md (#12564) Signed-off-by: Glenn Jocher --- .github/PULL_REQUEST_TEMPLATE.md | 13 ------------- 1 file changed, 13 deletions(-) delete mode 100644 .github/PULL_REQUEST_TEMPLATE.md diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md deleted file mode 100644 index d96d5afd2836..000000000000 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ /dev/null @@ -1,13 +0,0 @@ - - -copilot:all From 52db52b927d8050f25eeaec162ac715c42a6da9a Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Wed, 3 Jan 2024 09:06:26 +0200 Subject: [PATCH 1835/1976] Add ndjson logging for training (#10970) * Add ndjson 
logging for training This adds support for NDJSON (newline-delimited JSON) metrics logging, for both console (stdout) output and a file (like the current CSV file). NDJSON can be easily grepped from the output and/or parsed with e.g. `jq`. The feature is enabled with the `--ndjson-console` and `--ndjson-file` switches to `train.py`. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- train.py | 21 +++++++++++++++++++-- utils/loggers/__init__.py | 24 +++++++++++++++++++++++- 2 files changed, 42 insertions(+), 3 deletions(-) diff --git a/train.py b/train.py index 4c3bec34835f..ad37f2ca858d 100644 --- a/train.py +++ b/train.py @@ -58,7 +58,7 @@ get_latest_run, increment_path, init_seeds, intersect_dicts, labels_to_class_weights, labels_to_image_weights, methods, one_cycle, print_args, print_mutation, strip_optimizer, yaml_save) -from utils.loggers import Loggers +from utils.loggers import LOGGERS, Loggers from utils.loggers.comet.comet_utils import check_comet_resume from utils.loss import ComputeLoss from utils.metrics import fitness @@ -98,7 +98,20 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio # Loggers data_dict = None if RANK in {-1, 0}: - loggers = Loggers(save_dir, weights, opt, hyp, LOGGER) # loggers instance + include_loggers = list(LOGGERS) + if getattr(opt, 'ndjson_console', False): + include_loggers.append('ndjson_console') + if getattr(opt, 'ndjson_file', False): + include_loggers.append('ndjson_file') + + loggers = Loggers( + save_dir=save_dir, + weights=weights, + opt=opt, + hyp=hyp, + logger=LOGGER, + include=tuple(include_loggers), + ) # Register actions for k in methods(loggers): @@ -482,6 +495,10 @@ def parse_opt(known=False): parser.add_argument('--bbox_interval', type=int, default=-1, help='Set bounding-box image logging interval') parser.add_argument('--artifact_alias', type=str, default='latest', help='Version of dataset artifact to use') + # NDJSON logging + parser.add_argument('--ndjson-console', action='store_true', help='Log ndjson to console') + parser.add_argument('--ndjson-file', action='store_true', help='Log ndjson to file') + return parser.parse_known_args()[0] if known else parser.parse_args() diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index ba7d2790e613..aa6eae4c6f8c 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -2,7 +2,7 @@ """ Logging utils """ - +import json import os import warnings from pathlib import Path @@ -58,6 +58,18 @@ comet_ml = None +def _json_default(value): + """Format `value` for JSON serialization (e.g. unwrap tensors). 
Fall back to strings.""" + if isinstance(value, torch.Tensor): + try: + value = value.item() + except ValueError: # "only one element tensors can be converted to Python scalars" + pass + if isinstance(value, float): + return value + return str(value) + + class Loggers(): # YOLOv5 Loggers class def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, include=LOGGERS): @@ -86,6 +98,8 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, for k in LOGGERS: setattr(self, k, None) # init empty logger dictionary self.csv = True # always log to csv + self.ndjson_console = ('ndjson_console' in self.include) # log ndjson to console + self.ndjson_file = ('ndjson_file' in self.include) # log ndjson to file # Messages if not comet_ml: @@ -228,6 +242,14 @@ def on_fit_epoch_end(self, vals, epoch, best_fitness, fi): s = '' if file.exists() else (('%20s,' * n % tuple(['epoch'] + self.keys)).rstrip(',') + '\n') # add header with open(file, 'a') as f: f.write(s + ('%20.5g,' * n % tuple([epoch] + vals)).rstrip(',') + '\n') + if self.ndjson_console or self.ndjson_file: + json_data = json.dumps(dict(epoch=epoch, **x), default=_json_default) + if self.ndjson_console: + print(json_data) + if self.ndjson_file: + file = self.save_dir / 'results.ndjson' + with open(file, 'a') as f: + print(json_data, file=f) if self.tb: for k, v in x.items(): From bd1a8295fb197082f82fdd41a353ad82c71505ef Mon Sep 17 00:00:00 2001 From: tiancailin Date: Wed, 3 Jan 2024 15:07:19 +0800 Subject: [PATCH 1836/1976] Fix bug in issue #12457. (#12459) * Fix bug in #12457. When run 'python.exe segment/predict.py --visualize' will throw AttributeError: 'tuple' object has no attribute 'shape' * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- utils/plots.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/utils/plots.py b/utils/plots.py index db6f94a6674d..5901ca2dbfaa 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -59,7 +59,9 @@ def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detec n: Maximum number of feature maps to plot save_dir: Directory to save results """ - if 'Detect' not in module_type: + if ('Detect' + not in module_type) and ('Segment' + not in module_type): # 'Detect' for Object Detect task,'Segment' for Segment task batch, channels, height, width = x.shape # batch, channels, height, width if height > 1 and width > 1: f = save_dir / f"stage{stage}_{module_type.split('.')[-1]}_features.png" # filename From 69b0faf2f7d43b32833ec4b46b2faf50b5b7ceed Mon Sep 17 00:00:00 2001 From: Yakuho Date: Wed, 3 Jan 2024 15:09:23 +0800 Subject: [PATCH 1837/1976] Update selectable device Profile (#12353) * Update selectable device Profile * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- classify/predict.py | 2 +- classify/val.py | 2 +- detect.py | 2 +- segment/predict.py | 2 +- segment/val.py | 2 +- utils/general.py | 7 ++++--- val.py | 2 +- 7 files changed, 10 insertions(+), 9 deletions(-) diff --git a/classify/predict.py b/classify/predict.py index 653c374f768f..b056a0cd707b 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -106,7 +106,7 @@ def run( # Run inference model.warmup(imgsz=(1 if pt 
else bs, 3, *imgsz)) # warmup - seen, windows, dt = 0, [], (Profile(), Profile(), Profile()) + seen, windows, dt = 0, [], (Profile(device=device), Profile(device=device), Profile(device=device)) for path, im, im0s, vid_cap, s in dataset: with dt[0]: im = torch.Tensor(im).to(model.device) diff --git a/classify/val.py b/classify/val.py index 4b92e9f105db..6814c4d780e1 100644 --- a/classify/val.py +++ b/classify/val.py @@ -97,7 +97,7 @@ def run( workers=workers) model.eval() - pred, targets, loss, dt = [], [], 0, (Profile(), Profile(), Profile()) + pred, targets, loss, dt = [], [], 0, (Profile(device=device), Profile(device=device), Profile(device=device)) n = len(dataloader) # number of batches action = 'validating' if dataloader.dataset.root.stem == 'val' else 'testing' desc = f'{pbar.desc[:-36]}{action:>36}' if pbar else f'{action}' diff --git a/detect.py b/detect.py index fd9637138dd6..1ea4e0b60dd7 100644 --- a/detect.py +++ b/detect.py @@ -116,7 +116,7 @@ def run( # Run inference model.warmup(imgsz=(1 if pt or model.triton else bs, 3, *imgsz)) # warmup - seen, windows, dt = 0, [], (Profile(), Profile(), Profile()) + seen, windows, dt = 0, [], (Profile(device=device), Profile(device=device), Profile(device=device)) for path, im, im0s, vid_cap, s in dataset: with dt[0]: im = torch.from_numpy(im).to(model.device) diff --git a/segment/predict.py b/segment/predict.py index 113bc472e637..8e3d97dfeb92 100644 --- a/segment/predict.py +++ b/segment/predict.py @@ -117,7 +117,7 @@ def run( # Run inference model.warmup(imgsz=(1 if pt else bs, 3, *imgsz)) # warmup - seen, windows, dt = 0, [], (Profile(), Profile(), Profile()) + seen, windows, dt = 0, [], (Profile(device=device), Profile(device=device), Profile(device=device)) for path, im, im0s, vid_cap, s in dataset: with dt[0]: im = torch.from_numpy(im).to(model.device) diff --git a/segment/val.py b/segment/val.py index dc8081840e37..304d0c751314 100644 --- a/segment/val.py +++ b/segment/val.py @@ -233,7 +233,7 @@ def run( class_map = coco80_to_coco91_class() if is_coco else list(range(1000)) s = ('%22s' + '%11s' * 10) % ('Class', 'Images', 'Instances', 'Box(P', 'R', 'mAP50', 'mAP50-95)', 'Mask(P', 'R', 'mAP50', 'mAP50-95)') - dt = Profile(), Profile(), Profile() + dt = Profile(device=device), Profile(device=device), Profile(device=device) metrics = Metrics() loss = torch.zeros(4, device=device) jdict, stats = [], [] diff --git a/utils/general.py b/utils/general.py index 135141e21436..73925ce5fb95 100644 --- a/utils/general.py +++ b/utils/general.py @@ -182,9 +182,10 @@ def user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'): class Profile(contextlib.ContextDecorator): # YOLOv5 Profile class. 
Usage: @Profile() decorator or 'with Profile():' context manager - def __init__(self, t=0.0): + def __init__(self, t=0.0, device: torch.device = None): self.t = t - self.cuda = torch.cuda.is_available() + self.device = device + self.cuda = True if (device and str(device)[:4] == 'cuda') else False def __enter__(self): self.start = self.time() @@ -196,7 +197,7 @@ def __exit__(self, type, value, traceback): def time(self): if self.cuda: - torch.cuda.synchronize() + torch.cuda.synchronize(self.device) return time.time() diff --git a/val.py b/val.py index b3d05f4305ce..1a4219c38962 100644 --- a/val.py +++ b/val.py @@ -191,7 +191,7 @@ def run( class_map = coco80_to_coco91_class() if is_coco else list(range(1000)) s = ('%22s' + '%11s' * 6) % ('Class', 'Images', 'Instances', 'P', 'R', 'mAP50', 'mAP50-95') tp, fp, p, r, f1, mp, mr, map50, ap50, map = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 - dt = Profile(), Profile(), Profile() # profiling times + dt = Profile(device=device), Profile(device=device), Profile(device=device) # profiling times loss = torch.zeros(3, device=device) jdict, stats, ap, ap_class = [], [], [], [] callbacks.run('on_val_start') From c0b0729386b9c6fca072b507599499570e13177a Mon Sep 17 00:00:00 2001 From: chaoqin <33347064+XevenQC@users.noreply.github.com> Date: Wed, 3 Jan 2024 15:10:10 +0800 Subject: [PATCH 1838/1976] Fix the bug where multi-webcam detection failed with OpenVINO (#11935) * Fix the bug where multi-webcam detection failed with OpenVINO It would fail with the following error when detecting multiple webcams. "Input blob size is not equal network input size (2457600!=1228800)" Signed-off-by: Chao Qin * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Signed-off-by: Chao Qin Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- detect.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/detect.py b/detect.py index 1ea4e0b60dd7..03bc29de999e 100644 --- a/detect.py +++ b/detect.py @@ -124,12 +124,22 @@ def run( im /= 255 # 0 - 255 to 0.0 - 1.0 if len(im.shape) == 3: im = im[None] # expand for batch dim + if model.xml and im.shape[0] > 1: + ims = torch.chunk(im, im.shape[0], 0) # Inference with dt[1]: visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False - pred = model(im, augment=augment, visualize=visualize) - + if model.xml and im.shape[0] > 1: + pred = None + for image in ims: + if pred is None: + pred = model(image, augment=augment, visualize=visualize).unsqueeze(0) + else: + pred = torch.cat((pred, model(image, augment=augment, visualize=visualize).unsqueeze(0)), dim=0) + pred = [pred, None] + else: + pred = model(im, augment=augment, visualize=visualize) # NMS with dt[2]: pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det) From b61143c7e55c6968288e6b034e14051fed871534 Mon Sep 17 00:00:00 2001 From: Nouran Ali Date: Wed, 3 Jan 2024 09:10:41 +0200 Subject: [PATCH 1839/1976] solves #11028: added imagenet small versions 10, 100 and 1000 (#12141) * added imagenet small versions 10, 100 and 1000 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- data/ImageNet10.yaml | 32 + data/ImageNet100.yaml | 120 ++++ data/ImageNet1000.yaml | 1022 ++++++++++++++++++++++++++++++ data/scripts/get_imagenet10.sh | 29 +
data/scripts/get_imagenet100.sh | 29 + data/scripts/get_imagenet1000.sh | 29 + 6 files changed, 1261 insertions(+) create mode 100644 data/ImageNet10.yaml create mode 100644 data/ImageNet100.yaml create mode 100644 data/ImageNet1000.yaml create mode 100644 data/scripts/get_imagenet10.sh create mode 100644 data/scripts/get_imagenet100.sh create mode 100644 data/scripts/get_imagenet1000.sh diff --git a/data/ImageNet10.yaml b/data/ImageNet10.yaml new file mode 100644 index 000000000000..a498d43968ef --- /dev/null +++ b/data/ImageNet10.yaml @@ -0,0 +1,32 @@ +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# ImageNet-1k dataset https://www.image-net.org/index.php by Stanford University +# Simplified class names from https://github.com/anishathalye/imagenet-simple-labels +# Example usage: python classify/train.py --data imagenet +# parent +# ├── yolov5 +# └── datasets +# └── imagenet10 ← downloads here + + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] +path: ../datasets/imagenet10 # dataset root dir +train: train # train images (relative to 'path') 1281167 images +val: val # val images (relative to 'path') 50000 images +test: # test images (optional) + +# Classes +names: + 0: tench + 1: goldfish + 2: great white shark + 3: tiger shark + 4: hammerhead shark + 5: electric ray + 6: stingray + 7: cock + 8: hen + 9: ostrich + + +# Download script/URL (optional) +download: data/scripts/get_imagenet10.sh diff --git a/data/ImageNet100.yaml b/data/ImageNet100.yaml new file mode 100644 index 000000000000..2e6e44ec3e65 --- /dev/null +++ b/data/ImageNet100.yaml @@ -0,0 +1,120 @@ +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# ImageNet-1k dataset https://www.image-net.org/index.php by Stanford University +# Simplified class names from https://github.com/anishathalye/imagenet-simple-labels +# Example usage: python classify/train.py --data imagenet +# parent +# ├── yolov5 +# └── datasets +# └── imagenet100 ← downloads here + + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
+path: ../datasets/imagenet100 # dataset root dir +train: train # train images (relative to 'path') 1281167 images +val: val # val images (relative to 'path') 50000 images +test: # test images (optional) + +# Classes +names: + 0: tench + 1: goldfish + 2: great white shark + 3: tiger shark + 4: hammerhead shark + 5: electric ray + 6: stingray + 7: cock + 8: hen + 9: ostrich + 10: brambling + 11: goldfinch + 12: house finch + 13: junco + 14: indigo bunting + 15: American robin + 16: bulbul + 17: jay + 18: magpie + 19: chickadee + 20: American dipper + 21: kite + 22: bald eagle + 23: vulture + 24: great grey owl + 25: fire salamander + 26: smooth newt + 27: newt + 28: spotted salamander + 29: axolotl + 30: American bullfrog + 31: tree frog + 32: tailed frog + 33: loggerhead sea turtle + 34: leatherback sea turtle + 35: mud turtle + 36: terrapin + 37: box turtle + 38: banded gecko + 39: green iguana + 40: Carolina anole + 41: desert grassland whiptail lizard + 42: agama + 43: frilled-necked lizard + 44: alligator lizard + 45: Gila monster + 46: European green lizard + 47: chameleon + 48: Komodo dragon + 49: Nile crocodile + 50: American alligator + 51: triceratops + 52: worm snake + 53: ring-necked snake + 54: eastern hog-nosed snake + 55: smooth green snake + 56: kingsnake + 57: garter snake + 58: water snake + 59: vine snake + 60: night snake + 61: boa constrictor + 62: African rock python + 63: Indian cobra + 64: green mamba + 65: sea snake + 66: Saharan horned viper + 67: eastern diamondback rattlesnake + 68: sidewinder + 69: trilobite + 70: harvestman + 71: scorpion + 72: yellow garden spider + 73: barn spider + 74: European garden spider + 75: southern black widow + 76: tarantula + 77: wolf spider + 78: tick + 79: centipede + 80: black grouse + 81: ptarmigan + 82: ruffed grouse + 83: prairie grouse + 84: peacock + 85: quail + 86: partridge + 87: grey parrot + 88: macaw + 89: sulphur-crested cockatoo + 90: lorikeet + 91: coucal + 92: bee eater + 93: hornbill + 94: hummingbird + 95: jacamar + 96: toucan + 97: duck + 98: red-breasted merganser + 99: goose +# Download script/URL (optional) +download: data/scripts/get_imagenet100.sh diff --git a/data/ImageNet1000.yaml b/data/ImageNet1000.yaml new file mode 100644 index 000000000000..410bdbcafc83 --- /dev/null +++ b/data/ImageNet1000.yaml @@ -0,0 +1,1022 @@ +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# ImageNet-1k dataset https://www.image-net.org/index.php by Stanford University +# Simplified class names from https://github.com/anishathalye/imagenet-simple-labels +# Example usage: python classify/train.py --data imagenet +# parent +# ├── yolov5 +# └── datasets +# └── imagenet100 ← downloads here + + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
+path: ../datasets/imagenet1000 # dataset root dir +train: train # train images (relative to 'path') 1281167 images +val: val # val images (relative to 'path') 50000 images +test: # test images (optional) + +# Classes +names: + 0: tench + 1: goldfish + 2: great white shark + 3: tiger shark + 4: hammerhead shark + 5: electric ray + 6: stingray + 7: cock + 8: hen + 9: ostrich + 10: brambling + 11: goldfinch + 12: house finch + 13: junco + 14: indigo bunting + 15: American robin + 16: bulbul + 17: jay + 18: magpie + 19: chickadee + 20: American dipper + 21: kite + 22: bald eagle + 23: vulture + 24: great grey owl + 25: fire salamander + 26: smooth newt + 27: newt + 28: spotted salamander + 29: axolotl + 30: American bullfrog + 31: tree frog + 32: tailed frog + 33: loggerhead sea turtle + 34: leatherback sea turtle + 35: mud turtle + 36: terrapin + 37: box turtle + 38: banded gecko + 39: green iguana + 40: Carolina anole + 41: desert grassland whiptail lizard + 42: agama + 43: frilled-necked lizard + 44: alligator lizard + 45: Gila monster + 46: European green lizard + 47: chameleon + 48: Komodo dragon + 49: Nile crocodile + 50: American alligator + 51: triceratops + 52: worm snake + 53: ring-necked snake + 54: eastern hog-nosed snake + 55: smooth green snake + 56: kingsnake + 57: garter snake + 58: water snake + 59: vine snake + 60: night snake + 61: boa constrictor + 62: African rock python + 63: Indian cobra + 64: green mamba + 65: sea snake + 66: Saharan horned viper + 67: eastern diamondback rattlesnake + 68: sidewinder + 69: trilobite + 70: harvestman + 71: scorpion + 72: yellow garden spider + 73: barn spider + 74: European garden spider + 75: southern black widow + 76: tarantula + 77: wolf spider + 78: tick + 79: centipede + 80: black grouse + 81: ptarmigan + 82: ruffed grouse + 83: prairie grouse + 84: peacock + 85: quail + 86: partridge + 87: grey parrot + 88: macaw + 89: sulphur-crested cockatoo + 90: lorikeet + 91: coucal + 92: bee eater + 93: hornbill + 94: hummingbird + 95: jacamar + 96: toucan + 97: duck + 98: red-breasted merganser + 99: goose + 100: black swan + 101: tusker + 102: echidna + 103: platypus + 104: wallaby + 105: koala + 106: wombat + 107: jellyfish + 108: sea anemone + 109: brain coral + 110: flatworm + 111: nematode + 112: conch + 113: snail + 114: slug + 115: sea slug + 116: chiton + 117: chambered nautilus + 118: Dungeness crab + 119: rock crab + 120: fiddler crab + 121: red king crab + 122: American lobster + 123: spiny lobster + 124: crayfish + 125: hermit crab + 126: isopod + 127: white stork + 128: black stork + 129: spoonbill + 130: flamingo + 131: little blue heron + 132: great egret + 133: bittern + 134: crane (bird) + 135: limpkin + 136: common gallinule + 137: American coot + 138: bustard + 139: ruddy turnstone + 140: dunlin + 141: common redshank + 142: dowitcher + 143: oystercatcher + 144: pelican + 145: king penguin + 146: albatross + 147: grey whale + 148: killer whale + 149: dugong + 150: sea lion + 151: Chihuahua + 152: Japanese Chin + 153: Maltese + 154: Pekingese + 155: Shih Tzu + 156: King Charles Spaniel + 157: Papillon + 158: toy terrier + 159: Rhodesian Ridgeback + 160: Afghan Hound + 161: Basset Hound + 162: Beagle + 163: Bloodhound + 164: Bluetick Coonhound + 165: Black and Tan Coonhound + 166: Treeing Walker Coonhound + 167: English foxhound + 168: Redbone Coonhound + 169: borzoi + 170: Irish Wolfhound + 171: Italian Greyhound + 172: Whippet + 173: Ibizan Hound + 174: Norwegian Elkhound + 175: Otterhound + 176: Saluki + 177: Scottish 
Deerhound + 178: Weimaraner + 179: Staffordshire Bull Terrier + 180: American Staffordshire Terrier + 181: Bedlington Terrier + 182: Border Terrier + 183: Kerry Blue Terrier + 184: Irish Terrier + 185: Norfolk Terrier + 186: Norwich Terrier + 187: Yorkshire Terrier + 188: Wire Fox Terrier + 189: Lakeland Terrier + 190: Sealyham Terrier + 191: Airedale Terrier + 192: Cairn Terrier + 193: Australian Terrier + 194: Dandie Dinmont Terrier + 195: Boston Terrier + 196: Miniature Schnauzer + 197: Giant Schnauzer + 198: Standard Schnauzer + 199: Scottish Terrier + 200: Tibetan Terrier + 201: Australian Silky Terrier + 202: Soft-coated Wheaten Terrier + 203: West Highland White Terrier + 204: Lhasa Apso + 205: Flat-Coated Retriever + 206: Curly-coated Retriever + 207: Golden Retriever + 208: Labrador Retriever + 209: Chesapeake Bay Retriever + 210: German Shorthaired Pointer + 211: Vizsla + 212: English Setter + 213: Irish Setter + 214: Gordon Setter + 215: Brittany + 216: Clumber Spaniel + 217: English Springer Spaniel + 218: Welsh Springer Spaniel + 219: Cocker Spaniels + 220: Sussex Spaniel + 221: Irish Water Spaniel + 222: Kuvasz + 223: Schipperke + 224: Groenendael + 225: Malinois + 226: Briard + 227: Australian Kelpie + 228: Komondor + 229: Old English Sheepdog + 230: Shetland Sheepdog + 231: collie + 232: Border Collie + 233: Bouvier des Flandres + 234: Rottweiler + 235: German Shepherd Dog + 236: Dobermann + 237: Miniature Pinscher + 238: Greater Swiss Mountain Dog + 239: Bernese Mountain Dog + 240: Appenzeller Sennenhund + 241: Entlebucher Sennenhund + 242: Boxer + 243: Bullmastiff + 244: Tibetan Mastiff + 245: French Bulldog + 246: Great Dane + 247: St. Bernard + 248: husky + 249: Alaskan Malamute + 250: Siberian Husky + 251: Dalmatian + 252: Affenpinscher + 253: Basenji + 254: pug + 255: Leonberger + 256: Newfoundland + 257: Pyrenean Mountain Dog + 258: Samoyed + 259: Pomeranian + 260: Chow Chow + 261: Keeshond + 262: Griffon Bruxellois + 263: Pembroke Welsh Corgi + 264: Cardigan Welsh Corgi + 265: Toy Poodle + 266: Miniature Poodle + 267: Standard Poodle + 268: Mexican hairless dog + 269: grey wolf + 270: Alaskan tundra wolf + 271: red wolf + 272: coyote + 273: dingo + 274: dhole + 275: African wild dog + 276: hyena + 277: red fox + 278: kit fox + 279: Arctic fox + 280: grey fox + 281: tabby cat + 282: tiger cat + 283: Persian cat + 284: Siamese cat + 285: Egyptian Mau + 286: cougar + 287: lynx + 288: leopard + 289: snow leopard + 290: jaguar + 291: lion + 292: tiger + 293: cheetah + 294: brown bear + 295: American black bear + 296: polar bear + 297: sloth bear + 298: mongoose + 299: meerkat + 300: tiger beetle + 301: ladybug + 302: ground beetle + 303: longhorn beetle + 304: leaf beetle + 305: dung beetle + 306: rhinoceros beetle + 307: weevil + 308: fly + 309: bee + 310: ant + 311: grasshopper + 312: cricket + 313: stick insect + 314: cockroach + 315: mantis + 316: cicada + 317: leafhopper + 318: lacewing + 319: dragonfly + 320: damselfly + 321: red admiral + 322: ringlet + 323: monarch butterfly + 324: small white + 325: sulphur butterfly + 326: gossamer-winged butterfly + 327: starfish + 328: sea urchin + 329: sea cucumber + 330: cottontail rabbit + 331: hare + 332: Angora rabbit + 333: hamster + 334: porcupine + 335: fox squirrel + 336: marmot + 337: beaver + 338: guinea pig + 339: common sorrel + 340: zebra + 341: pig + 342: wild boar + 343: warthog + 344: hippopotamus + 345: ox + 346: water buffalo + 347: bison + 348: ram + 349: bighorn sheep + 350: Alpine ibex + 351: hartebeest 
+ 352: impala + 353: gazelle + 354: dromedary + 355: llama + 356: weasel + 357: mink + 358: European polecat + 359: black-footed ferret + 360: otter + 361: skunk + 362: badger + 363: armadillo + 364: three-toed sloth + 365: orangutan + 366: gorilla + 367: chimpanzee + 368: gibbon + 369: siamang + 370: guenon + 371: patas monkey + 372: baboon + 373: macaque + 374: langur + 375: black-and-white colobus + 376: proboscis monkey + 377: marmoset + 378: white-headed capuchin + 379: howler monkey + 380: titi + 381: Geoffroy's spider monkey + 382: common squirrel monkey + 383: ring-tailed lemur + 384: indri + 385: Asian elephant + 386: African bush elephant + 387: red panda + 388: giant panda + 389: snoek + 390: eel + 391: coho salmon + 392: rock beauty + 393: clownfish + 394: sturgeon + 395: garfish + 396: lionfish + 397: pufferfish + 398: abacus + 399: abaya + 400: academic gown + 401: accordion + 402: acoustic guitar + 403: aircraft carrier + 404: airliner + 405: airship + 406: altar + 407: ambulance + 408: amphibious vehicle + 409: analog clock + 410: apiary + 411: apron + 412: waste container + 413: assault rifle + 414: backpack + 415: bakery + 416: balance beam + 417: balloon + 418: ballpoint pen + 419: Band-Aid + 420: banjo + 421: baluster + 422: barbell + 423: barber chair + 424: barbershop + 425: barn + 426: barometer + 427: barrel + 428: wheelbarrow + 429: baseball + 430: basketball + 431: bassinet + 432: bassoon + 433: swimming cap + 434: bath towel + 435: bathtub + 436: station wagon + 437: lighthouse + 438: beaker + 439: military cap + 440: beer bottle + 441: beer glass + 442: bell-cot + 443: bib + 444: tandem bicycle + 445: bikini + 446: ring binder + 447: binoculars + 448: birdhouse + 449: boathouse + 450: bobsleigh + 451: bolo tie + 452: poke bonnet + 453: bookcase + 454: bookstore + 455: bottle cap + 456: bow + 457: bow tie + 458: brass + 459: bra + 460: breakwater + 461: breastplate + 462: broom + 463: bucket + 464: buckle + 465: bulletproof vest + 466: high-speed train + 467: butcher shop + 468: taxicab + 469: cauldron + 470: candle + 471: cannon + 472: canoe + 473: can opener + 474: cardigan + 475: car mirror + 476: carousel + 477: tool kit + 478: carton + 479: car wheel + 480: automated teller machine + 481: cassette + 482: cassette player + 483: castle + 484: catamaran + 485: CD player + 486: cello + 487: mobile phone + 488: chain + 489: chain-link fence + 490: chain mail + 491: chainsaw + 492: chest + 493: chiffonier + 494: chime + 495: china cabinet + 496: Christmas stocking + 497: church + 498: movie theater + 499: cleaver + 500: cliff dwelling + 501: cloak + 502: clogs + 503: cocktail shaker + 504: coffee mug + 505: coffeemaker + 506: coil + 507: combination lock + 508: computer keyboard + 509: confectionery store + 510: container ship + 511: convertible + 512: corkscrew + 513: cornet + 514: cowboy boot + 515: cowboy hat + 516: cradle + 517: crane (machine) + 518: crash helmet + 519: crate + 520: infant bed + 521: Crock Pot + 522: croquet ball + 523: crutch + 524: cuirass + 525: dam + 526: desk + 527: desktop computer + 528: rotary dial telephone + 529: diaper + 530: digital clock + 531: digital watch + 532: dining table + 533: dishcloth + 534: dishwasher + 535: disc brake + 536: dock + 537: dog sled + 538: dome + 539: doormat + 540: drilling rig + 541: drum + 542: drumstick + 543: dumbbell + 544: Dutch oven + 545: electric fan + 546: electric guitar + 547: electric locomotive + 548: entertainment center + 549: envelope + 550: espresso machine + 551: face powder + 552: 
feather boa + 553: filing cabinet + 554: fireboat + 555: fire engine + 556: fire screen sheet + 557: flagpole + 558: flute + 559: folding chair + 560: football helmet + 561: forklift + 562: fountain + 563: fountain pen + 564: four-poster bed + 565: freight car + 566: French horn + 567: frying pan + 568: fur coat + 569: garbage truck + 570: gas mask + 571: gas pump + 572: goblet + 573: go-kart + 574: golf ball + 575: golf cart + 576: gondola + 577: gong + 578: gown + 579: grand piano + 580: greenhouse + 581: grille + 582: grocery store + 583: guillotine + 584: barrette + 585: hair spray + 586: half-track + 587: hammer + 588: hamper + 589: hair dryer + 590: hand-held computer + 591: handkerchief + 592: hard disk drive + 593: harmonica + 594: harp + 595: harvester + 596: hatchet + 597: holster + 598: home theater + 599: honeycomb + 600: hook + 601: hoop skirt + 602: horizontal bar + 603: horse-drawn vehicle + 604: hourglass + 605: iPod + 606: clothes iron + 607: jack-o'-lantern + 608: jeans + 609: jeep + 610: T-shirt + 611: jigsaw puzzle + 612: pulled rickshaw + 613: joystick + 614: kimono + 615: knee pad + 616: knot + 617: lab coat + 618: ladle + 619: lampshade + 620: laptop computer + 621: lawn mower + 622: lens cap + 623: paper knife + 624: library + 625: lifeboat + 626: lighter + 627: limousine + 628: ocean liner + 629: lipstick + 630: slip-on shoe + 631: lotion + 632: speaker + 633: loupe + 634: sawmill + 635: magnetic compass + 636: mail bag + 637: mailbox + 638: tights + 639: tank suit + 640: manhole cover + 641: maraca + 642: marimba + 643: mask + 644: match + 645: maypole + 646: maze + 647: measuring cup + 648: medicine chest + 649: megalith + 650: microphone + 651: microwave oven + 652: military uniform + 653: milk can + 654: minibus + 655: miniskirt + 656: minivan + 657: missile + 658: mitten + 659: mixing bowl + 660: mobile home + 661: Model T + 662: modem + 663: monastery + 664: monitor + 665: moped + 666: mortar + 667: square academic cap + 668: mosque + 669: mosquito net + 670: scooter + 671: mountain bike + 672: tent + 673: computer mouse + 674: mousetrap + 675: moving van + 676: muzzle + 677: nail + 678: neck brace + 679: necklace + 680: nipple + 681: notebook computer + 682: obelisk + 683: oboe + 684: ocarina + 685: odometer + 686: oil filter + 687: organ + 688: oscilloscope + 689: overskirt + 690: bullock cart + 691: oxygen mask + 692: packet + 693: paddle + 694: paddle wheel + 695: padlock + 696: paintbrush + 697: pajamas + 698: palace + 699: pan flute + 700: paper towel + 701: parachute + 702: parallel bars + 703: park bench + 704: parking meter + 705: passenger car + 706: patio + 707: payphone + 708: pedestal + 709: pencil case + 710: pencil sharpener + 711: perfume + 712: Petri dish + 713: photocopier + 714: plectrum + 715: Pickelhaube + 716: picket fence + 717: pickup truck + 718: pier + 719: piggy bank + 720: pill bottle + 721: pillow + 722: ping-pong ball + 723: pinwheel + 724: pirate ship + 725: pitcher + 726: hand plane + 727: planetarium + 728: plastic bag + 729: plate rack + 730: plow + 731: plunger + 732: Polaroid camera + 733: pole + 734: police van + 735: poncho + 736: billiard table + 737: soda bottle + 738: pot + 739: potter's wheel + 740: power drill + 741: prayer rug + 742: printer + 743: prison + 744: projectile + 745: projector + 746: hockey puck + 747: punching bag + 748: purse + 749: quill + 750: quilt + 751: race car + 752: racket + 753: radiator + 754: radio + 755: radio telescope + 756: rain barrel + 757: recreational vehicle + 758: reel + 759: 
reflex camera + 760: refrigerator + 761: remote control + 762: restaurant + 763: revolver + 764: rifle + 765: rocking chair + 766: rotisserie + 767: eraser + 768: rugby ball + 769: ruler + 770: running shoe + 771: safe + 772: safety pin + 773: salt shaker + 774: sandal + 775: sarong + 776: saxophone + 777: scabbard + 778: weighing scale + 779: school bus + 780: schooner + 781: scoreboard + 782: CRT screen + 783: screw + 784: screwdriver + 785: seat belt + 786: sewing machine + 787: shield + 788: shoe store + 789: shoji + 790: shopping basket + 791: shopping cart + 792: shovel + 793: shower cap + 794: shower curtain + 795: ski + 796: ski mask + 797: sleeping bag + 798: slide rule + 799: sliding door + 800: slot machine + 801: snorkel + 802: snowmobile + 803: snowplow + 804: soap dispenser + 805: soccer ball + 806: sock + 807: solar thermal collector + 808: sombrero + 809: soup bowl + 810: space bar + 811: space heater + 812: space shuttle + 813: spatula + 814: motorboat + 815: spider web + 816: spindle + 817: sports car + 818: spotlight + 819: stage + 820: steam locomotive + 821: through arch bridge + 822: steel drum + 823: stethoscope + 824: scarf + 825: stone wall + 826: stopwatch + 827: stove + 828: strainer + 829: tram + 830: stretcher + 831: couch + 832: stupa + 833: submarine + 834: suit + 835: sundial + 836: sunglass + 837: sunglasses + 838: sunscreen + 839: suspension bridge + 840: mop + 841: sweatshirt + 842: swimsuit + 843: swing + 844: switch + 845: syringe + 846: table lamp + 847: tank + 848: tape player + 849: teapot + 850: teddy bear + 851: television + 852: tennis ball + 853: thatched roof + 854: front curtain + 855: thimble + 856: threshing machine + 857: throne + 858: tile roof + 859: toaster + 860: tobacco shop + 861: toilet seat + 862: torch + 863: totem pole + 864: tow truck + 865: toy store + 866: tractor + 867: semi-trailer truck + 868: tray + 869: trench coat + 870: tricycle + 871: trimaran + 872: tripod + 873: triumphal arch + 874: trolleybus + 875: trombone + 876: tub + 877: turnstile + 878: typewriter keyboard + 879: umbrella + 880: unicycle + 881: upright piano + 882: vacuum cleaner + 883: vase + 884: vault + 885: velvet + 886: vending machine + 887: vestment + 888: viaduct + 889: violin + 890: volleyball + 891: waffle iron + 892: wall clock + 893: wallet + 894: wardrobe + 895: military aircraft + 896: sink + 897: washing machine + 898: water bottle + 899: water jug + 900: water tower + 901: whiskey jug + 902: whistle + 903: wig + 904: window screen + 905: window shade + 906: Windsor tie + 907: wine bottle + 908: wing + 909: wok + 910: wooden spoon + 911: wool + 912: split-rail fence + 913: shipwreck + 914: yawl + 915: yurt + 916: website + 917: comic book + 918: crossword + 919: traffic sign + 920: traffic light + 921: dust jacket + 922: menu + 923: plate + 924: guacamole + 925: consomme + 926: hot pot + 927: trifle + 928: ice cream + 929: ice pop + 930: baguette + 931: bagel + 932: pretzel + 933: cheeseburger + 934: hot dog + 935: mashed potato + 936: cabbage + 937: broccoli + 938: cauliflower + 939: zucchini + 940: spaghetti squash + 941: acorn squash + 942: butternut squash + 943: cucumber + 944: artichoke + 945: bell pepper + 946: cardoon + 947: mushroom + 948: Granny Smith + 949: strawberry + 950: orange + 951: lemon + 952: fig + 953: pineapple + 954: banana + 955: jackfruit + 956: custard apple + 957: pomegranate + 958: hay + 959: carbonara + 960: chocolate syrup + 961: dough + 962: meatloaf + 963: pizza + 964: pot pie + 965: burrito + 966: red wine + 967: 
espresso + 968: cup + 969: eggnog + 970: alp + 971: bubble + 972: cliff + 973: coral reef + 974: geyser + 975: lakeshore + 976: promontory + 977: shoal + 978: seashore + 979: valley + 980: volcano + 981: baseball player + 982: bridegroom + 983: scuba diver + 984: rapeseed + 985: daisy + 986: yellow lady's slipper + 987: corn + 988: acorn + 989: rose hip + 990: horse chestnut seed + 991: coral fungus + 992: agaric + 993: gyromitra + 994: stinkhorn mushroom + 995: earth star + 996: hen-of-the-woods + 997: bolete + 998: ear + 999: toilet paper + + +# Download script/URL (optional) +download: data/scripts/get_imagenet1000.sh diff --git a/data/scripts/get_imagenet10.sh b/data/scripts/get_imagenet10.sh new file mode 100644 index 000000000000..71e17c5d3fb7 --- /dev/null +++ b/data/scripts/get_imagenet10.sh @@ -0,0 +1,29 @@ +#!/bin/bash +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Download ILSVRC2012 ImageNet dataset https://image-net.org +# Example usage: bash data/scripts/get_imagenet.sh +# parent +# ├── yolov5 +# └── datasets +# └── imagenet ← downloads here + +# Arguments (optional) Usage: bash data/scripts/get_imagenet.sh --train --val +if [ "$#" -gt 0 ]; then + for opt in "$@"; do + case "${opt}" in + --train) train=true ;; + --val) val=true ;; + esac + done +else + train=true + val=true +fi + +# Make dir +d='../datasets/imagenet10' # unzip directory +mkdir -p $d && cd $d + +# Download/unzip train +wget https://github.com/ultralytics/yolov5/releases/download/v1.0/imagenet10.zip +unzip imagenet10.zip && rm imagenet10.zip diff --git a/data/scripts/get_imagenet100.sh b/data/scripts/get_imagenet100.sh new file mode 100644 index 000000000000..c57106b485db --- /dev/null +++ b/data/scripts/get_imagenet100.sh @@ -0,0 +1,29 @@ +#!/bin/bash +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Download ILSVRC2012 ImageNet dataset https://image-net.org +# Example usage: bash data/scripts/get_imagenet.sh +# parent +# ├── yolov5 +# └── datasets +# └── imagenet ← downloads here + +# Arguments (optional) Usage: bash data/scripts/get_imagenet.sh --train --val +if [ "$#" -gt 0 ]; then + for opt in "$@"; do + case "${opt}" in + --train) train=true ;; + --val) val=true ;; + esac + done +else + train=true + val=true +fi + +# Make dir +d='../datasets/imagenet100' # unzip directory +mkdir -p $d && cd $d + +# Download/unzip train +wget https://github.com/ultralytics/yolov5/releases/download/v1.0/imagenet100.zip +unzip imagenet100.zip && rm imagenet100.zip diff --git a/data/scripts/get_imagenet1000.sh b/data/scripts/get_imagenet1000.sh new file mode 100644 index 000000000000..451dd0f6b585 --- /dev/null +++ b/data/scripts/get_imagenet1000.sh @@ -0,0 +1,29 @@ +#!/bin/bash +# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Download ILSVRC2012 ImageNet dataset https://image-net.org +# Example usage: bash data/scripts/get_imagenet.sh +# parent +# ├── yolov5 +# └── datasets +# └── imagenet ← downloads here + +# Arguments (optional) Usage: bash data/scripts/get_imagenet.sh --train --val +if [ "$#" -gt 0 ]; then + for opt in "$@"; do + case "${opt}" in + --train) train=true ;; + --val) val=true ;; + esac + done +else + train=true + val=true +fi + +# Make dir +d='../datasets/imagenet1000' # unzip directory +mkdir -p $d && cd $d + +# Download/unzip train +wget https://github.com/ultralytics/yolov5/releases/download/v1.0/imagenet1000.zip +unzip imagenet1000.zip && rm imagenet1000.zip From 66edf38933208d157700a9ebd65835e97504d34c Mon Sep 17 00:00:00 2001 From: Shayan Mousavinia <45814390+ShAmoNiA@users.noreply.github.com> Date: Wed, 
3 Jan 2024 08:12:46 +0100 Subject: [PATCH 1840/1976] improving evolve (#11348) * improving evolve in train.py Signed-off-by: Shayan Mousavinia <45814390+ShAmoNiA@users.noreply.github.com> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix gen_ranges value in mutation part. Signed-off-by: Shayan Mousavinia <45814390+ShAmoNiA@users.noreply.github.com> * fix invalid syntax in line 532 remove one tab from "else" Signed-off-by: Shayan Mousavinia <45814390+ShAmoNiA@users.noreply.github.com> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update train.py Signed-off-by: Shayan Mousavinia <45814390+ShAmoNiA@users.noreply.github.com> * Update train.py Signed-off-by: Shayan Mousavinia <45814390+ShAmoNiA@users.noreply.github.com> * fix range index Signed-off-by: Shayan Mousavinia <45814390+ShAmoNiA@users.noreply.github.com> * Update train.py fix population size, add crossover min and max rate Signed-off-by: Shayan Mousavinia <45814390+ShAmoNiA@users.noreply.github.com> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update comments Signed-off-by: Shayan Mousavinia <45814390+ShAmoNiA@users.noreply.github.com> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * save population for last generation All hyperparameters are now stored in the population section of "evolve_population.yaml", located in "yolov5\data\hyps", when moving to the next generation, so a previously abandoned evolution run can be continued from its former population. Additionally, a new argument, "--evolve_population", allows a manually relocated "evolve_population.yaml" to be loaded from any project directory for the same purpose, making it easier to resume an evolution run. Signed-off-by: Shayan Mousavinia <45814390+ShAmoNiA@users.noreply.github.com> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update train.py Signed-off-by: Shayan Mousavinia <45814390+ShAmoNiA@users.noreply.github.com> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * remove try - except Signed-off-by: Shayan Mousavinia <45814390+ShAmoNiA@users.noreply.github.com> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update train.py Add resume_evolve arg to **resume evolve from the last generation**. The population will load from data/hyps by default, loading all YAML files from it.
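The GA configured in the diff below (pop_size, mutation and crossover rate ranges, elite and tournament sizes) follows the standard tournament-selection-plus-bounded-mutation pattern; a minimal sketch with illustrative helper names, not the patch's actual functions:

    import random

    def tournament_select(population, fitnesses, k):
        # pick k random individuals and return the fittest of them
        idxs = random.sample(range(len(population)), k)
        return population[max(idxs, key=lambda i: fitnesses[i])]

    def mutate(individual, bounds, rate):
        # perturb each gene with probability `rate`, clipped to its (lower, upper) bounds
        child = list(individual)
        for i, (lo, hi) in enumerate(bounds):
            if random.random() < rate:
                child[i] = min(max(child[i] + random.gauss(0, 0.1 * (hi - lo)), lo), hi)
        return child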
Signed-off-by: Shayan Mousavinia <45814390+ShAmoNiA@users.noreply.github.com>

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* Update train.py

Signed-off-by: Shayan Mousavinia <45814390+ShAmoNiA@users.noreply.github.com>

* Update train.py

Signed-off-by: Shayan Mousavinia <45814390+ShAmoNiA@users.noreply.github.com>

* Update train.py

Signed-off-by: Shayan Mousavinia <45814390+ShAmoNiA@users.noreply.github.com>

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* Update train.py

Signed-off-by: Shayan Mousavinia <45814390+ShAmoNiA@users.noreply.github.com>

* Update train.py

Signed-off-by: Shayan Mousavinia <45814390+ShAmoNiA@users.noreply.github.com>

* Update README.zh-CN.md

Signed-off-by: Shayan Mousavinia <45814390+ShAmoNiA@users.noreply.github.com>

* Update train.py

update pop_size

Signed-off-by: Shayan Mousavinia <45814390+ShAmoNiA@users.noreply.github.com>

---------

Signed-off-by: Shayan Mousavinia <45814390+ShAmoNiA@users.noreply.github.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 train.py | 246 +++++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 176 insertions(+), 70 deletions(-)

diff --git a/train.py b/train.py
index ad37f2ca858d..378ff4bd30ea 100644
--- a/train.py
+++ b/train.py
@@ -468,6 +468,11 @@ def parse_opt(known=False):
     parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor')
     parser.add_argument('--noplots', action='store_true', help='save no plot files')
     parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations')
+    parser.add_argument('--evolve_population',
+                        type=str,
+                        default=ROOT / 'data/hyps',
+                        help='location for loading population')
+    parser.add_argument('--resume_evolve', type=str, default=None, help='resume evolve from last generation')
     parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
     parser.add_argument('--cache', type=str, nargs='?', const='ram', help='image --cache ram/disk')
     parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training')
@@ -555,37 +560,48 @@ def main(opt, callbacks=Callbacks()):

     # Evolve hyperparameters (optional)
     else:
-        # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit)
+        # Hyperparameter evolution metadata (evolve this hyperparameter: True/False, lower_limit, upper_limit)
         meta = {
-            'lr0': (1, 1e-5, 1e-1),  # initial learning rate (SGD=1E-2, Adam=1E-3)
-            'lrf': (1, 0.01, 1.0),  # final OneCycleLR learning rate (lr0 * lrf)
-            'momentum': (0.3, 0.6, 0.98),  # SGD momentum/Adam beta1
-            'weight_decay': (1, 0.0, 0.001),  # optimizer weight decay
-            'warmup_epochs': (1, 0.0, 5.0),  # warmup epochs (fractions ok)
-            'warmup_momentum': (1, 0.0, 0.95),  # warmup initial momentum
-            'warmup_bias_lr': (1, 0.0, 0.2),  # warmup initial bias lr
-            'box': (1, 0.02, 0.2),  # box loss gain
-            'cls': (1, 0.2, 4.0),  # cls loss gain
-            'cls_pw': (1, 0.5, 2.0),  # cls BCELoss positive_weight
-            'obj': (1, 0.2, 4.0),  # obj loss gain (scale with pixels)
-            'obj_pw': (1, 0.5, 2.0),  # obj BCELoss positive_weight
-            'iou_t': (0, 0.1, 0.7),  # IoU training threshold
-            'anchor_t': (1, 2.0, 8.0),  # anchor-multiple threshold
-            'anchors': (2, 2.0, 10.0),  # anchors per output grid (0 to ignore)
-            'fl_gamma': (0, 0.0, 2.0),  # focal loss gamma (efficientDet default gamma=1.5)
-            'hsv_h': (1, 0.0, 0.1),  # image HSV-Hue augmentation
(fraction)
-            'hsv_s': (1, 0.0, 0.9),  # image HSV-Saturation augmentation (fraction)
-            'hsv_v': (1, 0.0, 0.9),  # image HSV-Value augmentation (fraction)
-            'degrees': (1, 0.0, 45.0),  # image rotation (+/- deg)
-            'translate': (1, 0.0, 0.9),  # image translation (+/- fraction)
-            'scale': (1, 0.0, 0.9),  # image scale (+/- gain)
-            'shear': (1, 0.0, 10.0),  # image shear (+/- deg)
-            'perspective': (0, 0.0, 0.001),  # image perspective (+/- fraction), range 0-0.001
-            'flipud': (1, 0.0, 1.0),  # image flip up-down (probability)
-            'fliplr': (0, 0.0, 1.0),  # image flip left-right (probability)
-            'mosaic': (1, 0.0, 1.0),  # image mixup (probability)
-            'mixup': (1, 0.0, 1.0),  # image mixup (probability)
-            'copy_paste': (1, 0.0, 1.0)}  # segment copy-paste (probability)
+            'lr0': (False, 1e-5, 1e-1),  # initial learning rate (SGD=1E-2, Adam=1E-3)
+            'lrf': (False, 0.01, 1.0),  # final OneCycleLR learning rate (lr0 * lrf)
+            'momentum': (False, 0.6, 0.98),  # SGD momentum/Adam beta1
+            'weight_decay': (False, 0.0, 0.001),  # optimizer weight decay
+            'warmup_epochs': (False, 0.0, 5.0),  # warmup epochs (fractions ok)
+            'warmup_momentum': (False, 0.0, 0.95),  # warmup initial momentum
+            'warmup_bias_lr': (False, 0.0, 0.2),  # warmup initial bias lr
+            'box': (False, 0.02, 0.2),  # box loss gain
+            'cls': (False, 0.2, 4.0),  # cls loss gain
+            'cls_pw': (False, 0.5, 2.0),  # cls BCELoss positive_weight
+            'obj': (False, 0.2, 4.0),  # obj loss gain (scale with pixels)
+            'obj_pw': (False, 0.5, 2.0),  # obj BCELoss positive_weight
+            'iou_t': (False, 0.1, 0.7),  # IoU training threshold
+            'anchor_t': (False, 2.0, 8.0),  # anchor-multiple threshold
+            'anchors': (False, 2.0, 10.0),  # anchors per output grid (0 to ignore)
+            'fl_gamma': (False, 0.0, 2.0),  # focal loss gamma (efficientDet default gamma=1.5)
+            'hsv_h': (True, 0.0, 0.1),  # image HSV-Hue augmentation (fraction)
+            'hsv_s': (True, 0.0, 0.9),  # image HSV-Saturation augmentation (fraction)
+            'hsv_v': (True, 0.0, 0.9),  # image HSV-Value augmentation (fraction)
+            'degrees': (True, 0.0, 45.0),  # image rotation (+/- deg)
+            'translate': (True, 0.0, 0.9),  # image translation (+/- fraction)
+            'scale': (True, 0.0, 0.9),  # image scale (+/- gain)
+            'shear': (True, 0.0, 10.0),  # image shear (+/- deg)
+            'perspective': (True, 0.0, 0.001),  # image perspective (+/- fraction), range 0-0.001
+            'flipud': (True, 0.0, 1.0),  # image flip up-down (probability)
+            'fliplr': (True, 0.0, 1.0),  # image flip left-right (probability)
+            'mosaic': (True, 0.0, 1.0),  # image mosaic (probability)
+            'mixup': (True, 0.0, 1.0),  # image mixup (probability)
+            'copy_paste': (True, 0.0, 1.0)}  # segment copy-paste (probability)
+
+        # GA configs
+        pop_size = 50
+        mutation_rate_min = 0.01
+        mutation_rate_max = 0.5
+        crossover_rate_min = 0.5
+        crossover_rate_max = 1
+        min_elite_size = 2
+        max_elite_size = 5
+        tournament_size_min = 2
+        tournament_size_max = 10

         with open(opt.hyp, errors='ignore') as f:
             hyp = yaml.safe_load(f)  # load hyps dict
@@ -604,46 +620,128 @@ def main(opt, callbacks=Callbacks()):
                 f'gs://{opt.bucket}/evolve.csv',
                 str(evolve_csv), ])

-        for _ in range(opt.evolve):  # generations to evolve
-            if evolve_csv.exists():  # if evolve.csv exists: select best hyps and mutate
-                # Select parent(s)
-                parent = 'single'  # parent selection method: 'single' or 'weighted'
-                x = np.loadtxt(evolve_csv, ndmin=2, delimiter=',', skiprows=1)
-                n = min(5, len(x))  # number of previous results to consider
-                x = x[np.argsort(-fitness(x))][:n]  # top n mutations
-                w = fitness(x) - fitness(x).min() + 1E-6  # weights (sum > 0)
-                if parent ==
'single' or len(x) == 1: - # x = x[random.randint(0, n - 1)] # random selection - x = x[random.choices(range(n), weights=w)[0]] # weighted selection - elif parent == 'weighted': - x = (x * w.reshape(n, 1)).sum(0) / w.sum() # weighted combination - - # Mutate - mp, s = 0.8, 0.2 # mutation probability, sigma - npr = np.random - npr.seed(int(time.time())) - g = np.array([meta[k][0] for k in hyp.keys()]) # gains 0-1 - ng = len(meta) - v = np.ones(ng) - while all(v == 1): # mutate until a change occurs (prevent duplicates) - v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0) - for i, k in enumerate(hyp.keys()): # plt.hist(v.ravel(), 300) - hyp[k] = float(x[i + 7] * v[i]) # mutate - - # Constrain to limits - for k, v in meta.items(): - hyp[k] = max(hyp[k], v[1]) # lower limit - hyp[k] = min(hyp[k], v[2]) # upper limit - hyp[k] = round(hyp[k], 5) # significant digits - - # Train mutation - results = train(hyp.copy(), opt, device, callbacks) - callbacks = Callbacks() - # Write mutation results - keys = ('metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', 'val/box_loss', - 'val/obj_loss', 'val/cls_loss') - print_mutation(keys, results, hyp.copy(), save_dir, opt.bucket) - + # Delete the items in meta dictionary whose first value is False + del_ = [] + for item in meta.keys(): + if meta[item][0] is False: + del_.append(item) + hyp_GA = hyp.copy() # Make a copy of hyp dictionary + for item in del_: + del meta[item] # Remove the item from meta dictionary + del hyp_GA[item] # Remove the item from hyp_GA dictionary + + # Set lower_limit and upper_limit arrays to hold the search space boundaries + lower_limit = np.array([meta[k][1] for k in hyp_GA.keys()]) + upper_limit = np.array([meta[k][2] for k in hyp_GA.keys()]) + + # Create gene_ranges list to hold the range of values for each gene in the population + gene_ranges = [] + for i in range(len(upper_limit)): + gene_ranges.append((lower_limit[i], upper_limit[i])) + + # Initialize the population with initial_values or random values + initial_values = [] + + # If resuming evolution from a previous checkpoint + if opt.resume_evolve is not None: + assert os.path.isfile(ROOT / opt.resume_evolve), 'evolve population path is wrong!' 
+            with open(ROOT / opt.resume_evolve, errors='ignore') as f:
+                evolve_population = yaml.safe_load(f)
+                for value in evolve_population.values():
+                    value = np.array([value[k] for k in hyp_GA.keys()])
+                    initial_values.append(list(value))
+
+        # If not resuming from a previous checkpoint, generate initial values from .yaml files in opt.evolve_population
+        else:
+            yaml_files = [f for f in os.listdir(opt.evolve_population) if f.endswith('.yaml')]
+            for file_name in yaml_files:
+                with open(os.path.join(opt.evolve_population, file_name)) as yaml_file:
+                    value = yaml.safe_load(yaml_file)
+                    value = np.array([value[k] for k in hyp_GA.keys()])
+                    initial_values.append(list(value))
+
+        # Generate random values within the search space for the rest of the population
+        if not initial_values:
+            population = [generate_individual(gene_ranges, len(hyp_GA)) for i in range(pop_size)]
+        else:
+            if (pop_size > 1):
+                population = [
+                    generate_individual(gene_ranges, len(hyp_GA)) for i in range(pop_size - len(initial_values))]
+                for initial_value in initial_values:
+                    population = [initial_value] + population
+
+        # Run the genetic algorithm for a fixed number of generations
+        list_keys = list(hyp_GA.keys())
+        for generation in range(opt.evolve):
+            if (generation >= 1):
+                save_dict = {}
+                for i in range(len(population)):
+                    little_dict = {}
+                    for j in range(len(population[i])):
+                        little_dict[list_keys[j]] = float(population[i][j])
+                    save_dict['gen' + str(generation) + 'number' + str(i)] = little_dict
+
+                with open(save_dir / 'evolve_population.yaml', 'w') as outfile:
+                    yaml.dump(save_dict, outfile, default_flow_style=False)
+
+            # Adaptive elite size
+            elite_size = min_elite_size + int((max_elite_size - min_elite_size) * (generation / opt.evolve))
+            # Evaluate the fitness of each individual in the population
+            fitness_scores = []
+            for individual in population:
+                for key, value in zip(hyp_GA.keys(), individual):
+                    hyp_GA[key] = value
+                hyp.update(hyp_GA)
+                results = train(hyp.copy(), opt, device, callbacks)
+                callbacks = Callbacks()
+                # Write mutation results
+                keys = ('metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95',
+                        'val/box_loss', 'val/obj_loss', 'val/cls_loss')
+                print_mutation(keys, results, hyp.copy(), save_dir, opt.bucket)
+                fitness_scores.append(results[2])
+
+            # Select the fittest individuals for reproduction using adaptive tournament selection
+            selected_indices = []
+            for i in range(pop_size - elite_size):
+                # Adaptive tournament size
+                tournament_size = max(max(2, tournament_size_min),
+                                      int(min(tournament_size_max, pop_size) - (generation / (opt.evolve / 10))))
+                # Perform tournament selection to choose the best individual
+                tournament_indices = random.sample(range(pop_size), tournament_size)
+                tournament_fitness = [fitness_scores[j] for j in tournament_indices]
+                winner_index = tournament_indices[tournament_fitness.index(max(tournament_fitness))]
+                selected_indices.append(winner_index)
+
+            # Add the elite individuals to the selected indices
+            elite_indices = [i for i in range(pop_size) if fitness_scores[i] in sorted(fitness_scores)[-elite_size:]]
+            selected_indices.extend(elite_indices)
+            # Create the next generation through crossover and mutation
+            next_generation = []
+            for i in range(pop_size):
+                parent1_index = selected_indices[random.randint(0, pop_size - 1)]
+                parent2_index = selected_indices[random.randint(0, pop_size - 1)]
+                # Adaptive crossover rate
+                crossover_rate = max(crossover_rate_min,
+                                     min(crossover_rate_max, crossover_rate_max - (generation /
opt.evolve)))
+                if random.uniform(0, 1) < crossover_rate:
+                    crossover_point = random.randint(1, len(hyp_GA) - 1)
+                    child = population[parent1_index][:crossover_point] + population[parent2_index][crossover_point:]
+                else:
+                    child = population[parent1_index].copy()
+                # Adaptive mutation rate
+                mutation_rate = max(mutation_rate_min,
+                                    min(mutation_rate_max, mutation_rate_max - (generation / opt.evolve)))
+                for j in range(len(hyp_GA)):
+                    if random.uniform(0, 1) < mutation_rate:
+                        child[j] += random.uniform(-0.1, 0.1)
+                        child[j] = min(max(child[j], gene_ranges[j][0]), gene_ranges[j][1])
+                next_generation.append(child)
+            # Replace the old population with the new generation
+            population = next_generation
+        # Print the best solution found
+        best_index = fitness_scores.index(max(fitness_scores))
+        best_individual = population[best_index]
+        print('Best solution found:', best_individual)
         # Plot results
         plot_evolve(evolve_csv)
         LOGGER.info(f'Hyperparameter evolution finished {opt.evolve} generations\n'
@@ -651,6 +749,14 @@ def main(opt, callbacks=Callbacks()):
             f'Usage example: $ python train.py --hyp {evolve_yaml}')


+def generate_individual(input_ranges, individual_length):
+    individual = []
+    for i in range(individual_length):
+        lower_bound, upper_bound = input_ranges[i]
+        individual.append(random.uniform(lower_bound, upper_bound))
+    return individual
+
+
 def run(**kwargs):
     # Usage: import train; train.run(data='coco128.yaml', imgsz=320, weights='yolov5m.pt')
     opt = parse_opt(True)

From 151c953d5e5a744ed90e1ce59bda4cd7ad7f38b0 Mon Sep 17 00:00:00 2001
From: Victor Sonck
Date: Wed, 3 Jan 2024 08:14:39 +0100
Subject: [PATCH 1841/1976] Add segmentation and classification support for
 ClearML (#10752)

* Added ClearML instance segmentation and classification support

* Cleaned up ClearML plot output

* typos

* Log results as plots instead of debug samples

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

---------

Signed-off-by: Glenn Jocher
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Glenn Jocher
---
 utils/loggers/__init__.py              | 49 +++++++++++++++----
 utils/loggers/clearml/clearml_utils.py | 65 ++++++++++++++++++++++++--
 2 files changed, 101 insertions(+), 13 deletions(-)

diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py
index aa6eae4c6f8c..381d477d127c 100644
--- a/utils/loggers/__init__.py
+++ b/utils/loggers/__init__.py
@@ -173,10 +173,11 @@ def on_pretrain_routine_end(self, labels, names):
             paths = self.save_dir.glob('*labels*.jpg')  # training labels
             if self.wandb:
                 self.wandb.log({'Labels': [wandb.Image(str(x), caption=x.name) for x in paths]})
-        # if self.clearml:
-        #    pass  # ClearML saves these images automatically using hooks
         if self.comet_logger:
             self.comet_logger.on_pretrain_routine_end(paths)
+        if self.clearml:
+            for path in paths:
+                self.clearml.log_plot(title=path.stem, plot_path=path)

     def on_train_batch_end(self, model, ni, imgs, targets, paths, vals):
         log_dict = dict(zip(self.keys[:3], vals))
@@ -255,9 +256,7 @@ def on_fit_epoch_end(self, vals, epoch, best_fitness, fi):
                 for k, v in x.items():
                     self.tb.add_scalar(k, v, epoch)
             elif self.clearml:  # log to ClearML if TensorBoard not used
-                for k, v in x.items():
-                    title, series = k.split('/')
-                    self.clearml.task.get_logger().report_scalar(title, series, v, epoch)
+                self.clearml.log_scalars(x, epoch)

         if self.wandb:
             if best_fitness == fi:
@@ -311,9 +310,10 @@ def on_train_end(self, last, best, epoch, results):
self.wandb.finish_run() if self.clearml and not self.opt.evolve: - self.clearml.task.update_output_model(model_path=str(best if best.exists() else last), - name='Best Model', - auto_delete_file=False) + self.clearml.log_summary(dict(zip(self.keys[3:10], results))) + [self.clearml.log_plot(title=f.stem, plot_path=f) for f in files] + self.clearml.log_model(str(best if best.exists() else last), + "Best Model" if best.exists() else "Last Model", epoch) if self.comet_logger: final_results = dict(zip(self.keys[3:10], results)) @@ -325,6 +325,8 @@ def on_params_update(self, params: dict): self.wandb.wandb_run.config.update(params, allow_val_change=True) if self.comet_logger: self.comet_logger.on_params_update(params) + if self.clearml: + self.clearml.task.connect(params) class GenericLogger: @@ -337,7 +339,7 @@ class GenericLogger: include: loggers to include """ - def __init__(self, opt, console_logger, include=('tb', 'wandb')): + def __init__(self, opt, console_logger, include=('tb', 'wandb', 'clearml')): # init default loggers self.save_dir = Path(opt.save_dir) self.include = include @@ -356,6 +358,22 @@ def __init__(self, opt, console_logger, include=('tb', 'wandb')): else: self.wandb = None + if clearml and 'clearml' in self.include: + try: + # Hyp is not available in classification mode + if 'hyp' not in opt: + hyp = {} + else: + hyp = opt.hyp + self.clearml = ClearmlLogger(opt, hyp) + except Exception: + self.clearml = None + prefix = colorstr('ClearML: ') + LOGGER.warning(f'{prefix}WARNING ⚠️ ClearML is installed but not configured, skipping ClearML logging.' + f' See https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml#readme') + else: + self.clearml = None + def log_metrics(self, metrics, epoch): # Log metrics dictionary to all loggers if self.csv: @@ -372,6 +390,9 @@ def log_metrics(self, metrics, epoch): if self.wandb: self.wandb.log(metrics, step=epoch) + if self.clearml: + self.clearml.log_scalars(metrics, epoch) + def log_images(self, files, name='Images', epoch=0): # Log images to all loggers files = [Path(f) for f in (files if isinstance(files, (tuple, list)) else [files])] # to Path @@ -384,6 +405,12 @@ def log_images(self, files, name='Images', epoch=0): if self.wandb: self.wandb.log({name: [wandb.Image(str(f), caption=f.name) for f in files]}, step=epoch) + if self.clearml: + if name == 'Results': + [self.clearml.log_plot(f.stem, f) for f in files] + else: + self.clearml.log_debug_samples(files, title=name) + def log_graph(self, model, imgsz=(640, 640)): # Log model graph to all loggers if self.tb: @@ -395,11 +422,15 @@ def log_model(self, model_path, epoch=0, metadata={}): art = wandb.Artifact(name=f'run_{wandb.run.id}_model', type='model', metadata=metadata) art.add_file(str(model_path)) wandb.log_artifact(art) + if self.clearml: + self.clearml.log_model(model_path=model_path, model_name=model_path.stem) def update_params(self, params): # Update the parameters logged if self.wandb: wandb.run.config.update(params, allow_val_change=True) + if self.clearml: + self.clearml.task.connect(params) def log_tensorboard_graph(tb, model, imgsz=(640, 640)): diff --git a/utils/loggers/clearml/clearml_utils.py b/utils/loggers/clearml/clearml_utils.py index 4e999bfee5db..c7627a261186 100644 --- a/utils/loggers/clearml/clearml_utils.py +++ b/utils/loggers/clearml/clearml_utils.py @@ -3,6 +3,8 @@ import re from pathlib import Path +import matplotlib.image as mpimg +import matplotlib.pyplot as plt import numpy as np import yaml from ultralytics.utils.plotting import 
Annotator, colors @@ -78,18 +80,22 @@ def __init__(self, opt, hyp): # Maximum number of images to log to clearML per epoch self.max_imgs_to_log_per_epoch = 16 # Get the interval of epochs when bounding box images should be logged - self.bbox_interval = opt.bbox_interval + # Only for detection task though! + if 'bbox_interval' in opt: + self.bbox_interval = opt.bbox_interval self.clearml = clearml self.task = None self.data_dict = None if self.clearml: self.task = Task.init( - project_name=opt.project if opt.project != 'runs/train' else 'YOLOv5', + project_name=opt.project if not str(opt.project).startswith('runs/') else 'YOLOv5', task_name=opt.name if opt.name != 'exp' else 'Training', tags=['YOLOv5'], output_uri=True, reuse_last_task_id=opt.exist_ok, - auto_connect_frameworks={'pytorch': False} + auto_connect_frameworks={ + 'pytorch': False, + 'matplotlib': False} # We disconnect pytorch auto-detection, because we added manual model save points in the code ) # ClearML's hooks will already grab all general parameters @@ -112,6 +118,57 @@ def __init__(self, opt, hyp): # to give it to them opt.data = self.data_dict + def log_scalars(self, metrics, epoch): + """ + Log scalars/metrics to ClearML + + arguments: + metrics (dict) Metrics in dict format: {"metrics/mAP": 0.8, ...} + epoch (int) iteration number for the current set of metrics + """ + for k, v in metrics.items(): + title, series = k.split('/') + self.task.get_logger().report_scalar(title, series, v, epoch) + + def log_model(self, model_path, model_name, epoch=0): + """ + Log model weights to ClearML + + arguments: + model_path (PosixPath or str) Path to the model weights + model_name (str) Name of the model visible in ClearML + epoch (int) Iteration / epoch of the model weights + """ + self.task.update_output_model(model_path=str(model_path), + name=model_name, + iteration=epoch, + auto_delete_file=False) + + def log_summary(self, metrics): + """ + Log final metrics to a summary table + + arguments: + metrics (dict) Metrics in dict format: {"metrics/mAP": 0.8, ...} + """ + for k, v in metrics.items(): + self.task.get_logger().report_single_value(k, v) + + def log_plot(self, title, plot_path): + """ + Log image as plot in the plot section of ClearML + + arguments: + title (str) Title of the plot + plot_path (PosixPath or str) Path to the saved image file + """ + img = mpimg.imread(plot_path) + fig = plt.figure() + ax = fig.add_axes([0, 0, 1, 1], frameon=False, aspect='auto', xticks=[], yticks=[]) # no ticks + ax.imshow(img) + + self.task.get_logger().report_matplotlib_figure(title, "", figure=fig, report_interactive=False) + def log_debug_samples(self, files, title='Debug Samples'): """ Log files (images) as debug samples in the ClearML task. @@ -125,7 +182,7 @@ def log_debug_samples(self, files, title='Debug Samples'): it = re.search(r'_batch(\d+)', f.name) iteration = int(it.groups()[0]) if it else 0 self.task.get_logger().report_image(title=title, - series=f.name.replace(it.group(), ''), + series=f.name.replace(f"_batch{iteration}", ''), local_path=str(f), iteration=iteration) From 46ae996cb174a3e07cee80367fddc07783fd02ff Mon Sep 17 00:00:00 2001 From: davidsvaughn Date: Wed, 3 Jan 2024 02:15:07 -0500 Subject: [PATCH 1842/1976] Bugfix: update dataloaders.py to fix Multi-GPU DDP RAM multiple-cache issue (#10383) * Update dataloaders.py This is to address (and hopefully fix) this issue: Multi-GPU DDP RAM multiple-cache bug #3818 (https://github.com/ultralytics/yolov5/issues/3818). 
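In short, the fix described below pins each DDP rank to the subset of dataset indices it will ever be fed, so its RAM cache holds roughly 1/WORLD_SIZE of the images instead of all of them. A minimal standalone sketch of that modulo partitioning (assuming the RANK and WORLD_SIZE environment variables that torchrun sets; not the exact patch code):

    # sketch: keep only the indices whose permuted position maps to this rank
    import os
    import numpy as np

    RANK = int(os.getenv("RANK", -1))
    WORLD_SIZE = int(os.getenv("WORLD_SIZE", 1))

    n = 1000  # example dataset size
    indices = np.arange(n)
    if RANK > -1:
        # fixed-seed permutation is identical on every rank; each rank keeps its modulo slice
        indices = indices[np.random.RandomState(seed=0).permutation(n) % WORLD_SIZE == RANK]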
This was a very serious and "blocking" issue until I could figure out what was going on. The problem was especially bad when running Multi-GPU jobs with 8 GPUs: RAM usage was 8x higher than expected (!), causing repeated OOM failures. Hopefully this fix will help others.

DDP causes each RANK to launch its own process (one for each GPU) with its own trainloader and its own RAM image cache. The DistributedSampler used by DDP (https://github.com/pytorch/pytorch/blob/master/torch/utils/data/distributed.py) will feed only a subset of images (1/WORLD_SIZE) to each available GPU on each epoch, but since the images are shuffled between epochs, each GPU process must still cache all images.

So I created a subclass of DistributedSampler called SmartDistributedSampler that forces each GPU process to always sample the same subset (using modulo arithmetic with RANK and WORLD_SIZE) while still allowing random shuffling between epochs. I don't believe this disrupts the overall "randomness" of the sampling, and I haven't noticed any performance degradation.

Signed-off-by: davidsvaughn

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* Update dataloaders.py

move extra parameter (rank) to the end so it won't mess up pre-existing positional args

* Update dataloaders.py

removing extra '#'

* Update dataloaders.py

sample from DDP index array (self.idx) in mixup mosaic

* Merging self.indices and self.idx (DDP indices) into single attribute (self.indices).

Also adding SmartDistributedSampler to segmentation dataloader

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* Multiply GB displayed by WORLD_SIZE

---------

Signed-off-by: davidsvaughn
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Glenn Jocher
---
 utils/dataloaders.py         | 51 ++++++++++++++++++++++++++++++------
 utils/segment/dataloaders.py | 11 +++++---
 2 files changed, 50 insertions(+), 12 deletions(-)

diff --git a/utils/dataloaders.py b/utils/dataloaders.py
index 1fbd0361ded4..d422ef0711cb 100644
--- a/utils/dataloaders.py
+++ b/utils/dataloaders.py
@@ -41,6 +41,7 @@
 VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv'  # include video suffixes
 LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1))  # https://pytorch.org/docs/stable/elastic/run.html
 RANK = int(os.getenv('RANK', -1))
+WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1))
 PIN_MEMORY = str(os.getenv('PIN_MEMORY', True)).lower() == 'true'  # global pin_memory for dataloaders

 # Get orientation exif tag
@@ -100,6 +101,34 @@ def seed_worker(worker_id):
     random.seed(worker_seed)


+# Inherit from DistributedSampler and override iterator
+# https://github.com/pytorch/pytorch/blob/master/torch/utils/data/distributed.py
+class SmartDistributedSampler(distributed.DistributedSampler):
+
+    def __iter__(self):
+        # deterministically shuffle based on epoch and seed
+        g = torch.Generator()
+        g.manual_seed(self.seed + self.epoch)
+
+        # determine the eventual size (n) of self.indices (DDP indices)
+        n = int((len(self.dataset) - self.rank - 1) / self.num_replicas) + 1  # num_replicas == WORLD_SIZE
+        idx = torch.randperm(n, generator=g)
+        if not self.shuffle:
+            idx = idx.sort()[0]
+
+        idx = idx.tolist()
+        if self.drop_last:
+            idx = idx[:self.num_samples]
+        else:
+            padding_size = self.num_samples - len(idx)
+            if padding_size <= len(idx):
+                idx += idx[:padding_size]
+            else:
+                idx += (idx * math.ceil(padding_size / len(idx)))[:padding_size]
+ + return iter(idx) + + def create_dataloader(path, imgsz, batch_size, @@ -133,12 +162,13 @@ def create_dataloader(path, stride=int(stride), pad=pad, image_weights=image_weights, - prefix=prefix) + prefix=prefix, + rank=rank) batch_size = min(batch_size, len(dataset)) nd = torch.cuda.device_count() # number of CUDA devices nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) # number of workers - sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) + sampler = None if rank == -1 else SmartDistributedSampler(dataset, shuffle=shuffle) loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates generator = torch.Generator() generator.manual_seed(6148914691236517205 + seed + RANK) @@ -449,7 +479,9 @@ def __init__(self, stride=32, pad=0.0, min_items=0, - prefix=''): + prefix='', + rank=-1, + seed=0): self.img_size = img_size self.augment = augment self.hyp = hyp @@ -527,7 +559,10 @@ def __init__(self, nb = bi[-1] + 1 # number of batches self.batch = bi # batch index of image self.n = n - self.indices = range(n) + self.indices = np.arange(n) + if rank > -1: # DDP indices (see: SmartDistributedSampler) + # force each rank (i.e. GPU process) to sample the same subset of data on every epoch + self.indices = self.indices[np.random.RandomState(seed=seed).permutation(n) % WORLD_SIZE == RANK] # Update labels include_class = [] # filter labels to include only these classes (optional) @@ -576,14 +611,14 @@ def __init__(self, b, gb = 0, 1 << 30 # bytes of cached images, bytes per gigabytes self.im_hw0, self.im_hw = [None] * n, [None] * n fcn = self.cache_images_to_disk if cache_images == 'disk' else self.load_image - results = ThreadPool(NUM_THREADS).imap(fcn, range(n)) - pbar = tqdm(enumerate(results), total=n, bar_format=TQDM_BAR_FORMAT, disable=LOCAL_RANK > 0) + results = ThreadPool(NUM_THREADS).imap(lambda i: (i, fcn(i)), self.indices) + pbar = tqdm(results, total=len(self.indices), bar_format=TQDM_BAR_FORMAT, disable=LOCAL_RANK > 0) for i, x in pbar: if cache_images == 'disk': b += self.npy_files[i].stat().st_size else: # 'ram' self.ims[i], self.im_hw0[i], self.im_hw[i] = x # im, hw_orig, hw_resized = load_image(self, i) - b += self.ims[i].nbytes + b += self.ims[i].nbytes * WORLD_SIZE pbar.desc = f'{prefix}Caching images ({b / gb:.1f}GB {cache_images})' pbar.close() @@ -663,7 +698,7 @@ def __getitem__(self, index): # MixUp augmentation if random.random() < hyp['mixup']: - img, labels = mixup(img, labels, *self.load_mosaic(random.randint(0, self.n - 1))) + img, labels = mixup(img, labels, *self.load_mosaic(random.choice(self.indices))) else: # Load image diff --git a/utils/segment/dataloaders.py b/utils/segment/dataloaders.py index 3ee826dba69c..5398617eef68 100644 --- a/utils/segment/dataloaders.py +++ b/utils/segment/dataloaders.py @@ -12,7 +12,7 @@ from torch.utils.data import DataLoader, distributed from ..augmentations import augment_hsv, copy_paste, letterbox -from ..dataloaders import InfiniteDataLoader, LoadImagesAndLabels, seed_worker +from ..dataloaders import InfiniteDataLoader, LoadImagesAndLabels, SmartDistributedSampler, seed_worker from ..general import LOGGER, xyn2xy, xywhn2xyxy, xyxy2xywhn from ..torch_utils import torch_distributed_zero_first from .augmentations import mixup, random_perspective @@ -57,12 +57,13 @@ def create_dataloader(path, image_weights=image_weights, prefix=prefix, downsample_ratio=mask_downsample_ratio, - overlap=overlap_mask) + 
overlap=overlap_mask,
+                                      rank=rank)
     batch_size = min(batch_size, len(dataset))
     nd = torch.cuda.device_count()  # number of CUDA devices
     nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers])  # number of workers
-    sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle)
+    sampler = None if rank == -1 else SmartDistributedSampler(dataset, shuffle=shuffle)
     loader = DataLoader if image_weights else InfiniteDataLoader  # only DataLoader allows for attribute updates
     generator = torch.Generator()
     generator.manual_seed(6148914691236517205 + seed + RANK)
@@ -98,9 +99,11 @@ def __init__(
         prefix='',
         downsample_ratio=1,
         overlap=False,
+        rank=-1,
+        seed=0,
     ):
         super().__init__(path, img_size, batch_size, augment, hyp, rect, image_weights, cache_images, single_cls,
-                         stride, pad, min_items, prefix)
+                         stride, pad, min_items, prefix, rank, seed)
         self.downsample_ratio = downsample_ratio
         self.overlap = overlap

From f6a781ac76d00840521d51ed7366733938c2cd7e Mon Sep 17 00:00:00 2001
From: Aarni Koskela
Date: Wed, 3 Jan 2024 12:34:34 +0200
Subject: [PATCH 1843/1976] Use named imports in models.yolo (#12576)

---
 models/yolo.py | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/models/yolo.py b/models/yolo.py
index 9cd3cddc25c3..f6cdbcb5d2d8 100644
--- a/models/yolo.py
+++ b/models/yolo.py
@@ -8,12 +8,16 @@

 import argparse
 import contextlib
+import math
 import os
 import platform
 import sys
 from copy import deepcopy
 from pathlib import Path

+import torch
+import torch.nn as nn
+
 FILE = Path(__file__).resolve()
 ROOT = FILE.parents[1]  # YOLOv5 root directory
 if str(ROOT) not in sys.path:
@@ -21,10 +25,12 @@
 if platform.system() != 'Windows':
     ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative

-from models.common import *  # noqa
-from models.experimental import *  # noqa
+from models.common import (C3, C3SPP, C3TR, SPP, SPPF, Bottleneck, BottleneckCSP, C3Ghost, C3x, Classify, Concat,
+                           Contract, Conv, CrossConv, DetectMultiBackend, DWConv, DWConvTranspose2d, Expand, Focus,
+                           GhostBottleneck, GhostConv, Proto)
+from models.experimental import MixConv2d
 from utils.autoanchor import check_anchor_order
-from utils.general import LOGGER, check_version, check_yaml, make_divisible, print_args
+from utils.general import LOGGER, check_version, check_yaml, colorstr, make_divisible, print_args
 from utils.plots import feature_visualization
 from utils.torch_utils import (fuse_conv_and_bn, initialize_weights, model_info, profile, scale_img, select_device,
                                time_sync)

From dfa884f9db1cabbc8787a5862214827bfe3c9ed4 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Wed, 3 Jan 2024 11:35:32 +0100
Subject: [PATCH 1844/1976] Create pyproject.toml (#12577)

Signed-off-by: Glenn Jocher
---
 pyproject.toml | 148 +++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 148 insertions(+)
 create mode 100644 pyproject.toml

diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 000000000000..0ef1dc43dee4
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,148 @@
+# Ultralytics YOLOv5 🚀, AGPL-3.0 license
+
+# Overview:
+# This pyproject.toml file manages the build, packaging, and distribution of the Ultralytics library.
+# It defines essential project metadata, dependencies, and settings used to develop and deploy the library.
+
+# Key Sections:
+# - [build-system]: Specifies the build requirements and backend (e.g., setuptools, wheel).
+# - [project]: Includes details like name, version, description, authors, dependencies and more.
+# - [project.optional-dependencies]: Provides additional, optional packages for extended features. +# - [tool.*]: Configures settings for various tools (pytest, yapf, etc.) used in the project. + +# Installation: +# The Ultralytics library can be installed using the command: 'pip install ultralytics' +# For development purposes, you can install the package in editable mode with: 'pip install -e .' +# This approach allows for real-time code modifications without the need for re-installation. + +# Documentation: +# For comprehensive documentation and usage instructions, visit: https://docs.ultralytics.com + +[build-system] +requires = ["setuptools>=43.0.0", "wheel"] +build-backend = "setuptools.build_meta" + +# Project settings ----------------------------------------------------------------------------------------------------- +[project] +name = "YOLOv5" +description = "Ultralytics YOLOv5 for SOTA object detection, instance segmentation and image classification." +readme = "README.md" +requires-python = ">=3.8" +license = { "text" = "AGPL-3.0" } +keywords = ["machine-learning", "deep-learning", "computer-vision", "ML", "DL", "AI", "YOLO", "YOLOv3", "YOLOv5", "YOLOv8", "HUB", "Ultralytics"] +authors = [ + { name = "Glenn Jocher" }, + { name = "Ayush Chaurasia" }, + { name = "Jing Qiu" } +] +maintainers = [ + { name = "Glenn Jocher" }, + { name = "Ayush Chaurasia" }, + { name = "Jing Qiu" } +] +classifiers = [ + "Development Status :: 4 - Beta", + "Intended Audience :: Developers", + "Intended Audience :: Education", + "Intended Audience :: Science/Research", + "License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Topic :: Software Development", + "Topic :: Scientific/Engineering", + "Topic :: Scientific/Engineering :: Artificial Intelligence", + "Topic :: Scientific/Engineering :: Image Recognition", + "Operating System :: POSIX :: Linux", + "Operating System :: MacOS", + "Operating System :: Microsoft :: Windows", +] + +# Required dependencies ------------------------------------------------------------------------------------------------ +dependencies = [ + "matplotlib>=3.3.0", + "numpy>=1.22.2", + "opencv-python>=4.6.0", + "pillow>=7.1.2", + "pyyaml>=5.3.1", + "requests>=2.23.0", + "scipy>=1.4.1", + "torch>=1.8.0", + "torchvision>=0.9.0", + "tqdm>=4.64.0", # progress bars + "psutil", # system utilization + "py-cpuinfo", # display CPU info + "thop>=0.1.1", # FLOPs computation + "pandas>=1.1.4", + "seaborn>=0.11.0", # plotting + "ultralytics>=8.0.232" +] + +# Optional dependencies ------------------------------------------------------------------------------------------------ +[project.optional-dependencies] +dev = [ + "ipython", + "check-manifest", + "pre-commit", + "pytest", + "pytest-cov", + "coverage[toml]", + "mkdocs-material", + "mkdocstrings[python]", + "mkdocs-redirects", # for 301 redirects + "mkdocs-ultralytics-plugin>=0.0.34", # for meta descriptions and images, dates and authors +] +export = [ + "onnx>=1.12.0", # ONNX export + "coremltools>=7.0", # CoreML export + "openvino-dev>=2023.0", # OpenVINO export + "tensorflow<=2.13.1", # TF bug https://github.com/ultralytics/ultralytics/issues/5161 + "jax<=0.4.21", # tensorflowjs bug https://github.com/google/jax/issues/18978 + "jaxlib<=0.4.21", # tensorflowjs bug 
https://github.com/google/jax/issues/18978 + "tensorflowjs>=3.9.0", # TF.js export, automatically installs tensorflow +] +# tensorflow>=2.4.1,<=2.13.1 # TF exports (-cpu, -aarch64, -macos) +# tflite-support # for TFLite model metadata +# scikit-learn==0.19.2 # CoreML quantization +# nvidia-pyindex # TensorRT export +# nvidia-tensorrt # TensorRT export +logging = [ + "comet", # https://docs.ultralytics.com/integrations/comet/ + "tensorboard>=2.13.0", + "dvclive>=2.12.0", +] +extra = [ + "ipython", # interactive notebook + "albumentations>=1.0.3", # training augmentations + "pycocotools>=2.0.6", # COCO mAP +] + +[project.urls] +"Bug Reports" = "https://github.com/ultralytics/yolov5/issues" +"Funding" = "https://ultralytics.com" +"Source" = "https://github.com/ultralytics/yolov5/" + +# Tools settings ------------------------------------------------------------------------------------------------------- +[tool.pytest] +norecursedirs = [".git", "dist", "build"] +addopts = "--doctest-modules --durations=30 --color=yes" + +[tool.isort] +line_length = 120 +multi_line_output = 0 + +[tool.ruff] +line-length = 120 + +[tool.docformatter] +wrap-summaries = 120 +wrap-descriptions = 120 +in-place = true +pre-summary-newline = true +close-quotes-on-newline = true + +[tool.codespell] +ignore-words-list = "crate,nd,strack,dota,ane,segway,fo,gool,winn,commend" +skip = '*.csv,*venv*,docs/??/,docs/mkdocs_??.yml' From c42aba5b2f0a3e8a0004739ff0d5d0f83f288012 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 3 Jan 2024 11:36:55 +0100 Subject: [PATCH 1845/1976] Delete .pre-commit-config.yaml (#12578) * Delete .pre-commit-config.yaml Signed-off-by: Glenn Jocher * Delete setup.cfg Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher --- .pre-commit-config.yaml | 73 ----------------------------------------- setup.cfg | 56 ------------------------------- 2 files changed, 129 deletions(-) delete mode 100644 .pre-commit-config.yaml delete mode 100644 setup.cfg diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml deleted file mode 100644 index ab272b2a1477..000000000000 --- a/.pre-commit-config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -# Ultralytics YOLO 🚀, AGPL-3.0 license -# Pre-commit hooks. 
For more information see https://github.com/pre-commit/pre-commit-hooks/blob/main/README.md - -exclude: 'docs/' -# Define bot property if installed via https://github.com/marketplace/pre-commit-ci -ci: - autofix_prs: true - autoupdate_commit_msg: '[pre-commit.ci] pre-commit suggestions' - autoupdate_schedule: monthly - # submodules: true - -repos: - - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.5.0 - hooks: - - id: end-of-file-fixer - - id: trailing-whitespace - - id: check-case-conflict - # - id: check-yaml - - id: check-docstring-first - - id: double-quote-string-fixer - - id: detect-private-key - - - repo: https://github.com/asottile/pyupgrade - rev: v3.15.0 - hooks: - - id: pyupgrade - name: Upgrade code - - - repo: https://github.com/PyCQA/isort - rev: 5.12.0 - hooks: - - id: isort - name: Sort imports - - - repo: https://github.com/google/yapf - rev: v0.40.2 - hooks: - - id: yapf - name: YAPF formatting - - - repo: https://github.com/executablebooks/mdformat - rev: 0.7.17 - hooks: - - id: mdformat - name: MD formatting - additional_dependencies: - - mdformat-gfm - - mdformat-black - # exclude: "README.md|README.zh-CN.md|CONTRIBUTING.md" - - - repo: https://github.com/PyCQA/flake8 - rev: 6.1.0 - hooks: - - id: flake8 - name: PEP8 - - - repo: https://github.com/codespell-project/codespell - rev: v2.2.6 - hooks: - - id: codespell - args: - - --ignore-words-list=crate,nd,strack,dota - -# - repo: https://github.com/asottile/yesqa -# rev: v1.4.0 -# hooks: -# - id: yesqa - -# - repo: https://github.com/asottile/dead -# rev: v1.5.0 -# hooks: -# - id: dead diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index 2cde6a494836..000000000000 --- a/setup.cfg +++ /dev/null @@ -1,56 +0,0 @@ -# Project-wide configuration file, can be used for package metadata and other toll configurations -# Example usage: global configuration for PEP8 (via flake8) setting or default pytest arguments -# Local usage: pip install pre-commit, pre-commit run --all-files - -[metadata] -license_files = LICENSE -description_file = README.md - -[tool:pytest] -norecursedirs = - .git - dist - build -addopts = - --doctest-modules - --durations=25 - --color=yes - -[flake8] -max-line-length = 120 -exclude = .tox,*.egg,build,temp -select = E,W,F -doctests = True -verbose = 2 -# https://pep8.readthedocs.io/en/latest/intro.html#error-codes -format = pylint -# see: https://www.flake8rules.com/ -ignore = E731,F405,E402,W504,E501 - # E731: Do not assign a lambda expression, use a def - # F405: name may be undefined, or defined from star imports: module - # E402: module level import not at top of file - # W504: line break after binary operator - # E501: line too long - # removed: - # F401: module imported but unused - # E231: missing whitespace after ‘,’, ‘;’, or ‘:’ - # E127: continuation line over-indented for visual indent - # F403: ‘from module import *’ used; unable to detect undefined names - - -[isort] -# https://pycqa.github.io/isort/docs/configuration/options.html -line_length = 120 -# see: https://pycqa.github.io/isort/docs/configuration/multi_line_output_modes.html -multi_line_output = 0 - -[yapf] -based_on_style = pep8 -spaces_before_comment = 2 -COLUMN_LIMIT = 120 -COALESCE_BRACKETS = True -SPACES_AROUND_POWER_OPERATOR = True -SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET = True -SPLIT_BEFORE_CLOSING_BRACKET = False -SPLIT_BEFORE_FIRST_ARGUMENT = False -# EACH_DICT_ENTRY_ON_SEPARATE_LINE = False From f56355d3210f3b6448834e4d48fba458b9fcb150 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 
7 Jan 2024 01:43:03 +0100
Subject: [PATCH 1846/1976] Update links.yml (#12588)

* Update links.yml

Signed-off-by: Glenn Jocher

* Update links.yml

Signed-off-by: Glenn Jocher

---------

Signed-off-by: Glenn Jocher
---
 .github/workflows/links.yml | 24 ++++++++++++++++++++++--
 1 file changed, 22 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/links.yml b/.github/workflows/links.yml
index 05408fd6227b..1c7b28257d80 100644
--- a/.github/workflows/links.yml
+++ b/.github/workflows/links.yml
@@ -33,7 +33,15 @@
           timeout_minutes: 5
           retry_wait_seconds: 60
           max_attempts: 3
-          command: lychee --accept 403,429,500,502,999 --exclude-loopback --exclude 'https?://(www\.)?(linkedin\.com|twitter\.com|instagram\.com|kaggle\.com|fonts\.gstatic\.com|url\.com)' --exclude-path '**/ci.yaml' --exclude-mail --github-token ${{ secrets.GITHUB_TOKEN }} './**/*.md' './**/*.html'
+          command: |
+            lychee \
+            --accept 403,429,500,502,999 \
+            --exclude-loopback \
+            --exclude 'https?://(www\.)?(linkedin\.com|twitter\.com|instagram\.com|kaggle\.com|fonts\.gstatic\.com|url\.com)' \
+            --exclude-path '**/ci.yaml' \
+            --github-token ${{ secrets.GITHUB_TOKEN }} \
+            './**/*.md' \
+            './**/*.html'

       - name: Test Markdown, HTML, YAML, Python and Notebook links with retry
         if: github.event_name == 'workflow_dispatch'
@@ -42,4 +50,16 @@
           timeout_minutes: 5
           retry_wait_seconds: 60
           max_attempts: 3
-          command: lychee --accept 429,999 --exclude-loopback --exclude 'https?://(www\.)?(linkedin\.com|twitter\.com|instagram\.com|kaggle\.com|fonts\.gstatic\.com|url\.com)' --exclude-path '**/ci.yaml' --exclude-mail --github-token ${{ secrets.GITHUB_TOKEN }} './**/*.md' './**/*.html' './**/*.yml' './**/*.yaml' './**/*.py' './**/*.ipynb'
+          command: |
+            lychee \
+            --accept 429,999 \
+            --exclude-loopback \
+            --exclude 'https?://(www\.)?(linkedin\.com|twitter\.com|instagram\.com|kaggle\.com|fonts\.gstatic\.com|url\.com)' \
+            --exclude-path '**/ci.yaml' \
+            --github-token ${{ secrets.GITHUB_TOKEN }} \
+            './**/*.md' \
+            './**/*.html' \
+            './**/*.yml' \
+            './**/*.yaml' \
+            './**/*.py' \
+            './**/*.ipynb'

From ad97dc6a6d8b4384eb7224e98c8584ecd8bcfd2a Mon Sep 17 00:00:00 2001
From: Paula Derrenger <107626595+pderrenger@users.noreply.github.com>
Date: Mon, 8 Jan 2024 01:29:14 +0100
Subject: [PATCH 1847/1976] Update Actions with Lychee and GitHub Token
 (#12592)

* Add Ultralytics Actions

* Auto-format by Ultralytics actions

* Update format.yml

Signed-off-by: Glenn Jocher

---------

Signed-off-by: Glenn Jocher
Co-authored-by: UltralyticsAssistant
Co-authored-by: Glenn Jocher
---
 .github/workflows/format.yml            |  25 +
 benchmarks.py                           | 116 ++---
 classify/predict.py                     | 127 ++---
 classify/train.py                       | 255 +++++-----
 classify/val.py                         |  81 ++--
 detect.py                               | 184 +++----
 export.py                               | 616 +++++++++++++-----------
 hubconf.py                              |  72 +--
 models/common.py                        | 316 ++++++------
 models/experimental.py                  |  35 +-
 models/tf.py                            | 252 ++++++----
 models/yolo.py                          | 150 ++++--
 segment/predict.py                      | 143 +++---
 segment/train.py                        | 522 +++++++++++---------
 segment/val.py                          | 297 +++++++-----
 train.py                                | 595 +++++++++++++----------
 utils/__init__.py                       |  29 +-
 utils/activations.py                    |   9 +-
 utils/augmentations.py                  |  76 ++-
 utils/autoanchor.py                     |  72 +--
 utils/autobatch.py                      |  24 +-
 utils/aws/resume.py                     |  16 +-
 utils/callbacks.py                      |  63 ++-
 utils/dataloaders.py                    | 602 ++++++++++++-----------
 utils/downloads.py                      |  91 ++--
 utils/flask_rest_api/example_request.py |  12 +-
 utils/flask_rest_api/restapi.py         |  28 +-
 utils/general.py                        | 574 +++++++++++++---------
 utils/loggers/__init__.py               | 195 ++++----
 utils/loggers/clearml/clearml_utils.py  | 123 ++---
utils/loggers/clearml/hpo.py | 78 +-- utils/loggers/comet/__init__.py | 243 +++++----- utils/loggers/comet/comet_utils.py | 54 +-- utils/loggers/comet/hpo.py | 114 ++--- utils/loggers/wandb/wandb_utils.py | 109 +++-- utils/loss.py | 58 +-- utils/metrics.py | 118 ++--- utils/plots.py | 254 +++++----- utils/segment/augmentations.py | 22 +- utils/segment/dataloaders.py | 149 +++--- utils/segment/general.py | 19 +- utils/segment/loss.py | 46 +- utils/segment/metrics.py | 156 +++--- utils/segment/plots.py | 39 +- utils/torch_utils.py | 230 +++++---- utils/triton.py | 43 +- val.py | 269 ++++++----- 47 files changed, 4228 insertions(+), 3473 deletions(-) create mode 100644 .github/workflows/format.yml diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml new file mode 100644 index 000000000000..8d623f4f6e7e --- /dev/null +++ b/.github/workflows/format.yml @@ -0,0 +1,25 @@ +# Ultralytics 🚀 - AGPL-3.0 license +# Ultralytics Actions https://github.com/ultralytics/actions +# This workflow automatically formats code and documentation in PRs to official Ultralytics standards + +name: Ultralytics Actions + +on: + push: + branches: [main,master] + pull_request_target: + branches: [main,master] + +jobs: + format: + runs-on: ubuntu-latest + steps: + - name: Run Ultralytics Formatting + uses: ultralytics/actions@main + with: + token: ${{ secrets.GITHUB_TOKEN }} # automatically generated + python: true + docstrings: true + markdown: true + spelling: true + links: true diff --git a/benchmarks.py b/benchmarks.py index b590ff63cb01..09e82e588a2a 100644 --- a/benchmarks.py +++ b/benchmarks.py @@ -1,6 +1,6 @@ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ -Run YOLOv5 benchmarks on all supported export formats +Run YOLOv5 benchmarks on all supported export formats. Format | `export.py --include` | Model --- | --- | --- @@ -50,115 +50,115 @@ def run( - weights=ROOT / 'yolov5s.pt', # weights path - imgsz=640, # inference size (pixels) - batch_size=1, # batch size - data=ROOT / 'data/coco128.yaml', # dataset.yaml path - device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu - half=False, # use FP16 half-precision inference - test=False, # test exports only - pt_only=False, # test PyTorch only - hard_fail=False, # throw error on benchmark failure + weights=ROOT / "yolov5s.pt", # weights path + imgsz=640, # inference size (pixels) + batch_size=1, # batch size + data=ROOT / "data/coco128.yaml", # dataset.yaml path + device="", # cuda device, i.e. 0 or 0,1,2,3 or cpu + half=False, # use FP16 half-precision inference + test=False, # test exports only + pt_only=False, # test PyTorch only + hard_fail=False, # throw error on benchmark failure ): y, t = [], time.time() device = select_device(device) model_type = type(attempt_load(weights, fuse=False)) # DetectionModel, SegmentationModel, etc. 
for i, (name, f, suffix, cpu, gpu) in export.export_formats().iterrows(): # index, (name, file, suffix, CPU, GPU) try: - assert i not in (9, 10), 'inference not supported' # Edge TPU and TF.js are unsupported - assert i != 5 or platform.system() == 'Darwin', 'inference only supported on macOS>=10.13' # CoreML - if 'cpu' in device.type: - assert cpu, 'inference not supported on CPU' - if 'cuda' in device.type: - assert gpu, 'inference not supported on GPU' + assert i not in (9, 10), "inference not supported" # Edge TPU and TF.js are unsupported + assert i != 5 or platform.system() == "Darwin", "inference only supported on macOS>=10.13" # CoreML + if "cpu" in device.type: + assert cpu, "inference not supported on CPU" + if "cuda" in device.type: + assert gpu, "inference not supported on GPU" # Export - if f == '-': + if f == "-": w = weights # PyTorch format else: - w = export.run(weights=weights, - imgsz=[imgsz], - include=[f], - batch_size=batch_size, - device=device, - half=half)[-1] # all others - assert suffix in str(w), 'export failed' + w = export.run( + weights=weights, imgsz=[imgsz], include=[f], batch_size=batch_size, device=device, half=half + )[-1] # all others + assert suffix in str(w), "export failed" # Validate if model_type == SegmentationModel: - result = val_seg(data, w, batch_size, imgsz, plots=False, device=device, task='speed', half=half) + result = val_seg(data, w, batch_size, imgsz, plots=False, device=device, task="speed", half=half) metric = result[0][7] # (box(p, r, map50, map), mask(p, r, map50, map), *loss(box, obj, cls)) else: # DetectionModel: - result = val_det(data, w, batch_size, imgsz, plots=False, device=device, task='speed', half=half) + result = val_det(data, w, batch_size, imgsz, plots=False, device=device, task="speed", half=half) metric = result[0][3] # (p, r, map50, map, *loss(box, obj, cls)) speed = result[2][1] # times (preprocess, inference, postprocess) y.append([name, round(file_size(w), 1), round(metric, 4), round(speed, 2)]) # MB, mAP, t_inference except Exception as e: if hard_fail: - assert type(e) is AssertionError, f'Benchmark --hard-fail for {name}: {e}' - LOGGER.warning(f'WARNING ⚠️ Benchmark failure for {name}: {e}') + assert type(e) is AssertionError, f"Benchmark --hard-fail for {name}: {e}" + LOGGER.warning(f"WARNING ⚠️ Benchmark failure for {name}: {e}") y.append([name, None, None, None]) # mAP, t_inference if pt_only and i == 0: break # break after PyTorch # Print results - LOGGER.info('\n') + LOGGER.info("\n") parse_opt() notebook_init() # print system info - c = ['Format', 'Size (MB)', 'mAP50-95', 'Inference time (ms)'] if map else ['Format', 'Export', '', ''] + c = ["Format", "Size (MB)", "mAP50-95", "Inference time (ms)"] if map else ["Format", "Export", "", ""] py = pd.DataFrame(y, columns=c) - LOGGER.info(f'\nBenchmarks complete ({time.time() - t:.2f}s)') + LOGGER.info(f"\nBenchmarks complete ({time.time() - t:.2f}s)") LOGGER.info(str(py if map else py.iloc[:, :2])) if hard_fail and isinstance(hard_fail, str): - metrics = py['mAP50-95'].array # values to compare to floor + metrics = py["mAP50-95"].array # values to compare to floor floor = eval(hard_fail) # minimum metric floor to pass, i.e. 
= 0.29 mAP for YOLOv5n - assert all(x > floor for x in metrics if pd.notna(x)), f'HARD FAIL: mAP50-95 < floor {floor}' + assert all(x > floor for x in metrics if pd.notna(x)), f"HARD FAIL: mAP50-95 < floor {floor}" return py def test( - weights=ROOT / 'yolov5s.pt', # weights path - imgsz=640, # inference size (pixels) - batch_size=1, # batch size - data=ROOT / 'data/coco128.yaml', # dataset.yaml path - device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu - half=False, # use FP16 half-precision inference - test=False, # test exports only - pt_only=False, # test PyTorch only - hard_fail=False, # throw error on benchmark failure + weights=ROOT / "yolov5s.pt", # weights path + imgsz=640, # inference size (pixels) + batch_size=1, # batch size + data=ROOT / "data/coco128.yaml", # dataset.yaml path + device="", # cuda device, i.e. 0 or 0,1,2,3 or cpu + half=False, # use FP16 half-precision inference + test=False, # test exports only + pt_only=False, # test PyTorch only + hard_fail=False, # throw error on benchmark failure ): y, t = [], time.time() device = select_device(device) for i, (name, f, suffix, gpu) in export.export_formats().iterrows(): # index, (name, file, suffix, gpu-capable) try: - w = weights if f == '-' else \ - export.run(weights=weights, imgsz=[imgsz], include=[f], device=device, half=half)[-1] # weights - assert suffix in str(w), 'export failed' + w = ( + weights + if f == "-" + else export.run(weights=weights, imgsz=[imgsz], include=[f], device=device, half=half)[-1] + ) # weights + assert suffix in str(w), "export failed" y.append([name, True]) except Exception: y.append([name, False]) # mAP, t_inference # Print results - LOGGER.info('\n') + LOGGER.info("\n") parse_opt() notebook_init() # print system info - py = pd.DataFrame(y, columns=['Format', 'Export']) - LOGGER.info(f'\nExports complete ({time.time() - t:.2f}s)') + py = pd.DataFrame(y, columns=["Format", "Export"]) + LOGGER.info(f"\nExports complete ({time.time() - t:.2f}s)") LOGGER.info(str(py)) return py def parse_opt(): parser = argparse.ArgumentParser() - parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='weights path') - parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)') - parser.add_argument('--batch-size', type=int, default=1, help='batch size') - parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') - parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') - parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') - parser.add_argument('--test', action='store_true', help='test exports only') - parser.add_argument('--pt-only', action='store_true', help='test PyTorch only') - parser.add_argument('--hard-fail', nargs='?', const=True, default=False, help='Exception on error or < min metric') + parser.add_argument("--weights", type=str, default=ROOT / "yolov5s.pt", help="weights path") + parser.add_argument("--imgsz", "--img", "--img-size", type=int, default=640, help="inference size (pixels)") + parser.add_argument("--batch-size", type=int, default=1, help="batch size") + parser.add_argument("--data", type=str, default=ROOT / "data/coco128.yaml", help="dataset.yaml path") + parser.add_argument("--device", default="", help="cuda device, i.e. 
0 or 0,1,2,3 or cpu") + parser.add_argument("--half", action="store_true", help="use FP16 half-precision inference") + parser.add_argument("--test", action="store_true", help="test exports only") + parser.add_argument("--pt-only", action="store_true", help="test PyTorch only") + parser.add_argument("--hard-fail", nargs="?", const=True, default=False, help="Exception on error or < min metric") opt = parser.parse_args() opt.data = check_yaml(opt.data) # check YAML print_args(vars(opt)) @@ -169,6 +169,6 @@ def main(opt): test(**vars(opt)) if opt.test else run(**vars(opt)) -if __name__ == '__main__': +if __name__ == "__main__": opt = parse_opt() main(opt) diff --git a/classify/predict.py b/classify/predict.py index b056a0cd707b..b7d2f05d7bce 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -48,43 +48,54 @@ from models.common import DetectMultiBackend from utils.augmentations import classify_transforms from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams -from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, - increment_path, print_args, strip_optimizer) +from utils.general import ( + LOGGER, + Profile, + check_file, + check_img_size, + check_imshow, + check_requirements, + colorstr, + cv2, + increment_path, + print_args, + strip_optimizer, +) from utils.torch_utils import select_device, smart_inference_mode @smart_inference_mode() def run( - weights=ROOT / 'yolov5s-cls.pt', # model.pt path(s) - source=ROOT / 'data/images', # file/dir/URL/glob/screen/0(webcam) - data=ROOT / 'data/coco128.yaml', # dataset.yaml path - imgsz=(224, 224), # inference size (height, width) - device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu - view_img=False, # show results - save_txt=False, # save results to *.txt - nosave=False, # do not save images/videos - augment=False, # augmented inference - visualize=False, # visualize features - update=False, # update all models - project=ROOT / 'runs/predict-cls', # save results to project/name - name='exp', # save results to project/name - exist_ok=False, # existing project/name ok, do not increment - half=False, # use FP16 half-precision inference - dnn=False, # use OpenCV DNN for ONNX inference - vid_stride=1, # video frame-rate stride + weights=ROOT / "yolov5s-cls.pt", # model.pt path(s) + source=ROOT / "data/images", # file/dir/URL/glob/screen/0(webcam) + data=ROOT / "data/coco128.yaml", # dataset.yaml path + imgsz=(224, 224), # inference size (height, width) + device="", # cuda device, i.e. 
0 or 0,1,2,3 or cpu + view_img=False, # show results + save_txt=False, # save results to *.txt + nosave=False, # do not save images/videos + augment=False, # augmented inference + visualize=False, # visualize features + update=False, # update all models + project=ROOT / "runs/predict-cls", # save results to project/name + name="exp", # save results to project/name + exist_ok=False, # existing project/name ok, do not increment + half=False, # use FP16 half-precision inference + dnn=False, # use OpenCV DNN for ONNX inference + vid_stride=1, # video frame-rate stride ): source = str(source) - save_img = not nosave and not source.endswith('.txt') # save inference images + save_img = not nosave and not source.endswith(".txt") # save inference images is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) - is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) - webcam = source.isnumeric() or source.endswith('.streams') or (is_url and not is_file) - screenshot = source.lower().startswith('screen') + is_url = source.lower().startswith(("rtsp://", "rtmp://", "http://", "https://")) + webcam = source.isnumeric() or source.endswith(".streams") or (is_url and not is_file) + screenshot = source.lower().startswith("screen") if is_url and is_file: source = check_file(source) # download # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run - (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir + (save_dir / "labels" if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model device = select_device(device) @@ -127,15 +138,15 @@ def run( seen += 1 if webcam: # batch_size >= 1 p, im0, frame = path[i], im0s[i].copy(), dataset.count - s += f'{i}: ' + s += f"{i}: " else: - p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0) + p, im0, frame = path, im0s.copy(), getattr(dataset, "frame", 0) p = Path(p) # to Path save_path = str(save_dir / p.name) # im.jpg - txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt + txt_path = str(save_dir / "labels" / p.stem) + ("" if dataset.mode == "image" else f"_{frame}") # im.txt - s += '%gx%g ' % im.shape[2:] # print string + s += "%gx%g " % im.shape[2:] # print string annotator = Annotator(im0, example=str(names), pil=True) # Print results @@ -143,17 +154,17 @@ def run( s += f"{', '.join(f'{names[j]} {prob[j]:.2f}' for j in top5i)}, " # Write results - text = '\n'.join(f'{prob[j]:.2f} {names[j]}' for j in top5i) + text = "\n".join(f"{prob[j]:.2f} {names[j]}" for j in top5i) if save_img or view_img: # Add bbox to image annotator.text([32, 32], text, txt_color=(255, 255, 255)) if save_txt: # Write to file - with open(f'{txt_path}.txt', 'a') as f: - f.write(text + '\n') + with open(f"{txt_path}.txt", "a") as f: + f.write(text + "\n") # Stream results im0 = annotator.result() if view_img: - if platform.system() == 'Linux' and p not in windows: + if platform.system() == "Linux" and p not in windows: windows.append(p) cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux) cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0]) @@ -162,7 +173,7 @@ def run( # Save results (image with detections) if save_img: - if dataset.mode == 'image': + if dataset.mode == "image": cv2.imwrite(save_path, im0) else: # 'video' or 'stream' if vid_path[i] != save_path: # new video @@ -175,18 +186,18 @@ def run( h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) 
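# --- Aside: a minimal, self-contained sketch of the lazy cv2.VideoWriter
# pattern used in this save-video branch. `write_video` and `frames` are
# illustrative names, not part of this patch; assumes opencv-python and
# numpy are installed.
import cv2
import numpy as np

def write_video(frames, save_path, fps=30.0):
    writer = None
    for im in frames:
        if writer is None:  # open once the first frame fixes (w, h)
            h, w = im.shape[:2]
            fourcc = cv2.VideoWriter_fourcc(*"mp4v")  # mp4v codec pairs with a *.mp4 suffix
            writer = cv2.VideoWriter(str(save_path), fourcc, fps, (w, h))
        writer.write(im)  # frames must keep the (w, h) the writer was opened with
    if writer is not None:
        writer.release()  # flush and close the container

write_video((np.zeros((480, 640, 3), np.uint8) for _ in range(30)), "demo.mp4")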
else: # stream fps, w, h = 30, im0.shape[1], im0.shape[0] - save_path = str(Path(save_path).with_suffix('.mp4')) # force *.mp4 suffix on results videos - vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) + save_path = str(Path(save_path).with_suffix(".mp4")) # force *.mp4 suffix on results videos + vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h)) vid_writer[i].write(im0) # Print time (inference-only) - LOGGER.info(f'{s}{dt[1].dt * 1E3:.1f}ms') + LOGGER.info(f"{s}{dt[1].dt * 1E3:.1f}ms") # Print results - t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image - LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) + t = tuple(x.t / seen * 1e3 for x in dt) # speeds per image + LOGGER.info(f"Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}" % t) if save_txt or save_img: - s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' + s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else "" LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") if update: strip_optimizer(weights[0]) # update model (to fix SourceChangeWarning) @@ -194,23 +205,23 @@ def run( def parse_opt(): parser = argparse.ArgumentParser() - parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-cls.pt', help='model path(s)') - parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob/screen/0(webcam)') - parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path') - parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[224], help='inference size h,w') - parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') - parser.add_argument('--view-img', action='store_true', help='show results') - parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') - parser.add_argument('--nosave', action='store_true', help='do not save images/videos') - parser.add_argument('--augment', action='store_true', help='augmented inference') - parser.add_argument('--visualize', action='store_true', help='visualize features') - parser.add_argument('--update', action='store_true', help='update all models') - parser.add_argument('--project', default=ROOT / 'runs/predict-cls', help='save results to project/name') - parser.add_argument('--name', default='exp', help='save results to project/name') - parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') - parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') - parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') - parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride') + parser.add_argument("--weights", nargs="+", type=str, default=ROOT / "yolov5s-cls.pt", help="model path(s)") + parser.add_argument("--source", type=str, default=ROOT / "data/images", help="file/dir/URL/glob/screen/0(webcam)") + parser.add_argument("--data", type=str, default=ROOT / "data/coco128.yaml", help="(optional) dataset.yaml path") + parser.add_argument("--imgsz", "--img", "--img-size", nargs="+", type=int, default=[224], help="inference size h,w") + parser.add_argument("--device", default="", help="cuda device, i.e. 0 or 0,1,2,3 or cpu") + parser.add_argument("--view-img", action="store_true", help="show results") + parser.add_argument("--save-txt", action="store_true", help="save results to *.txt") + parser.add_argument("--nosave", action="store_true", help="do not save images/videos") + parser.add_argument("--augment", action="store_true", help="augmented inference") + parser.add_argument("--visualize", action="store_true", help="visualize features") + parser.add_argument("--update", action="store_true", help="update all models") + parser.add_argument("--project", default=ROOT / "runs/predict-cls", help="save results to project/name") + parser.add_argument("--name", default="exp", help="save results to project/name") + parser.add_argument("--exist-ok", action="store_true", help="existing project/name ok, do not increment") + parser.add_argument("--half", action="store_true", help="use FP16 half-precision inference") + parser.add_argument("--dnn", action="store_true", help="use OpenCV DNN for ONNX inference") + parser.add_argument("--vid-stride", type=int, default=1, help="video frame-rate stride") opt = parser.parse_args() opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand print_args(vars(opt)) @@ -218,10 +229,10 @@ def parse_opt(): def main(opt): - check_requirements(ROOT / 'requirements.txt', exclude=('tensorboard', 'thop')) + check_requirements(ROOT / "requirements.txt", exclude=("tensorboard", "thop")) run(**vars(opt)) -if __name__ == '__main__': +if __name__ == "__main__": opt = parse_opt() main(opt) diff --git a/classify/train.py b/classify/train.py index ecbea1d8c0de..63befed0f780 100644 --- a/classify/train.py +++ b/classify/train.py @@ -1,6 +1,6 @@ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ -Train a YOLOv5 classifier model on a classification dataset +Train a YOLOv5 classifier model on a classification dataset. 
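A note on the `--imgsz` handling in classify/predict.py's parse_opt() above:
`opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1` expands a single --img value
into a square (h, w) pair. A minimal sketch (`expand_imgsz` is a hypothetical
helper for illustration, not part of the patch):

    def expand_imgsz(imgsz):
        # [224] -> [224, 224]; an explicit [h, w] pair passes through unchanged
        return imgsz * 2 if len(imgsz) == 1 else imgsz

    assert expand_imgsz([224]) == [224, 224]
    assert expand_imgsz([640, 480]) == [640, 480]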
Usage - Single-GPU training: $ python classify/train.py --model yolov5s-cls.pt --data imagenette160 --epochs 5 --img 224 @@ -40,33 +40,61 @@ from models.experimental import attempt_load from models.yolo import ClassificationModel, DetectionModel from utils.dataloaders import create_classification_dataloader -from utils.general import (DATASETS_DIR, LOGGER, TQDM_BAR_FORMAT, WorkingDirectory, check_git_info, check_git_status, - check_requirements, colorstr, download, increment_path, init_seeds, print_args, yaml_save) +from utils.general import ( + DATASETS_DIR, + LOGGER, + TQDM_BAR_FORMAT, + WorkingDirectory, + check_git_info, + check_git_status, + check_requirements, + colorstr, + download, + increment_path, + init_seeds, + print_args, + yaml_save, +) from utils.loggers import GenericLogger from utils.plots import imshow_cls -from utils.torch_utils import (ModelEMA, de_parallel, model_info, reshape_classifier_output, select_device, smart_DDP, - smart_optimizer, smartCrossEntropyLoss, torch_distributed_zero_first) - -LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html -RANK = int(os.getenv('RANK', -1)) -WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) +from utils.torch_utils import ( + ModelEMA, + de_parallel, + model_info, + reshape_classifier_output, + select_device, + smart_DDP, + smart_optimizer, + smartCrossEntropyLoss, + torch_distributed_zero_first, +) + +LOCAL_RANK = int(os.getenv("LOCAL_RANK", -1)) # https://pytorch.org/docs/stable/elastic/run.html +RANK = int(os.getenv("RANK", -1)) +WORLD_SIZE = int(os.getenv("WORLD_SIZE", 1)) GIT_INFO = check_git_info() def train(opt, device): init_seeds(opt.seed + 1 + RANK, deterministic=True) - save_dir, data, bs, epochs, nw, imgsz, pretrained = \ - opt.save_dir, Path(opt.data), opt.batch_size, opt.epochs, min(os.cpu_count() - 1, opt.workers), \ - opt.imgsz, str(opt.pretrained).lower() == 'true' - cuda = device.type != 'cpu' + save_dir, data, bs, epochs, nw, imgsz, pretrained = ( + opt.save_dir, + Path(opt.data), + opt.batch_size, + opt.epochs, + min(os.cpu_count() - 1, opt.workers), + opt.imgsz, + str(opt.pretrained).lower() == "true", + ) + cuda = device.type != "cpu" # Directories - wdir = save_dir / 'weights' + wdir = save_dir / "weights" wdir.mkdir(parents=True, exist_ok=True) # make dir - last, best = wdir / 'last.pt', wdir / 'best.pt' + last, best = wdir / "last.pt", wdir / "best.pt" # Save run settings - yaml_save(save_dir / 'opt.yaml', vars(opt)) + yaml_save(save_dir / "opt.yaml", vars(opt)) # Logger logger = GenericLogger(opt=opt, console_logger=LOGGER) if RANK in {-1, 0} else None @@ -75,51 +103,55 @@ def train(opt, device): with torch_distributed_zero_first(LOCAL_RANK), WorkingDirectory(ROOT): data_dir = data if data.is_dir() else (DATASETS_DIR / data) if not data_dir.is_dir(): - LOGGER.info(f'\nDataset not found ⚠️, missing path {data_dir}, attempting download...') + LOGGER.info(f"\nDataset not found ⚠️, missing path {data_dir}, attempting download...") t = time.time() - if str(data) == 'imagenet': - subprocess.run(['bash', str(ROOT / 'data/scripts/get_imagenet.sh')], shell=True, check=True) + if str(data) == "imagenet": + subprocess.run(["bash", str(ROOT / "data/scripts/get_imagenet.sh")], shell=True, check=True) else: - url = f'https://github.com/ultralytics/yolov5/releases/download/v1.0/{data}.zip' + url = f"https://github.com/ultralytics/yolov5/releases/download/v1.0/{data}.zip" download(url, dir=data_dir.parent) s = f"Dataset download success ✅ ({time.time() - t:.1f}s), saved to 
{colorstr('bold', data_dir)}\n" LOGGER.info(s) # Dataloaders - nc = len([x for x in (data_dir / 'train').glob('*') if x.is_dir()]) # number of classes - trainloader = create_classification_dataloader(path=data_dir / 'train', - imgsz=imgsz, - batch_size=bs // WORLD_SIZE, - augment=True, - cache=opt.cache, - rank=LOCAL_RANK, - workers=nw) - - test_dir = data_dir / 'test' if (data_dir / 'test').exists() else data_dir / 'val' # data/test or data/val + nc = len([x for x in (data_dir / "train").glob("*") if x.is_dir()]) # number of classes + trainloader = create_classification_dataloader( + path=data_dir / "train", + imgsz=imgsz, + batch_size=bs // WORLD_SIZE, + augment=True, + cache=opt.cache, + rank=LOCAL_RANK, + workers=nw, + ) + + test_dir = data_dir / "test" if (data_dir / "test").exists() else data_dir / "val" # data/test or data/val if RANK in {-1, 0}: - testloader = create_classification_dataloader(path=test_dir, - imgsz=imgsz, - batch_size=bs // WORLD_SIZE * 2, - augment=False, - cache=opt.cache, - rank=-1, - workers=nw) + testloader = create_classification_dataloader( + path=test_dir, + imgsz=imgsz, + batch_size=bs // WORLD_SIZE * 2, + augment=False, + cache=opt.cache, + rank=-1, + workers=nw, + ) # Model with torch_distributed_zero_first(LOCAL_RANK), WorkingDirectory(ROOT): - if Path(opt.model).is_file() or opt.model.endswith('.pt'): - model = attempt_load(opt.model, device='cpu', fuse=False) + if Path(opt.model).is_file() or opt.model.endswith(".pt"): + model = attempt_load(opt.model, device="cpu", fuse=False) elif opt.model in torchvision.models.__dict__: # TorchVision models i.e. resnet50, efficientnet_b0 - model = torchvision.models.__dict__[opt.model](weights='IMAGENET1K_V1' if pretrained else None) + model = torchvision.models.__dict__[opt.model](weights="IMAGENET1K_V1" if pretrained else None) else: - m = hub.list('ultralytics/yolov5') # + hub.list('pytorch/vision') # models - raise ModuleNotFoundError(f'--model {opt.model} not found. Available models are: \n' + '\n'.join(m)) + m = hub.list("ultralytics/yolov5") # + hub.list('pytorch/vision') # models + raise ModuleNotFoundError(f"--model {opt.model} not found. Available models are: \n" + "\n".join(m)) if isinstance(model, DetectionModel): LOGGER.warning("WARNING ⚠️ pass YOLOv5 classifier model with '-cls' suffix, i.e. 
'--model yolov5s-cls.pt'") model = ClassificationModel(model=model, nc=nc, cutoff=opt.cutoff or 10) # convert to classification model reshape_classifier_output(model, nc) # update class count for m in model.modules(): - if not pretrained and hasattr(m, 'reset_parameters'): + if not pretrained and hasattr(m, "reset_parameters"): m.reset_parameters() if isinstance(m, torch.nn.Dropout) and opt.dropout is not None: m.p = opt.dropout # set dropout @@ -135,8 +167,8 @@ def train(opt, device): if opt.verbose: LOGGER.info(model) images, labels = next(iter(trainloader)) - file = imshow_cls(images[:25], labels[:25], names=model.names, f=save_dir / 'train_images.jpg') - logger.log_images(file, name='Train Examples') + file = imshow_cls(images[:25], labels[:25], names=model.names, f=save_dir / "train_images.jpg") + logger.log_images(file, name="Train Examples") logger.log_graph(model, imgsz) # log model # Optimizer @@ -163,11 +195,13 @@ def train(opt, device): best_fitness = 0.0 scaler = amp.GradScaler(enabled=cuda) val = test_dir.stem # 'val' or 'test' - LOGGER.info(f'Image sizes {imgsz} train, {imgsz} test\n' - f'Using {nw * WORLD_SIZE} dataloader workers\n' - f"Logging results to {colorstr('bold', save_dir)}\n" - f'Starting {opt.model} training on {data} dataset with {nc} classes for {epochs} epochs...\n\n' - f"{'Epoch':>10}{'GPU_mem':>10}{'train_loss':>12}{f'{val}_loss':>12}{'top1_acc':>12}{'top5_acc':>12}") + LOGGER.info( + f'Image sizes {imgsz} train, {imgsz} test\n' + f'Using {nw * WORLD_SIZE} dataloader workers\n' + f"Logging results to {colorstr('bold', save_dir)}\n" + f'Starting {opt.model} training on {data} dataset with {nc} classes for {epochs} epochs...\n\n' + f"{'Epoch':>10}{'GPU_mem':>10}{'train_loss':>12}{f'{val}_loss':>12}{'top1_acc':>12}{'top5_acc':>12}" + ) for epoch in range(epochs): # loop over the dataset multiple times tloss, vloss, fitness = 0.0, 0.0, 0.0 # train loss, val loss, fitness model.train() @@ -198,15 +232,14 @@ def train(opt, device): if RANK in {-1, 0}: # Print tloss = (tloss * i + loss.item()) / (i + 1) # update mean losses - mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0) # (GB) - pbar.desc = f"{f'{epoch + 1}/{epochs}':>10}{mem:>10}{tloss:>12.3g}" + ' ' * 36 + mem = "%.3gG" % (torch.cuda.memory_reserved() / 1e9 if torch.cuda.is_available() else 0) # (GB) + pbar.desc = f"{f'{epoch + 1}/{epochs}':>10}{mem:>10}{tloss:>12.3g}" + " " * 36 # Test if i == len(pbar) - 1: # last batch - top1, top5, vloss = validate.run(model=ema.ema, - dataloader=testloader, - criterion=criterion, - pbar=pbar) # test accuracy, loss + top1, top5, vloss = validate.run( + model=ema.ema, dataloader=testloader, criterion=criterion, pbar=pbar + ) # test accuracy, loss fitness = top1 # define fitness as top1 accuracy # Scheduler @@ -220,26 +253,28 @@ def train(opt, device): # Log metrics = { - 'train/loss': tloss, - f'{val}/loss': vloss, - 'metrics/accuracy_top1': top1, - 'metrics/accuracy_top5': top5, - 'lr/0': optimizer.param_groups[0]['lr']} # learning rate + "train/loss": tloss, + f"{val}/loss": vloss, + "metrics/accuracy_top1": top1, + "metrics/accuracy_top5": top5, + "lr/0": optimizer.param_groups[0]["lr"], + } # learning rate logger.log_metrics(metrics, epoch) # Save model final_epoch = epoch + 1 == epochs if (not opt.nosave) or final_epoch: ckpt = { - 'epoch': epoch, - 'best_fitness': best_fitness, - 'model': deepcopy(ema.ema).half(), # deepcopy(de_parallel(model)).half(), - 'ema': None, # deepcopy(ema.ema).half(), - 'updates': ema.updates, - 
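# --- Aside: the running-mean update earlier in this hunk,
#     tloss = (tloss * i + loss.item()) / (i + 1)
# is the standard incremental mean m = (m * i + x) / (i + 1): it tracks the
# epoch-average loss without storing per-batch values. A minimal sketch,
# with illustrative names only:
losses = [2.0, 4.0, 6.0]
mean = 0.0
for i, x in enumerate(losses):
    mean = (mean * i + x) / (i + 1)  # mean of the first i + 1 values
assert mean == sum(losses) / len(losses)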
'optimizer': None, # optimizer.state_dict(), - 'opt': vars(opt), - 'git': GIT_INFO, # {remote, branch, commit} if a git repo - 'date': datetime.now().isoformat()} + "epoch": epoch, + "best_fitness": best_fitness, + "model": deepcopy(ema.ema).half(), # deepcopy(de_parallel(model)).half(), + "ema": None, # deepcopy(ema.ema).half(), + "updates": ema.updates, + "optimizer": None, # optimizer.state_dict(), + "opt": vars(opt), + "git": GIT_INFO, # {remote, branch, commit} if a git repo + "date": datetime.now().isoformat(), + } # Save last, best and delete torch.save(ckpt, last) @@ -249,49 +284,51 @@ def train(opt, device): # Train complete if RANK in {-1, 0} and final_epoch: - LOGGER.info(f'\nTraining complete ({(time.time() - t0) / 3600:.3f} hours)' - f"\nResults saved to {colorstr('bold', save_dir)}" - f'\nPredict: python classify/predict.py --weights {best} --source im.jpg' - f'\nValidate: python classify/val.py --weights {best} --data {data_dir}' - f'\nExport: python export.py --weights {best} --include onnx' - f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{best}')" - f'\nVisualize: https://netron.app\n') + LOGGER.info( + f'\nTraining complete ({(time.time() - t0) / 3600:.3f} hours)' + f"\nResults saved to {colorstr('bold', save_dir)}" + f'\nPredict: python classify/predict.py --weights {best} --source im.jpg' + f'\nValidate: python classify/val.py --weights {best} --data {data_dir}' + f'\nExport: python export.py --weights {best} --include onnx' + f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{best}')" + f'\nVisualize: https://netron.app\n' + ) # Plot examples images, labels = (x[:25] for x in next(iter(testloader))) # first 25 images and labels pred = torch.max(ema.ema(images.to(device)), 1)[1] - file = imshow_cls(images, labels, pred, de_parallel(model).names, verbose=False, f=save_dir / 'test_images.jpg') + file = imshow_cls(images, labels, pred, de_parallel(model).names, verbose=False, f=save_dir / "test_images.jpg") # Log results - meta = {'epochs': epochs, 'top1_acc': best_fitness, 'date': datetime.now().isoformat()} - logger.log_images(file, name='Test Examples (true-predicted)', epoch=epoch) + meta = {"epochs": epochs, "top1_acc": best_fitness, "date": datetime.now().isoformat()} + logger.log_images(file, name="Test Examples (true-predicted)", epoch=epoch) logger.log_model(best, epochs, metadata=meta) def parse_opt(known=False): parser = argparse.ArgumentParser() - parser.add_argument('--model', type=str, default='yolov5s-cls.pt', help='initial weights path') - parser.add_argument('--data', type=str, default='imagenette160', help='cifar10, cifar100, mnist, imagenet, ...') - parser.add_argument('--epochs', type=int, default=10, help='total training epochs') - parser.add_argument('--batch-size', type=int, default=64, help='total batch size for all GPUs') - parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=224, help='train, val image size (pixels)') - parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') - parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"') - parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') - parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') - parser.add_argument('--project', default=ROOT / 'runs/train-cls', help='save to project/name') - parser.add_argument('--name', default='exp', help='save to project/name') - parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') - parser.add_argument('--pretrained', nargs='?', const=True, default=True, help='start from i.e. --pretrained False') - parser.add_argument('--optimizer', choices=['SGD', 'Adam', 'AdamW', 'RMSProp'], default='Adam', help='optimizer') - parser.add_argument('--lr0', type=float, default=0.001, help='initial learning rate') - parser.add_argument('--decay', type=float, default=5e-5, help='weight decay') - parser.add_argument('--label-smoothing', type=float, default=0.1, help='Label smoothing epsilon') - parser.add_argument('--cutoff', type=int, default=None, help='Model layer cutoff index for Classify() head') - parser.add_argument('--dropout', type=float, default=None, help='Dropout (fraction)') - parser.add_argument('--verbose', action='store_true', help='Verbose mode') - parser.add_argument('--seed', type=int, default=0, help='Global training seed') - parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify') + parser.add_argument("--model", type=str, default="yolov5s-cls.pt", help="initial weights path") + parser.add_argument("--data", type=str, default="imagenette160", help="cifar10, cifar100, mnist, imagenet, ...") + parser.add_argument("--epochs", type=int, default=10, help="total training epochs") + parser.add_argument("--batch-size", type=int, default=64, help="total batch size for all GPUs") + parser.add_argument("--imgsz", "--img", "--img-size", type=int, default=224, help="train, val image size (pixels)") + parser.add_argument("--nosave", action="store_true", help="only save final checkpoint") + parser.add_argument("--cache", type=str, nargs="?", const="ram", help='--cache images in "ram" (default) or "disk"') + parser.add_argument("--device", default="", help="cuda device, i.e. 0 or 0,1,2,3 or cpu") + parser.add_argument("--workers", type=int, default=8, help="max dataloader workers (per RANK in DDP mode)") + parser.add_argument("--project", default=ROOT / "runs/train-cls", help="save to project/name") + parser.add_argument("--name", default="exp", help="save to project/name") + parser.add_argument("--exist-ok", action="store_true", help="existing project/name ok, do not increment") + parser.add_argument("--pretrained", nargs="?", const=True, default=True, help="start from i.e. 
--pretrained False") + parser.add_argument("--optimizer", choices=["SGD", "Adam", "AdamW", "RMSProp"], default="Adam", help="optimizer") + parser.add_argument("--lr0", type=float, default=0.001, help="initial learning rate") + parser.add_argument("--decay", type=float, default=5e-5, help="weight decay") + parser.add_argument("--label-smoothing", type=float, default=0.1, help="Label smoothing epsilon") + parser.add_argument("--cutoff", type=int, default=None, help="Model layer cutoff index for Classify() head") + parser.add_argument("--dropout", type=float, default=None, help="Dropout (fraction)") + parser.add_argument("--verbose", action="store_true", help="Verbose mode") + parser.add_argument("--seed", type=int, default=0, help="Global training seed") + parser.add_argument("--local_rank", type=int, default=-1, help="Automatic DDP Multi-GPU argument, do not modify") return parser.parse_known_args()[0] if known else parser.parse_args() @@ -300,17 +337,17 @@ def main(opt): if RANK in {-1, 0}: print_args(vars(opt)) check_git_status() - check_requirements(ROOT / 'requirements.txt') + check_requirements(ROOT / "requirements.txt") # DDP mode device = select_device(opt.device, batch_size=opt.batch_size) if LOCAL_RANK != -1: - assert opt.batch_size != -1, 'AutoBatch is coming soon for classification, please pass a valid --batch-size' - assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE' - assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' + assert opt.batch_size != -1, "AutoBatch is coming soon for classification, please pass a valid --batch-size" + assert opt.batch_size % WORLD_SIZE == 0, f"--batch-size {opt.batch_size} must be multiple of WORLD_SIZE" + assert torch.cuda.device_count() > LOCAL_RANK, "insufficient CUDA devices for DDP command" torch.cuda.set_device(LOCAL_RANK) - device = torch.device('cuda', LOCAL_RANK) - dist.init_process_group(backend='nccl' if dist.is_nccl_available() else 'gloo') + device = torch.device("cuda", LOCAL_RANK) + dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo") # Parameters opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok) # increment run @@ -328,6 +365,6 @@ def run(**kwargs): return opt -if __name__ == '__main__': +if __name__ == "__main__": opt = parse_opt() main(opt) diff --git a/classify/val.py b/classify/val.py index 6814c4d780e1..b170253d6e0c 100644 --- a/classify/val.py +++ b/classify/val.py @@ -1,6 +1,6 @@ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ -Validate a trained YOLOv5 classification model on a classification dataset +Validate a trained YOLOv5 classification model on a classification dataset. 
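A note on the tri-state `nargs="?", const=True` pattern used for --pretrained
above (and for --cache, --verbose and --hard-fail elsewhere in this patch):
the flag may be absent, bare, or given a value, and an explicit value arrives
as a string, which is why train() compares str(opt.pretrained).lower() == "true".
A minimal sketch:

    import argparse

    p = argparse.ArgumentParser()
    p.add_argument("--pretrained", nargs="?", const=True, default=True)
    assert p.parse_args([]).pretrained is True                  # flag absent -> default
    assert p.parse_args(["--pretrained"]).pretrained is True    # bare flag -> const
    assert p.parse_args(["--pretrained", "False"]).pretrained == "False"  # value is a str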
Usage: $ bash data/scripts/get_imagenet.sh --val # download ImageNet val split (6.3G, 50000 images) @@ -36,22 +36,30 @@ from models.common import DetectMultiBackend from utils.dataloaders import create_classification_dataloader -from utils.general import (LOGGER, TQDM_BAR_FORMAT, Profile, check_img_size, check_requirements, colorstr, - increment_path, print_args) +from utils.general import ( + LOGGER, + TQDM_BAR_FORMAT, + Profile, + check_img_size, + check_requirements, + colorstr, + increment_path, + print_args, +) from utils.torch_utils import select_device, smart_inference_mode @smart_inference_mode() def run( - data=ROOT / '../datasets/mnist', # dataset dir - weights=ROOT / 'yolov5s-cls.pt', # model.pt path(s) + data=ROOT / "../datasets/mnist", # dataset dir + weights=ROOT / "yolov5s-cls.pt", # model.pt path(s) batch_size=128, # batch size imgsz=224, # inference size (pixels) - device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu + device="", # cuda device, i.e. 0 or 0,1,2,3 or cpu workers=8, # max dataloader workers (per RANK in DDP mode) verbose=False, # verbose output - project=ROOT / 'runs/val-cls', # save to project/name - name='exp', # save to project/name + project=ROOT / "runs/val-cls", # save to project/name + name="exp", # save to project/name exist_ok=False, # existing project/name ok, do not increment half=False, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference @@ -64,7 +72,7 @@ def run( training = model is not None if training: # called by train.py device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model - half &= device.type != 'cpu' # half precision only supported on CUDA + half &= device.type != "cpu" # half precision only supported on CUDA model.half() if half else model.float() else: # called directly device = select_device(device, batch_size=batch_size) @@ -84,25 +92,22 @@ def run( device = model.device if not (pt or jit): batch_size = 1 # export.py models default to batch-size 1 - LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models') + LOGGER.info(f"Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models") # Dataloader data = Path(data) - test_dir = data / 'test' if (data / 'test').exists() else data / 'val' # data/test or data/val - dataloader = create_classification_dataloader(path=test_dir, - imgsz=imgsz, - batch_size=batch_size, - augment=False, - rank=-1, - workers=workers) + test_dir = data / "test" if (data / "test").exists() else data / "val" # data/test or data/val + dataloader = create_classification_dataloader( + path=test_dir, imgsz=imgsz, batch_size=batch_size, augment=False, rank=-1, workers=workers + ) model.eval() pred, targets, loss, dt = [], [], 0, (Profile(device=device), Profile(device=device), Profile(device=device)) n = len(dataloader) # number of batches - action = 'validating' if dataloader.dataset.root.stem == 'val' else 'testing' - desc = f'{pbar.desc[:-36]}{action:>36}' if pbar else f'{action}' + action = "validating" if dataloader.dataset.root.stem == "val" else "testing" + desc = f"{pbar.desc[:-36]}{action:>36}" if pbar else f"{action}" bar = tqdm(dataloader, desc, n, not training, bar_format=TQDM_BAR_FORMAT, position=0) - with torch.cuda.amp.autocast(enabled=device.type != 'cpu'): + with torch.cuda.amp.autocast(enabled=device.type != "cpu"): for images, labels in bar: with dt[0]: images, labels = images.to(device, non_blocking=True), labels.to(device) @@ -123,19 +128,19 @@ def 
run( top1, top5 = acc.mean(0).tolist() if pbar: - pbar.desc = f'{pbar.desc[:-36]}{loss:>12.3g}{top1:>12.3g}{top5:>12.3g}' + pbar.desc = f"{pbar.desc[:-36]}{loss:>12.3g}{top1:>12.3g}{top5:>12.3g}" if verbose: # all classes LOGGER.info(f"{'Class':>24}{'Images':>12}{'top1_acc':>12}{'top5_acc':>12}") LOGGER.info(f"{'all':>24}{targets.shape[0]:>12}{top1:>12.3g}{top5:>12.3g}") for i, c in model.names.items(): acc_i = acc[targets == i] top1i, top5i = acc_i.mean(0).tolist() - LOGGER.info(f'{c:>24}{acc_i.shape[0]:>12}{top1i:>12.3g}{top5i:>12.3g}') + LOGGER.info(f"{c:>24}{acc_i.shape[0]:>12}{top1i:>12.3g}{top5i:>12.3g}") # Print results - t = tuple(x.t / len(dataloader.dataset.samples) * 1E3 for x in dt) # speeds per image + t = tuple(x.t / len(dataloader.dataset.samples) * 1e3 for x in dt) # speeds per image shape = (1, 3, imgsz, imgsz) - LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms post-process per image at shape {shape}' % t) + LOGGER.info(f"Speed: %.1fms pre-process, %.1fms inference, %.1fms post-process per image at shape {shape}" % t) LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}") return top1, top5, loss @@ -143,28 +148,28 @@ def run( def parse_opt(): parser = argparse.ArgumentParser() - parser.add_argument('--data', type=str, default=ROOT / '../datasets/mnist', help='dataset path') - parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-cls.pt', help='model.pt path(s)') - parser.add_argument('--batch-size', type=int, default=128, help='batch size') - parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=224, help='inference size (pixels)') - parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') - parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') - parser.add_argument('--verbose', nargs='?', const=True, default=True, help='verbose output') - parser.add_argument('--project', default=ROOT / 'runs/val-cls', help='save to project/name') - parser.add_argument('--name', default='exp', help='save to project/name') - parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') - parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') - parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') + parser.add_argument("--data", type=str, default=ROOT / "../datasets/mnist", help="dataset path") + parser.add_argument("--weights", nargs="+", type=str, default=ROOT / "yolov5s-cls.pt", help="model.pt path(s)") + parser.add_argument("--batch-size", type=int, default=128, help="batch size") + parser.add_argument("--imgsz", "--img", "--img-size", type=int, default=224, help="inference size (pixels)") + parser.add_argument("--device", default="", help="cuda device, i.e. 
0 or 0,1,2,3 or cpu") + parser.add_argument("--workers", type=int, default=8, help="max dataloader workers (per RANK in DDP mode)") + parser.add_argument("--verbose", nargs="?", const=True, default=True, help="verbose output") + parser.add_argument("--project", default=ROOT / "runs/val-cls", help="save to project/name") + parser.add_argument("--name", default="exp", help="save to project/name") + parser.add_argument("--exist-ok", action="store_true", help="existing project/name ok, do not increment") + parser.add_argument("--half", action="store_true", help="use FP16 half-precision inference") + parser.add_argument("--dnn", action="store_true", help="use OpenCV DNN for ONNX inference") opt = parser.parse_args() print_args(vars(opt)) return opt def main(opt): - check_requirements(ROOT / 'requirements.txt', exclude=('tensorboard', 'thop')) + check_requirements(ROOT / "requirements.txt", exclude=("tensorboard", "thop")) run(**vars(opt)) -if __name__ == '__main__': +if __name__ == "__main__": opt = parse_opt() main(opt) diff --git a/detect.py b/detect.py index 03bc29de999e..b7d77ef431d4 100644 --- a/detect.py +++ b/detect.py @@ -47,54 +47,68 @@ from models.common import DetectMultiBackend from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams -from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, - increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer, xyxy2xywh) +from utils.general import ( + LOGGER, + Profile, + check_file, + check_img_size, + check_imshow, + check_requirements, + colorstr, + cv2, + increment_path, + non_max_suppression, + print_args, + scale_boxes, + strip_optimizer, + xyxy2xywh, +) from utils.torch_utils import select_device, smart_inference_mode @smart_inference_mode() def run( - weights=ROOT / 'yolov5s.pt', # model path or triton URL - source=ROOT / 'data/images', # file/dir/URL/glob/screen/0(webcam) - data=ROOT / 'data/coco128.yaml', # dataset.yaml path - imgsz=(640, 640), # inference size (height, width) - conf_thres=0.25, # confidence threshold - iou_thres=0.45, # NMS IOU threshold - max_det=1000, # maximum detections per image - device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu - view_img=False, # show results - save_txt=False, # save results to *.txt - save_csv=False, # save results in CSV format - save_conf=False, # save confidences in --save-txt labels - save_crop=False, # save cropped prediction boxes - nosave=False, # do not save images/videos - classes=None, # filter by class: --class 0, or --class 0 2 3 - agnostic_nms=False, # class-agnostic NMS - augment=False, # augmented inference - visualize=False, # visualize features - update=False, # update all models - project=ROOT / 'runs/detect', # save results to project/name - name='exp', # save results to project/name - exist_ok=False, # existing project/name ok, do not increment - line_thickness=3, # bounding box thickness (pixels) - hide_labels=False, # hide labels - hide_conf=False, # hide confidences - half=False, # use FP16 half-precision inference - dnn=False, # use OpenCV DNN for ONNX inference - vid_stride=1, # video frame-rate stride + weights=ROOT / "yolov5s.pt", # model path or triton URL + source=ROOT / "data/images", # file/dir/URL/glob/screen/0(webcam) + data=ROOT / "data/coco128.yaml", # dataset.yaml path + imgsz=(640, 640), # inference size (height, width) + conf_thres=0.25, # confidence threshold + iou_thres=0.45, # NMS IOU threshold + max_det=1000, # maximum detections per image + device="", # cuda device, i.e. 0 or 0,1,2,3 or cpu + view_img=False, # show results + save_txt=False, # save results to *.txt + save_csv=False, # save results in CSV format + save_conf=False, # save confidences in --save-txt labels + save_crop=False, # save cropped prediction boxes + nosave=False, # do not save images/videos + classes=None, # filter by class: --class 0, or --class 0 2 3 + agnostic_nms=False, # class-agnostic NMS + augment=False, # augmented inference + visualize=False, # visualize features + update=False, # update all models + project=ROOT / "runs/detect", # save results to project/name + name="exp", # save results to project/name + exist_ok=False, # existing project/name ok, do not increment + line_thickness=3, # bounding box thickness (pixels) + hide_labels=False, # hide labels + hide_conf=False, # hide confidences + half=False, # use FP16 half-precision inference + dnn=False, # use OpenCV DNN for ONNX inference + vid_stride=1, # video frame-rate stride ): source = str(source) - save_img = not nosave and not source.endswith('.txt') # save inference images + save_img = not nosave and not source.endswith(".txt") # save inference images is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) - is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) - webcam = source.isnumeric() or source.endswith('.streams') or (is_url and not is_file) - screenshot = source.lower().startswith('screen') + is_url = source.lower().startswith(("rtsp://", "rtmp://", "http://", "https://")) + webcam = source.isnumeric() or source.endswith(".streams") or (is_url and not is_file) + screenshot = source.lower().startswith("screen") if is_url and is_file: source = check_file(source) # download # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run - (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir + (save_dir / "labels" if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model device = select_device(device) @@ -148,12 +162,12 @@ def run( # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s) # Define the path for the CSV file - csv_path = 
save_dir / 'predictions.csv' + csv_path = save_dir / "predictions.csv" # Create or append to the CSV file def write_to_csv(image_name, prediction, confidence): - data = {'Image Name': image_name, 'Prediction': prediction, 'Confidence': confidence} - with open(csv_path, mode='a', newline='') as f: + data = {"Image Name": image_name, "Prediction": prediction, "Confidence": confidence} + with open(csv_path, mode="a", newline="") as f: writer = csv.DictWriter(f, fieldnames=data.keys()) if not csv_path.is_file(): writer.writeheader() @@ -164,14 +178,14 @@ def write_to_csv(image_name, prediction, confidence): seen += 1 if webcam: # batch_size >= 1 p, im0, frame = path[i], im0s[i].copy(), dataset.count - s += f'{i}: ' + s += f"{i}: " else: - p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0) + p, im0, frame = path, im0s.copy(), getattr(dataset, "frame", 0) p = Path(p) # to Path save_path = str(save_dir / p.name) # im.jpg - txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt - s += '%gx%g ' % im.shape[2:] # print string + txt_path = str(save_dir / "labels" / p.stem) + ("" if dataset.mode == "image" else f"_{frame}") # im.txt + s += "%gx%g " % im.shape[2:] # print string gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh imc = im0.copy() if save_crop else im0 # for save_crop annotator = Annotator(im0, line_width=line_thickness, example=str(names)) @@ -187,9 +201,9 @@ def write_to_csv(image_name, prediction, confidence): # Write results for *xyxy, conf, cls in reversed(det): c = int(cls) # integer class - label = names[c] if hide_conf else f'{names[c]}' + label = names[c] if hide_conf else f"{names[c]}" confidence = float(conf) - confidence_str = f'{confidence:.2f}' + confidence_str = f"{confidence:.2f}" if save_csv: write_to_csv(p.name, label, confidence_str) @@ -197,20 +211,20 @@ def write_to_csv(image_name, prediction, confidence): if save_txt: # Write to file xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format - with open(f'{txt_path}.txt', 'a') as f: - f.write(('%g ' * len(line)).rstrip() % line + '\n') + with open(f"{txt_path}.txt", "a") as f: + f.write(("%g " * len(line)).rstrip() % line + "\n") if save_img or save_crop or view_img: # Add bbox to image c = int(cls) # integer class - label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') + label = None if hide_labels else (names[c] if hide_conf else f"{names[c]} {conf:.2f}") annotator.box_label(xyxy, label, color=colors(c, True)) if save_crop: - save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) + save_one_box(xyxy, imc, file=save_dir / "crops" / names[c] / f"{p.stem}.jpg", BGR=True) # Stream results im0 = annotator.result() if view_img: - if platform.system() == 'Linux' and p not in windows: + if platform.system() == "Linux" and p not in windows: windows.append(p) cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux) cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0]) @@ -219,7 +233,7 @@ def write_to_csv(image_name, prediction, confidence): # Save results (image with detections) if save_img: - if dataset.mode == 'image': + if dataset.mode == "image": cv2.imwrite(save_path, im0) else: # 'video' or 'stream' if vid_path[i] != save_path: # new video @@ -232,18 +246,18 @@ def write_to_csv(image_name, prediction, confidence): h = 
int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) else: # stream fps, w, h = 30, im0.shape[1], im0.shape[0] - save_path = str(Path(save_path).with_suffix('.mp4')) # force *.mp4 suffix on results videos - vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) + save_path = str(Path(save_path).with_suffix(".mp4")) # force *.mp4 suffix on results videos + vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h)) vid_writer[i].write(im0) # Print time (inference-only) LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms") # Print results - t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image - LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) + t = tuple(x.t / seen * 1e3 for x in dt) # speeds per image + LOGGER.info(f"Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}" % t) if save_txt or save_img: - s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' + s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else "" LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") if update: strip_optimizer(weights[0]) # update model (to fix SourceChangeWarning) @@ -251,34 +265,34 @@ def write_to_csv(image_name, prediction, confidence): def parse_opt(): parser = argparse.ArgumentParser() - parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path or triton URL') - parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob/screen/0(webcam)') - parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path') - parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w') - parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold') - parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold') - parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image') - parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') - parser.add_argument('--view-img', action='store_true', help='show results') - parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') - parser.add_argument('--save-csv', action='store_true', help='save results in CSV format') - parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') - parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes') - parser.add_argument('--nosave', action='store_true', help='do not save images/videos') - parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3') - parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') - parser.add_argument('--augment', action='store_true', help='augmented inference') - parser.add_argument('--visualize', action='store_true', help='visualize features') - parser.add_argument('--update', action='store_true', help='update all models') - parser.add_argument('--project', default=ROOT / 'runs/detect', help='save results to project/name') - parser.add_argument('--name', default='exp', help='save results to project/name') - parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') - parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)') - parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels') - parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences') - parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') - parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') - parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride') + parser.add_argument("--weights", nargs="+", type=str, default=ROOT / "yolov5s.pt", help="model path or triton URL") + parser.add_argument("--source", type=str, default=ROOT / "data/images", help="file/dir/URL/glob/screen/0(webcam)") + parser.add_argument("--data", type=str, default=ROOT / "data/coco128.yaml", help="(optional) dataset.yaml path") + parser.add_argument("--imgsz", "--img", "--img-size", nargs="+", type=int, default=[640], help="inference size h,w") + parser.add_argument("--conf-thres", type=float, default=0.25, help="confidence threshold") + parser.add_argument("--iou-thres", type=float, default=0.45, help="NMS IoU threshold") + parser.add_argument("--max-det", type=int, default=1000, help="maximum detections per image") + parser.add_argument("--device", default="", help="cuda device, i.e. 
0 or 0,1,2,3 or cpu") + parser.add_argument("--view-img", action="store_true", help="show results") + parser.add_argument("--save-txt", action="store_true", help="save results to *.txt") + parser.add_argument("--save-csv", action="store_true", help="save results in CSV format") + parser.add_argument("--save-conf", action="store_true", help="save confidences in --save-txt labels") + parser.add_argument("--save-crop", action="store_true", help="save cropped prediction boxes") + parser.add_argument("--nosave", action="store_true", help="do not save images/videos") + parser.add_argument("--classes", nargs="+", type=int, help="filter by class: --classes 0, or --classes 0 2 3") + parser.add_argument("--agnostic-nms", action="store_true", help="class-agnostic NMS") + parser.add_argument("--augment", action="store_true", help="augmented inference") + parser.add_argument("--visualize", action="store_true", help="visualize features") + parser.add_argument("--update", action="store_true", help="update all models") + parser.add_argument("--project", default=ROOT / "runs/detect", help="save results to project/name") + parser.add_argument("--name", default="exp", help="save results to project/name") + parser.add_argument("--exist-ok", action="store_true", help="existing project/name ok, do not increment") + parser.add_argument("--line-thickness", default=3, type=int, help="bounding box thickness (pixels)") + parser.add_argument("--hide-labels", default=False, action="store_true", help="hide labels") + parser.add_argument("--hide-conf", default=False, action="store_true", help="hide confidences") + parser.add_argument("--half", action="store_true", help="use FP16 half-precision inference") + parser.add_argument("--dnn", action="store_true", help="use OpenCV DNN for ONNX inference") + parser.add_argument("--vid-stride", type=int, default=1, help="video frame-rate stride") opt = parser.parse_args() opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand print_args(vars(opt)) @@ -286,10 +300,10 @@ def parse_opt(): def main(opt): - check_requirements(ROOT / 'requirements.txt', exclude=('tensorboard', 'thop')) + check_requirements(ROOT / "requirements.txt", exclude=("tensorboard", "thop")) run(**vars(opt)) -if __name__ == '__main__': +if __name__ == "__main__": opt = parse_opt() main(opt) diff --git a/export.py b/export.py index d550a85fd99f..74701c37a947 100644 --- a/export.py +++ b/export.py @@ -64,30 +64,42 @@ ROOT = FILE.parents[0] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH -if platform.system() != 'Windows': +if platform.system() != "Windows": ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative from models.experimental import attempt_load from models.yolo import ClassificationModel, Detect, DetectionModel, SegmentationModel from utils.dataloaders import LoadImages -from utils.general import (LOGGER, Profile, check_dataset, check_img_size, check_requirements, check_version, - check_yaml, colorstr, file_size, get_default_args, print_args, url2file, yaml_save) +from utils.general import ( + LOGGER, + Profile, + check_dataset, + check_img_size, + check_requirements, + check_version, + check_yaml, + colorstr, + file_size, + get_default_args, + print_args, + url2file, + yaml_save, +) from utils.torch_utils import select_device, smart_inference_mode -MACOS = platform.system() == 'Darwin' # macOS environment +MACOS = platform.system() == "Darwin" # macOS environment class iOSModel(torch.nn.Module): - def __init__(self, model, im): super().__init__() b, 
c, h, w = im.shape # batch, channel, height, width self.model = model self.nc = model.nc # number of classes if w == h: - self.normalize = 1. / w + self.normalize = 1.0 / w else: - self.normalize = torch.tensor([1. / w, 1. / h, 1. / w, 1. / h]) # broadcast (slower, smaller) + self.normalize = torch.tensor([1.0 / w, 1.0 / h, 1.0 / w, 1.0 / h]) # broadcast (slower, smaller) # np = model(im)[0].shape[1] # number of points # self.normalize = torch.tensor([1. / w, 1. / h, 1. / w, 1. / h]).expand(np, 4) # explicit (faster, larger) @@ -99,19 +111,20 @@ def forward(self, x): def export_formats(): # YOLOv5 export formats x = [ - ['PyTorch', '-', '.pt', True, True], - ['TorchScript', 'torchscript', '.torchscript', True, True], - ['ONNX', 'onnx', '.onnx', True, True], - ['OpenVINO', 'openvino', '_openvino_model', True, False], - ['TensorRT', 'engine', '.engine', False, True], - ['CoreML', 'coreml', '.mlmodel', True, False], - ['TensorFlow SavedModel', 'saved_model', '_saved_model', True, True], - ['TensorFlow GraphDef', 'pb', '.pb', True, True], - ['TensorFlow Lite', 'tflite', '.tflite', True, False], - ['TensorFlow Edge TPU', 'edgetpu', '_edgetpu.tflite', False, False], - ['TensorFlow.js', 'tfjs', '_web_model', False, False], - ['PaddlePaddle', 'paddle', '_paddle_model', True, True], ] - return pd.DataFrame(x, columns=['Format', 'Argument', 'Suffix', 'CPU', 'GPU']) + ["PyTorch", "-", ".pt", True, True], + ["TorchScript", "torchscript", ".torchscript", True, True], + ["ONNX", "onnx", ".onnx", True, True], + ["OpenVINO", "openvino", "_openvino_model", True, False], + ["TensorRT", "engine", ".engine", False, True], + ["CoreML", "coreml", ".mlmodel", True, False], + ["TensorFlow SavedModel", "saved_model", "_saved_model", True, True], + ["TensorFlow GraphDef", "pb", ".pb", True, True], + ["TensorFlow Lite", "tflite", ".tflite", True, False], + ["TensorFlow Edge TPU", "edgetpu", "_edgetpu.tflite", False, False], + ["TensorFlow.js", "tfjs", "_web_model", False, False], + ["PaddlePaddle", "paddle", "_paddle_model", True, True], + ] + return pd.DataFrame(x, columns=["Format", "Argument", "Suffix", "CPU", "GPU"]) def try_export(inner_func): @@ -119,28 +132,28 @@ def try_export(inner_func): inner_args = get_default_args(inner_func) def outer_func(*args, **kwargs): - prefix = inner_args['prefix'] + prefix = inner_args["prefix"] try: with Profile() as dt: f, model = inner_func(*args, **kwargs) - LOGGER.info(f'{prefix} export success ✅ {dt.t:.1f}s, saved as {f} ({file_size(f):.1f} MB)') + LOGGER.info(f"{prefix} export success ✅ {dt.t:.1f}s, saved as {f} ({file_size(f):.1f} MB)") return f, model except Exception as e: - LOGGER.info(f'{prefix} export failure ❌ {dt.t:.1f}s: {e}') + LOGGER.info(f"{prefix} export failure ❌ {dt.t:.1f}s: {e}") return None, None return outer_func @try_export -def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:')): +def export_torchscript(model, im, file, optimize, prefix=colorstr("TorchScript:")): # YOLOv5 TorchScript model export - LOGGER.info(f'\n{prefix} starting export with torch {torch.__version__}...') - f = file.with_suffix('.torchscript') + LOGGER.info(f"\n{prefix} starting export with torch {torch.__version__}...") + f = file.with_suffix(".torchscript") ts = torch.jit.trace(model, im, strict=False) - d = {'shape': im.shape, 'stride': int(max(model.stride)), 'names': model.names} - extra_files = {'config.txt': json.dumps(d)} # torch._C.ExtraFilesMap() + d = {"shape": im.shape, "stride": int(max(model.stride)), "names": model.names} + extra_files = 
{"config.txt": json.dumps(d)} # torch._C.ExtraFilesMap() if optimize: # https://pytorch.org/tutorials/recipes/mobile_interpreter.html optimize_for_mobile(ts)._save_for_lite_interpreter(str(f), _extra_files=extra_files) else: @@ -149,22 +162,22 @@ def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:' @try_export -def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr('ONNX:')): +def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr("ONNX:")): # YOLOv5 ONNX export - check_requirements('onnx>=1.12.0') + check_requirements("onnx>=1.12.0") import onnx - LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...') - f = str(file.with_suffix('.onnx')) + LOGGER.info(f"\n{prefix} starting export with onnx {onnx.__version__}...") + f = str(file.with_suffix(".onnx")) - output_names = ['output0', 'output1'] if isinstance(model, SegmentationModel) else ['output0'] + output_names = ["output0", "output1"] if isinstance(model, SegmentationModel) else ["output0"] if dynamic: - dynamic = {'images': {0: 'batch', 2: 'height', 3: 'width'}} # shape(1,3,640,640) + dynamic = {"images": {0: "batch", 2: "height", 3: "width"}} # shape(1,3,640,640) if isinstance(model, SegmentationModel): - dynamic['output0'] = {0: 'batch', 1: 'anchors'} # shape(1,25200,85) - dynamic['output1'] = {0: 'batch', 2: 'mask_height', 3: 'mask_width'} # shape(1,32,160,160) + dynamic["output0"] = {0: "batch", 1: "anchors"} # shape(1,25200,85) + dynamic["output1"] = {0: "batch", 2: "mask_height", 3: "mask_width"} # shape(1,32,160,160) elif isinstance(model, DetectionModel): - dynamic['output0'] = {0: 'batch', 1: 'anchors'} # shape(1,25200,85) + dynamic["output0"] = {0: "batch", 1: "anchors"} # shape(1,25200,85) torch.onnx.export( model.cpu() if dynamic else model, # --dynamic only compatible with cpu @@ -173,16 +186,17 @@ def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr('ONNX verbose=False, opset_version=opset, do_constant_folding=True, # WARNING: DNN inference with torch>=1.12 may require do_constant_folding=False - input_names=['images'], + input_names=["images"], output_names=output_names, - dynamic_axes=dynamic or None) + dynamic_axes=dynamic or None, + ) # Checks model_onnx = onnx.load(f) # load onnx model onnx.checker.check_model(model_onnx) # check onnx model # Metadata - d = {'stride': int(max(model.stride)), 'names': model.names} + d = {"stride": int(max(model.stride)), "names": model.names} for k, v in d.items(): meta = model_onnx.metadata_props.add() meta.key, meta.value = k, str(v) @@ -192,36 +206,37 @@ def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr('ONNX if simplify: try: cuda = torch.cuda.is_available() - check_requirements(('onnxruntime-gpu' if cuda else 'onnxruntime', 'onnx-simplifier>=0.4.1')) + check_requirements(("onnxruntime-gpu" if cuda else "onnxruntime", "onnx-simplifier>=0.4.1")) import onnxsim - LOGGER.info(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...') + LOGGER.info(f"{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...") model_onnx, check = onnxsim.simplify(model_onnx) - assert check, 'assert check failed' + assert check, "assert check failed" onnx.save(model_onnx, f) except Exception as e: - LOGGER.info(f'{prefix} simplifier failure: {e}') + LOGGER.info(f"{prefix} simplifier failure: {e}") return f, model_onnx @try_export -def export_openvino(file, metadata, half, int8, data, prefix=colorstr('OpenVINO:')): +def export_openvino(file, metadata, 
half, int8, data, prefix=colorstr("OpenVINO:")): # YOLOv5 OpenVINO export - check_requirements('openvino-dev>=2023.0') # requires openvino-dev: https://pypi.org/project/openvino-dev/ + check_requirements("openvino-dev>=2023.0") # requires openvino-dev: https://pypi.org/project/openvino-dev/ import openvino.runtime as ov # noqa from openvino.tools import mo # noqa - LOGGER.info(f'\n{prefix} starting export with openvino {ov.__version__}...') - f = str(file).replace(file.suffix, f'_openvino_model{os.sep}') - f_onnx = file.with_suffix('.onnx') - f_ov = str(Path(f) / file.with_suffix('.xml').name) + LOGGER.info(f"\n{prefix} starting export with openvino {ov.__version__}...") + f = str(file).replace(file.suffix, f"_openvino_model{os.sep}") + f_onnx = file.with_suffix(".onnx") + f_ov = str(Path(f) / file.with_suffix(".xml").name) if int8: - check_requirements('nncf>=2.4.0') # requires at least version 2.4.0 to use the post-training quantization + check_requirements("nncf>=2.4.0") # requires at least version 2.4.0 to use the post-training quantization import nncf import numpy as np from openvino.runtime import Core from utils.dataloaders import create_dataloader + core = Core() onnx_model = core.read_model(f_onnx) # export @@ -233,24 +248,21 @@ def prepare_input_tensor(image: np.ndarray): input_tensor = np.expand_dims(input_tensor, 0) return input_tensor - def gen_dataloader(yaml_path, task='train', imgsz=640, workers=4): + def gen_dataloader(yaml_path, task="train", imgsz=640, workers=4): data_yaml = check_yaml(yaml_path) data = check_dataset(data_yaml) - dataloader = create_dataloader(data[task], - imgsz=imgsz, - batch_size=1, - stride=32, - pad=0.5, - single_cls=False, - rect=False, - workers=workers)[0] + dataloader = create_dataloader( + data[task], imgsz=imgsz, batch_size=1, stride=32, pad=0.5, single_cls=False, rect=False, workers=workers + )[0] return dataloader # noqa: F811 def transform_fn(data_item): """ - Quantization transform function. Extracts and preprocess input data from dataloader item for quantization. + Quantization transform function. + + Extracts and preprocess input data from dataloader item for quantization. 
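This INT8 path calibrates with NNCF post-training quantization over a small dataloader. A condensed, hedged sketch of the same calls; the ONNX path and the stand-in calibration loader here are assumptions, not the exporter's actual inputs:

import nncf
import numpy as np
import torch
from openvino.runtime import Core

core = Core()
onnx_model = core.read_model("yolov5s.onnx")  # assumed: produced by export_onnx above

loader = [(torch.zeros(1, 3, 640, 640, dtype=torch.uint8), None, None, None)]  # stand-in calibration batches

def transform_fn(data_item):
    # Pull the image tensor out of an (im, targets, paths, shapes) batch and scale to float32 [0, 1]
    return data_item[0].numpy().astype(np.float32) / 255.0

quantization_dataset = nncf.Dataset(loader, transform_fn)
ov_model = nncf.quantize(onnx_model, quantization_dataset, preset=nncf.QuantizationPreset.MIXED)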
Parameters: data_item: Tuple with data item produced by DataLoader during iteration Returns: @@ -264,77 +276,77 @@ def transform_fn(data_item): quantization_dataset = nncf.Dataset(ds, transform_fn) ov_model = nncf.quantize(onnx_model, quantization_dataset, preset=nncf.QuantizationPreset.MIXED) else: - ov_model = mo.convert_model(f_onnx, model_name=file.stem, framework='onnx', compress_to_fp16=half) # export + ov_model = mo.convert_model(f_onnx, model_name=file.stem, framework="onnx", compress_to_fp16=half) # export ov.serialize(ov_model, f_ov) # save - yaml_save(Path(f) / file.with_suffix('.yaml').name, metadata) # add metadata.yaml + yaml_save(Path(f) / file.with_suffix(".yaml").name, metadata) # add metadata.yaml return f, None @try_export -def export_paddle(model, im, file, metadata, prefix=colorstr('PaddlePaddle:')): +def export_paddle(model, im, file, metadata, prefix=colorstr("PaddlePaddle:")): # YOLOv5 Paddle export - check_requirements(('paddlepaddle', 'x2paddle')) + check_requirements(("paddlepaddle", "x2paddle")) import x2paddle from x2paddle.convert import pytorch2paddle - LOGGER.info(f'\n{prefix} starting export with X2Paddle {x2paddle.__version__}...') - f = str(file).replace('.pt', f'_paddle_model{os.sep}') + LOGGER.info(f"\n{prefix} starting export with X2Paddle {x2paddle.__version__}...") + f = str(file).replace(".pt", f"_paddle_model{os.sep}") - pytorch2paddle(module=model, save_dir=f, jit_type='trace', input_examples=[im]) # export - yaml_save(Path(f) / file.with_suffix('.yaml').name, metadata) # add metadata.yaml + pytorch2paddle(module=model, save_dir=f, jit_type="trace", input_examples=[im]) # export + yaml_save(Path(f) / file.with_suffix(".yaml").name, metadata) # add metadata.yaml return f, None @try_export -def export_coreml(model, im, file, int8, half, nms, prefix=colorstr('CoreML:')): +def export_coreml(model, im, file, int8, half, nms, prefix=colorstr("CoreML:")): # YOLOv5 CoreML export - check_requirements('coremltools') + check_requirements("coremltools") import coremltools as ct - LOGGER.info(f'\n{prefix} starting export with coremltools {ct.__version__}...') - f = file.with_suffix('.mlmodel') + LOGGER.info(f"\n{prefix} starting export with coremltools {ct.__version__}...") + f = file.with_suffix(".mlmodel") if nms: model = iOSModel(model, im) ts = torch.jit.trace(model, im, strict=False) # TorchScript model - ct_model = ct.convert(ts, inputs=[ct.ImageType('image', shape=im.shape, scale=1 / 255, bias=[0, 0, 0])]) - bits, mode = (8, 'kmeans_lut') if int8 else (16, 'linear') if half else (32, None) + ct_model = ct.convert(ts, inputs=[ct.ImageType("image", shape=im.shape, scale=1 / 255, bias=[0, 0, 0])]) + bits, mode = (8, "kmeans_lut") if int8 else (16, "linear") if half else (32, None) if bits < 32: if MACOS: # quantization only supported on macOS with warnings.catch_warnings(): - warnings.filterwarnings('ignore', category=DeprecationWarning) # suppress numpy==1.20 float warning + warnings.filterwarnings("ignore", category=DeprecationWarning) # suppress numpy==1.20 float warning ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode) else: - print(f'{prefix} quantization only supported on macOS, skipping...') + print(f"{prefix} quantization only supported on macOS, skipping...") ct_model.save(f) return f, ct_model @try_export -def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose=False, prefix=colorstr('TensorRT:')): +def export_engine(model, im, file, half, dynamic, simplify, workspace=4, 
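export_coreml above is trace-then-convert, with optional weight quantization that only runs on macOS. A hedged sketch of the coremltools calls on a stand-in traced module:

import torch
import coremltools as ct

model = torch.nn.Conv2d(3, 8, 3)  # stand-in module
im = torch.zeros(1, 3, 640, 640)
ts = torch.jit.trace(model, im, strict=False)  # TorchScript first, as in the diff
ct_model = ct.convert(ts, inputs=[ct.ImageType("image", shape=im.shape, scale=1 / 255, bias=[0, 0, 0])])
# 8-bit weight quantization with the same mode used above (macOS only in the exporter)
ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, 8, "kmeans_lut")
ct_model.save("toy.mlmodel")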
verbose=False, prefix=colorstr("TensorRT:")): # YOLOv5 TensorRT export https://developer.nvidia.com/tensorrt - assert im.device.type != 'cpu', 'export running on CPU but must be on GPU, i.e. `python export.py --device 0`' + assert im.device.type != "cpu", "export running on CPU but must be on GPU, i.e. `python export.py --device 0`" try: import tensorrt as trt except Exception: - if platform.system() == 'Linux': - check_requirements('nvidia-tensorrt', cmds='-U --index-url https://pypi.ngc.nvidia.com') + if platform.system() == "Linux": + check_requirements("nvidia-tensorrt", cmds="-U --index-url https://pypi.ngc.nvidia.com") import tensorrt as trt - if trt.__version__[0] == '7': # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012 + if trt.__version__[0] == "7": # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012 grid = model.model[-1].anchor_grid model.model[-1].anchor_grid = [a[..., :1, :1, :] for a in grid] export_onnx(model, im, file, 12, dynamic, simplify) # opset 12 model.model[-1].anchor_grid = grid else: # TensorRT >= 8 - check_version(trt.__version__, '8.0.0', hard=True) # require tensorrt>=8.0.0 + check_version(trt.__version__, "8.0.0", hard=True) # require tensorrt>=8.0.0 export_onnx(model, im, file, 12, dynamic, simplify) # opset 12 - onnx = file.with_suffix('.onnx') + onnx = file.with_suffix(".onnx") - LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...') - assert onnx.exists(), f'failed to export ONNX file: {onnx}' - f = file.with_suffix('.engine') # TensorRT engine file + LOGGER.info(f"\n{prefix} starting export with TensorRT {trt.__version__}...") + assert onnx.exists(), f"failed to export ONNX file: {onnx}" + f = file.with_suffix(".engine") # TensorRT engine file logger = trt.Logger(trt.Logger.INFO) if verbose: logger.min_severity = trt.Logger.Severity.VERBOSE @@ -344,11 +356,11 @@ def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose config.max_workspace_size = workspace * 1 << 30 # config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace << 30) # fix TRT 8.4 deprecation notice - flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)) + flag = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH) network = builder.create_network(flag) parser = trt.OnnxParser(network, logger) if not parser.parse_from_file(str(onnx)): - raise RuntimeError(f'failed to load ONNX file: {onnx}') + raise RuntimeError(f"failed to load ONNX file: {onnx}") inputs = [network.get_input(i) for i in range(network.num_inputs)] outputs = [network.get_output(i) for i in range(network.num_outputs)] @@ -359,33 +371,35 @@ def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose if dynamic: if im.shape[0] <= 1: - LOGGER.warning(f'{prefix} WARNING ⚠️ --dynamic model requires maximum --batch-size argument') + LOGGER.warning(f"{prefix} WARNING ⚠️ --dynamic model requires maximum --batch-size argument") profile = builder.create_optimization_profile() for inp in inputs: profile.set_shape(inp.name, (1, *im.shape[1:]), (max(1, im.shape[0] // 2), *im.shape[1:]), im.shape) config.add_optimization_profile(profile) - LOGGER.info(f'{prefix} building FP{16 if builder.platform_has_fast_fp16 and half else 32} engine as {f}') + LOGGER.info(f"{prefix} building FP{16 if builder.platform_has_fast_fp16 and half else 32} engine as {f}") if builder.platform_has_fast_fp16 and half: config.set_flag(trt.BuilderFlag.FP16) - with builder.build_engine(network, config) as engine, open(f, 'wb') as 
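Condensing the TensorRT hunk above into a hedged standalone sketch (assumes a CUDA-capable TensorRT 8 install and an existing yolov5s.onnx):

import tensorrt as trt

logger = trt.Logger(trt.Logger.INFO)
builder = trt.Builder(logger)
config = builder.create_builder_config()
config.max_workspace_size = 4 << 30  # 4 GB, matching workspace=4 above
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
parser = trt.OnnxParser(network, logger)
if not parser.parse_from_file("yolov5s.onnx"):
    raise RuntimeError("failed to load ONNX file")
if builder.platform_has_fast_fp16:
    config.set_flag(trt.BuilderFlag.FP16)
with builder.build_engine(network, config) as engine, open("yolov5s.engine", "wb") as t:
    t.write(engine.serialize())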
t: + with builder.build_engine(network, config) as engine, open(f, "wb") as t: t.write(engine.serialize()) return f, None @try_export -def export_saved_model(model, - im, - file, - dynamic, - tf_nms=False, - agnostic_nms=False, - topk_per_class=100, - topk_all=100, - iou_thres=0.45, - conf_thres=0.25, - keras=False, - prefix=colorstr('TensorFlow SavedModel:')): +def export_saved_model( + model, + im, + file, + dynamic, + tf_nms=False, + agnostic_nms=False, + topk_per_class=100, + topk_all=100, + iou_thres=0.45, + conf_thres=0.25, + keras=False, + prefix=colorstr("TensorFlow SavedModel:"), +): # YOLOv5 TensorFlow SavedModel export try: import tensorflow as tf @@ -396,13 +410,13 @@ def export_saved_model(model, from models.tf import TFModel - LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') - if tf.__version__ > '2.13.1': - helper_url = 'https://github.com/ultralytics/yolov5/issues/12489' + LOGGER.info(f"\n{prefix} starting export with tensorflow {tf.__version__}...") + if tf.__version__ > "2.13.1": + helper_url = "https://github.com/ultralytics/yolov5/issues/12489" LOGGER.info( - f'WARNING ⚠️ using Tensorflow {tf.__version__} > 2.13.1 might cause issue when exporting the model to tflite {helper_url}' + f"WARNING ⚠️ using Tensorflow {tf.__version__} > 2.13.1 might cause issue when exporting the model to tflite {helper_url}" ) # handling issue https://github.com/ultralytics/yolov5/issues/12489 - f = str(file).replace('.pt', '_saved_model') + f = str(file).replace(".pt", "_saved_model") batch_size, ch, *imgsz = list(im.shape) # BCHW tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz) @@ -414,7 +428,7 @@ def export_saved_model(model, keras_model.trainable = False keras_model.summary() if keras: - keras_model.save(f, save_format='tf') + keras_model.save(f, save_format="tf") else: spec = tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype) m = tf.function(lambda x: keras_model(x)) # full model @@ -423,21 +437,24 @@ def export_saved_model(model, tfm = tf.Module() tfm.__call__ = tf.function(lambda x: frozen_func(x)[:4] if tf_nms else frozen_func(x), [spec]) tfm.__call__(im) - tf.saved_model.save(tfm, - f, - options=tf.saved_model.SaveOptions(experimental_custom_gradients=False) if check_version( - tf.__version__, '2.6') else tf.saved_model.SaveOptions()) + tf.saved_model.save( + tfm, + f, + options=tf.saved_model.SaveOptions(experimental_custom_gradients=False) + if check_version(tf.__version__, "2.6") + else tf.saved_model.SaveOptions(), + ) return f, keras_model @try_export -def export_pb(keras_model, file, prefix=colorstr('TensorFlow GraphDef:')): +def export_pb(keras_model, file, prefix=colorstr("TensorFlow GraphDef:")): # YOLOv5 TensorFlow GraphDef *.pb export https://github.com/leimao/Frozen_Graph_TensorFlow import tensorflow as tf from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 - LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') - f = file.with_suffix('.pb') + LOGGER.info(f"\n{prefix} starting export with tensorflow {tf.__version__}...") + f = file.with_suffix(".pb") m = tf.function(lambda x: keras_model(x)) # full model m = m.get_concrete_function(tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype)) @@ -448,14 +465,15 @@ def export_pb(keras_model, file, prefix=colorstr('TensorFlow GraphDef:')): @try_export -def export_tflite(keras_model, im, file, int8, per_tensor, data, nms, agnostic_nms, - prefix=colorstr('TensorFlow 
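export_saved_model writes either a Keras model (keras=True) or a frozen tf.Module, and export_pb then freezes the Keras graph to a single .pb. A small sketch of loading the SavedModel artifact back, assuming the default yolov5s output path:

import tensorflow as tf

model = tf.saved_model.load("yolov5s_saved_model")  # frozen tf.Module path (keras=False above)
# keras_model = tf.keras.models.load_model("yolov5s_saved_model")  # if exported with keras=True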
Lite:')): +def export_tflite( + keras_model, im, file, int8, per_tensor, data, nms, agnostic_nms, prefix=colorstr("TensorFlow Lite:") +): # YOLOv5 TensorFlow Lite export import tensorflow as tf - LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') + LOGGER.info(f"\n{prefix} starting export with tensorflow {tf.__version__}...") batch_size, ch, *imgsz = list(im.shape) # BCHW - f = str(file).replace('.pt', '-fp16.tflite') + f = str(file).replace(".pt", "-fp16.tflite") converter = tf.lite.TFLiteConverter.from_keras_model(keras_model) converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS] @@ -463,7 +481,8 @@ def export_tflite(keras_model, im, file, int8, per_tensor, data, nms, agnostic_n converter.optimizations = [tf.lite.Optimize.DEFAULT] if int8: from models.tf import representative_dataset_gen - dataset = LoadImages(check_dataset(check_yaml(data))['train'], img_size=imgsz, auto=False) + + dataset = LoadImages(check_dataset(check_yaml(data))["train"], img_size=imgsz, auto=False) converter.representative_dataset = lambda: representative_dataset_gen(dataset, ncalib=100) converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] converter.target_spec.supported_types = [] @@ -472,77 +491,87 @@ def export_tflite(keras_model, im, file, int8, per_tensor, data, nms, agnostic_n converter.experimental_new_quantizer = True if per_tensor: converter._experimental_disable_per_channel = True - f = str(file).replace('.pt', '-int8.tflite') + f = str(file).replace(".pt", "-int8.tflite") if nms or agnostic_nms: converter.target_spec.supported_ops.append(tf.lite.OpsSet.SELECT_TF_OPS) tflite_model = converter.convert() - open(f, 'wb').write(tflite_model) + open(f, "wb").write(tflite_model) return f, None @try_export -def export_edgetpu(file, prefix=colorstr('Edge TPU:')): +def export_edgetpu(file, prefix=colorstr("Edge TPU:")): # YOLOv5 Edge TPU export https://coral.ai/docs/edgetpu/models-intro/ - cmd = 'edgetpu_compiler --version' - help_url = 'https://coral.ai/docs/edgetpu/compiler/' - assert platform.system() == 'Linux', f'export only supported on Linux. See {help_url}' - if subprocess.run(f'{cmd} > /dev/null 2>&1', shell=True).returncode != 0: - LOGGER.info(f'\n{prefix} export requires Edge TPU compiler. Attempting install from {help_url}') - sudo = subprocess.run('sudo --version >/dev/null', shell=True).returncode == 0 # sudo installed on system + cmd = "edgetpu_compiler --version" + help_url = "https://coral.ai/docs/edgetpu/compiler/" + assert platform.system() == "Linux", f"export only supported on Linux. See {help_url}" + if subprocess.run(f"{cmd} > /dev/null 2>&1", shell=True).returncode != 0: + LOGGER.info(f"\n{prefix} export requires Edge TPU compiler. 
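The TFLite branch drives tf.lite.TFLiteConverter; with int8 it attaches a representative dataset for full-integer quantization. A hedged sketch on a stand-in Keras model (the real exporter calibrates with representative_dataset_gen over LoadImages):

import numpy as np
import tensorflow as tf

keras_model = tf.keras.Sequential([tf.keras.layers.Conv2D(8, 3, input_shape=(640, 640, 3))])  # stand-in
converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = lambda: ([np.zeros((1, 640, 640, 3), np.float32)] for _ in range(10))
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.target_spec.supported_types = []
tflite_model = converter.convert()
open("toy-int8.tflite", "wb").write(tflite_model)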
Attempting install from {help_url}") + sudo = subprocess.run("sudo --version >/dev/null", shell=True).returncode == 0 # sudo installed on system for c in ( - 'curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -', - 'echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list', - 'sudo apt-get update', 'sudo apt-get install edgetpu-compiler'): - subprocess.run(c if sudo else c.replace('sudo ', ''), shell=True, check=True) + "curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -", + 'echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list', + "sudo apt-get update", + "sudo apt-get install edgetpu-compiler", + ): + subprocess.run(c if sudo else c.replace("sudo ", ""), shell=True, check=True) ver = subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1] - LOGGER.info(f'\n{prefix} starting export with Edge TPU compiler {ver}...') - f = str(file).replace('.pt', '-int8_edgetpu.tflite') # Edge TPU model - f_tfl = str(file).replace('.pt', '-int8.tflite') # TFLite model - - subprocess.run([ - 'edgetpu_compiler', - '-s', - '-d', - '-k', - '10', - '--out_dir', - str(file.parent), - f_tfl, ], check=True) + LOGGER.info(f"\n{prefix} starting export with Edge TPU compiler {ver}...") + f = str(file).replace(".pt", "-int8_edgetpu.tflite") # Edge TPU model + f_tfl = str(file).replace(".pt", "-int8.tflite") # TFLite model + + subprocess.run( + [ + "edgetpu_compiler", + "-s", + "-d", + "-k", + "10", + "--out_dir", + str(file.parent), + f_tfl, + ], + check=True, + ) return f, None @try_export -def export_tfjs(file, int8, prefix=colorstr('TensorFlow.js:')): +def export_tfjs(file, int8, prefix=colorstr("TensorFlow.js:")): # YOLOv5 TensorFlow.js export - check_requirements('tensorflowjs') + check_requirements("tensorflowjs") import tensorflowjs as tfjs - LOGGER.info(f'\n{prefix} starting export with tensorflowjs {tfjs.__version__}...') - f = str(file).replace('.pt', '_web_model') # js dir - f_pb = file.with_suffix('.pb') # *.pb path - f_json = f'{f}/model.json' # *.json path + LOGGER.info(f"\n{prefix} starting export with tensorflowjs {tfjs.__version__}...") + f = str(file).replace(".pt", "_web_model") # js dir + f_pb = file.with_suffix(".pb") # *.pb path + f_json = f"{f}/model.json" # *.json path args = [ - 'tensorflowjs_converter', - '--input_format=tf_frozen_model', - '--quantize_uint8' if int8 else '', - '--output_node_names=Identity,Identity_1,Identity_2,Identity_3', + "tensorflowjs_converter", + "--input_format=tf_frozen_model", + "--quantize_uint8" if int8 else "", + "--output_node_names=Identity,Identity_1,Identity_2,Identity_3", str(f_pb), - str(f), ] + str(f), + ] subprocess.run([arg for arg in args if arg], check=True) json = Path(f_json).read_text() - with open(f_json, 'w') as j: # sort JSON Identity_* in ascending order + with open(f_json, "w") as j: # sort JSON Identity_* in ascending order subst = re.sub( r'{"outputs": {"Identity.?.?": {"name": "Identity.?.?"}, ' r'"Identity.?.?": {"name": "Identity.?.?"}, ' r'"Identity.?.?": {"name": "Identity.?.?"}, ' - r'"Identity.?.?": {"name": "Identity.?.?"}}}', r'{"outputs": {"Identity": {"name": "Identity"}, ' + r'"Identity.?.?": {"name": "Identity.?.?"}}}', + r'{"outputs": {"Identity": {"name": "Identity"}, ' r'"Identity_1": {"name": "Identity_1"}, ' r'"Identity_2": {"name": "Identity_2"}, ' - r'"Identity_3": {"name": 
"Identity_3"}}}', json) + r'"Identity_3": {"name": "Identity_3"}}}', + json, + ) j.write(subst) return f, None @@ -555,8 +584,8 @@ def add_tflite_metadata(file, metadata, num_outputs): from tflite_support import metadata as _metadata from tflite_support import metadata_schema_py_generated as _metadata_fb - tmp_file = Path('/tmp/meta.txt') - with open(tmp_file, 'w') as meta_f: + tmp_file = Path("/tmp/meta.txt") + with open(tmp_file, "w") as meta_f: meta_f.write(str(metadata)) model_meta = _metadata_fb.ModelMetadataT() @@ -580,22 +609,22 @@ def add_tflite_metadata(file, metadata, num_outputs): tmp_file.unlink() -def pipeline_coreml(model, im, file, names, y, prefix=colorstr('CoreML Pipeline:')): +def pipeline_coreml(model, im, file, names, y, prefix=colorstr("CoreML Pipeline:")): # YOLOv5 CoreML pipeline import coremltools as ct from PIL import Image - print(f'{prefix} starting pipeline with coremltools {ct.__version__}...') + print(f"{prefix} starting pipeline with coremltools {ct.__version__}...") batch_size, ch, h, w = list(im.shape) # BCHW t = time.time() # YOLOv5 Output shapes spec = model.get_spec() out0, out1 = iter(spec.description.output) - if platform.system() == 'Darwin': - img = Image.new('RGB', (w, h)) # img(192 width, 320 height) + if platform.system() == "Darwin": + img = Image.new("RGB", (w, h)) # img(192 width, 320 height) # img = torch.zeros((*opt.img_size, 3)).numpy() # img size(320,192,3) iDetection - out = model.predict({'image': img}) + out = model.predict({"image": img}) out0_shape, out1_shape = out[out0.name].shape, out[out1.name].shape else: # linux and windows can not run model.predict(), get sizes from pytorch output y s = tuple(y[0].shape) @@ -605,7 +634,7 @@ def pipeline_coreml(model, im, file, names, y, prefix=colorstr('CoreML Pipeline: nx, ny = spec.description.input[0].type.imageType.width, spec.description.input[0].type.imageType.height na, nc = out0_shape # na, nc = out0.type.multiArrayType.shape # number anchors, classes - assert len(names) == nc, f'{len(names)} names found for nc={nc}' # check + assert len(names) == nc, f"{len(names)} names found for nc={nc}" # check # Define output shapes (missing) out0.type.multiArrayType.shape[:] = out0_shape # (3780, 80) @@ -639,8 +668,8 @@ def pipeline_coreml(model, im, file, names, y, prefix=colorstr('CoreML Pipeline: nms_spec.description.output.add() nms_spec.description.output[i].ParseFromString(decoder_output) - nms_spec.description.output[0].name = 'confidence' - nms_spec.description.output[1].name = 'coordinates' + nms_spec.description.output[0].name = "confidence" + nms_spec.description.output[1].name = "coordinates" output_sizes = [nc, 4] for i in range(2): @@ -656,10 +685,10 @@ def pipeline_coreml(model, im, file, names, y, prefix=colorstr('CoreML Pipeline: nms = nms_spec.nonMaximumSuppression nms.confidenceInputFeatureName = out0.name # 1x507x80 nms.coordinatesInputFeatureName = out1.name # 1x507x4 - nms.confidenceOutputFeatureName = 'confidence' - nms.coordinatesOutputFeatureName = 'coordinates' - nms.iouThresholdInputFeatureName = 'iouThreshold' - nms.confidenceThresholdInputFeatureName = 'confidenceThreshold' + nms.confidenceOutputFeatureName = "confidence" + nms.coordinatesOutputFeatureName = "coordinates" + nms.iouThresholdInputFeatureName = "iouThreshold" + nms.confidenceThresholdInputFeatureName = "confidenceThreshold" nms.iouThreshold = 0.45 nms.confidenceThreshold = 0.25 nms.pickTop.perClass = True @@ -667,10 +696,14 @@ def pipeline_coreml(model, im, file, names, y, prefix=colorstr('CoreML Pipeline: 
nms_model = ct.models.MLModel(nms_spec) # 4. Pipeline models together - pipeline = ct.models.pipeline.Pipeline(input_features=[('image', ct.models.datatypes.Array(3, ny, nx)), - ('iouThreshold', ct.models.datatypes.Double()), - ('confidenceThreshold', ct.models.datatypes.Double())], - output_features=['confidence', 'coordinates']) + pipeline = ct.models.pipeline.Pipeline( + input_features=[ + ("image", ct.models.datatypes.Array(3, ny, nx)), + ("iouThreshold", ct.models.datatypes.Double()), + ("confidenceThreshold", ct.models.datatypes.Double()), + ], + output_features=["confidence", "coordinates"], + ) pipeline.add_model(model) pipeline.add_model(nms_model) @@ -681,73 +714,77 @@ def pipeline_coreml(model, im, file, names, y, prefix=colorstr('CoreML Pipeline: # Update metadata pipeline.spec.specificationVersion = 5 - pipeline.spec.description.metadata.versionString = 'https://github.com/ultralytics/yolov5' - pipeline.spec.description.metadata.shortDescription = 'https://github.com/ultralytics/yolov5' - pipeline.spec.description.metadata.author = 'glenn.jocher@ultralytics.com' - pipeline.spec.description.metadata.license = 'https://github.com/ultralytics/yolov5/blob/master/LICENSE' - pipeline.spec.description.metadata.userDefined.update({ - 'classes': ','.join(names.values()), - 'iou_threshold': str(nms.iouThreshold), - 'confidence_threshold': str(nms.confidenceThreshold)}) + pipeline.spec.description.metadata.versionString = "https://github.com/ultralytics/yolov5" + pipeline.spec.description.metadata.shortDescription = "https://github.com/ultralytics/yolov5" + pipeline.spec.description.metadata.author = "glenn.jocher@ultralytics.com" + pipeline.spec.description.metadata.license = "https://github.com/ultralytics/yolov5/blob/master/LICENSE" + pipeline.spec.description.metadata.userDefined.update( + { + "classes": ",".join(names.values()), + "iou_threshold": str(nms.iouThreshold), + "confidence_threshold": str(nms.confidenceThreshold), + } + ) # Save the model - f = file.with_suffix('.mlmodel') # filename + f = file.with_suffix(".mlmodel") # filename model = ct.models.MLModel(pipeline.spec) - model.input_description['image'] = 'Input image' - model.input_description['iouThreshold'] = f'(optional) IOU Threshold override (default: {nms.iouThreshold})' - model.input_description['confidenceThreshold'] = \ - f'(optional) Confidence Threshold override (default: {nms.confidenceThreshold})' - model.output_description['confidence'] = 'Boxes × Class confidence (see user-defined metadata "classes")' - model.output_description['coordinates'] = 'Boxes × [x, y, width, height] (relative to image size)' + model.input_description["image"] = "Input image" + model.input_description["iouThreshold"] = f"(optional) IOU Threshold override (default: {nms.iouThreshold})" + model.input_description[ + "confidenceThreshold" + ] = f"(optional) Confidence Threshold override (default: {nms.confidenceThreshold})" + model.output_description["confidence"] = 'Boxes × Class confidence (see user-defined metadata "classes")' + model.output_description["coordinates"] = "Boxes × [x, y, width, height] (relative to image size)" model.save(f) # pipelined - print(f'{prefix} pipeline success ({time.time() - t:.2f}s), saved as {f} ({file_size(f):.1f} MB)') + print(f"{prefix} pipeline success ({time.time() - t:.2f}s), saved as {f} ({file_size(f):.1f} MB)") @smart_inference_mode() def run( - data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' - weights=ROOT / 'yolov5s.pt', # weights path - imgsz=(640, 640), # image (height, width) - 
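The assembled CoreML pipeline exposes iouThreshold and confidenceThreshold as optional prediction-time inputs, per the input_description strings above. A hedged, macOS-only usage sketch (model.predict() is unavailable on Linux/Windows, as noted earlier in this function):

import coremltools as ct
from PIL import Image

model = ct.models.MLModel("yolov5s.mlmodel")  # assumed pipeline output path
img = Image.new("RGB", (640, 640))            # stand-in input image
out = model.predict({"image": img, "iouThreshold": 0.45, "confidenceThreshold": 0.25})
print(out["confidence"].shape, out["coordinates"].shape)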
batch_size=1, # batch size - device='cpu', # cuda device, i.e. 0 or 0,1,2,3 or cpu - include=('torchscript', 'onnx'), # include formats - half=False, # FP16 half-precision export - inplace=False, # set YOLOv5 Detect() inplace=True - keras=False, # use Keras - optimize=False, # TorchScript: optimize for mobile - int8=False, # CoreML/TF INT8 quantization - per_tensor=False, # TF per tensor quantization - dynamic=False, # ONNX/TF/TensorRT: dynamic axes - simplify=False, # ONNX: simplify model - opset=12, # ONNX: opset version - verbose=False, # TensorRT: verbose log - workspace=4, # TensorRT: workspace size (GB) - nms=False, # TF: add NMS to model - agnostic_nms=False, # TF: add agnostic NMS to model - topk_per_class=100, # TF.js NMS: topk per class to keep - topk_all=100, # TF.js NMS: topk for all classes to keep - iou_thres=0.45, # TF.js NMS: IoU threshold - conf_thres=0.25, # TF.js NMS: confidence threshold + data=ROOT / "data/coco128.yaml", # 'dataset.yaml path' + weights=ROOT / "yolov5s.pt", # weights path + imgsz=(640, 640), # image (height, width) + batch_size=1, # batch size + device="cpu", # cuda device, i.e. 0 or 0,1,2,3 or cpu + include=("torchscript", "onnx"), # include formats + half=False, # FP16 half-precision export + inplace=False, # set YOLOv5 Detect() inplace=True + keras=False, # use Keras + optimize=False, # TorchScript: optimize for mobile + int8=False, # CoreML/TF INT8 quantization + per_tensor=False, # TF per tensor quantization + dynamic=False, # ONNX/TF/TensorRT: dynamic axes + simplify=False, # ONNX: simplify model + opset=12, # ONNX: opset version + verbose=False, # TensorRT: verbose log + workspace=4, # TensorRT: workspace size (GB) + nms=False, # TF: add NMS to model + agnostic_nms=False, # TF: add agnostic NMS to model + topk_per_class=100, # TF.js NMS: topk per class to keep + topk_all=100, # TF.js NMS: topk for all classes to keep + iou_thres=0.45, # TF.js NMS: IoU threshold + conf_thres=0.25, # TF.js NMS: confidence threshold ): t = time.time() include = [x.lower() for x in include] # to lowercase - fmts = tuple(export_formats()['Argument'][1:]) # --include arguments + fmts = tuple(export_formats()["Argument"][1:]) # --include arguments flags = [x in include for x in fmts] - assert sum(flags) == len(include), f'ERROR: Invalid --include {include}, valid --include arguments are {fmts}' + assert sum(flags) == len(include), f"ERROR: Invalid --include {include}, valid --include arguments are {fmts}" jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle = flags # export booleans - file = Path(url2file(weights) if str(weights).startswith(('http:/', 'https:/')) else weights) # PyTorch weights + file = Path(url2file(weights) if str(weights).startswith(("http:/", "https:/")) else weights) # PyTorch weights # Load PyTorch model device = select_device(device) if half: - assert device.type != 'cpu' or coreml, '--half only compatible with GPU export, i.e. use --device 0' - assert not dynamic, '--half not compatible with --dynamic, i.e. use either --half or --dynamic but not both' + assert device.type != "cpu" or coreml, "--half only compatible with GPU export, i.e. use --device 0" + assert not dynamic, "--half not compatible with --dynamic, i.e. use either --half or --dynamic but not both" model = attempt_load(weights, device=device, inplace=True, fuse=True) # load FP32 model # Checks imgsz *= 2 if len(imgsz) == 1 else 1 # expand if optimize: - assert device.type == 'cpu', '--optimize not compatible with cuda devices, i.e. 
use --device cpu' + assert device.type == "cpu", "--optimize not compatible with cuda devices, i.e. use --device cpu" # Input gs = int(max(model.stride)) # grid size (max stride) @@ -767,12 +804,12 @@ def run( if half and not coreml: im, model = im.half(), model.half() # to FP16 shape = tuple((y[0] if isinstance(y, tuple) else y).shape) # model output shape - metadata = {'stride': int(max(model.stride)), 'names': model.names} # model metadata + metadata = {"stride": int(max(model.stride)), "names": model.names} # model metadata LOGGER.info(f"\n{colorstr('PyTorch:')} starting from {file} with output shape {shape} ({file_size(file):.1f} MB)") # Exports - f = [''] * len(fmts) # exported filenames - warnings.filterwarnings(action='ignore', category=torch.jit.TracerWarning) # suppress TracerWarning + f = [""] * len(fmts) # exported filenames + warnings.filterwarnings(action="ignore", category=torch.jit.TracerWarning) # suppress TracerWarning if jit: # TorchScript f[0], _ = export_torchscript(model, im, file, optimize) if engine: # TensorRT required before ONNX @@ -786,30 +823,27 @@ def run( if nms: pipeline_coreml(ct_model, im, file, model.names, y) if any((saved_model, pb, tflite, edgetpu, tfjs)): # TensorFlow formats - assert not tflite or not tfjs, 'TFLite and TF.js models must be exported separately, please pass only one type.' - assert not isinstance(model, ClassificationModel), 'ClassificationModel export to TF formats not yet supported.' - f[5], s_model = export_saved_model(model.cpu(), - im, - file, - dynamic, - tf_nms=nms or agnostic_nms or tfjs, - agnostic_nms=agnostic_nms or tfjs, - topk_per_class=topk_per_class, - topk_all=topk_all, - iou_thres=iou_thres, - conf_thres=conf_thres, - keras=keras) + assert not tflite or not tfjs, "TFLite and TF.js models must be exported separately, please pass only one type." + assert not isinstance(model, ClassificationModel), "ClassificationModel export to TF formats not yet supported." 
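run() is importable as well as CLI-driven, so exports can be scripted. A minimal example mirroring the defaults above:

from export import run  # export.py, this file

run(weights="yolov5s.pt", include=("torchscript", "onnx"), imgsz=(640, 640), device="cpu")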
+ f[5], s_model = export_saved_model( + model.cpu(), + im, + file, + dynamic, + tf_nms=nms or agnostic_nms or tfjs, + agnostic_nms=agnostic_nms or tfjs, + topk_per_class=topk_per_class, + topk_all=topk_all, + iou_thres=iou_thres, + conf_thres=conf_thres, + keras=keras, + ) if pb or tfjs: # pb prerequisite to tfjs f[6], _ = export_pb(s_model, file) if tflite or edgetpu: - f[7], _ = export_tflite(s_model, - im, - file, - int8 or edgetpu, - per_tensor, - data=data, - nms=nms, - agnostic_nms=agnostic_nms) + f[7], _ = export_tflite( + s_model, im, file, int8 or edgetpu, per_tensor, data=data, nms=nms, agnostic_nms=agnostic_nms + ) if edgetpu: f[8], _ = export_edgetpu(file) add_tflite_metadata(f[8] or f[7], metadata, num_outputs=len(s_model.outputs)) @@ -823,58 +857,66 @@ def run( if any(f): cls, det, seg = (isinstance(model, x) for x in (ClassificationModel, DetectionModel, SegmentationModel)) # type det &= not seg # segmentation models inherit from SegmentationModel(DetectionModel) - dir = Path('segment' if seg else 'classify' if cls else '') - h = '--half' if half else '' # --half FP16 inference arg - s = '# WARNING ⚠️ ClassificationModel not yet supported for PyTorch Hub AutoShape inference' if cls else \ - '# WARNING ⚠️ SegmentationModel not yet supported for PyTorch Hub AutoShape inference' if seg else '' - LOGGER.info(f'\nExport complete ({time.time() - t:.1f}s)' - f"\nResults saved to {colorstr('bold', file.parent.resolve())}" - f"\nDetect: python {dir / ('detect.py' if det else 'predict.py')} --weights {f[-1]} {h}" - f"\nValidate: python {dir / 'val.py'} --weights {f[-1]} {h}" - f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{f[-1]}') {s}" - f'\nVisualize: https://netron.app') + dir = Path("segment" if seg else "classify" if cls else "") + h = "--half" if half else "" # --half FP16 inference arg + s = ( + "# WARNING ⚠️ ClassificationModel not yet supported for PyTorch Hub AutoShape inference" + if cls + else "# WARNING ⚠️ SegmentationModel not yet supported for PyTorch Hub AutoShape inference" + if seg + else "" + ) + LOGGER.info( + f'\nExport complete ({time.time() - t:.1f}s)' + f"\nResults saved to {colorstr('bold', file.parent.resolve())}" + f"\nDetect: python {dir / ('detect.py' if det else 'predict.py')} --weights {f[-1]} {h}" + f"\nValidate: python {dir / 'val.py'} --weights {f[-1]} {h}" + f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{f[-1]}') {s}" + f'\nVisualize: https://netron.app' + ) return f # return list of exported files/dirs def parse_opt(known=False): parser = argparse.ArgumentParser() - parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') - parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model.pt path(s)') - parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640, 640], help='image (h, w)') - parser.add_argument('--batch-size', type=int, default=1, help='batch size') - parser.add_argument('--device', default='cpu', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') - parser.add_argument('--half', action='store_true', help='FP16 half-precision export') - parser.add_argument('--inplace', action='store_true', help='set YOLOv5 Detect() inplace=True') - parser.add_argument('--keras', action='store_true', help='TF: use Keras') - parser.add_argument('--optimize', action='store_true', help='TorchScript: optimize for mobile') - parser.add_argument('--int8', action='store_true', help='CoreML/TF/OpenVINO INT8 quantization') - parser.add_argument('--per-tensor', action='store_true', help='TF per-tensor quantization') - parser.add_argument('--dynamic', action='store_true', help='ONNX/TF/TensorRT: dynamic axes') - parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model') - parser.add_argument('--opset', type=int, default=17, help='ONNX: opset version') - parser.add_argument('--verbose', action='store_true', help='TensorRT: verbose log') - parser.add_argument('--workspace', type=int, default=4, help='TensorRT: workspace size (GB)') - parser.add_argument('--nms', action='store_true', help='TF: add NMS to model') - parser.add_argument('--agnostic-nms', action='store_true', help='TF: add agnostic NMS to model') - parser.add_argument('--topk-per-class', type=int, default=100, help='TF.js NMS: topk per class to keep') - parser.add_argument('--topk-all', type=int, default=100, help='TF.js NMS: topk for all classes to keep') - parser.add_argument('--iou-thres', type=float, default=0.45, help='TF.js NMS: IoU threshold') - parser.add_argument('--conf-thres', type=float, default=0.25, help='TF.js NMS: confidence threshold') + parser.add_argument("--data", type=str, default=ROOT / "data/coco128.yaml", help="dataset.yaml path") + parser.add_argument("--weights", nargs="+", type=str, default=ROOT / "yolov5s.pt", help="model.pt path(s)") + parser.add_argument("--imgsz", "--img", "--img-size", nargs="+", type=int, default=[640, 640], help="image (h, w)") + parser.add_argument("--batch-size", type=int, default=1, help="batch size") + parser.add_argument("--device", default="cpu", help="cuda device, i.e. 
0 or 0,1,2,3 or cpu") + parser.add_argument("--half", action="store_true", help="FP16 half-precision export") + parser.add_argument("--inplace", action="store_true", help="set YOLOv5 Detect() inplace=True") + parser.add_argument("--keras", action="store_true", help="TF: use Keras") + parser.add_argument("--optimize", action="store_true", help="TorchScript: optimize for mobile") + parser.add_argument("--int8", action="store_true", help="CoreML/TF/OpenVINO INT8 quantization") + parser.add_argument("--per-tensor", action="store_true", help="TF per-tensor quantization") + parser.add_argument("--dynamic", action="store_true", help="ONNX/TF/TensorRT: dynamic axes") + parser.add_argument("--simplify", action="store_true", help="ONNX: simplify model") + parser.add_argument("--opset", type=int, default=17, help="ONNX: opset version") + parser.add_argument("--verbose", action="store_true", help="TensorRT: verbose log") + parser.add_argument("--workspace", type=int, default=4, help="TensorRT: workspace size (GB)") + parser.add_argument("--nms", action="store_true", help="TF: add NMS to model") + parser.add_argument("--agnostic-nms", action="store_true", help="TF: add agnostic NMS to model") + parser.add_argument("--topk-per-class", type=int, default=100, help="TF.js NMS: topk per class to keep") + parser.add_argument("--topk-all", type=int, default=100, help="TF.js NMS: topk for all classes to keep") + parser.add_argument("--iou-thres", type=float, default=0.45, help="TF.js NMS: IoU threshold") + parser.add_argument("--conf-thres", type=float, default=0.25, help="TF.js NMS: confidence threshold") parser.add_argument( - '--include', - nargs='+', - default=['torchscript'], - help='torchscript, onnx, openvino, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle') + "--include", + nargs="+", + default=["torchscript"], + help="torchscript, onnx, openvino, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle", + ) opt = parser.parse_known_args()[0] if known else parser.parse_args() print_args(vars(opt)) return opt def main(opt): - for opt.weights in (opt.weights if isinstance(opt.weights, list) else [opt.weights]): + for opt.weights in opt.weights if isinstance(opt.weights, list) else [opt.weights]: run(**vars(opt)) -if __name__ == '__main__': +if __name__ == "__main__": opt = parse_opt() main(opt) diff --git a/hubconf.py b/hubconf.py index f0192698fbe3..691d8eb64749 100644 --- a/hubconf.py +++ b/hubconf.py @@ -14,7 +14,8 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None): - """Creates or loads a YOLOv5 model + """ + Creates or loads a YOLOv5 model. 
Arguments: name (str): model name 'yolov5s' or path 'path/to/best.pt' @@ -39,9 +40,9 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo if not verbose: LOGGER.setLevel(logging.WARNING) - check_requirements(ROOT / 'requirements.txt', exclude=('opencv-python', 'tensorboard', 'thop')) + check_requirements(ROOT / "requirements.txt", exclude=("opencv-python", "tensorboard", "thop")) name = Path(name) - path = name.with_suffix('.pt') if name.suffix == '' and not name.is_dir() else name # checkpoint path + path = name.with_suffix(".pt") if name.suffix == "" and not name.is_dir() else name # checkpoint path try: device = select_device(device) if pretrained and channels == 3 and classes == 80: @@ -49,91 +50,95 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo model = DetectMultiBackend(path, device=device, fuse=autoshape) # detection model if autoshape: if model.pt and isinstance(model.model, ClassificationModel): - LOGGER.warning('WARNING ⚠️ YOLOv5 ClassificationModel is not yet AutoShape compatible. ' - 'You must pass torch tensors in BCHW to this model, i.e. shape(1,3,224,224).') + LOGGER.warning( + "WARNING ⚠️ YOLOv5 ClassificationModel is not yet AutoShape compatible. " + "You must pass torch tensors in BCHW to this model, i.e. shape(1,3,224,224)." + ) elif model.pt and isinstance(model.model, SegmentationModel): - LOGGER.warning('WARNING ⚠️ YOLOv5 SegmentationModel is not yet AutoShape compatible. ' - 'You will not be able to run inference with this model.') + LOGGER.warning( + "WARNING ⚠️ YOLOv5 SegmentationModel is not yet AutoShape compatible. " + "You will not be able to run inference with this model." + ) else: model = AutoShape(model) # for file/URI/PIL/cv2/np inputs and NMS except Exception: model = attempt_load(path, device=device, fuse=False) # arbitrary model else: - cfg = list((Path(__file__).parent / 'models').rglob(f'{path.stem}.yaml'))[0] # model.yaml path + cfg = list((Path(__file__).parent / "models").rglob(f"{path.stem}.yaml"))[0] # model.yaml path model = DetectionModel(cfg, channels, classes) # create model if pretrained: ckpt = torch.load(attempt_download(path), map_location=device) # load - csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 - csd = intersect_dicts(csd, model.state_dict(), exclude=['anchors']) # intersect + csd = ckpt["model"].float().state_dict() # checkpoint state_dict as FP32 + csd = intersect_dicts(csd, model.state_dict(), exclude=["anchors"]) # intersect model.load_state_dict(csd, strict=False) # load - if len(ckpt['model'].names) == classes: - model.names = ckpt['model'].names # set class names attribute + if len(ckpt["model"].names) == classes: + model.names = ckpt["model"].names # set class names attribute if not verbose: LOGGER.setLevel(logging.INFO) # reset to default return model.to(device) except Exception as e: - help_url = 'https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading' - s = f'{e}. Cache may be out of date, try `force_reload=True` or see {help_url} for help.' + help_url = "https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading" + s = f"{e}. Cache may be out of date, try `force_reload=True` or see {help_url} for help." 
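These _create-based entry points are what torch.hub resolves. Typical usage, matching the "PyTorch Hub:" hint printed by export.py above:

import torch

model = torch.hub.load("ultralytics/yolov5", "yolov5s")                     # pretrained release model
model = torch.hub.load("ultralytics/yolov5", "custom", "path/to/best.pt")   # custom/local weights
results = model("https://ultralytics.com/images/zidane.jpg")                # AutoShape inference
results.print()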
raise Exception(s) from e -def custom(path='path/to/model.pt', autoshape=True, _verbose=True, device=None): +def custom(path="path/to/model.pt", autoshape=True, _verbose=True, device=None): # YOLOv5 custom or local model return _create(path, autoshape=autoshape, verbose=_verbose, device=device) def yolov5n(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): # YOLOv5-nano model https://github.com/ultralytics/yolov5 - return _create('yolov5n', pretrained, channels, classes, autoshape, _verbose, device) + return _create("yolov5n", pretrained, channels, classes, autoshape, _verbose, device) def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): # YOLOv5-small model https://github.com/ultralytics/yolov5 - return _create('yolov5s', pretrained, channels, classes, autoshape, _verbose, device) + return _create("yolov5s", pretrained, channels, classes, autoshape, _verbose, device) def yolov5m(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): # YOLOv5-medium model https://github.com/ultralytics/yolov5 - return _create('yolov5m', pretrained, channels, classes, autoshape, _verbose, device) + return _create("yolov5m", pretrained, channels, classes, autoshape, _verbose, device) def yolov5l(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): # YOLOv5-large model https://github.com/ultralytics/yolov5 - return _create('yolov5l', pretrained, channels, classes, autoshape, _verbose, device) + return _create("yolov5l", pretrained, channels, classes, autoshape, _verbose, device) def yolov5x(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): # YOLOv5-xlarge model https://github.com/ultralytics/yolov5 - return _create('yolov5x', pretrained, channels, classes, autoshape, _verbose, device) + return _create("yolov5x", pretrained, channels, classes, autoshape, _verbose, device) def yolov5n6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): # YOLOv5-nano-P6 model https://github.com/ultralytics/yolov5 - return _create('yolov5n6', pretrained, channels, classes, autoshape, _verbose, device) + return _create("yolov5n6", pretrained, channels, classes, autoshape, _verbose, device) def yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): # YOLOv5-small-P6 model https://github.com/ultralytics/yolov5 - return _create('yolov5s6', pretrained, channels, classes, autoshape, _verbose, device) + return _create("yolov5s6", pretrained, channels, classes, autoshape, _verbose, device) def yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): # YOLOv5-medium-P6 model https://github.com/ultralytics/yolov5 - return _create('yolov5m6', pretrained, channels, classes, autoshape, _verbose, device) + return _create("yolov5m6", pretrained, channels, classes, autoshape, _verbose, device) def yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): # YOLOv5-large-P6 model https://github.com/ultralytics/yolov5 - return _create('yolov5l6', pretrained, channels, classes, autoshape, _verbose, device) + return _create("yolov5l6", pretrained, channels, classes, autoshape, _verbose, device) def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): # YOLOv5-xlarge-P6 model https://github.com/ultralytics/yolov5 - return _create('yolov5x6', pretrained, channels, classes, autoshape, 
_verbose, device) + return _create("yolov5x6", pretrained, channels, classes, autoshape, _verbose, device) -if __name__ == '__main__': +if __name__ == "__main__": import argparse from pathlib import Path @@ -144,7 +149,7 @@ def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=T # Argparser parser = argparse.ArgumentParser() - parser.add_argument('--model', type=str, default='yolov5s', help='model name') + parser.add_argument("--model", type=str, default="yolov5s", help="model name") opt = parser.parse_args() print_args(vars(opt)) @@ -154,12 +159,13 @@ def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=T # Images imgs = [ - 'data/images/zidane.jpg', # filename - Path('data/images/zidane.jpg'), # Path - 'https://ultralytics.com/images/zidane.jpg', # URI - cv2.imread('data/images/bus.jpg')[:, :, ::-1], # OpenCV - Image.open('data/images/bus.jpg'), # PIL - np.zeros((320, 640, 3))] # numpy + "data/images/zidane.jpg", # filename + Path("data/images/zidane.jpg"), # Path + "https://ultralytics.com/images/zidane.jpg", # URI + cv2.imread("data/images/bus.jpg")[:, :, ::-1], # OpenCV + Image.open("data/images/bus.jpg"), # PIL + np.zeros((320, 640, 3)), + ] # numpy # Inference results = model(imgs, size=320) # batched inference diff --git a/models/common.py b/models/common.py index 75cc4e97bbc7..09e7560f4d84 100644 --- a/models/common.py +++ b/models/common.py @@ -1,7 +1,5 @@ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license -""" -Common modules -""" +"""Common modules.""" import ast import contextlib @@ -28,20 +26,34 @@ try: import ultralytics - assert hasattr(ultralytics, '__version__') # verify package is not directory + assert hasattr(ultralytics, "__version__") # verify package is not directory except (ImportError, AssertionError): import os - os.system('pip install -U ultralytics') + os.system("pip install -U ultralytics") import ultralytics from ultralytics.utils.plotting import Annotator, colors, save_one_box from utils import TryExcept from utils.dataloaders import exif_transpose, letterbox -from utils.general import (LOGGER, ROOT, Profile, check_requirements, check_suffix, check_version, colorstr, - increment_path, is_jupyter, make_divisible, non_max_suppression, scale_boxes, xywh2xyxy, - xyxy2xywh, yaml_load) +from utils.general import ( + LOGGER, + ROOT, + Profile, + check_requirements, + check_suffix, + check_version, + colorstr, + increment_path, + is_jupyter, + make_divisible, + non_max_suppression, + scale_boxes, + xywh2xyxy, + xyxy2xywh, + yaml_load, +) from utils.torch_utils import copy_attr, smart_inference_mode @@ -223,7 +235,7 @@ def __init__(self, c1, c2, k=(5, 9, 13)): def forward(self, x): x = self.cv1(x) with warnings.catch_warnings(): - warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning + warnings.simplefilter("ignore") # suppress torch 1.9.0 max_pool2d() warning return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1)) @@ -239,7 +251,7 @@ def __init__(self, c1, c2, k=5): # equivalent to SPP(k=(5, 9, 13)) def forward(self, x): x = self.cv1(x) with warnings.catch_warnings(): - warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning + warnings.simplefilter("ignore") # suppress torch 1.9.0 max_pool2d() warning y1 = self.m(x) y2 = self.m(y1) return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1)) @@ -278,9 +290,11 @@ def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride self.conv = nn.Sequential( GhostConv(c1, c_, 1, 1), # pw DWConv(c_, c_, k, s, act=False) if s == 2 else 
nn.Identity(), # dw - GhostConv(c_, c2, 1, 1, act=False)) # pw-linear - self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False), Conv(c1, c2, 1, 1, - act=False)) if s == 2 else nn.Identity() + GhostConv(c_, c2, 1, 1, act=False), + ) # pw-linear + self.shortcut = ( + nn.Sequential(DWConv(c1, c1, k, s, act=False), Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity() + ) def forward(self, x): return self.conv(x) + self.shortcut(x) @@ -309,9 +323,9 @@ def __init__(self, gain=2): def forward(self, x): b, c, h, w = x.size() # assert C / s ** 2 == 0, 'Indivisible gain' s = self.gain - x = x.view(b, s, s, c // s ** 2, h, w) # x(1,2,2,16,80,80) + x = x.view(b, s, s, c // s**2, h, w) # x(1,2,2,16,80,80) x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # x(1,16,80,2,80,2) - return x.view(b, c // s ** 2, h * s, w * s) # x(1,16,160,160) + return x.view(b, c // s**2, h * s, w * s) # x(1,16,160,160) class Concat(nn.Module): @@ -326,7 +340,7 @@ def forward(self, x): class DetectMultiBackend(nn.Module): # YOLOv5 MultiBackend class for python inference on various backends - def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, data=None, fp16=False, fuse=True): + def __init__(self, weights="yolov5s.pt", device=torch.device("cpu"), dnn=False, data=None, fp16=False, fuse=True): # Usage: # PyTorch: weights = *.pt # TorchScript: *.torchscript @@ -348,65 +362,68 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, fp16 &= pt or jit or onnx or engine or triton # FP16 nhwc = coreml or saved_model or pb or tflite or edgetpu # BHWC formats (vs torch BCWH) stride = 32 # default stride - cuda = torch.cuda.is_available() and device.type != 'cpu' # use CUDA + cuda = torch.cuda.is_available() and device.type != "cpu" # use CUDA if not (pt or triton): w = attempt_download(w) # download if not local if pt: # PyTorch model = attempt_load(weights if isinstance(weights, list) else w, device=device, inplace=True, fuse=fuse) stride = max(int(model.stride.max()), 32) # model stride - names = model.module.names if hasattr(model, 'module') else model.names # get class names + names = model.module.names if hasattr(model, "module") else model.names # get class names model.half() if fp16 else model.float() self.model = model # explicitly assign for to(), cpu(), cuda(), half() elif jit: # TorchScript - LOGGER.info(f'Loading {w} for TorchScript inference...') - extra_files = {'config.txt': ''} # model metadata + LOGGER.info(f"Loading {w} for TorchScript inference...") + extra_files = {"config.txt": ""} # model metadata model = torch.jit.load(w, _extra_files=extra_files, map_location=device) model.half() if fp16 else model.float() - if extra_files['config.txt']: # load metadata dict - d = json.loads(extra_files['config.txt'], - object_hook=lambda d: { - int(k) if k.isdigit() else k: v - for k, v in d.items()}) - stride, names = int(d['stride']), d['names'] + if extra_files["config.txt"]: # load metadata dict + d = json.loads( + extra_files["config.txt"], + object_hook=lambda d: {int(k) if k.isdigit() else k: v for k, v in d.items()}, + ) + stride, names = int(d["stride"]), d["names"] elif dnn: # ONNX OpenCV DNN - LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...') - check_requirements('opencv-python>=4.5.4') + LOGGER.info(f"Loading {w} for ONNX OpenCV DNN inference...") + check_requirements("opencv-python>=4.5.4") net = cv2.dnn.readNetFromONNX(w) elif onnx: # ONNX Runtime - LOGGER.info(f'Loading {w} for ONNX Runtime inference...') - check_requirements(('onnx', 
'onnxruntime-gpu' if cuda else 'onnxruntime')) + LOGGER.info(f"Loading {w} for ONNX Runtime inference...") + check_requirements(("onnx", "onnxruntime-gpu" if cuda else "onnxruntime")) import onnxruntime - providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider'] + + providers = ["CUDAExecutionProvider", "CPUExecutionProvider"] if cuda else ["CPUExecutionProvider"] session = onnxruntime.InferenceSession(w, providers=providers) output_names = [x.name for x in session.get_outputs()] meta = session.get_modelmeta().custom_metadata_map # metadata - if 'stride' in meta: - stride, names = int(meta['stride']), eval(meta['names']) + if "stride" in meta: + stride, names = int(meta["stride"]), eval(meta["names"]) elif xml: # OpenVINO - LOGGER.info(f'Loading {w} for OpenVINO inference...') - check_requirements('openvino>=2023.0') # requires openvino-dev: https://pypi.org/project/openvino-dev/ + LOGGER.info(f"Loading {w} for OpenVINO inference...") + check_requirements("openvino>=2023.0") # requires openvino-dev: https://pypi.org/project/openvino-dev/ from openvino.runtime import Core, Layout, get_batch + core = Core() if not Path(w).is_file(): # if not *.xml - w = next(Path(w).glob('*.xml')) # get *.xml file from *_openvino_model dir - ov_model = core.read_model(model=w, weights=Path(w).with_suffix('.bin')) + w = next(Path(w).glob("*.xml")) # get *.xml file from *_openvino_model dir + ov_model = core.read_model(model=w, weights=Path(w).with_suffix(".bin")) if ov_model.get_parameters()[0].get_layout().empty: - ov_model.get_parameters()[0].set_layout(Layout('NCHW')) + ov_model.get_parameters()[0].set_layout(Layout("NCHW")) batch_dim = get_batch(ov_model) if batch_dim.is_static: batch_size = batch_dim.get_length() - ov_compiled_model = core.compile_model(ov_model, device_name='AUTO') # AUTO selects best available device - stride, names = self._load_metadata(Path(w).with_suffix('.yaml')) # load metadata + ov_compiled_model = core.compile_model(ov_model, device_name="AUTO") # AUTO selects best available device + stride, names = self._load_metadata(Path(w).with_suffix(".yaml")) # load metadata elif engine: # TensorRT - LOGGER.info(f'Loading {w} for TensorRT inference...') + LOGGER.info(f"Loading {w} for TensorRT inference...") import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download - check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=7.0.0 - if device.type == 'cpu': - device = torch.device('cuda:0') - Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr')) + + check_version(trt.__version__, "7.0.0", hard=True) # require tensorrt>=7.0.0 + if device.type == "cpu": + device = torch.device("cuda:0") + Binding = namedtuple("Binding", ("name", "dtype", "shape", "data", "ptr")) logger = trt.Logger(trt.Logger.INFO) - with open(w, 'rb') as f, trt.Runtime(logger) as runtime: + with open(w, "rb") as f, trt.Runtime(logger) as runtime: model = runtime.deserialize_cuda_engine(f.read()) context = model.create_execution_context() bindings = OrderedDict() @@ -428,22 +445,24 @@ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device) bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr())) binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items()) - batch_size = bindings['images'].shape[0] # if dynamic, this is instead max batch size + batch_size = bindings["images"].shape[0] # if dynamic, this is instead max batch 
size elif coreml: # CoreML - LOGGER.info(f'Loading {w} for CoreML inference...') + LOGGER.info(f"Loading {w} for CoreML inference...") import coremltools as ct + model = ct.models.MLModel(w) elif saved_model: # TF SavedModel - LOGGER.info(f'Loading {w} for TensorFlow SavedModel inference...') + LOGGER.info(f"Loading {w} for TensorFlow SavedModel inference...") import tensorflow as tf + keras = False # assume TF1 saved_model model = tf.keras.models.load_model(w) if keras else tf.saved_model.load(w) elif pb: # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt - LOGGER.info(f'Loading {w} for TensorFlow GraphDef inference...') + LOGGER.info(f"Loading {w} for TensorFlow GraphDef inference...") import tensorflow as tf def wrap_frozen_graph(gd, inputs, outputs): - x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=''), []) # wrapped + x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=""), []) # wrapped ge = x.graph.as_graph_element return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs)) @@ -452,46 +471,50 @@ def gd_outputs(gd): for node in gd.node: # tensorflow.core.framework.node_def_pb2.NodeDef name_list.append(node.name) input_list.extend(node.input) - return sorted(f'{x}:0' for x in list(set(name_list) - set(input_list)) if not x.startswith('NoOp')) + return sorted(f"{x}:0" for x in list(set(name_list) - set(input_list)) if not x.startswith("NoOp")) gd = tf.Graph().as_graph_def() # TF GraphDef - with open(w, 'rb') as f: + with open(w, "rb") as f: gd.ParseFromString(f.read()) - frozen_func = wrap_frozen_graph(gd, inputs='x:0', outputs=gd_outputs(gd)) + frozen_func = wrap_frozen_graph(gd, inputs="x:0", outputs=gd_outputs(gd)) elif tflite or edgetpu: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu from tflite_runtime.interpreter import Interpreter, load_delegate except ImportError: import tensorflow as tf - Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate, + + Interpreter, load_delegate = ( + tf.lite.Interpreter, + tf.lite.experimental.load_delegate, + ) if edgetpu: # TF Edge TPU https://coral.ai/software/#edgetpu-runtime - LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...') - delegate = { - 'Linux': 'libedgetpu.so.1', - 'Darwin': 'libedgetpu.1.dylib', - 'Windows': 'edgetpu.dll'}[platform.system()] + LOGGER.info(f"Loading {w} for TensorFlow Lite Edge TPU inference...") + delegate = {"Linux": "libedgetpu.so.1", "Darwin": "libedgetpu.1.dylib", "Windows": "edgetpu.dll"}[ + platform.system() + ] interpreter = Interpreter(model_path=w, experimental_delegates=[load_delegate(delegate)]) else: # TFLite - LOGGER.info(f'Loading {w} for TensorFlow Lite inference...') + LOGGER.info(f"Loading {w} for TensorFlow Lite inference...") interpreter = Interpreter(model_path=w) # load TFLite model interpreter.allocate_tensors() # allocate input_details = interpreter.get_input_details() # inputs output_details = interpreter.get_output_details() # outputs # load metadata with contextlib.suppress(zipfile.BadZipFile): - with zipfile.ZipFile(w, 'r') as model: + with zipfile.ZipFile(w, "r") as model: meta_file = model.namelist()[0] - meta = ast.literal_eval(model.read(meta_file).decode('utf-8')) - stride, names = int(meta['stride']), meta['names'] + meta = ast.literal_eval(model.read(meta_file).decode("utf-8")) + stride, names = 
int(meta["stride"]), meta["names"] elif tfjs: # TF.js - raise NotImplementedError('ERROR: YOLOv5 TF.js inference is not supported') + raise NotImplementedError("ERROR: YOLOv5 TF.js inference is not supported") elif paddle: # PaddlePaddle - LOGGER.info(f'Loading {w} for PaddlePaddle inference...') - check_requirements('paddlepaddle-gpu' if cuda else 'paddlepaddle') + LOGGER.info(f"Loading {w} for PaddlePaddle inference...") + check_requirements("paddlepaddle-gpu" if cuda else "paddlepaddle") import paddle.inference as pdi + if not Path(w).is_file(): # if not *.pdmodel - w = next(Path(w).rglob('*.pdmodel')) # get *.pdmodel file from *_paddle_model dir - weights = Path(w).with_suffix('.pdiparams') + w = next(Path(w).rglob("*.pdmodel")) # get *.pdmodel file from *_paddle_model dir + weights = Path(w).with_suffix(".pdiparams") config = pdi.Config(str(w), str(weights)) if cuda: config.enable_use_gpu(memory_pool_init_size_mb=2048, device_id=0) @@ -499,19 +522,20 @@ def gd_outputs(gd): input_handle = predictor.get_input_handle(predictor.get_input_names()[0]) output_names = predictor.get_output_names() elif triton: # NVIDIA Triton Inference Server - LOGGER.info(f'Using {w} as Triton Inference Server...') - check_requirements('tritonclient[all]') + LOGGER.info(f"Using {w} as Triton Inference Server...") + check_requirements("tritonclient[all]") from utils.triton import TritonRemoteModel + model = TritonRemoteModel(url=w) - nhwc = model.runtime.startswith('tensorflow') + nhwc = model.runtime.startswith("tensorflow") else: - raise NotImplementedError(f'ERROR: {w} is not a supported format') + raise NotImplementedError(f"ERROR: {w} is not a supported format") # class names - if 'names' not in locals(): - names = yaml_load(data)['names'] if data else {i: f'class{i}' for i in range(999)} - if names[0] == 'n01440764' and len(names) == 1000: # ImageNet - names = yaml_load(ROOT / 'data/ImageNet.yaml')['names'] # human-readable names + if "names" not in locals(): + names = yaml_load(data)["names"] if data else {i: f"class{i}" for i in range(999)} + if names[0] == "n01440764" and len(names) == 1000: # ImageNet + names = yaml_load(ROOT / "data/ImageNet.yaml")["names"] # human-readable names self.__dict__.update(locals()) # assign all variables to self @@ -538,26 +562,26 @@ def forward(self, im, augment=False, visualize=False): im = im.cpu().numpy() # FP32 y = list(self.ov_compiled_model(im).values()) elif self.engine: # TensorRT - if self.dynamic and im.shape != self.bindings['images'].shape: - i = self.model.get_binding_index('images') + if self.dynamic and im.shape != self.bindings["images"].shape: + i = self.model.get_binding_index("images") self.context.set_binding_shape(i, im.shape) # reshape if dynamic - self.bindings['images'] = self.bindings['images']._replace(shape=im.shape) + self.bindings["images"] = self.bindings["images"]._replace(shape=im.shape) for name in self.output_names: i = self.model.get_binding_index(name) self.bindings[name].data.resize_(tuple(self.context.get_binding_shape(i))) - s = self.bindings['images'].shape + s = self.bindings["images"].shape assert im.shape == s, f"input size {im.shape} {'>' if self.dynamic else 'not equal to'} max model size {s}" - self.binding_addrs['images'] = int(im.data_ptr()) + self.binding_addrs["images"] = int(im.data_ptr()) self.context.execute_v2(list(self.binding_addrs.values())) y = [self.bindings[x].data for x in sorted(self.output_names)] elif self.coreml: # CoreML im = im.cpu().numpy() - im = Image.fromarray((im[0] * 255).astype('uint8')) + im = 
Image.fromarray((im[0] * 255).astype("uint8")) # im = im.resize((192, 320), Image.BILINEAR) - y = self.model.predict({'image': im}) # coordinates are xywh normalized - if 'confidence' in y: - box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]]) # xyxy pixels - conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float) + y = self.model.predict({"image": im}) # coordinates are xywh normalized + if "confidence" in y: + box = xywh2xyxy(y["coordinates"] * [[w, h, w, h]]) # xyxy pixels + conf, cls = y["confidence"].max(1), y["confidence"].argmax(1).astype(np.float) y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1) else: y = list(reversed(y.values())) # reversed for segmentation models (pred, proto) @@ -576,17 +600,17 @@ def forward(self, im, augment=False, visualize=False): y = self.frozen_func(x=self.tf.constant(im)) else: # Lite or Edge TPU input = self.input_details[0] - int8 = input['dtype'] == np.uint8 # is TFLite quantized uint8 model + int8 = input["dtype"] == np.uint8 # is TFLite quantized uint8 model if int8: - scale, zero_point = input['quantization'] + scale, zero_point = input["quantization"] im = (im / scale + zero_point).astype(np.uint8) # de-scale - self.interpreter.set_tensor(input['index'], im) + self.interpreter.set_tensor(input["index"], im) self.interpreter.invoke() y = [] for output in self.output_details: - x = self.interpreter.get_tensor(output['index']) + x = self.interpreter.get_tensor(output["index"]) if int8: - scale, zero_point = output['quantization'] + scale, zero_point = output["quantization"] x = (x.astype(np.float32) - zero_point) * scale # re-scale y.append(x) y = [x if isinstance(x, np.ndarray) else x.numpy() for x in y] @@ -603,32 +627,33 @@ def from_numpy(self, x): def warmup(self, imgsz=(1, 3, 640, 640)): # Warmup model by running inference once warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb, self.triton - if any(warmup_types) and (self.device.type != 'cpu' or self.triton): + if any(warmup_types) and (self.device.type != "cpu" or self.triton): im = torch.empty(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device) # input for _ in range(2 if self.jit else 1): # self.forward(im) # warmup @staticmethod - def _model_type(p='path/to/model.pt'): + def _model_type(p="path/to/model.pt"): # Return model type from model path, i.e. path='path/to/model.onnx' -> type=onnx # types = [pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle] from export import export_formats from utils.downloads import is_url + sf = list(export_formats().Suffix) # export suffixes if not is_url(p, check=False): check_suffix(p, sf) # checks url = urlparse(p) # if url may be Triton inference server types = [s in Path(p).name for s in sf] types[8] &= not types[9] # tflite &= not edgetpu - triton = not any(types) and all([any(s in url.scheme for s in ['http', 'grpc']), url.netloc]) + triton = not any(types) and all([any(s in url.scheme for s in ["http", "grpc"]), url.netloc]) return types + [triton] @staticmethod - def _load_metadata(f=Path('path/to/meta.yaml')): + def _load_metadata(f=Path("path/to/meta.yaml")): # Load metadata from meta.yaml if it exists if f.exists(): d = yaml_load(f) - return d['stride'], d['names'] # assign stride, names + return d["stride"], d["names"] # assign stride, names return None, None @@ -645,8 +670,8 @@ class AutoShape(nn.Module): def __init__(self, model, verbose=True): super().__init__() if verbose: - LOGGER.info('Adding AutoShape... 
') - copy_attr(self, model, include=('yaml', 'nc', 'hyp', 'names', 'stride', 'abc'), exclude=()) # copy attributes + LOGGER.info("Adding AutoShape... ") + copy_attr(self, model, include=("yaml", "nc", "hyp", "names", "stride", "abc"), exclude=()) # copy attributes self.dmb = isinstance(model, DetectMultiBackend) # DetectMultiBackend() instance self.pt = not self.dmb or model.pt # PyTorch model self.model = model.eval() @@ -682,7 +707,7 @@ def forward(self, ims, size=640, augment=False, profile=False): if isinstance(size, int): # expand size = (size, size) p = next(self.model.parameters()) if self.pt else torch.empty(1, device=self.model.device) # param - autocast = self.amp and (p.device.type != 'cpu') # Automatic Mixed Precision (AMP) inference + autocast = self.amp and (p.device.type != "cpu") # Automatic Mixed Precision (AMP) inference if isinstance(ims, torch.Tensor): # torch with amp.autocast(autocast): return self.model(ims.to(p.device).type_as(p), augment=augment) # inference @@ -691,13 +716,13 @@ def forward(self, ims, size=640, augment=False, profile=False): n, ims = (len(ims), list(ims)) if isinstance(ims, (list, tuple)) else (1, [ims]) # number, list of images shape0, shape1, files = [], [], [] # image and inference shapes, filenames for i, im in enumerate(ims): - f = f'image{i}' # filename + f = f"image{i}" # filename if isinstance(im, (str, Path)): # filename or uri - im, f = Image.open(requests.get(im, stream=True).raw if str(im).startswith('http') else im), im + im, f = Image.open(requests.get(im, stream=True).raw if str(im).startswith("http") else im), im im = np.asarray(exif_transpose(im)) elif isinstance(im, Image.Image): # PIL Image - im, f = np.asarray(exif_transpose(im)), getattr(im, 'filename', f) or f - files.append(Path(f).with_suffix('.jpg').name) + im, f = np.asarray(exif_transpose(im)), getattr(im, "filename", f) or f + files.append(Path(f).with_suffix(".jpg").name) if im.shape[0] < 5: # image in CHW im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1) im = im[..., :3] if im.ndim == 3 else cv2.cvtColor(im, cv2.COLOR_GRAY2BGR) # enforce 3ch input @@ -718,13 +743,15 @@ def forward(self, ims, size=640, augment=False, profile=False): # Post-process with dt[2]: - y = non_max_suppression(y if self.dmb else y[0], - self.conf, - self.iou, - self.classes, - self.agnostic, - self.multi_label, - max_det=self.max_det) # NMS + y = non_max_suppression( + y if self.dmb else y[0], + self.conf, + self.iou, + self.classes, + self.agnostic, + self.multi_label, + max_det=self.max_det, + ) # NMS for i in range(n): scale_boxes(shape1, y[i][:, :4], shape0[i]) @@ -747,40 +774,44 @@ def __init__(self, ims, pred, files, times=(0, 0, 0), names=None, shape=None): self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized self.n = len(self.pred) # number of images (batch size) - self.t = tuple(x.t / self.n * 1E3 for x in times) # timestamps (ms) + self.t = tuple(x.t / self.n * 1e3 for x in times) # timestamps (ms) self.s = tuple(shape) # inference BCHW shape - def _run(self, pprint=False, show=False, save=False, crop=False, render=False, labels=True, save_dir=Path('')): - s, crops = '', [] + def _run(self, pprint=False, show=False, save=False, crop=False, render=False, labels=True, save_dir=Path("")): + s, crops = "", [] for i, (im, pred) in enumerate(zip(self.ims, self.pred)): - s += f'\nimage {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} ' # string + s += f"\nimage {i + 
1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} " # string if pred.shape[0]: for c in pred[:, -1].unique(): n = (pred[:, -1] == c).sum() # detections per class s += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string - s = s.rstrip(', ') + s = s.rstrip(", ") if show or save or render or crop: annotator = Annotator(im, example=str(self.names)) for *box, conf, cls in reversed(pred): # xyxy, confidence, class - label = f'{self.names[int(cls)]} {conf:.2f}' + label = f"{self.names[int(cls)]} {conf:.2f}" if crop: - file = save_dir / 'crops' / self.names[int(cls)] / self.files[i] if save else None - crops.append({ - 'box': box, - 'conf': conf, - 'cls': cls, - 'label': label, - 'im': save_one_box(box, im, file=file, save=save)}) + file = save_dir / "crops" / self.names[int(cls)] / self.files[i] if save else None + crops.append( + { + "box": box, + "conf": conf, + "cls": cls, + "label": label, + "im": save_one_box(box, im, file=file, save=save), + } + ) else: # all others - annotator.box_label(box, label if labels else '', color=colors(cls)) + annotator.box_label(box, label if labels else "", color=colors(cls)) im = annotator.im else: - s += '(no detections)' + s += "(no detections)" im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np if show: if is_jupyter(): from IPython.display import display + display(im) else: im.show(self.files[i]) @@ -792,22 +823,22 @@ def _run(self, pprint=False, show=False, save=False, crop=False, render=False, l if render: self.ims[i] = np.asarray(im) if pprint: - s = s.lstrip('\n') - return f'{s}\nSpeed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {self.s}' % self.t + s = s.lstrip("\n") + return f"{s}\nSpeed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {self.s}" % self.t if crop: if save: - LOGGER.info(f'Saved results to {save_dir}\n') + LOGGER.info(f"Saved results to {save_dir}\n") return crops - @TryExcept('Showing images is not supported in this environment') + @TryExcept("Showing images is not supported in this environment") def show(self, labels=True): self._run(show=True, labels=labels) # show results - def save(self, labels=True, save_dir='runs/detect/exp', exist_ok=False): + def save(self, labels=True, save_dir="runs/detect/exp", exist_ok=False): save_dir = increment_path(save_dir, exist_ok, mkdir=True) # increment save_dir self._run(save=True, labels=labels, save_dir=save_dir) # save results - def crop(self, save=True, save_dir='runs/detect/exp', exist_ok=False): + def crop(self, save=True, save_dir="runs/detect/exp", exist_ok=False): save_dir = increment_path(save_dir, exist_ok, mkdir=True) if save else None return self._run(crop=True, save=save, save_dir=save_dir) # crop results @@ -818,9 +849,9 @@ def render(self, labels=True): def pandas(self): # return detections as pandas DataFrames, i.e. 
print(results.pandas().xyxy[0]) new = copy(self) # return copy - ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name' # xyxy columns - cb = 'xcenter', 'ycenter', 'width', 'height', 'confidence', 'class', 'name' # xywh columns - for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]): + ca = "xmin", "ymin", "xmax", "ymax", "confidence", "class", "name" # xyxy columns + cb = "xcenter", "ycenter", "width", "height", "confidence", "class", "name" # xywh columns + for k, c in zip(["xyxy", "xyxyn", "xywh", "xywhn"], [ca, ca, cb, cb]): a = [[x[:5] + [int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)] # update setattr(new, k, [pd.DataFrame(x, columns=c) for x in a]) return new @@ -844,7 +875,7 @@ def __str__(self): # override print(results) return self._run(pprint=True) # print results def __repr__(self): - return f'YOLOv5 {self.__class__} instance\n' + self.__str__() + return f"YOLOv5 {self.__class__} instance\n" + self.__str__() class Proto(nn.Module): @@ -852,7 +883,7 @@ class Proto(nn.Module): def __init__(self, c1, c_=256, c2=32): # ch_in, number of protos, number of masks super().__init__() self.cv1 = Conv(c1, c_, k=3) - self.upsample = nn.Upsample(scale_factor=2, mode='nearest') + self.upsample = nn.Upsample(scale_factor=2, mode="nearest") self.cv2 = Conv(c_, c_, k=3) self.cv3 = Conv(c_, c2) @@ -862,14 +893,9 @@ def forward(self, x): class Classify(nn.Module): # YOLOv5 classification head, i.e. x(b,c1,20,20) to x(b,c2) - def __init__(self, - c1, - c2, - k=1, - s=1, - p=None, - g=1, - dropout_p=0.0): # ch_in, ch_out, kernel, stride, padding, groups, dropout probability + def __init__( + self, c1, c2, k=1, s=1, p=None, g=1, dropout_p=0.0 + ): # ch_in, ch_out, kernel, stride, padding, groups, dropout probability super().__init__() c_ = 1280 # efficientnet_b0 size self.conv = Conv(c1, c_, k, s, autopad(k, p), g) diff --git a/models/experimental.py b/models/experimental.py index 11f75e2254b3..c242364bdec5 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -1,7 +1,5 @@ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license -""" -Experimental modules -""" +"""Experimental modules.""" import math import numpy as np @@ -38,7 +36,7 @@ def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True): # ch_in, ch_out, kern super().__init__() n = len(k) # number of convolutions if equal_ch: # equal c_ per group - i = torch.linspace(0, n - 1E-6, c2).floor() # c2 indices + i = torch.linspace(0, n - 1e-6, c2).floor() # c2 indices c_ = [(i == g).sum() for g in range(n)] # intermediate channels else: # equal weight.numel() per group b = [c2] + [0] * n @@ -48,8 +46,9 @@ def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True): # ch_in, ch_out, kern a[0] = 1 c_ = np.linalg.lstsq(a, b, rcond=None)[0].round() # solve for equal weight indices, ax = b - self.m = nn.ModuleList([ - nn.Conv2d(c1, int(c_), k, s, k // 2, groups=math.gcd(c1, int(c_)), bias=False) for k, c_ in zip(k, c_)]) + self.m = nn.ModuleList( + [nn.Conv2d(c1, int(c_), k, s, k // 2, groups=math.gcd(c1, int(c_)), bias=False) for k, c_ in zip(k, c_)] + ) self.bn = nn.BatchNorm2d(c2) self.act = nn.SiLU() @@ -76,16 +75,16 @@ def attempt_load(weights, device=None, inplace=True, fuse=True): model = Ensemble() for w in weights if isinstance(weights, list) else [weights]: - ckpt = torch.load(attempt_download(w), map_location='cpu') # load - ckpt = (ckpt.get('ema') or ckpt['model']).to(device).float() # FP32 model + ckpt = torch.load(attempt_download(w), map_location="cpu") # load + ckpt = 
(ckpt.get("ema") or ckpt["model"]).to(device).float() # FP32 model # Model compatibility updates - if not hasattr(ckpt, 'stride'): - ckpt.stride = torch.tensor([32.]) - if hasattr(ckpt, 'names') and isinstance(ckpt.names, (list, tuple)): + if not hasattr(ckpt, "stride"): + ckpt.stride = torch.tensor([32.0]) + if hasattr(ckpt, "names") and isinstance(ckpt.names, (list, tuple)): ckpt.names = dict(enumerate(ckpt.names)) # convert to dict - model.append(ckpt.fuse().eval() if fuse and hasattr(ckpt, 'fuse') else ckpt.eval()) # model in eval mode + model.append(ckpt.fuse().eval() if fuse and hasattr(ckpt, "fuse") else ckpt.eval()) # model in eval mode # Module updates for m in model.modules(): @@ -93,9 +92,9 @@ def attempt_load(weights, device=None, inplace=True, fuse=True): if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model): m.inplace = inplace if t is Detect and not isinstance(m.anchor_grid, list): - delattr(m, 'anchor_grid') - setattr(m, 'anchor_grid', [torch.zeros(1)] * m.nl) - elif t is nn.Upsample and not hasattr(m, 'recompute_scale_factor'): + delattr(m, "anchor_grid") + setattr(m, "anchor_grid", [torch.zeros(1)] * m.nl) + elif t is nn.Upsample and not hasattr(m, "recompute_scale_factor"): m.recompute_scale_factor = None # torch 1.11.0 compatibility # Return model @@ -103,9 +102,9 @@ def attempt_load(weights, device=None, inplace=True, fuse=True): return model[-1] # Return detection ensemble - print(f'Ensemble created with {weights}\n') - for k in 'names', 'nc', 'yaml': + print(f"Ensemble created with {weights}\n") + for k in "names", "nc", "yaml": setattr(model, k, getattr(model[0], k)) model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride # max stride - assert all(model[0].nc == m.nc for m in model), f'Models have different class counts: {[m.nc for m in model]}' + assert all(model[0].nc == m.nc for m in model), f"Models have different class counts: {[m.nc for m in model]}" return model diff --git a/models/tf.py b/models/tf.py index 17cca1e54fcf..53520b52c086 100644 --- a/models/tf.py +++ b/models/tf.py @@ -27,8 +27,21 @@ import torch.nn as nn from tensorflow import keras -from models.common import (C3, SPP, SPPF, Bottleneck, BottleneckCSP, C3x, Concat, Conv, CrossConv, DWConv, - DWConvTranspose2d, Focus, autopad) +from models.common import ( + C3, + SPP, + SPPF, + Bottleneck, + BottleneckCSP, + C3x, + Concat, + Conv, + CrossConv, + DWConv, + DWConvTranspose2d, + Focus, + autopad, +) from models.experimental import MixConv2d, attempt_load from models.yolo import Detect, Segment from utils.activations import SiLU @@ -44,7 +57,8 @@ def __init__(self, w=None): gamma_initializer=keras.initializers.Constant(w.weight.numpy()), moving_mean_initializer=keras.initializers.Constant(w.running_mean.numpy()), moving_variance_initializer=keras.initializers.Constant(w.running_var.numpy()), - epsilon=w.eps) + epsilon=w.eps, + ) def call(self, inputs): return self.bn(inputs) @@ -60,7 +74,7 @@ def __init__(self, pad): self.pad = tf.constant([[0, 0], [pad[0], pad[0]], [pad[1], pad[1]], [0, 0]]) def call(self, inputs): - return tf.pad(inputs, self.pad, mode='constant', constant_values=0) + return tf.pad(inputs, self.pad, mode="constant", constant_values=0) class TFConv(keras.layers.Layer): @@ -75,12 +89,13 @@ def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None): filters=c2, kernel_size=k, strides=s, - padding='SAME' if s == 1 else 'VALID', - use_bias=not hasattr(w, 'bn'), + padding="SAME" if s == 1 else "VALID", + 
use_bias=not hasattr(w, "bn"), kernel_initializer=keras.initializers.Constant(w.conv.weight.permute(2, 3, 1, 0).numpy()), - bias_initializer='zeros' if hasattr(w, 'bn') else keras.initializers.Constant(w.conv.bias.numpy())) + bias_initializer="zeros" if hasattr(w, "bn") else keras.initializers.Constant(w.conv.bias.numpy()), + ) self.conv = conv if s == 1 else keras.Sequential([TFPad(autopad(k, p)), conv]) - self.bn = TFBN(w.bn) if hasattr(w, 'bn') else tf.identity + self.bn = TFBN(w.bn) if hasattr(w, "bn") else tf.identity self.act = activations(w.act) if act else tf.identity def call(self, inputs): @@ -92,17 +107,18 @@ class TFDWConv(keras.layers.Layer): def __init__(self, c1, c2, k=1, s=1, p=None, act=True, w=None): # ch_in, ch_out, weights, kernel, stride, padding, groups super().__init__() - assert c2 % c1 == 0, f'TFDWConv() output={c2} must be a multiple of input={c1} channels' + assert c2 % c1 == 0, f"TFDWConv() output={c2} must be a multiple of input={c1} channels" conv = keras.layers.DepthwiseConv2D( kernel_size=k, depth_multiplier=c2 // c1, strides=s, - padding='SAME' if s == 1 else 'VALID', - use_bias=not hasattr(w, 'bn'), + padding="SAME" if s == 1 else "VALID", + use_bias=not hasattr(w, "bn"), depthwise_initializer=keras.initializers.Constant(w.conv.weight.permute(2, 3, 1, 0).numpy()), - bias_initializer='zeros' if hasattr(w, 'bn') else keras.initializers.Constant(w.conv.bias.numpy())) + bias_initializer="zeros" if hasattr(w, "bn") else keras.initializers.Constant(w.conv.bias.numpy()), + ) self.conv = conv if s == 1 else keras.Sequential([TFPad(autopad(k, p)), conv]) - self.bn = TFBN(w.bn) if hasattr(w, 'bn') else tf.identity + self.bn = TFBN(w.bn) if hasattr(w, "bn") else tf.identity self.act = activations(w.act) if act else tf.identity def call(self, inputs): @@ -114,19 +130,23 @@ class TFDWConvTranspose2d(keras.layers.Layer): def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0, w=None): # ch_in, ch_out, weights, kernel, stride, padding, groups super().__init__() - assert c1 == c2, f'TFDWConv() output={c2} must be equal to input={c1} channels' - assert k == 4 and p1 == 1, 'TFDWConv() only valid for k=4 and p1=1' + assert c1 == c2, f"TFDWConv() output={c2} must be equal to input={c1} channels" + assert k == 4 and p1 == 1, "TFDWConv() only valid for k=4 and p1=1" weight, bias = w.weight.permute(2, 3, 1, 0).numpy(), w.bias.numpy() self.c1 = c1 self.conv = [ - keras.layers.Conv2DTranspose(filters=1, - kernel_size=k, - strides=s, - padding='VALID', - output_padding=p2, - use_bias=True, - kernel_initializer=keras.initializers.Constant(weight[..., i:i + 1]), - bias_initializer=keras.initializers.Constant(bias[i])) for i in range(c1)] + keras.layers.Conv2DTranspose( + filters=1, + kernel_size=k, + strides=s, + padding="VALID", + output_padding=p2, + use_bias=True, + kernel_initializer=keras.initializers.Constant(weight[..., i : i + 1]), + bias_initializer=keras.initializers.Constant(bias[i]), + ) + for i in range(c1) + ] def call(self, inputs): return tf.concat([m(x) for m, x in zip(self.conv, tf.split(inputs, self.c1, 3))], 3)[:, 1:-1, 1:-1] @@ -176,14 +196,15 @@ class TFConv2d(keras.layers.Layer): def __init__(self, c1, c2, k, s=1, g=1, bias=True, w=None): super().__init__() assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument" - self.conv = keras.layers.Conv2D(filters=c2, - kernel_size=k, - strides=s, - padding='VALID', - use_bias=bias, - kernel_initializer=keras.initializers.Constant( - w.weight.permute(2, 3, 1, 0).numpy()), - 
bias_initializer=keras.initializers.Constant(w.bias.numpy()) if bias else None) + self.conv = keras.layers.Conv2D( + filters=c2, + kernel_size=k, + strides=s, + padding="VALID", + use_bias=bias, + kernel_initializer=keras.initializers.Constant(w.weight.permute(2, 3, 1, 0).numpy()), + bias_initializer=keras.initializers.Constant(w.bias.numpy()) if bias else None, + ) def call(self, inputs): return self.conv(inputs) @@ -233,8 +254,9 @@ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None): self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) self.cv2 = TFConv(c1, c_, 1, 1, w=w.cv2) self.cv3 = TFConv(2 * c_, c2, 1, 1, w=w.cv3) - self.m = keras.Sequential([ - TFCrossConv(c_, c_, k=3, s=1, g=g, e=1.0, shortcut=shortcut, w=w.m[j]) for j in range(n)]) + self.m = keras.Sequential( + [TFCrossConv(c_, c_, k=3, s=1, g=g, e=1.0, shortcut=shortcut, w=w.m[j]) for j in range(n)] + ) def call(self, inputs): return self.cv3(tf.concat((self.m(self.cv1(inputs)), self.cv2(inputs)), axis=3)) @@ -247,7 +269,7 @@ def __init__(self, c1, c2, k=(5, 9, 13), w=None): c_ = c1 // 2 # hidden channels self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) self.cv2 = TFConv(c_ * (len(k) + 1), c2, 1, 1, w=w.cv2) - self.m = [keras.layers.MaxPool2D(pool_size=x, strides=1, padding='SAME') for x in k] + self.m = [keras.layers.MaxPool2D(pool_size=x, strides=1, padding="SAME") for x in k] def call(self, inputs): x = self.cv1(inputs) @@ -261,7 +283,7 @@ def __init__(self, c1, c2, k=5, w=None): c_ = c1 // 2 # hidden channels self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) self.cv2 = TFConv(c_ * 4, c2, 1, 1, w=w.cv2) - self.m = keras.layers.MaxPool2D(pool_size=k, strides=1, padding='SAME') + self.m = keras.layers.MaxPool2D(pool_size=k, strides=1, padding="SAME") def call(self, inputs): x = self.cv1(inputs) @@ -307,10 +329,10 @@ def call(self, inputs): # Normalize xywh to 0-1 to reduce calibration error xy /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32) wh /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32) - y = tf.concat([xy, wh, tf.sigmoid(y[..., 4:5 + self.nc]), y[..., 5 + self.nc:]], -1) + y = tf.concat([xy, wh, tf.sigmoid(y[..., 4 : 5 + self.nc]), y[..., 5 + self.nc :]], -1) z.append(tf.reshape(y, [-1, self.na * ny * nx, self.no])) - return tf.transpose(x, [0, 2, 1, 3]) if self.training else (tf.concat(z, 1), ) + return tf.transpose(x, [0, 2, 1, 3]) if self.training else (tf.concat(z, 1),) @staticmethod def _make_grid(nx=20, ny=20): @@ -340,11 +362,10 @@ def call(self, x): class TFProto(keras.layers.Layer): - def __init__(self, c1, c_=256, c2=32, w=None): super().__init__() self.cv1 = TFConv(c1, c_, k=3, w=w.cv1) - self.upsample = TFUpsample(None, scale_factor=2, mode='nearest') + self.upsample = TFUpsample(None, scale_factor=2, mode="nearest") self.cv2 = TFConv(c_, c_, k=3, w=w.cv2) self.cv3 = TFConv(c_, c2, w=w.cv3) @@ -356,7 +377,7 @@ class TFUpsample(keras.layers.Layer): # TF version of torch.nn.Upsample() def __init__(self, size, scale_factor, mode, w=None): # warning: all arguments needed including 'w' super().__init__() - assert scale_factor % 2 == 0, 'scale_factor must be multiple of 2' + assert scale_factor % 2 == 0, "scale_factor must be multiple of 2" self.upsample = lambda x: tf.image.resize(x, (x.shape[1] * scale_factor, x.shape[2] * scale_factor), mode) # self.upsample = keras.layers.UpSampling2D(size=scale_factor, interpolation=mode) # with default arguments: align_corners=False, half_pixel_centers=False @@ -371,7 +392,7 @@ class TFConcat(keras.layers.Layer): # TF version of torch.concat() def 
__init__(self, dimension=1, w=None): super().__init__() - assert dimension == 1, 'convert only NCHW to NHWC concat' + assert dimension == 1, "convert only NCHW to NHWC concat" self.d = 3 def call(self, inputs): @@ -380,15 +401,20 @@ def call(self, inputs): def parse_model(d, ch, model, imgsz): # model_dict, input_channels(3) LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}") - anchors, nc, gd, gw, ch_mul = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'], d.get( - 'channel_multiple') + anchors, nc, gd, gw, ch_mul = ( + d["anchors"], + d["nc"], + d["depth_multiple"], + d["width_multiple"], + d.get("channel_multiple"), + ) na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors no = na * (nc + 5) # number of outputs = anchors * (classes + 5) if not ch_mul: ch_mul = 8 layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out - for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args + for i, (f, n, m, args) in enumerate(d["backbone"] + d["head"]): # from, number, module, args m_str = m m = eval(m) if isinstance(m, str) else m # eval strings for j, a in enumerate(args): @@ -399,8 +425,20 @@ def parse_model(d, ch, model, imgsz): # model_dict, input_channels(3) n = max(round(n * gd), 1) if n > 1 else n # depth gain if m in [ - nn.Conv2d, Conv, DWConv, DWConvTranspose2d, Bottleneck, SPP, SPPF, MixConv2d, Focus, CrossConv, - BottleneckCSP, C3, C3x]: + nn.Conv2d, + Conv, + DWConv, + DWConvTranspose2d, + Bottleneck, + SPP, + SPPF, + MixConv2d, + Focus, + CrossConv, + BottleneckCSP, + C3, + C3x, + ]: c1, c2 = ch[f], args[0] c2 = make_divisible(c2 * gw, ch_mul) if c2 != no else c2 @@ -422,15 +460,18 @@ def parse_model(d, ch, model, imgsz): # model_dict, input_channels(3) else: c2 = ch[f] - tf_m = eval('TF' + m_str.replace('nn.', '')) - m_ = keras.Sequential([tf_m(*args, w=model.model[i][j]) for j in range(n)]) if n > 1 \ - else tf_m(*args, w=model.model[i]) # module + tf_m = eval("TF" + m_str.replace("nn.", "")) + m_ = ( + keras.Sequential([tf_m(*args, w=model.model[i][j]) for j in range(n)]) + if n > 1 + else tf_m(*args, w=model.model[i]) + ) # module torch_m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args) # module - t = str(m)[8:-2].replace('__main__.', '') # module type + t = str(m)[8:-2].replace("__main__.", "") # module type np = sum(x.numel() for x in torch_m_.parameters()) # number params m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params - LOGGER.info(f'{i:>3}{str(f):>18}{str(n):>3}{np:>10} {t:<40}{str(args):<30}') # print + LOGGER.info(f"{i:>3}{str(f):>18}{str(n):>3}{np:>10} {t:<40}{str(args):<30}") # print save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist layers.append(m_) ch.append(c2) @@ -439,30 +480,33 @@ def parse_model(d, ch, model, imgsz): # model_dict, input_channels(3) class TFModel: # TF YOLOv5 model - def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, model=None, imgsz=(640, 640)): # model, channels, classes + def __init__(self, cfg="yolov5s.yaml", ch=3, nc=None, model=None, imgsz=(640, 640)): # model, channels, classes super().__init__() if isinstance(cfg, dict): self.yaml = cfg # model dict else: # is *.yaml import yaml # for torch hub + self.yaml_file = Path(cfg).name with open(cfg) as f: self.yaml = yaml.load(f, Loader=yaml.FullLoader) # model dict # Define model - if nc and nc != self.yaml['nc']: + if nc and nc != self.yaml["nc"]: 
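            # an nc passed at construction wins over the yaml value, so one cfg file can be
            # reused across datasets with different class counts; parse_model() below sizes
            # the detection head (no = na * (nc + 5) outputs) from the overridden value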
LOGGER.info(f"Overriding {cfg} nc={self.yaml['nc']} with nc={nc}") - self.yaml['nc'] = nc # override yaml value + self.yaml["nc"] = nc # override yaml value self.model, self.savelist = parse_model(deepcopy(self.yaml), ch=[ch], model=model, imgsz=imgsz) - def predict(self, - inputs, - tf_nms=False, - agnostic_nms=False, - topk_per_class=100, - topk_all=100, - iou_thres=0.45, - conf_thres=0.25): + def predict( + self, + inputs, + tf_nms=False, + agnostic_nms=False, + topk_per_class=100, + topk_all=100, + iou_thres=0.45, + conf_thres=0.25, + ): y = [] # outputs x = inputs for m in self.model.layers: @@ -482,14 +526,10 @@ def predict(self, nms = AgnosticNMS()((boxes, classes, scores), topk_all, iou_thres, conf_thres) else: boxes = tf.expand_dims(boxes, 2) - nms = tf.image.combined_non_max_suppression(boxes, - scores, - topk_per_class, - topk_all, - iou_thres, - conf_thres, - clip_boxes=False) - return (nms, ) + nms = tf.image.combined_non_max_suppression( + boxes, scores, topk_per_class, topk_all, iou_thres, conf_thres, clip_boxes=False + ) + return (nms,) return x # output [1,6300,85] = [xywh, conf, class0, class1, ...] # x = x[0] # [x(1,6300,85), ...] to x(6300,85) # xywh = x[..., :4] # x(6300,4) boxes @@ -508,36 +548,42 @@ class AgnosticNMS(keras.layers.Layer): # TF Agnostic NMS def call(self, input, topk_all, iou_thres, conf_thres): # wrap map_fn to avoid TypeSpec related error https://stackoverflow.com/a/65809989/3036450 - return tf.map_fn(lambda x: self._nms(x, topk_all, iou_thres, conf_thres), - input, - fn_output_signature=(tf.float32, tf.float32, tf.float32, tf.int32), - name='agnostic_nms') + return tf.map_fn( + lambda x: self._nms(x, topk_all, iou_thres, conf_thres), + input, + fn_output_signature=(tf.float32, tf.float32, tf.float32, tf.int32), + name="agnostic_nms", + ) @staticmethod def _nms(x, topk_all=100, iou_thres=0.45, conf_thres=0.25): # agnostic NMS boxes, classes, scores = x class_inds = tf.cast(tf.argmax(classes, axis=-1), tf.float32) scores_inp = tf.reduce_max(scores, -1) - selected_inds = tf.image.non_max_suppression(boxes, - scores_inp, - max_output_size=topk_all, - iou_threshold=iou_thres, - score_threshold=conf_thres) + selected_inds = tf.image.non_max_suppression( + boxes, scores_inp, max_output_size=topk_all, iou_threshold=iou_thres, score_threshold=conf_thres + ) selected_boxes = tf.gather(boxes, selected_inds) - padded_boxes = tf.pad(selected_boxes, - paddings=[[0, topk_all - tf.shape(selected_boxes)[0]], [0, 0]], - mode='CONSTANT', - constant_values=0.0) + padded_boxes = tf.pad( + selected_boxes, + paddings=[[0, topk_all - tf.shape(selected_boxes)[0]], [0, 0]], + mode="CONSTANT", + constant_values=0.0, + ) selected_scores = tf.gather(scores_inp, selected_inds) - padded_scores = tf.pad(selected_scores, - paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]], - mode='CONSTANT', - constant_values=-1.0) + padded_scores = tf.pad( + selected_scores, + paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]], + mode="CONSTANT", + constant_values=-1.0, + ) selected_classes = tf.gather(class_inds, selected_inds) - padded_classes = tf.pad(selected_classes, - paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]], - mode='CONSTANT', - constant_values=-1.0) + padded_classes = tf.pad( + selected_classes, + paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]], + mode="CONSTANT", + constant_values=-1.0, + ) valid_detections = tf.shape(selected_inds)[0] return padded_boxes, padded_scores, padded_classes, valid_detections @@ -551,7 +597,7 @@ def activations(act=nn.SiLU): elif 
isinstance(act, (nn.SiLU, SiLU)): return lambda x: keras.activations.swish(x) else: - raise Exception(f'no matching TensorFlow activation found for PyTorch activation {act}') + raise Exception(f"no matching TensorFlow activation found for PyTorch activation {act}") def representative_dataset_gen(dataset, ncalib=100): @@ -566,14 +612,14 @@ def representative_dataset_gen(dataset, ncalib=100): def run( - weights=ROOT / 'yolov5s.pt', # weights path - imgsz=(640, 640), # inference size h,w - batch_size=1, # batch size - dynamic=False, # dynamic batch size + weights=ROOT / "yolov5s.pt", # weights path + imgsz=(640, 640), # inference size h,w + batch_size=1, # batch size + dynamic=False, # dynamic batch size ): # PyTorch model im = torch.zeros((batch_size, 3, *imgsz)) # BCHW image - model = attempt_load(weights, device=torch.device('cpu'), inplace=True, fuse=False) + model = attempt_load(weights, device=torch.device("cpu"), inplace=True, fuse=False) _ = model(im) # inference model.info() @@ -587,15 +633,15 @@ def run( keras_model = keras.Model(inputs=im, outputs=tf_model.predict(im)) keras_model.summary() - LOGGER.info('PyTorch, TensorFlow and Keras models successfully verified.\nUse export.py for TF model export.') + LOGGER.info("PyTorch, TensorFlow and Keras models successfully verified.\nUse export.py for TF model export.") def parse_opt(): parser = argparse.ArgumentParser() - parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='weights path') - parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w') - parser.add_argument('--batch-size', type=int, default=1, help='batch size') - parser.add_argument('--dynamic', action='store_true', help='dynamic batch size') + parser.add_argument("--weights", type=str, default=ROOT / "yolov5s.pt", help="weights path") + parser.add_argument("--imgsz", "--img", "--img-size", nargs="+", type=int, default=[640], help="inference size h,w") + parser.add_argument("--batch-size", type=int, default=1, help="batch size") + parser.add_argument("--dynamic", action="store_true", help="dynamic batch size") opt = parser.parse_args() opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand print_args(vars(opt)) @@ -606,6 +652,6 @@ def main(opt): run(**vars(opt)) -if __name__ == '__main__': +if __name__ == "__main__": opt = parse_opt() main(opt) diff --git a/models/yolo.py b/models/yolo.py index f6cdbcb5d2d8..e98351b98691 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -1,6 +1,6 @@ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ -YOLO-specific modules +YOLO-specific modules. 
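Defines the Detect head and the YOLOv5 model classes (Base/Detection/Segmentation/Classification), plus parse_model() for assembling a model from a model.yaml dictionary.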
Usage: $ python models/yolo.py --cfg yolov5s.yaml @@ -22,18 +22,46 @@ ROOT = FILE.parents[1] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH -if platform.system() != 'Windows': +if platform.system() != "Windows": ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative -from models.common import (C3, C3SPP, C3TR, SPP, SPPF, Bottleneck, BottleneckCSP, C3Ghost, C3x, Classify, Concat, - Contract, Conv, CrossConv, DetectMultiBackend, DWConv, DWConvTranspose2d, Expand, Focus, - GhostBottleneck, GhostConv, Proto) +from models.common import ( + C3, + C3SPP, + C3TR, + SPP, + SPPF, + Bottleneck, + BottleneckCSP, + C3Ghost, + C3x, + Classify, + Concat, + Contract, + Conv, + CrossConv, + DetectMultiBackend, + DWConv, + DWConvTranspose2d, + Expand, + Focus, + GhostBottleneck, + GhostConv, + Proto, +) from models.experimental import MixConv2d from utils.autoanchor import check_anchor_order from utils.general import LOGGER, check_version, check_yaml, colorstr, make_divisible, print_args from utils.plots import feature_visualization -from utils.torch_utils import (fuse_conv_and_bn, initialize_weights, model_info, profile, scale_img, select_device, - time_sync) +from utils.torch_utils import ( + fuse_conv_and_bn, + initialize_weights, + model_info, + profile, + scale_img, + select_device, + time_sync, +) try: import thop # for FLOPs computation @@ -55,7 +83,7 @@ def __init__(self, nc=80, anchors=(), ch=(), inplace=True): # detection layer self.na = len(anchors[0]) // 2 # number of anchors self.grid = [torch.empty(0) for _ in range(self.nl)] # init grid self.anchor_grid = [torch.empty(0) for _ in range(self.nl)] # init anchor grid - self.register_buffer('anchors', torch.tensor(anchors).float().view(self.nl, -1, 2)) # shape(nl,na,2) + self.register_buffer("anchors", torch.tensor(anchors).float().view(self.nl, -1, 2)) # shape(nl,na,2) self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv self.inplace = inplace # use inplace ops (e.g. slice assignment) @@ -82,14 +110,14 @@ def forward(self, x): y = torch.cat((xy, wh, conf), 4) z.append(y.view(bs, self.na * nx * ny, self.no)) - return x if self.training else (torch.cat(z, 1), ) if self.export else (torch.cat(z, 1), x) + return x if self.training else (torch.cat(z, 1),) if self.export else (torch.cat(z, 1), x) - def _make_grid(self, nx=20, ny=20, i=0, torch_1_10=check_version(torch.__version__, '1.10.0')): + def _make_grid(self, nx=20, ny=20, i=0, torch_1_10=check_version(torch.__version__, "1.10.0")): d = self.anchors[i].device t = self.anchors[i].dtype shape = 1, self.na, ny, nx, 2 # grid shape y, x = torch.arange(ny, device=d, dtype=t), torch.arange(nx, device=d, dtype=t) - yv, xv = torch.meshgrid(y, x, indexing='ij') if torch_1_10 else torch.meshgrid(y, x) # torch>=0.7 compatibility + yv, xv = torch.meshgrid(y, x, indexing="ij") if torch_1_10 else torch.meshgrid(y, x) # torch>=0.7 compatibility grid = torch.stack((xv, yv), 2).expand(shape) - 0.5 # add grid offset, i.e. 
y = 2.0 * x - 0.5 anchor_grid = (self.anchors[i] * self.stride[i]).view((1, self.na, 1, 1, 2)).expand(shape) return grid, anchor_grid @@ -132,23 +160,23 @@ def _forward_once(self, x, profile=False, visualize=False): def _profile_one_layer(self, m, x, dt): c = m == self.model[-1] # is final layer, copy input as inplace fix - o = thop.profile(m, inputs=(x.copy() if c else x, ), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPs + o = thop.profile(m, inputs=(x.copy() if c else x,), verbose=False)[0] / 1e9 * 2 if thop else 0 # FLOPs t = time_sync() for _ in range(10): m(x.copy() if c else x) dt.append((time_sync() - t) * 100) if m == self.model[0]: LOGGER.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s} module") - LOGGER.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f} {m.type}') + LOGGER.info(f"{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f} {m.type}") if c: LOGGER.info(f"{sum(dt):10.2f} {'-':>10s} {'-':>10s} Total") def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers - LOGGER.info('Fusing layers... ') + LOGGER.info("Fusing layers... ") for m in self.model.modules(): - if isinstance(m, (Conv, DWConv)) and hasattr(m, 'bn'): + if isinstance(m, (Conv, DWConv)) and hasattr(m, "bn"): m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv - delattr(m, 'bn') # remove batchnorm + delattr(m, "bn") # remove batchnorm m.forward = m.forward_fuse # update forward self.info() return self @@ -170,27 +198,28 @@ def _apply(self, fn): class DetectionModel(BaseModel): # YOLOv5 detection model - def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, input channels, number of classes + def __init__(self, cfg="yolov5s.yaml", ch=3, nc=None, anchors=None): # model, input channels, number of classes super().__init__() if isinstance(cfg, dict): self.yaml = cfg # model dict else: # is *.yaml import yaml # for torch hub + self.yaml_file = Path(cfg).name - with open(cfg, encoding='ascii', errors='ignore') as f: + with open(cfg, encoding="ascii", errors="ignore") as f: self.yaml = yaml.safe_load(f) # model dict # Define model - ch = self.yaml['ch'] = self.yaml.get('ch', ch) # input channels - if nc and nc != self.yaml['nc']: + ch = self.yaml["ch"] = self.yaml.get("ch", ch) # input channels + if nc and nc != self.yaml["nc"]: LOGGER.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}") - self.yaml['nc'] = nc # override yaml value + self.yaml["nc"] = nc # override yaml value if anchors: - LOGGER.info(f'Overriding model.yaml anchors with anchors={anchors}') - self.yaml['anchors'] = round(anchors) # override yaml value + LOGGER.info(f"Overriding model.yaml anchors with anchors={anchors}") + self.yaml["anchors"] = round(anchors) # override yaml value self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist - self.names = [str(i) for i in range(self.yaml['nc'])] # default names - self.inplace = self.yaml.get('inplace', True) + self.names = [str(i) for i in range(self.yaml["nc"])] # default names + self.inplace = self.yaml.get("inplace", True) # Build strides, anchors m = self.model[-1] # Detect() @@ -207,7 +236,7 @@ def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, i # Init weights, biases initialize_weights(self) self.info() - LOGGER.info('') + LOGGER.info("") def forward(self, x, augment=False, profile=False, visualize=False): if augment: @@ -248,9 +277,9 @@ def _descale_pred(self, p, flips, scale, img_size): def _clip_augmented(self, y): # Clip YOLOv5 augmented inference tails nl = self.model[-1].nl # number of detection layers (P3-P5) 
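        # e.g. nl=3 (P3/P4/P5): grid points per level scale as 4**2:4**1:4**0 = 16:4:1, so
        # g = 21 "units"; at 640x640, y[0] has 25200 rows and one unit is 25200 // 21 = 1200.
        # The slicing below trims the last unit (the P5 tail) from the full-scale output and
        # the first 16 units (the P3 rows) from the smallest-scale output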
- g = sum(4 ** x for x in range(nl)) # grid points + g = sum(4**x for x in range(nl)) # grid points e = 1 # exclude layer count - i = (y[0].shape[1] // g) * sum(4 ** x for x in range(e)) # indices + i = (y[0].shape[1] // g) * sum(4**x for x in range(e)) # indices y[0] = y[0][:, :-i] # large i = (y[-1].shape[1] // g) * sum(4 ** (nl - 1 - x) for x in range(e)) # indices y[-1] = y[-1][:, i:] # small @@ -263,7 +292,9 @@ def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is for mi, s in zip(m.m, m.stride): # from b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85) b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) - b.data[:, 5:5 + m.nc] += math.log(0.6 / (m.nc - 0.99999)) if cf is None else torch.log(cf / cf.sum()) # cls + b.data[:, 5 : 5 + m.nc] += ( + math.log(0.6 / (m.nc - 0.99999)) if cf is None else torch.log(cf / cf.sum()) + ) # cls mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) @@ -272,7 +303,7 @@ def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class SegmentationModel(DetectionModel): # YOLOv5 segmentation model - def __init__(self, cfg='yolov5s-seg.yaml', ch=3, nc=None, anchors=None): + def __init__(self, cfg="yolov5s-seg.yaml", ch=3, nc=None, anchors=None): super().__init__(cfg, ch, nc, anchors) @@ -288,9 +319,9 @@ def _from_detection_model(self, model, nc=1000, cutoff=10): model = model.model # unwrap DetectMultiBackend model.model = model.model[:cutoff] # backbone m = model.model[-1] # last layer - ch = m.conv.in_channels if hasattr(m, 'conv') else m.cv1.conv.in_channels # ch into module + ch = m.conv.in_channels if hasattr(m, "conv") else m.cv1.conv.in_channels # ch into module c = Classify(ch, nc) # Classify() - c.i, c.f, c.type = m.i, m.f, 'models.common.Classify' # index, from, type + c.i, c.f, c.type = m.i, m.f, "models.common.Classify" # index, from, type model.model[-1] = c # replace self.model = model.model self.stride = model.stride @@ -305,8 +336,14 @@ def _from_yaml(self, cfg): def parse_model(d, ch): # model_dict, input_channels(3) # Parse a YOLOv5 model.yaml dictionary LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}") - anchors, nc, gd, gw, act, ch_mul = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'], d.get( - 'activation'), d.get('channel_multiple') + anchors, nc, gd, gw, act, ch_mul = ( + d["anchors"], + d["nc"], + d["depth_multiple"], + d["width_multiple"], + d.get("activation"), + d.get("channel_multiple"), + ) if act: Conv.default_act = eval(act) # redefine default activation, i.e. 
Conv.default_act = nn.SiLU() LOGGER.info(f"{colorstr('activation:')} {act}") # print @@ -316,7 +353,7 @@ def parse_model(d, ch): # model_dict, input_channels(3) no = na * (nc + 5) # number of outputs = anchors * (classes + 5) layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out - for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args + for i, (f, n, m, args) in enumerate(d["backbone"] + d["head"]): # from, number, module, args m = eval(m) if isinstance(m, str) else m # eval strings for j, a in enumerate(args): with contextlib.suppress(NameError): @@ -324,8 +361,25 @@ def parse_model(d, ch): # model_dict, input_channels(3) n = n_ = max(round(n * gd), 1) if n > 1 else n # depth gain if m in { - Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv, MixConv2d, Focus, CrossConv, - BottleneckCSP, C3, C3TR, C3SPP, C3Ghost, nn.ConvTranspose2d, DWConvTranspose2d, C3x}: + Conv, + GhostConv, + Bottleneck, + GhostBottleneck, + SPP, + SPPF, + DWConv, + MixConv2d, + Focus, + CrossConv, + BottleneckCSP, + C3, + C3TR, + C3SPP, + C3Ghost, + nn.ConvTranspose2d, + DWConvTranspose2d, + C3x, + }: c1, c2 = ch[f], args[0] if c2 != no: # if not output c2 = make_divisible(c2 * gw, ch_mul) @@ -353,10 +407,10 @@ def parse_model(d, ch): # model_dict, input_channels(3) c2 = ch[f] m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args) # module - t = str(m)[8:-2].replace('__main__.', '') # module type + t = str(m)[8:-2].replace("__main__.", "") # module type np = sum(x.numel() for x in m_.parameters()) # number params m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params - LOGGER.info(f'{i:>3}{str(f):>18}{n_:>3}{np:10.0f} {t:<40}{str(args):<30}') # print + LOGGER.info(f"{i:>3}{str(f):>18}{n_:>3}{np:10.0f} {t:<40}{str(args):<30}") # print save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist layers.append(m_) if i == 0: @@ -365,14 +419,14 @@ def parse_model(d, ch): # model_dict, input_channels(3) return nn.Sequential(*layers), sorted(save) -if __name__ == '__main__': +if __name__ == "__main__": parser = argparse.ArgumentParser() - parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml') - parser.add_argument('--batch-size', type=int, default=1, help='total batch size for all GPUs') - parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') - parser.add_argument('--profile', action='store_true', help='profile model speed') - parser.add_argument('--line-profile', action='store_true', help='profile model speed layer by layer') - parser.add_argument('--test', action='store_true', help='test all yolo*.yaml') + parser.add_argument("--cfg", type=str, default="yolov5s.yaml", help="model.yaml") + parser.add_argument("--batch-size", type=int, default=1, help="total batch size for all GPUs") + parser.add_argument("--device", default="", help="cuda device, i.e. 
0 or 0,1,2,3 or cpu") + parser.add_argument("--profile", action="store_true", help="profile model speed") + parser.add_argument("--line-profile", action="store_true", help="profile model speed layer by layer") + parser.add_argument("--test", action="store_true", help="test all yolo*.yaml") opt = parser.parse_args() opt.cfg = check_yaml(opt.cfg) # check YAML print_args(vars(opt)) @@ -390,11 +444,11 @@ def parse_model(d, ch): # model_dict, input_channels(3) results = profile(input=im, ops=[model], n=3) elif opt.test: # test all models - for cfg in Path(ROOT / 'models').rglob('yolo*.yaml'): + for cfg in Path(ROOT / "models").rglob("yolo*.yaml"): try: _ = Model(cfg) except Exception as e: - print(f'Error in {cfg}: {e}') + print(f"Error in {cfg}: {e}") else: # report fused model summary model.fuse() diff --git a/segment/predict.py b/segment/predict.py index 8e3d97dfeb92..23a4e3538509 100644 --- a/segment/predict.py +++ b/segment/predict.py @@ -46,23 +46,36 @@ from models.common import DetectMultiBackend from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams -from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, - increment_path, non_max_suppression, print_args, scale_boxes, scale_segments, - strip_optimizer) +from utils.general import ( + LOGGER, + Profile, + check_file, + check_img_size, + check_imshow, + check_requirements, + colorstr, + cv2, + increment_path, + non_max_suppression, + print_args, + scale_boxes, + scale_segments, + strip_optimizer, +) from utils.segment.general import masks2segments, process_mask, process_mask_native from utils.torch_utils import select_device, smart_inference_mode @smart_inference_mode() def run( - weights=ROOT / 'yolov5s-seg.pt', # model.pt path(s) - source=ROOT / 'data/images', # file/dir/URL/glob/screen/0(webcam) - data=ROOT / 'data/coco128.yaml', # dataset.yaml path + weights=ROOT / "yolov5s-seg.pt", # model.pt path(s) + source=ROOT / "data/images", # file/dir/URL/glob/screen/0(webcam) + data=ROOT / "data/coco128.yaml", # dataset.yaml path imgsz=(640, 640), # inference size (height, width) conf_thres=0.25, # confidence threshold iou_thres=0.45, # NMS IOU threshold max_det=1000, # maximum detections per image - device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu + device="", # cuda device, i.e. 
0 or 0,1,2,3 or cpu view_img=False, # show results save_txt=False, # save results to *.txt save_conf=False, # save confidences in --save-txt labels @@ -73,8 +86,8 @@ def run( augment=False, # augmented inference visualize=False, # visualize features update=False, # update all models - project=ROOT / 'runs/predict-seg', # save results to project/name - name='exp', # save results to project/name + project=ROOT / "runs/predict-seg", # save results to project/name + name="exp", # save results to project/name exist_ok=False, # existing project/name ok, do not increment line_thickness=3, # bounding box thickness (pixels) hide_labels=False, # hide labels @@ -85,17 +98,17 @@ def run( retina_masks=False, ): source = str(source) - save_img = not nosave and not source.endswith('.txt') # save inference images + save_img = not nosave and not source.endswith(".txt") # save inference images is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) - is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) - webcam = source.isnumeric() or source.endswith('.streams') or (is_url and not is_file) - screenshot = source.lower().startswith('screen') + is_url = source.lower().startswith(("rtsp://", "rtmp://", "http://", "https://")) + webcam = source.isnumeric() or source.endswith(".streams") or (is_url and not is_file) + screenshot = source.lower().startswith("screen") if is_url and is_file: source = check_file(source) # download # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run - (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir + (save_dir / "labels" if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model device = select_device(device) @@ -143,14 +156,14 @@ def run( seen += 1 if webcam: # batch_size >= 1 p, im0, frame = path[i], im0s[i].copy(), dataset.count - s += f'{i}: ' + s += f"{i}: " else: - p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0) + p, im0, frame = path, im0s.copy(), getattr(dataset, "frame", 0) p = Path(p) # to Path save_path = str(save_dir / p.name) # im.jpg - txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt - s += '%gx%g ' % im.shape[2:] # print string + txt_path = str(save_dir / "labels" / p.stem) + ("" if dataset.mode == "image" else f"_{frame}") # im.txt + s += "%gx%g " % im.shape[2:] # print string imc = im0.copy() if save_crop else im0 # for save_crop annotator = Annotator(im0, line_width=line_thickness, example=str(names)) if len(det): @@ -166,7 +179,8 @@ def run( if save_txt: segments = [ scale_segments(im0.shape if retina_masks else im.shape[2:], x, im0.shape, normalize=True) - for x in reversed(masks2segments(masks))] + for x in reversed(masks2segments(masks)) + ] # Print results for c in det[:, 5].unique(): @@ -177,39 +191,42 @@ def run( annotator.masks( masks, colors=[colors(x, True) for x in det[:, 5]], - im_gpu=torch.as_tensor(im0, dtype=torch.float16).to(device).permute(2, 0, 1).flip(0).contiguous() / - 255 if retina_masks else im[i]) + im_gpu=torch.as_tensor(im0, dtype=torch.float16).to(device).permute(2, 0, 1).flip(0).contiguous() + / 255 + if retina_masks + else im[i], + ) # Write results for j, (*xyxy, conf, cls) in enumerate(reversed(det[:, :6])): if save_txt: # Write to file seg = segments[j].reshape(-1) # (n,2) to (n*2) line = (cls, *seg, conf) if save_conf else (cls, *seg) # label format - with open(f'{txt_path}.txt', 'a') as f: - f.write(('%g ' * 
len(line)).rstrip() % line + '\n') + with open(f"{txt_path}.txt", "a") as f: + f.write(("%g " * len(line)).rstrip() % line + "\n") if save_img or save_crop or view_img: # Add bbox to image c = int(cls) # integer class - label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') + label = None if hide_labels else (names[c] if hide_conf else f"{names[c]} {conf:.2f}") annotator.box_label(xyxy, label, color=colors(c, True)) # annotator.draw.polygon(segments[j], outline=colors(c, True), width=3) if save_crop: - save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) + save_one_box(xyxy, imc, file=save_dir / "crops" / names[c] / f"{p.stem}.jpg", BGR=True) # Stream results im0 = annotator.result() if view_img: - if platform.system() == 'Linux' and p not in windows: + if platform.system() == "Linux" and p not in windows: windows.append(p) cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux) cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0]) cv2.imshow(str(p), im0) - if cv2.waitKey(1) == ord('q'): # 1 millisecond + if cv2.waitKey(1) == ord("q"): # 1 millisecond exit() # Save results (image with detections) if save_img: - if dataset.mode == 'image': + if dataset.mode == "image": cv2.imwrite(save_path, im0) else: # 'video' or 'stream' if vid_path[i] != save_path: # new video @@ -222,18 +239,18 @@ def run( h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) else: # stream fps, w, h = 30, im0.shape[1], im0.shape[0] - save_path = str(Path(save_path).with_suffix('.mp4')) # force *.mp4 suffix on results videos - vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) + save_path = str(Path(save_path).with_suffix(".mp4")) # force *.mp4 suffix on results videos + vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h)) vid_writer[i].write(im0) # Print time (inference-only) LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms") # Print results - t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image - LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) + t = tuple(x.t / seen * 1e3 for x in dt) # speeds per image + LOGGER.info(f"Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}" % t) if save_txt or save_img: - s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' + s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else "" LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") if update: strip_optimizer(weights[0]) # update model (to fix SourceChangeWarning) @@ -241,34 +258,34 @@ def run( def parse_opt(): parser = argparse.ArgumentParser() - parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-seg.pt', help='model path(s)') - parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob/screen/0(webcam)') - parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path') - parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w') - parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold') - parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold') - 
parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image') - parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') - parser.add_argument('--view-img', action='store_true', help='show results') - parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') - parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') - parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes') - parser.add_argument('--nosave', action='store_true', help='do not save images/videos') - parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3') - parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') - parser.add_argument('--augment', action='store_true', help='augmented inference') - parser.add_argument('--visualize', action='store_true', help='visualize features') - parser.add_argument('--update', action='store_true', help='update all models') - parser.add_argument('--project', default=ROOT / 'runs/predict-seg', help='save results to project/name') - parser.add_argument('--name', default='exp', help='save results to project/name') - parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') - parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)') - parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels') - parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences') - parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') - parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') - parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride') - parser.add_argument('--retina-masks', action='store_true', help='whether to plot masks in native resolution') + parser.add_argument("--weights", nargs="+", type=str, default=ROOT / "yolov5s-seg.pt", help="model path(s)") + parser.add_argument("--source", type=str, default=ROOT / "data/images", help="file/dir/URL/glob/screen/0(webcam)") + parser.add_argument("--data", type=str, default=ROOT / "data/coco128.yaml", help="(optional) dataset.yaml path") + parser.add_argument("--imgsz", "--img", "--img-size", nargs="+", type=int, default=[640], help="inference size h,w") + parser.add_argument("--conf-thres", type=float, default=0.25, help="confidence threshold") + parser.add_argument("--iou-thres", type=float, default=0.45, help="NMS IoU threshold") + parser.add_argument("--max-det", type=int, default=1000, help="maximum detections per image") + parser.add_argument("--device", default="", help="cuda device, i.e. 
0 or 0,1,2,3 or cpu") + parser.add_argument("--view-img", action="store_true", help="show results") + parser.add_argument("--save-txt", action="store_true", help="save results to *.txt") + parser.add_argument("--save-conf", action="store_true", help="save confidences in --save-txt labels") + parser.add_argument("--save-crop", action="store_true", help="save cropped prediction boxes") + parser.add_argument("--nosave", action="store_true", help="do not save images/videos") + parser.add_argument("--classes", nargs="+", type=int, help="filter by class: --classes 0, or --classes 0 2 3") + parser.add_argument("--agnostic-nms", action="store_true", help="class-agnostic NMS") + parser.add_argument("--augment", action="store_true", help="augmented inference") + parser.add_argument("--visualize", action="store_true", help="visualize features") + parser.add_argument("--update", action="store_true", help="update all models") + parser.add_argument("--project", default=ROOT / "runs/predict-seg", help="save results to project/name") + parser.add_argument("--name", default="exp", help="save results to project/name") + parser.add_argument("--exist-ok", action="store_true", help="existing project/name ok, do not increment") + parser.add_argument("--line-thickness", default=3, type=int, help="bounding box thickness (pixels)") + parser.add_argument("--hide-labels", default=False, action="store_true", help="hide labels") + parser.add_argument("--hide-conf", default=False, action="store_true", help="hide confidences") + parser.add_argument("--half", action="store_true", help="use FP16 half-precision inference") + parser.add_argument("--dnn", action="store_true", help="use OpenCV DNN for ONNX inference") + parser.add_argument("--vid-stride", type=int, default=1, help="video frame-rate stride") + parser.add_argument("--retina-masks", action="store_true", help="whether to plot masks in native resolution") opt = parser.parse_args() opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand print_args(vars(opt)) @@ -276,10 +293,10 @@ def parse_opt(): def main(opt): - check_requirements(ROOT / 'requirements.txt', exclude=('tensorboard', 'thop')) + check_requirements(ROOT / "requirements.txt", exclude=("tensorboard", "thop")) run(**vars(opt)) -if __name__ == '__main__': +if __name__ == "__main__": opt = parse_opt() main(opt) diff --git a/segment/train.py b/segment/train.py index 2ae09c1cbf66..fe262348fae4 100644 --- a/segment/train.py +++ b/segment/train.py @@ -1,7 +1,7 @@ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ -Train a YOLOv5 segment model on a segment dataset -Models and datasets download automatically from the latest YOLOv5 release. +Train a YOLOv5 segment model on a segment dataset Models and datasets download automatically from the latest YOLOv5 +release. 
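For reference, each `--save-txt` line written by the loop above is a class index followed by a normalized polygon, plus an optional trailing confidence with `--save-conf`. A minimal sketch of reading such a line back, using a hypothetical helper that is not part of this patch:

# Hypothetical reader for the normalized polygon labels written by predict.py --save-txt.
# Assumes each line is "<class> x1 y1 x2 y2 ... [conf]" with coordinates in the 0-1 range.
import numpy as np

def read_segment_label(line, width, height):
    values = [float(v) for v in line.split()]
    cls, coords = int(values[0]), values[1:]
    if len(coords) % 2:  # odd count means a trailing confidence from --save-conf
        coords, conf = coords[:-1], coords[-1]
    else:
        conf = None
    xy = np.array(coords, dtype=np.float32).reshape(-1, 2) * (width, height)  # de-normalize
    return cls, xy, conf

cls, polygon, conf = read_segment_label("0 0.1 0.2 0.5 0.2 0.3 0.8", 640, 480)
print(cls, polygon.shape, conf)  # 0 (3, 2) None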
diff --git a/segment/train.py b/segment/train.py
index 2ae09c1cbf66..fe262348fae4 100644
--- a/segment/train.py
+++ b/segment/train.py
@@ -1,7 +1,7 @@
 # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 """
-Train a YOLOv5 segment model on a segment dataset
-Models and datasets download automatically from the latest YOLOv5 release.
+Train a YOLOv5 segment model on a segment dataset. Models and datasets download automatically from the latest YOLOv5
+release.

 Usage - Single-GPU training:
     $ python segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640  # from pretrained (recommended)
@@ -47,47 +47,104 @@
 from utils.autobatch import check_train_batch_size
 from utils.callbacks import Callbacks
 from utils.downloads import attempt_download, is_url
-from utils.general import (LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_info,
-                           check_git_status, check_img_size, check_requirements, check_suffix, check_yaml, colorstr,
-                           get_latest_run, increment_path, init_seeds, intersect_dicts, labels_to_class_weights,
-                           labels_to_image_weights, one_cycle, print_args, print_mutation, strip_optimizer, yaml_save)
+from utils.general import (
+    LOGGER,
+    TQDM_BAR_FORMAT,
+    check_amp,
+    check_dataset,
+    check_file,
+    check_git_info,
+    check_git_status,
+    check_img_size,
+    check_requirements,
+    check_suffix,
+    check_yaml,
+    colorstr,
+    get_latest_run,
+    increment_path,
+    init_seeds,
+    intersect_dicts,
+    labels_to_class_weights,
+    labels_to_image_weights,
+    one_cycle,
+    print_args,
+    print_mutation,
+    strip_optimizer,
+    yaml_save,
+)
 from utils.loggers import GenericLogger
 from utils.plots import plot_evolve, plot_labels
 from utils.segment.dataloaders import create_dataloader
 from utils.segment.loss import ComputeLoss
 from utils.segment.metrics import KEYS, fitness
 from utils.segment.plots import plot_images_and_masks, plot_results_with_masks
-from utils.torch_utils import (EarlyStopping, ModelEMA, de_parallel, select_device, smart_DDP, smart_optimizer,
-                               smart_resume, torch_distributed_zero_first)
-
-LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1))  # https://pytorch.org/docs/stable/elastic/run.html
-RANK = int(os.getenv('RANK', -1))
-WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1))
+from utils.torch_utils import (
+    EarlyStopping,
+    ModelEMA,
+    de_parallel,
+    select_device,
+    smart_DDP,
+    smart_optimizer,
+    smart_resume,
+    torch_distributed_zero_first,
+)
+
+LOCAL_RANK = int(os.getenv("LOCAL_RANK", -1))  # https://pytorch.org/docs/stable/elastic/run.html
+RANK = int(os.getenv("RANK", -1))
+WORLD_SIZE = int(os.getenv("WORLD_SIZE", 1))
 GIT_INFO = check_git_info()


 def train(hyp, opt, device, callbacks):  # hyp is path/to/hyp.yaml or hyp dictionary
-    save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze, mask_ratio = \
-        Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \
-        opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze, opt.mask_ratio
+    (
+        save_dir,
+        epochs,
+        batch_size,
+        weights,
+        single_cls,
+        evolve,
+        data,
+        cfg,
+        resume,
+        noval,
+        nosave,
+        workers,
+        freeze,
+        mask_ratio,
+    ) = (
+        Path(opt.save_dir),
+        opt.epochs,
+        opt.batch_size,
+        opt.weights,
+        opt.single_cls,
+        opt.evolve,
+        opt.data,
+        opt.cfg,
+        opt.resume,
+        opt.noval,
+        opt.nosave,
+        opt.workers,
+        opt.freeze,
+        opt.mask_ratio,
+    )
     # callbacks.run('on_pretrain_routine_start')

     # Directories
-    w = save_dir / 'weights'  # weights dir
+    w = save_dir / "weights"  # weights dir
     (w.parent if evolve else w).mkdir(parents=True, exist_ok=True)  # make dir
-    last, best = w / 'last.pt', w / 'best.pt'
+    last, best = w / "last.pt", w / "best.pt"

     # Hyperparameters
     if isinstance(hyp, str):
-        with open(hyp, errors='ignore') as f:
+        with open(hyp, errors="ignore") as f:
             hyp = yaml.safe_load(f)  # load hyps dict
-    LOGGER.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items()))
+    LOGGER.info(colorstr("hyperparameters: ") + ", ".join(f"{k}={v}" for k, v in hyp.items()))
     opt.hyp = hyp.copy()  # for saving hyps to checkpoints

     # Save run settings
     if not evolve:
-        yaml_save(save_dir / 'hyp.yaml', hyp)
-        yaml_save(save_dir / 'opt.yaml', vars(opt))
+        yaml_save(save_dir / "hyp.yaml", hyp)
+        yaml_save(save_dir / "opt.yaml", vars(opt))

     # Loggers
     data_dict = None
@@ -97,39 +154,39 @@ def train(hyp, opt, device, callbacks):  # hyp is path/to/hyp.yaml or hyp dictio
     # Config
     plots = not evolve and not opt.noplots  # create plots
     overlap = not opt.no_overlap
-    cuda = device.type != 'cpu'
+    cuda = device.type != "cpu"
     init_seeds(opt.seed + 1 + RANK, deterministic=True)
     with torch_distributed_zero_first(LOCAL_RANK):
         data_dict = data_dict or check_dataset(data)  # check if None
-    train_path, val_path = data_dict['train'], data_dict['val']
-    nc = 1 if single_cls else int(data_dict['nc'])  # number of classes
-    names = {0: 'item'} if single_cls and len(data_dict['names']) != 1 else data_dict['names']  # class names
-    is_coco = isinstance(val_path, str) and val_path.endswith('coco/val2017.txt')  # COCO dataset
+    train_path, val_path = data_dict["train"], data_dict["val"]
+    nc = 1 if single_cls else int(data_dict["nc"])  # number of classes
+    names = {0: "item"} if single_cls and len(data_dict["names"]) != 1 else data_dict["names"]  # class names
+    is_coco = isinstance(val_path, str) and val_path.endswith("coco/val2017.txt")  # COCO dataset

     # Model
-    check_suffix(weights, '.pt')  # check weights
-    pretrained = weights.endswith('.pt')
+    check_suffix(weights, ".pt")  # check weights
+    pretrained = weights.endswith(".pt")
     if pretrained:
         with torch_distributed_zero_first(LOCAL_RANK):
             weights = attempt_download(weights)  # download if not found locally
-        ckpt = torch.load(weights, map_location='cpu')  # load checkpoint to CPU to avoid CUDA memory leak
-        model = SegmentationModel(cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)
-        exclude = ['anchor'] if (cfg or hyp.get('anchors')) and not resume else []  # exclude keys
-        csd = ckpt['model'].float().state_dict()  # checkpoint state_dict as FP32
+        ckpt = torch.load(weights, map_location="cpu")  # load checkpoint to CPU to avoid CUDA memory leak
+        model = SegmentationModel(cfg or ckpt["model"].yaml, ch=3, nc=nc, anchors=hyp.get("anchors")).to(device)
+        exclude = ["anchor"] if (cfg or hyp.get("anchors")) and not resume else []  # exclude keys
+        csd = ckpt["model"].float().state_dict()  # checkpoint state_dict as FP32
         csd = intersect_dicts(csd, model.state_dict(), exclude=exclude)  # intersect
         model.load_state_dict(csd, strict=False)  # load
-        LOGGER.info(f'Transferred {len(csd)}/{len(model.state_dict())} items from {weights}')  # report
+        LOGGER.info(f"Transferred {len(csd)}/{len(model.state_dict())} items from {weights}")  # report
     else:
-        model = SegmentationModel(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)  # create
+        model = SegmentationModel(cfg, ch=3, nc=nc, anchors=hyp.get("anchors")).to(device)  # create
     amp = check_amp(model)  # check AMP

     # Freeze
-    freeze = [f'model.{x}.' for x in (freeze if len(freeze) > 1 else range(freeze[0]))]  # layers to freeze
+    freeze = [f"model.{x}." for x in (freeze if len(freeze) > 1 else range(freeze[0]))]  # layers to freeze
     for k, v in model.named_parameters():
         v.requires_grad = True  # train all layers
         # v.register_hook(lambda x: torch.nan_to_num(x))  # NaN to 0 (commented for erratic training results)
         if any(x in k for x in freeze):
-            LOGGER.info(f'freezing {k}')
+            LOGGER.info(f"freezing {k}")
             v.requires_grad = False

     # Image size
@@ -139,20 +196,20 @@ def train(hyp, opt, device, callbacks):  # hyp is path/to/hyp.yaml or hyp dictio
     # Batch size
     if RANK == -1 and batch_size == -1:  # single-GPU only, estimate best batch size
         batch_size = check_train_batch_size(model, imgsz, amp)
-        logger.update_params({'batch_size': batch_size})
+        logger.update_params({"batch_size": batch_size})
         # loggers.on_params_update({"batch_size": batch_size})

     # Optimizer
     nbs = 64  # nominal batch size
     accumulate = max(round(nbs / batch_size), 1)  # accumulate loss before optimizing
-    hyp['weight_decay'] *= batch_size * accumulate / nbs  # scale weight_decay
-    optimizer = smart_optimizer(model, opt.optimizer, hyp['lr0'], hyp['momentum'], hyp['weight_decay'])
+    hyp["weight_decay"] *= batch_size * accumulate / nbs  # scale weight_decay
+    optimizer = smart_optimizer(model, opt.optimizer, hyp["lr0"], hyp["momentum"], hyp["weight_decay"])

     # Scheduler
     if opt.cos_lr:
-        lf = one_cycle(1, hyp['lrf'], epochs)  # cosine 1->hyp['lrf']
+        lf = one_cycle(1, hyp["lrf"], epochs)  # cosine 1->hyp['lrf']
     else:
-        lf = lambda x: (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf']  # linear
+        lf = lambda x: (1 - x / epochs) * (1.0 - hyp["lrf"]) + hyp["lrf"]  # linear
     scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)  # plot_lr_scheduler(optimizer, scheduler, epochs)

     # EMA
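The scheduler hunk above selects between a cosine and a linear decay of the learning-rate multiplier. A small sketch of the two lambdas, with `one_cycle` written out to match the helper of the same name in utils/general.py and illustrative values for `epochs` and `lrf`:

# Sketch of the two LR lambdas used above; epochs and lrf are example numbers, not a real run.
import math

def one_cycle(y1=0.0, y2=1.0, steps=100):
    # cosine ramp from y1 to y2 over `steps`, matching utils.general.one_cycle
    return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1

epochs, lrf = 100, 0.01
cos_lf = one_cycle(1, lrf, epochs)  # cosine 1 -> lrf
lin_lf = lambda x: (1 - x / epochs) * (1.0 - lrf) + lrf  # linear 1 -> lrf

for e in (0, 50, 99):
    print(e, round(cos_lf(e), 4), round(lin_lf(e), 4))  # e.g. epoch 50 -> 0.505 for both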
@@ -168,15 +225,15 @@ def train(hyp, opt, device, callbacks):  # hyp is path/to/hyp.yaml or hyp dictio
     # DP mode
     if cuda and RANK == -1 and torch.cuda.device_count() > 1:
         LOGGER.warning(
-            'WARNING ⚠️ DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n'
-            'See Multi-GPU Tutorial at https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training to get started.'
+            "WARNING ⚠️ DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n"
+            "See Multi-GPU Tutorial at https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training to get started."
         )
         model = torch.nn.DataParallel(model)

     # SyncBatchNorm
     if opt.sync_bn and cuda and RANK != -1:
         model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
-        LOGGER.info('Using SyncBatchNorm()')
+        LOGGER.info("Using SyncBatchNorm()")

     # Trainloader
     train_loader, dataset = create_dataloader(
@@ -187,41 +244,43 @@ def train(hyp, opt, device, callbacks):  # hyp is path/to/hyp.yaml or hyp dictio
         single_cls,
         hyp=hyp,
         augment=True,
-        cache=None if opt.cache == 'val' else opt.cache,
+        cache=None if opt.cache == "val" else opt.cache,
         rect=opt.rect,
         rank=LOCAL_RANK,
         workers=workers,
         image_weights=opt.image_weights,
         quad=opt.quad,
-        prefix=colorstr('train: '),
+        prefix=colorstr("train: "),
         shuffle=True,
         mask_downsample_ratio=mask_ratio,
         overlap_mask=overlap,
     )
     labels = np.concatenate(dataset.labels, 0)
     mlc = int(labels[:, 0].max())  # max label class
-    assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. Possible class labels are 0-{nc - 1}'
+    assert mlc < nc, f"Label class {mlc} exceeds nc={nc} in {data}. Possible class labels are 0-{nc - 1}"

     # Process 0
     if RANK in {-1, 0}:
-        val_loader = create_dataloader(val_path,
-                                       imgsz,
-                                       batch_size // WORLD_SIZE * 2,
-                                       gs,
-                                       single_cls,
-                                       hyp=hyp,
-                                       cache=None if noval else opt.cache,
-                                       rect=True,
-                                       rank=-1,
-                                       workers=workers * 2,
-                                       pad=0.5,
-                                       mask_downsample_ratio=mask_ratio,
-                                       overlap_mask=overlap,
-                                       prefix=colorstr('val: '))[0]
+        val_loader = create_dataloader(
+            val_path,
+            imgsz,
+            batch_size // WORLD_SIZE * 2,
+            gs,
+            single_cls,
+            hyp=hyp,
+            cache=None if noval else opt.cache,
+            rect=True,
+            rank=-1,
+            workers=workers * 2,
+            pad=0.5,
+            mask_downsample_ratio=mask_ratio,
+            overlap_mask=overlap,
+            prefix=colorstr("val: "),
+        )[0]

         if not resume:
             if not opt.noautoanchor:
-                check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)  # run AutoAnchor
+                check_anchors(dataset, model=model, thr=hyp["anchor_t"], imgsz=imgsz)  # run AutoAnchor
             model.half().float()  # pre-reduce anchor precision

         if plots:
@@ -234,10 +293,10 @@ def train(hyp, opt, device, callbacks):  # hyp is path/to/hyp.yaml or hyp dictio

     # Model attributes
     nl = de_parallel(model).model[-1].nl  # number of detection layers (to scale hyps)
-    hyp['box'] *= 3 / nl  # scale to layers
-    hyp['cls'] *= nc / 80 * 3 / nl  # scale to classes and layers
-    hyp['obj'] *= (imgsz / 640) ** 2 * 3 / nl  # scale to image size and layers
-    hyp['label_smoothing'] = opt.label_smoothing
+    hyp["box"] *= 3 / nl  # scale to layers
+    hyp["cls"] *= nc / 80 * 3 / nl  # scale to classes and layers
+    hyp["obj"] *= (imgsz / 640) ** 2 * 3 / nl  # scale to image size and layers
+    hyp["label_smoothing"] = opt.label_smoothing
     model.nc = nc  # attach number of classes to model
     model.hyp = hyp  # attach hyperparameters to model
     model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc  # attach class weights
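The model-attributes hunk above rescales the loss gains by layer count, class count, and image size. A worked example with illustrative values (at the defaults all three factors are 1, so the gains pass through unchanged):

# Worked example of the gain scaling above; nl, nc, imgsz are illustrative values.
hyp = {"box": 0.05, "cls": 0.5, "obj": 1.0}
nl, nc, imgsz = 3, 80, 640  # detection layers, classes, train size

hyp["box"] *= 3 / nl                       # scale to layers
hyp["cls"] *= nc / 80 * 3 / nl             # scale to classes and layers
hyp["obj"] *= (imgsz / 640) ** 2 * 3 / nl  # scale to image size and layers
print(hyp)  # {'box': 0.05, 'cls': 0.5, 'obj': 1.0} -- unchanged at the defaults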
@@ -246,7 +305,7 @@ def train(hyp, opt, device, callbacks):  # hyp is path/to/hyp.yaml or hyp dictio
     # Start training
     t0 = time.time()
     nb = len(train_loader)  # number of batches
-    nw = max(round(hyp['warmup_epochs'] * nb), 100)  # number of warmup iterations, max(3 epochs, 100 iterations)
+    nw = max(round(hyp["warmup_epochs"] * nb), 100)  # number of warmup iterations, max(3 epochs, 100 iterations)
     # nw = min(nw, (epochs - start_epoch) / 2 * nb)  # limit warmup to < 1/2 of training
     last_opt_step = -1
     maps = np.zeros(nc)  # mAP per class
@@ -256,10 +315,12 @@ def train(hyp, opt, device, callbacks):  # hyp is path/to/hyp.yaml or hyp dictio
     stopper, stop = EarlyStopping(patience=opt.patience), False
     compute_loss = ComputeLoss(model, overlap=overlap)  # init loss class
     # callbacks.run('on_train_start')
-    LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n'
-                f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n'
-                f"Logging results to {colorstr('bold', save_dir)}\n"
-                f'Starting training for {epochs} epochs...')
+    LOGGER.info(
+        f'Image sizes {imgsz} train, {imgsz} val\n'
+        f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n'
+        f"Logging results to {colorstr('bold', save_dir)}\n"
+        f'Starting training for {epochs} epochs...'
+    )
     for epoch in range(start_epoch, epochs):  # epoch ------------------------------------------------------------------
         # callbacks.run('on_train_epoch_start')
         model.train()
@@ -278,8 +339,10 @@ def train(hyp, opt, device, callbacks):  # hyp is path/to/hyp.yaml or hyp dictio
         if RANK != -1:
             train_loader.sampler.set_epoch(epoch)
         pbar = enumerate(train_loader)
-        LOGGER.info(('\n' + '%11s' * 8) %
-                    ('Epoch', 'GPU_mem', 'box_loss', 'seg_loss', 'obj_loss', 'cls_loss', 'Instances', 'Size'))
+        LOGGER.info(
+            ("\n" + "%11s" * 8)
+            % ("Epoch", "GPU_mem", "box_loss", "seg_loss", "obj_loss", "cls_loss", "Instances", "Size")
+        )
         if RANK in {-1, 0}:
             pbar = tqdm(pbar, total=nb, bar_format=TQDM_BAR_FORMAT)  # progress bar
         optimizer.zero_grad()
@@ -295,9 +358,9 @@ def train(hyp, opt, device, callbacks):  # hyp is path/to/hyp.yaml or hyp dictio
                 accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round())
                 for j, x in enumerate(optimizer.param_groups):
                     # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
-                    x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 0 else 0.0, x['initial_lr'] * lf(epoch)])
-                    if 'momentum' in x:
-                        x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']])
+                    x["lr"] = np.interp(ni, xi, [hyp["warmup_bias_lr"] if j == 0 else 0.0, x["initial_lr"] * lf(epoch)])
+                    if "momentum" in x:
+                        x["momentum"] = np.interp(ni, xi, [hyp["warmup_momentum"], hyp["momentum"]])

             # Multi-scale
             if opt.multi_scale:
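During the first `nw` iterations the warmup block above linearly interpolates learning rates and momentum with `np.interp`. A standalone sketch with illustrative hyperparameter values and a stand-in scheduler lambda:

# Sketch of the warmup interpolation above: bias LR falls from warmup_bias_lr to the
# scheduled LR while momentum rises; all numbers here are illustrative, not a real run.
import numpy as np

nw, lr0, warmup_bias_lr = 300, 0.01, 0.1  # warmup iterations, base LR, warmup bias LR
warmup_momentum, momentum = 0.8, 0.937
lf = lambda epoch: 1.0  # stand-in for the scheduler lambda

for ni in (0, 150, 300):  # integrated batch number
    xi = [0, nw]
    bias_lr = np.interp(ni, xi, [warmup_bias_lr, lr0 * lf(0)])
    mom = np.interp(ni, xi, [warmup_momentum, momentum])
    print(ni, round(float(bias_lr), 4), round(float(mom), 4))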
@@ -305,7 +368,7 @@ def train(hyp, opt, device, callbacks):  # hyp is path/to/hyp.yaml or hyp dictio
                 sf = sz / max(imgs.shape[2:])  # scale factor
                 if sf != 1:
                     ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]]  # new shape (stretched to gs-multiple)
-                    imgs = nn.functional.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)
+                    imgs = nn.functional.interpolate(imgs, size=ns, mode="bilinear", align_corners=False)

             # Forward
             with torch.cuda.amp.autocast(amp):
@@ -314,7 +377,7 @@ def train(hyp, opt, device, callbacks):  # hyp is path/to/hyp.yaml or hyp dictio
                 if RANK != -1:
                     loss *= WORLD_SIZE  # gradient averaged between devices in DDP mode
                 if opt.quad:
-                    loss *= 4.
+                    loss *= 4.0

             # Backward
             scaler.scale(loss).backward()
@@ -333,9 +396,11 @@ def train(hyp, opt, device, callbacks):  # hyp is path/to/hyp.yaml or hyp dictio
             # Log
             if RANK in {-1, 0}:
                 mloss = (mloss * i + loss_items) / (i + 1)  # update mean losses
-                mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G'  # (GB)
-                pbar.set_description(('%11s' * 2 + '%11.4g' * 6) %
-                                     (f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1]))
+                mem = f"{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G"  # (GB)
+                pbar.set_description(
+                    ("%11s" * 2 + "%11.4g" * 6)
+                    % (f"{epoch}/{epochs - 1}", mem, *mloss, targets.shape[0], imgs.shape[-1])
+                )
                 # callbacks.run('on_train_batch_end', model, ni, imgs, targets, paths)
                 # if callbacks.stop_training:
                 #    return
@@ -343,35 +408,37 @@ def train(hyp, opt, device, callbacks):  # hyp is path/to/hyp.yaml or hyp dictio
                 # Mosaic plots
                 if plots:
                     if ni < 3:
-                        plot_images_and_masks(imgs, targets, masks, paths, save_dir / f'train_batch{ni}.jpg')
+                        plot_images_and_masks(imgs, targets, masks, paths, save_dir / f"train_batch{ni}.jpg")
                     if ni == 10:
-                        files = sorted(save_dir.glob('train*.jpg'))
-                        logger.log_images(files, 'Mosaics', epoch)
+                        files = sorted(save_dir.glob("train*.jpg"))
+                        logger.log_images(files, "Mosaics", epoch)
             # end batch ------------------------------------------------------------------------------------------------

         # Scheduler
-        lr = [x['lr'] for x in optimizer.param_groups]  # for loggers
+        lr = [x["lr"] for x in optimizer.param_groups]  # for loggers
         scheduler.step()

         if RANK in {-1, 0}:
             # mAP
             # callbacks.run('on_train_epoch_end', epoch=epoch)
-            ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride', 'class_weights'])
+            ema.update_attr(model, include=["yaml", "nc", "hyp", "names", "stride", "class_weights"])
             final_epoch = (epoch + 1 == epochs) or stopper.possible_stop
             if not noval or final_epoch:  # Calculate mAP
-                results, maps, _ = validate.run(data_dict,
-                                                batch_size=batch_size // WORLD_SIZE * 2,
-                                                imgsz=imgsz,
-                                                half=amp,
-                                                model=ema.ema,
-                                                single_cls=single_cls,
-                                                dataloader=val_loader,
-                                                save_dir=save_dir,
-                                                plots=False,
-                                                callbacks=callbacks,
-                                                compute_loss=compute_loss,
-                                                mask_downsample_ratio=mask_ratio,
-                                                overlap=overlap)
+                results, maps, _ = validate.run(
+                    data_dict,
+                    batch_size=batch_size // WORLD_SIZE * 2,
+                    imgsz=imgsz,
+                    half=amp,
+                    model=ema.ema,
+                    single_cls=single_cls,
+                    dataloader=val_loader,
+                    save_dir=save_dir,
+                    plots=False,
+                    callbacks=callbacks,
+                    compute_loss=compute_loss,
+                    mask_downsample_ratio=mask_ratio,
+                    overlap=overlap,
+                )

             # Update best mAP
             fi = fitness(np.array(results).reshape(1, -1))  # weighted combination of [P, R, mAP@.5, mAP@.5-.95]
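`fitness()` reduces the validation results to a single scalar for best-model selection. A minimal sketch of the detection-style weighting (0.1 * mAP@0.5 + 0.9 * mAP@0.5:0.95); the segmentation variant in utils/segment/metrics.py applies the same idea across box and mask metrics:

# Minimal sketch of the fitness weighting used for best-model selection.
import numpy as np

def fitness(x):
    w = [0.0, 0.0, 0.1, 0.9]  # weights for [P, R, mAP@0.5, mAP@0.5:0.95]
    return (x[:, :4] * w).sum(1)

results = np.array([[0.7, 0.6, 0.65, 0.45]])  # example [P, R, mAP@0.5, mAP@0.5:0.95]
print(fitness(results))  # [0.47]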
@@ -387,23 +454,24 @@ def train(hyp, opt, device, callbacks):  # hyp is path/to/hyp.yaml or hyp dictio
             # Save model
             if (not nosave) or (final_epoch and not evolve):  # if save
                 ckpt = {
-                    'epoch': epoch,
-                    'best_fitness': best_fitness,
-                    'model': deepcopy(de_parallel(model)).half(),
-                    'ema': deepcopy(ema.ema).half(),
-                    'updates': ema.updates,
-                    'optimizer': optimizer.state_dict(),
-                    'opt': vars(opt),
-                    'git': GIT_INFO,  # {remote, branch, commit} if a git repo
-                    'date': datetime.now().isoformat()}
+                    "epoch": epoch,
+                    "best_fitness": best_fitness,
+                    "model": deepcopy(de_parallel(model)).half(),
+                    "ema": deepcopy(ema.ema).half(),
+                    "updates": ema.updates,
+                    "optimizer": optimizer.state_dict(),
+                    "opt": vars(opt),
+                    "git": GIT_INFO,  # {remote, branch, commit} if a git repo
+                    "date": datetime.now().isoformat(),
+                }

                 # Save last, best and delete
                 torch.save(ckpt, last)
                 if best_fitness == fi:
                     torch.save(ckpt, best)
                 if opt.save_period > 0 and epoch % opt.save_period == 0:
-                    torch.save(ckpt, w / f'epoch{epoch}.pt')
-                    logger.log_model(w / f'epoch{epoch}.pt')
+                    torch.save(ckpt, w / f"epoch{epoch}.pt")
+                    logger.log_model(w / f"epoch{epoch}.pt")
                 del ckpt
                 # callbacks.run('on_model_save', last, epoch, final_epoch, best_fitness, fi)

@@ -419,12 +487,12 @@ def train(hyp, opt, device, callbacks):  # hyp is path/to/hyp.yaml or hyp dictio
         # end epoch ----------------------------------------------------------------------------------------------------
     # end training -----------------------------------------------------------------------------------------------------
     if RANK in {-1, 0}:
-        LOGGER.info(f'\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.')
+        LOGGER.info(f"\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.")
         for f in last, best:
             if f.exists():
                 strip_optimizer(f)  # strip optimizers
                 if f is best:
-                    LOGGER.info(f'\nValidating {f}...')
+                    LOGGER.info(f"\nValidating {f}...")
                     results, _, _ = validate.run(
                         data_dict,
                         batch_size=batch_size // WORLD_SIZE * 2,
@@ -440,7 +508,8 @@ def train(hyp, opt, device, callbacks):  # hyp is path/to/hyp.yaml or hyp dictio
                         callbacks=callbacks,
                         compute_loss=compute_loss,
                         mask_downsample_ratio=mask_ratio,
-                        overlap=overlap)  # val best model with plots
+                        overlap=overlap,
+                    )  # val best model with plots
                     if is_coco:
                         # callbacks.run('on_fit_epoch_end', list(mloss) + list(results) + lr, epoch, best_fitness, fi)
                         metrics_dict = dict(zip(KEYS, list(mloss) + list(results) + lr))
@@ -452,56 +521,56 @@ def train(hyp, opt, device, callbacks):  # hyp is path/to/hyp.yaml or hyp dictio
         if not opt.evolve:
             logger.log_model(best, epoch)
         if plots:
-            plot_results_with_masks(file=save_dir / 'results.csv')  # save results.png
-            files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))]
+            plot_results_with_masks(file=save_dir / "results.csv")  # save results.png
+            files = ["results.png", "confusion_matrix.png", *(f"{x}_curve.png" for x in ("F1", "PR", "P", "R"))]
             files = [(save_dir / f) for f in files if (save_dir / f).exists()]  # filter
             LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}")
-            logger.log_images(files, 'Results', epoch + 1)
-            logger.log_images(sorted(save_dir.glob('val*.jpg')), 'Validation', epoch + 1)
+            logger.log_images(files, "Results", epoch + 1)
+            logger.log_images(sorted(save_dir.glob("val*.jpg")), "Validation", epoch + 1)
     torch.cuda.empty_cache()
     return results


 def parse_opt(known=False):
     parser = argparse.ArgumentParser()
-    parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s-seg.pt', help='initial weights path')
-    parser.add_argument('--cfg', type=str, default='', help='model.yaml path')
-    parser.add_argument('--data', type=str, default=ROOT / 'data/coco128-seg.yaml', help='dataset.yaml path')
-    parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path')
-    parser.add_argument('--epochs', type=int, default=100, help='total training epochs')
-    parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch')
-    parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)')
-    parser.add_argument('--rect', action='store_true', help='rectangular training')
-    parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training')
-    parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
-    parser.add_argument('--noval', action='store_true', help='only validate final epoch')
-    parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor')
-    parser.add_argument('--noplots', action='store_true', help='save no plot files')
-    parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations')
-    parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
-    parser.add_argument('--cache', type=str, nargs='?', const='ram', help='image --cache ram/disk')
-    parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training')
-    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
-    parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
-    parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class')
-    parser.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW'], default='SGD', help='optimizer')
-    parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
-    parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
-    parser.add_argument('--project', default=ROOT / 'runs/train-seg', help='save to project/name')
-    parser.add_argument('--name', default='exp', help='save to project/name')
-    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
-    parser.add_argument('--quad', action='store_true', help='quad dataloader')
-    parser.add_argument('--cos-lr', action='store_true', help='cosine LR scheduler')
-    parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon')
-    parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)')
-    parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2')
-    parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)')
-    parser.add_argument('--seed', type=int, default=0, help='Global training seed')
-    parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify')
+    parser.add_argument("--weights", type=str, default=ROOT / "yolov5s-seg.pt", help="initial weights path")
+    parser.add_argument("--cfg", type=str, default="", help="model.yaml path")
+    parser.add_argument("--data", type=str, default=ROOT / "data/coco128-seg.yaml", help="dataset.yaml path")
+    parser.add_argument("--hyp", type=str, default=ROOT / "data/hyps/hyp.scratch-low.yaml", help="hyperparameters path")
+    parser.add_argument("--epochs", type=int, default=100, help="total training epochs")
+    parser.add_argument("--batch-size", type=int, default=16, help="total batch size for all GPUs, -1 for autobatch")
+    parser.add_argument("--imgsz", "--img", "--img-size", type=int, default=640, help="train, val image size (pixels)")
+    parser.add_argument("--rect", action="store_true", help="rectangular training")
+    parser.add_argument("--resume", nargs="?", const=True, default=False, help="resume most recent training")
+    parser.add_argument("--nosave", action="store_true", help="only save final checkpoint")
+    parser.add_argument("--noval", action="store_true", help="only validate final epoch")
+    parser.add_argument("--noautoanchor", action="store_true", help="disable AutoAnchor")
+    parser.add_argument("--noplots", action="store_true", help="save no plot files")
+    parser.add_argument("--evolve", type=int, nargs="?", const=300, help="evolve hyperparameters for x generations")
+    parser.add_argument("--bucket", type=str, default="", help="gsutil bucket")
+    parser.add_argument("--cache", type=str, nargs="?", const="ram", help="image --cache ram/disk")
+    parser.add_argument("--image-weights", action="store_true", help="use weighted image selection for training")
+    parser.add_argument("--device", default="", help="cuda device, i.e. 0 or 0,1,2,3 or cpu")
+    parser.add_argument("--multi-scale", action="store_true", help="vary img-size +/- 50%%")
+    parser.add_argument("--single-cls", action="store_true", help="train multi-class data as single-class")
+    parser.add_argument("--optimizer", type=str, choices=["SGD", "Adam", "AdamW"], default="SGD", help="optimizer")
+    parser.add_argument("--sync-bn", action="store_true", help="use SyncBatchNorm, only available in DDP mode")
+    parser.add_argument("--workers", type=int, default=8, help="max dataloader workers (per RANK in DDP mode)")
+    parser.add_argument("--project", default=ROOT / "runs/train-seg", help="save to project/name")
+    parser.add_argument("--name", default="exp", help="save to project/name")
+    parser.add_argument("--exist-ok", action="store_true", help="existing project/name ok, do not increment")
+    parser.add_argument("--quad", action="store_true", help="quad dataloader")
+    parser.add_argument("--cos-lr", action="store_true", help="cosine LR scheduler")
+    parser.add_argument("--label-smoothing", type=float, default=0.0, help="Label smoothing epsilon")
+    parser.add_argument("--patience", type=int, default=100, help="EarlyStopping patience (epochs without improvement)")
+    parser.add_argument("--freeze", nargs="+", type=int, default=[0], help="Freeze layers: backbone=10, first3=0 1 2")
+    parser.add_argument("--save-period", type=int, default=-1, help="Save checkpoint every x epochs (disabled if < 1)")
+    parser.add_argument("--seed", type=int, default=0, help="Global training seed")
+    parser.add_argument("--local_rank", type=int, default=-1, help="Automatic DDP Multi-GPU argument, do not modify")

     # Instance Segmentation Args
-    parser.add_argument('--mask-ratio', type=int, default=4, help='Downsample the truth masks to saving memory')
-    parser.add_argument('--no-overlap', action='store_true', help='Overlap masks train faster at slightly less mAP')
+    parser.add_argument("--mask-ratio", type=int, default=4, help="Downsample the truth masks to save memory")
+    parser.add_argument("--no-overlap", action="store_true", help="Overlapping masks train faster at slightly lower mAP")

     return parser.parse_known_args()[0] if known else parser.parse_args()

@@ -511,46 +580,51 @@ def main(opt, callbacks=Callbacks()):
     if RANK in {-1, 0}:
         print_args(vars(opt))
         check_git_status()
-        check_requirements(ROOT / 'requirements.txt')
+        check_requirements(ROOT / "requirements.txt")

     # Resume
     if opt.resume and not opt.evolve:  # resume from specified or most recent last.pt
         last = Path(check_file(opt.resume) if isinstance(opt.resume, str) else get_latest_run())
-        opt_yaml = last.parent.parent / 'opt.yaml'  # train options yaml
+        opt_yaml = last.parent.parent / "opt.yaml"  # train options yaml
         opt_data = opt.data  # original dataset
         if opt_yaml.is_file():
-            with open(opt_yaml, errors='ignore') as f:
+            with open(opt_yaml, errors="ignore") as f:
                 d = yaml.safe_load(f)
         else:
-            d = torch.load(last, map_location='cpu')['opt']
+            d = torch.load(last, map_location="cpu")["opt"]
         opt = argparse.Namespace(**d)  # replace
-        opt.cfg, opt.weights, opt.resume = '', str(last), True  # reinstate
+        opt.cfg, opt.weights, opt.resume = "", str(last), True  # reinstate
         if is_url(opt_data):
            opt.data = check_file(opt_data)  # avoid HUB resume auth timeout
     else:
-        opt.data, opt.cfg, opt.hyp, opt.weights, opt.project = \
-            check_file(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp), str(opt.weights), str(opt.project)  # checks
-        assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
+        opt.data, opt.cfg, opt.hyp, opt.weights, opt.project = (
+            check_file(opt.data),
+            check_yaml(opt.cfg),
+            check_yaml(opt.hyp),
+            str(opt.weights),
+            str(opt.project),
+        )  # checks
+        assert len(opt.cfg) or len(opt.weights), "either --cfg or --weights must be specified"
         if opt.evolve:
-            if opt.project == str(ROOT / 'runs/train-seg'):  # if default project name, rename to runs/evolve-seg
-                opt.project = str(ROOT / 'runs/evolve-seg')
+            if opt.project == str(ROOT / "runs/train-seg"):  # if default project name, rename to runs/evolve-seg
+                opt.project = str(ROOT / "runs/evolve-seg")
             opt.exist_ok, opt.resume = opt.resume, False  # pass resume to exist_ok and disable resume
-        if opt.name == 'cfg':
+        if opt.name == "cfg":
             opt.name = Path(opt.cfg).stem  # use model.yaml as name
         opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok))

     # DDP mode
     device = select_device(opt.device, batch_size=opt.batch_size)
     if LOCAL_RANK != -1:
-        msg = 'is not compatible with YOLOv5 Multi-GPU DDP training'
-        assert not opt.image_weights, f'--image-weights {msg}'
-        assert not opt.evolve, f'--evolve {msg}'
-        assert opt.batch_size != -1, f'AutoBatch with --batch-size -1 {msg}, please pass a valid --batch-size'
-        assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE'
-        assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command'
+        msg = "is not compatible with YOLOv5 Multi-GPU DDP training"
+        assert not opt.image_weights, f"--image-weights {msg}"
+        assert not opt.evolve, f"--evolve {msg}"
+        assert opt.batch_size != -1, f"AutoBatch with --batch-size -1 {msg}, please pass a valid --batch-size"
+        assert opt.batch_size % WORLD_SIZE == 0, f"--batch-size {opt.batch_size} must be multiple of WORLD_SIZE"
+        assert torch.cuda.device_count() > LOCAL_RANK, "insufficient CUDA devices for DDP command"
         torch.cuda.set_device(LOCAL_RANK)
-        device = torch.device('cuda', LOCAL_RANK)
-        dist.init_process_group(backend='nccl' if dist.is_nccl_available() else 'gloo')
+        device = torch.device("cuda", LOCAL_RANK)
+        dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo")

     # Train
     if not opt.evolve:
@@ -560,65 +634,69 @@ def main(opt, callbacks=Callbacks()):
     else:
         # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit)
         meta = {
-            'lr0': (1, 1e-5, 1e-1),  # initial learning rate (SGD=1E-2, Adam=1E-3)
-            'lrf': (1, 0.01, 1.0),  # final OneCycleLR learning rate (lr0 * lrf)
-            'momentum': (0.3, 0.6, 0.98),  # SGD momentum/Adam beta1
-            'weight_decay': (1, 0.0, 0.001),  # optimizer weight decay
-            'warmup_epochs': (1, 0.0, 5.0),  # warmup epochs (fractions ok)
-            'warmup_momentum': (1, 0.0, 0.95),  # warmup initial momentum
-            'warmup_bias_lr': (1, 0.0, 0.2),  # warmup initial bias lr
-            'box': (1, 0.02, 0.2),  # box loss gain
-            'cls': (1, 0.2, 4.0),  # cls loss gain
-            'cls_pw': (1, 0.5, 2.0),  # cls BCELoss positive_weight
-            'obj': (1, 0.2, 4.0),  # obj loss gain (scale with pixels)
-            'obj_pw': (1, 0.5, 2.0),  # obj BCELoss positive_weight
-            'iou_t': (0, 0.1, 0.7),  # IoU training threshold
-            'anchor_t': (1, 2.0, 8.0),  # anchor-multiple threshold
-            'anchors': (2, 2.0, 10.0),  # anchors per output grid (0 to ignore)
-            'fl_gamma': (0, 0.0, 2.0),  # focal loss gamma (efficientDet default gamma=1.5)
-            'hsv_h': (1, 0.0, 0.1),  # image HSV-Hue augmentation (fraction)
-            'hsv_s': (1, 0.0, 0.9),  # image HSV-Saturation augmentation (fraction)
-            'hsv_v': (1, 0.0, 0.9),  # image HSV-Value augmentation (fraction)
-            'degrees': (1, 0.0, 45.0),  # image rotation (+/- deg)
-            'translate': (1, 0.0, 0.9),  # image translation (+/- fraction)
-            'scale': (1, 0.0, 0.9),  # image scale (+/- gain)
-            'shear': (1, 0.0, 10.0),  # image shear (+/- deg)
-            'perspective': (0, 0.0, 0.001),  # image perspective (+/- fraction), range 0-0.001
-            'flipud': (1, 0.0, 1.0),  # image flip up-down (probability)
-            'fliplr': (0, 0.0, 1.0),  # image flip left-right (probability)
-            'mosaic': (1, 0.0, 1.0),  # image mixup (probability)
-            'mixup': (1, 0.0, 1.0),  # image mixup (probability)
-            'copy_paste': (1, 0.0, 1.0)}  # segment copy-paste (probability)
-
-        with open(opt.hyp, errors='ignore') as f:
+            "lr0": (1, 1e-5, 1e-1),  # initial learning rate (SGD=1E-2, Adam=1E-3)
+            "lrf": (1, 0.01, 1.0),  # final OneCycleLR learning rate (lr0 * lrf)
+            "momentum": (0.3, 0.6, 0.98),  # SGD momentum/Adam beta1
+            "weight_decay": (1, 0.0, 0.001),  # optimizer weight decay
+            "warmup_epochs": (1, 0.0, 5.0),  # warmup epochs (fractions ok)
+            "warmup_momentum": (1, 0.0, 0.95),  # warmup initial momentum
+            "warmup_bias_lr": (1, 0.0, 0.2),  # warmup initial bias lr
+            "box": (1, 0.02, 0.2),  # box loss gain
+            "cls": (1, 0.2, 4.0),  # cls loss gain
+            "cls_pw": (1, 0.5, 2.0),  # cls BCELoss positive_weight
+            "obj": (1, 0.2, 4.0),  # obj loss gain (scale with pixels)
+            "obj_pw": (1, 0.5, 2.0),  # obj BCELoss positive_weight
+            "iou_t": (0, 0.1, 0.7),  # IoU training threshold
+            "anchor_t": (1, 2.0, 8.0),  # anchor-multiple threshold
+            "anchors": (2, 2.0, 10.0),  # anchors per output grid (0 to ignore)
+            "fl_gamma": (0, 0.0, 2.0),  # focal loss gamma (efficientDet default gamma=1.5)
+            "hsv_h": (1, 0.0, 0.1),  # image HSV-Hue augmentation (fraction)
+            "hsv_s": (1, 0.0, 0.9),  # image HSV-Saturation augmentation (fraction)
+            "hsv_v": (1, 0.0, 0.9),  # image HSV-Value augmentation (fraction)
+            "degrees": (1, 0.0, 45.0),  # image rotation (+/- deg)
+            "translate": (1, 0.0, 0.9),  # image translation (+/- fraction)
+            "scale": (1, 0.0, 0.9),  # image scale (+/- gain)
+            "shear": (1, 0.0, 10.0),  # image shear (+/- deg)
+            "perspective": (0, 0.0, 0.001),  # image perspective (+/- fraction), range 0-0.001
+            "flipud": (1, 0.0, 1.0),  # image flip up-down (probability)
+            "fliplr": (0, 0.0, 1.0),  # image flip left-right (probability)
+            "mosaic": (1, 0.0, 1.0),  # image mosaic (probability)
+            "mixup": (1, 0.0, 1.0),  # image mixup (probability)
+            "copy_paste": (1, 0.0, 1.0),
+        }  # segment copy-paste (probability)

+        with open(opt.hyp, errors="ignore") as f:
             hyp = yaml.safe_load(f)  # load hyps dict
-            if 'anchors' not in hyp:  # anchors commented in hyp.yaml
-                hyp['anchors'] = 3
+            if "anchors" not in hyp:  # anchors commented in hyp.yaml
+                hyp["anchors"] = 3
         if opt.noautoanchor:
-            del hyp['anchors'], meta['anchors']
+            del hyp["anchors"], meta["anchors"]
         opt.noval, opt.nosave, save_dir = True, True, Path(opt.save_dir)  # only val/save final epoch
         # ei = [isinstance(x, (int, float)) for x in hyp.values()]  # evolvable indices
-        evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv'
+        evolve_yaml, evolve_csv = save_dir / "hyp_evolve.yaml", save_dir / "evolve.csv"
         if opt.bucket:
             # download evolve.csv if exists
-            subprocess.run([
-                'gsutil',
-                'cp',
-                f'gs://{opt.bucket}/evolve.csv',
-                str(evolve_csv), ])
+            subprocess.run(
+                [
+                    "gsutil",
+                    "cp",
+                    f"gs://{opt.bucket}/evolve.csv",
+                    str(evolve_csv),
+                ]
+            )

         for _ in range(opt.evolve):  # generations to evolve
             if evolve_csv.exists():  # if evolve.csv exists: select best hyps and mutate
                 # Select parent(s)
-                parent = 'single'  # parent selection method: 'single' or 'weighted'
-                x = np.loadtxt(evolve_csv, ndmin=2, delimiter=',', skiprows=1)
+                parent = "single"  # parent selection method: 'single' or 'weighted'
+                x = np.loadtxt(evolve_csv, ndmin=2, delimiter=",", skiprows=1)
                 n = min(5, len(x))  # number of previous results to consider
                 x = x[np.argsort(-fitness(x))][:n]  # top n mutations
-                w = fitness(x) - fitness(x).min() + 1E-6  # weights (sum > 0)
-                if parent == 'single' or len(x) == 1:
+                w = fitness(x) - fitness(x).min() + 1e-6  # weights (sum > 0)
+                if parent == "single" or len(x) == 1:
                     # x = x[random.randint(0, n - 1)]  # random selection
                     x = x[random.choices(range(n), weights=w)[0]]  # weighted selection
-                elif parent == 'weighted':
+                elif parent == "weighted":
                     x = (x * w.reshape(n, 1)).sum(0) / w.sum()  # weighted combination

                 # Mutate
@@ -647,9 +725,11 @@ def main(opt, callbacks=Callbacks()):
         # Plot results
         plot_evolve(evolve_csv)
-        LOGGER.info(f'Hyperparameter evolution finished {opt.evolve} generations\n'
-                    f"Results saved to {colorstr('bold', save_dir)}\n"
-                    f'Usage example: $ python train.py --hyp {evolve_yaml}')
+        LOGGER.info(
+            f'Hyperparameter evolution finished {opt.evolve} generations\n'
+            f"Results saved to {colorstr('bold', save_dir)}\n"
+            f'Usage example: $ python train.py --hyp {evolve_yaml}'
+        )


 def run(**kwargs):
@@ -661,6 +741,6 @@ def run(**kwargs):
     return opt


-if __name__ == '__main__':
+if __name__ == "__main__":
     opt = parse_opt()
     main(opt)
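The evolve loop above mutates the best previous hyperparameters each generation using the `meta` gains and bounds. A condensed, self-contained sketch of one mutation step, assuming the `mp, s = 0.8, 0.2` mutation settings used in train.py's evolve loop:

# Condensed sketch of one mutation step, with a toy parent hyp dict and two meta entries.
import numpy as np

meta = {"lr0": (1, 1e-5, 1e-1), "momentum": (0.3, 0.6, 0.98)}  # (gain, low, high)
hyp = {"lr0": 0.01, "momentum": 0.937}

np.random.seed(0)
mp, s = 0.8, 0.2  # mutation probability and sigma
g = np.array([meta[k][0] for k in hyp])  # per-key gains
ng = len(meta)
v = np.ones(ng)
while all(v == 1):  # mutate until a change occurs
    v = (g * (np.random.random(ng) < mp) * np.random.randn(ng) * np.random.random() * s + 1).clip(0.3, 3.0)
for i, k in enumerate(hyp.keys()):
    hyp[k] = float(hyp[k] * v[i])  # mutate
    hyp[k] = max(min(hyp[k], meta[k][2]), meta[k][1])  # clip to bounds
print(hyp)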
diff --git a/segment/val.py b/segment/val.py
index 304d0c751314..1e5159c710ed 100644
--- a/segment/val.py
+++ b/segment/val.py
@@ -1,6 +1,6 @@
 # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
 """
-Validate a trained YOLOv5 segment model on a segment dataset
+Validate a trained YOLOv5 segment model on a segment dataset.

 Usage:
     $ bash data/scripts/get_coco.sh --val --segments  # download COCO-segments val split (1G, 5000 images)
@@ -43,9 +43,24 @@
 from models.common import DetectMultiBackend
 from models.yolo import SegmentationModel
 from utils.callbacks import Callbacks
-from utils.general import (LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, Profile, check_dataset, check_img_size,
-                           check_requirements, check_yaml, coco80_to_coco91_class, colorstr, increment_path,
-                           non_max_suppression, print_args, scale_boxes, xywh2xyxy, xyxy2xywh)
+from utils.general import (
+    LOGGER,
+    NUM_THREADS,
+    TQDM_BAR_FORMAT,
+    Profile,
+    check_dataset,
+    check_img_size,
+    check_requirements,
+    check_yaml,
+    coco80_to_coco91_class,
+    colorstr,
+    increment_path,
+    non_max_suppression,
+    print_args,
+    scale_boxes,
+    xywh2xyxy,
+    xyxy2xywh,
+)
 from utils.metrics import ConfusionMatrix, box_iou
 from utils.plots import output_to_target, plot_val_study
 from utils.segment.dataloaders import create_dataloader
@@ -61,8 +76,8 @@ def save_one_txt(predn, save_conf, shape, file):
     for *xyxy, conf, cls in predn.tolist():
         xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
         line = (cls, *xywh, conf) if save_conf else (cls, *xywh)  # label format
-        with open(file, 'a') as f:
-            f.write(('%g ' * len(line)).rstrip() % line + '\n')
+        with open(file, "a") as f:
+            f.write(("%g " * len(line)).rstrip() % line + "\n")


 def save_one_json(predn, jdict, path, class_map, pred_masks):
@@ -70,8 +85,8 @@ def save_one_json(predn, jdict, path, class_map, pred_masks):
     from pycocotools.mask import encode

     def single_encode(x):
-        rle = encode(np.asarray(x[:, :, None], order='F', dtype='uint8'))[0]
-        rle['counts'] = rle['counts'].decode('utf-8')
+        rle = encode(np.asarray(x[:, :, None], order="F", dtype="uint8"))[0]
+        rle["counts"] = rle["counts"].decode("utf-8")
         return rle

     image_id = int(path.stem) if path.stem.isnumeric() else path.stem
@@ -81,12 +96,15 @@ def single_encode(x):
     with ThreadPool(NUM_THREADS) as pool:
         rles = pool.map(single_encode, pred_masks)
     for i, (p, b) in enumerate(zip(predn.tolist(), box.tolist())):
-        jdict.append({
-            'image_id': image_id,
-            'category_id': class_map[int(p[5])],
-            'bbox': [round(x, 3) for x in b],
-            'score': round(p[4], 5),
-            'segmentation': rles[i]})
+        jdict.append(
+            {
+                "image_id": image_id,
+                "category_id": class_map[int(p[5])],
+                "bbox": [round(x, 3) for x in b],
+                "score": round(p[4], 5),
+                "segmentation": rles[i],
+            }
+        )


 def process_batch(detections, labels, iouv, pred_masks=None, gt_masks=None, overlap=False, masks=False):
@@ -105,7 +123,7 @@ def process_batch(detections, labels, iouv, pred_masks=None, gt_masks=None, over
             gt_masks = gt_masks.repeat(nl, 1, 1)  # shape(1,640,640) -> (n,640,640)
             gt_masks = torch.where(gt_masks == index, 1.0, 0.0)
         if gt_masks.shape[1:] != pred_masks.shape[1:]:
-            gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode='bilinear', align_corners=False)[0]
+            gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode="bilinear", align_corners=False)[0]
             gt_masks = gt_masks.gt_(0.5)
         iou = mask_iou(gt_masks.view(gt_masks.shape[0], -1), pred_masks.view(pred_masks.shape[0], -1))
     else:  # boxes
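`save_one_json` above serializes each predicted mask as a COCO run-length encoding. A standalone sketch of that encoding on a toy mask (requires pycocotools; the array here is illustrative, not a model output):

# Standalone sketch of the RLE encoding done in save_one_json above.
import numpy as np
from pycocotools.mask import encode

mask = np.zeros((4, 6), dtype=np.uint8)
mask[1:3, 2:5] = 1  # toy binary mask

rle = encode(np.asarray(mask[:, :, None], order="F", dtype="uint8"))[0]
rle["counts"] = rle["counts"].decode("utf-8")  # make it JSON-serializable, as in the diff
print(rle)  # {'size': [4, 6], 'counts': '...'}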
@@ -128,39 +146,39 @@
 @smart_inference_mode()
 def run(
-        data,
-        weights=None,  # model.pt path(s)
-        batch_size=32,  # batch size
-        imgsz=640,  # inference size (pixels)
-        conf_thres=0.001,  # confidence threshold
-        iou_thres=0.6,  # NMS IoU threshold
-        max_det=300,  # maximum detections per image
-        task='val',  # train, val, test, speed or study
-        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
-        workers=8,  # max dataloader workers (per RANK in DDP mode)
-        single_cls=False,  # treat as single-class dataset
-        augment=False,  # augmented inference
-        verbose=False,  # verbose output
-        save_txt=False,  # save results to *.txt
-        save_hybrid=False,  # save label+prediction hybrid results to *.txt
-        save_conf=False,  # save confidences in --save-txt labels
-        save_json=False,  # save a COCO-JSON results file
-        project=ROOT / 'runs/val-seg',  # save to project/name
-        name='exp',  # save to project/name
-        exist_ok=False,  # existing project/name ok, do not increment
-        half=True,  # use FP16 half-precision inference
-        dnn=False,  # use OpenCV DNN for ONNX inference
-        model=None,
-        dataloader=None,
-        save_dir=Path(''),
-        plots=True,
-        overlap=False,
-        mask_downsample_ratio=1,
-        compute_loss=None,
-        callbacks=Callbacks(),
+    data,
+    weights=None,  # model.pt path(s)
+    batch_size=32,  # batch size
+    imgsz=640,  # inference size (pixels)
+    conf_thres=0.001,  # confidence threshold
+    iou_thres=0.6,  # NMS IoU threshold
+    max_det=300,  # maximum detections per image
+    task="val",  # train, val, test, speed or study
+    device="",  # cuda device, i.e. 0 or 0,1,2,3 or cpu
+    workers=8,  # max dataloader workers (per RANK in DDP mode)
+    single_cls=False,  # treat as single-class dataset
+    augment=False,  # augmented inference
+    verbose=False,  # verbose output
+    save_txt=False,  # save results to *.txt
+    save_hybrid=False,  # save label+prediction hybrid results to *.txt
+    save_conf=False,  # save confidences in --save-txt labels
+    save_json=False,  # save a COCO-JSON results file
+    project=ROOT / "runs/val-seg",  # save to project/name
+    name="exp",  # save to project/name
+    exist_ok=False,  # existing project/name ok, do not increment
+    half=True,  # use FP16 half-precision inference
+    dnn=False,  # use OpenCV DNN for ONNX inference
+    model=None,
+    dataloader=None,
+    save_dir=Path(""),
+    plots=True,
+    overlap=False,
+    mask_downsample_ratio=1,
+    compute_loss=None,
+    callbacks=Callbacks(),
 ):
     if save_json:
-        check_requirements('pycocotools>=2.0.6')
+        check_requirements("pycocotools>=2.0.6")
         process = process_mask_native  # more accurate
     else:
         process = process_mask  # faster

     training = model is not None
     if training:  # called by train.py
         device, pt, jit, engine = next(model.parameters()).device, True, False, False  # get model device, PyTorch model
-        half &= device.type != 'cpu'  # half precision only supported on CUDA
+        half &= device.type != "cpu"  # half precision only supported on CUDA
         model.half() if half else model.float()
         nm = de_parallel(model).model[-1].nm  # number of masks
     else:  # called directly
@@ -177,7 +195,7 @@ def run(
         # Directories
         save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run
-        (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir
+        (save_dir / "labels" if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir

         # Load model
         model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
@@ -191,16 +209,16 @@ def run(
         device = model.device
         if not (pt or jit):
             batch_size = 1  # export.py models default to batch-size 1
-            LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models')
+            LOGGER.info(f"Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models")

         # Data
         data = check_dataset(data)  # check

     # Configure
     model.eval()
-    cuda = device.type != 'cpu'
-    is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'coco{os.sep}val2017.txt')  # COCO dataset
-    nc = 1 if single_cls else int(data['nc'])  # number of classes
+    cuda = device.type != "cpu"
+    is_coco = isinstance(data.get("val"), str) and data["val"].endswith(f"coco{os.sep}val2017.txt")  # COCO dataset
+    nc = 1 if single_cls else int(data["nc"])  # number of classes
     iouv = torch.linspace(0.5, 0.95, 10, device=device)  # iou vector for mAP@0.5:0.95
     niou = iouv.numel()

@@ -208,31 +226,46 @@ def run(
     if not training:
         if pt and not single_cls:  # check --weights are trained on --data
             ncm = model.model.nc
-            assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \
-                              f'classes). Pass correct combination of --weights and --data that are trained together.'
+            assert ncm == nc, (
+                f"{weights} ({ncm} classes) trained on different --data than what you passed ({nc} "
+                f"classes). Pass correct combination of --weights and --data that are trained together."
+            )
         model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz))  # warmup
-        pad, rect = (0.0, False) if task == 'speed' else (0.5, pt)  # square inference for benchmarks
-        task = task if task in ('train', 'val', 'test') else 'val'  # path to train/val/test images
-        dataloader = create_dataloader(data[task],
-                                       imgsz,
-                                       batch_size,
-                                       stride,
-                                       single_cls,
-                                       pad=pad,
-                                       rect=rect,
-                                       workers=workers,
-                                       prefix=colorstr(f'{task}: '),
-                                       overlap_mask=overlap,
-                                       mask_downsample_ratio=mask_downsample_ratio)[0]
+        pad, rect = (0.0, False) if task == "speed" else (0.5, pt)  # square inference for benchmarks
+        task = task if task in ("train", "val", "test") else "val"  # path to train/val/test images
+        dataloader = create_dataloader(
+            data[task],
+            imgsz,
+            batch_size,
+            stride,
+            single_cls,
+            pad=pad,
+            rect=rect,
+            workers=workers,
+            prefix=colorstr(f"{task}: "),
+            overlap_mask=overlap,
+            mask_downsample_ratio=mask_downsample_ratio,
+        )[0]

     seen = 0
     confusion_matrix = ConfusionMatrix(nc=nc)
-    names = model.names if hasattr(model, 'names') else model.module.names  # get class names
+    names = model.names if hasattr(model, "names") else model.module.names  # get class names
     if isinstance(names, (list, tuple)):  # old format
         names = dict(enumerate(names))
     class_map = coco80_to_coco91_class() if is_coco else list(range(1000))
-    s = ('%22s' + '%11s' * 10) % ('Class', 'Images', 'Instances', 'Box(P', 'R', 'mAP50', 'mAP50-95)', 'Mask(P', 'R',
-                                  'mAP50', 'mAP50-95)')
+    s = ("%22s" + "%11s" * 10) % (
+        "Class",
+        "Images",
+        "Instances",
+        "Box(P",
+        "R",
+        "mAP50",
+        "mAP50-95)",
+        "Mask(P",
+        "R",
+        "mAP50",
+        "mAP50-95)",
+    )
     dt = Profile(device=device), Profile(device=device), Profile(device=device)
     metrics = Metrics()
     loss = torch.zeros(4, device=device)
@@ -263,14 +296,9 @@ def run(
             targets[:, 2:] *= torch.tensor((width, height, width, height), device=device)  # to pixels
             lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else []  # for autolabelling
             with dt[2]:
-                preds = non_max_suppression(preds,
-                                            conf_thres,
-                                            iou_thres,
-                                            labels=lb,
-                                            multi_label=True,
-                                            agnostic=single_cls,
-                                            max_det=max_det,
-                                            nm=nm)
+                preds = non_max_suppression(
+                    preds, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls, max_det=max_det, nm=nm
+                )

         # Metrics
         plot_masks = []  # masks for plotting
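`process_batch` above matches predictions to ground truth by flattening binary masks and computing a pairwise IoU matrix. A minimal sketch assuming the `mask_iou(gt.view(n, -1), pred.view(m, -1))` semantics implied by the call site:

# Minimal sketch of the flattened mask IoU used by process_batch above.
import torch

def mask_iou(mask1, mask2, eps=1e-7):
    # mask1: (n, HW), mask2: (m, HW), binary {0, 1} -> (n, m) IoU matrix
    intersection = torch.matmul(mask1, mask2.T).clamp_(0)
    union = (mask1.sum(1)[:, None] + mask2.sum(1)[None]) - intersection
    return intersection / (union + eps)

gt = torch.zeros(1, 16)
gt[0, :8] = 1
pred = torch.zeros(1, 16)
pred[0, 4:12] = 1
print(mask_iou(gt, pred))  # tensor([[0.3333]]) -- 4 shared pixels / 12 in the union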
shape, file=save_dir / "labels" / f"{path.stem}.txt") if save_json: - pred_masks = scale_image(im[si].shape[1:], - pred_masks.permute(1, 2, 0).contiguous().cpu().numpy(), shape, shapes[si][1]) + pred_masks = scale_image( + im[si].shape[1:], pred_masks.permute(1, 2, 0).contiguous().cpu().numpy(), shape, shapes[si][1] + ) save_one_json(predn, jdict, path, class_map, pred_masks) # append to COCO-JSON dictionary # callbacks.run('on_val_image_end', pred, predn, path, names, im[si]) @@ -328,9 +357,15 @@ def run( if plots and batch_i < 3: if len(plot_masks): plot_masks = torch.cat(plot_masks, dim=0) - plot_images_and_masks(im, targets, masks, paths, save_dir / f'val_batch{batch_i}_labels.jpg', names) - plot_images_and_masks(im, output_to_target(preds, max_det=15), plot_masks, paths, - save_dir / f'val_batch{batch_i}_pred.jpg', names) # pred + plot_images_and_masks(im, targets, masks, paths, save_dir / f"val_batch{batch_i}_labels.jpg", names) + plot_images_and_masks( + im, + output_to_target(preds, max_det=15), + plot_masks, + paths, + save_dir / f"val_batch{batch_i}_pred.jpg", + names, + ) # pred # callbacks.run('on_val_batch_end') @@ -342,10 +377,10 @@ def run( nt = np.bincount(stats[4].astype(int), minlength=nc) # number of targets per class # Print results - pf = '%22s' + '%11i' * 2 + '%11.3g' * 8 # print format - LOGGER.info(pf % ('all', seen, nt.sum(), *metrics.mean_results())) + pf = "%22s" + "%11i" * 2 + "%11.3g" * 8 # print format + LOGGER.info(pf % ("all", seen, nt.sum(), *metrics.mean_results())) if nt.sum() == 0: - LOGGER.warning(f'WARNING ⚠️ no labels found in {task} set, can not compute metrics without labels') + LOGGER.warning(f"WARNING ⚠️ no labels found in {task} set, can not compute metrics without labels") # Print results per class if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats): @@ -353,10 +388,10 @@ def run( LOGGER.info(pf % (names[c], seen, nt[c], *metrics.class_result(i))) # Print speeds - t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image + t = tuple(x.t / seen * 1e3 for x in dt) # speeds per image if not training: shape = (batch_size, 3, imgsz, imgsz) - LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t) + LOGGER.info(f"Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}" % t) # Plots if plots: @@ -367,11 +402,11 @@ def run( # Save JSON if save_json and len(jdict): - w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights - anno_json = str(Path('../datasets/coco/annotations/instances_val2017.json')) # annotations - pred_json = str(save_dir / f'{w}_predictions.json') # predictions - LOGGER.info(f'\nEvaluating pycocotools mAP... saving {pred_json}...') - with open(pred_json, 'w') as f: + w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else "" # weights + anno_json = str(Path("../datasets/coco/annotations/instances_val2017.json")) # annotations + pred_json = str(save_dir / f"{w}_predictions.json") # predictions + LOGGER.info(f"\nEvaluating pycocotools mAP... 
saving {pred_json}...") + with open(pred_json, "w") as f: json.dump(jdict, f) try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb @@ -381,7 +416,7 @@ def run( anno = COCO(anno_json) # init annotations api pred = anno.loadRes(pred_json) # init predictions api results = [] - for eval in COCOeval(anno, pred, 'bbox'), COCOeval(anno, pred, 'segm'): + for eval in COCOeval(anno, pred, "bbox"), COCOeval(anno, pred, "segm"): if is_coco: eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.im_files] # img ID to evaluate eval.evaluate() @@ -390,12 +425,12 @@ def run( results.extend(eval.stats[:2]) # update results (mAP@0.5:0.95, mAP@0.5) map_bbox, map50_bbox, map_mask, map50_mask = results except Exception as e: - LOGGER.info(f'pycocotools unable to run: {e}') + LOGGER.info(f"pycocotools unable to run: {e}") # Return results model.float() # for training if not training: - s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' + s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else "" LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") final_metric = mp_bbox, mr_bbox, map50_bbox, map_bbox, mp_mask, mr_mask, map50_mask, map_mask return (*final_metric, *(loss.cpu() / len(dataloader)).tolist()), metrics.get_maps(nc), t @@ -403,28 +438,28 @@ def run( def parse_opt(): parser = argparse.ArgumentParser() - parser.add_argument('--data', type=str, default=ROOT / 'data/coco128-seg.yaml', help='dataset.yaml path') - parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-seg.pt', help='model path(s)') - parser.add_argument('--batch-size', type=int, default=32, help='batch size') - parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)') - parser.add_argument('--conf-thres', type=float, default=0.001, help='confidence threshold') - parser.add_argument('--iou-thres', type=float, default=0.6, help='NMS IoU threshold') - parser.add_argument('--max-det', type=int, default=300, help='maximum detections per image') - parser.add_argument('--task', default='val', help='train, val, test, speed or study') - parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') - parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') - parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset') - parser.add_argument('--augment', action='store_true', help='augmented inference') - parser.add_argument('--verbose', action='store_true', help='report mAP by class') - parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') - parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt') - parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') - parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file') - parser.add_argument('--project', default=ROOT / 'runs/val-seg', help='save results to project/name') - parser.add_argument('--name', default='exp', help='save to project/name') - parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') - parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') - parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') + parser.add_argument("--data", type=str, default=ROOT / "data/coco128-seg.yaml", help="dataset.yaml path") + parser.add_argument("--weights", nargs="+", type=str, default=ROOT / "yolov5s-seg.pt", help="model path(s)") + parser.add_argument("--batch-size", type=int, default=32, help="batch size") + parser.add_argument("--imgsz", "--img", "--img-size", type=int, default=640, help="inference size (pixels)") + parser.add_argument("--conf-thres", type=float, default=0.001, help="confidence threshold") + parser.add_argument("--iou-thres", type=float, default=0.6, help="NMS IoU threshold") + parser.add_argument("--max-det", type=int, default=300, help="maximum detections per image") + parser.add_argument("--task", default="val", help="train, val, test, speed or study") + parser.add_argument("--device", default="", help="cuda device, i.e. 
0 or 0,1,2,3 or cpu") + parser.add_argument("--workers", type=int, default=8, help="max dataloader workers (per RANK in DDP mode)") + parser.add_argument("--single-cls", action="store_true", help="treat as single-class dataset") + parser.add_argument("--augment", action="store_true", help="augmented inference") + parser.add_argument("--verbose", action="store_true", help="report mAP by class") + parser.add_argument("--save-txt", action="store_true", help="save results to *.txt") + parser.add_argument("--save-hybrid", action="store_true", help="save label+prediction hybrid results to *.txt") + parser.add_argument("--save-conf", action="store_true", help="save confidences in --save-txt labels") + parser.add_argument("--save-json", action="store_true", help="save a COCO-JSON results file") + parser.add_argument("--project", default=ROOT / "runs/val-seg", help="save results to project/name") + parser.add_argument("--name", default="exp", help="save to project/name") + parser.add_argument("--exist-ok", action="store_true", help="existing project/name ok, do not increment") + parser.add_argument("--half", action="store_true", help="use FP16 half-precision inference") + parser.add_argument("--dnn", action="store_true", help="use OpenCV DNN for ONNX inference") opt = parser.parse_args() opt.data = check_yaml(opt.data) # check YAML # opt.save_json |= opt.data.endswith('coco.yaml') @@ -434,40 +469,40 @@ def parse_opt(): def main(opt): - check_requirements(ROOT / 'requirements.txt', exclude=('tensorboard', 'thop')) + check_requirements(ROOT / "requirements.txt", exclude=("tensorboard", "thop")) - if opt.task in ('train', 'val', 'test'): # run normally + if opt.task in ("train", "val", "test"): # run normally if opt.conf_thres > 0.001: # https://github.com/ultralytics/yolov5/issues/1466 - LOGGER.warning(f'WARNING ⚠️ confidence threshold {opt.conf_thres} > 0.001 produces invalid results') + LOGGER.warning(f"WARNING ⚠️ confidence threshold {opt.conf_thres} > 0.001 produces invalid results") if opt.save_hybrid: - LOGGER.warning('WARNING ⚠️ --save-hybrid returns high mAP from hybrid labels, not from predictions alone') + LOGGER.warning("WARNING ⚠️ --save-hybrid returns high mAP from hybrid labels, not from predictions alone") run(**vars(opt)) else: weights = opt.weights if isinstance(opt.weights, list) else [opt.weights] - opt.half = torch.cuda.is_available() and opt.device != 'cpu' # FP16 for fastest results - if opt.task == 'speed': # speed benchmarks + opt.half = torch.cuda.is_available() and opt.device != "cpu" # FP16 for fastest results + if opt.task == "speed": # speed benchmarks # python val.py --task speed --data coco.yaml --batch 1 --weights yolov5n.pt yolov5s.pt... opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False for opt.weights in weights: run(**vars(opt), plots=False) - elif opt.task == 'study': # speed vs mAP benchmarks + elif opt.task == "study": # speed vs mAP benchmarks # python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n.pt yolov5s.pt... 
for opt.weights in weights: - f = f'study_{Path(opt.data).stem}_{Path(opt.weights).stem}.txt' # filename to save to + f = f"study_{Path(opt.data).stem}_{Path(opt.weights).stem}.txt" # filename to save to x, y = list(range(256, 1536 + 128, 128)), [] # x axis (image sizes), y axis for opt.imgsz in x: # img-size - LOGGER.info(f'\nRunning {f} --imgsz {opt.imgsz}...') + LOGGER.info(f"\nRunning {f} --imgsz {opt.imgsz}...") r, _, t = run(**vars(opt), plots=False) y.append(r + t) # results and times - np.savetxt(f, y, fmt='%10.4g') # save - subprocess.run(['zip', '-r', 'study.zip', 'study_*.txt']) + np.savetxt(f, y, fmt="%10.4g") # save + subprocess.run(["zip", "-r", "study.zip", "study_*.txt"]) plot_val_study(x=x) # plot else: raise NotImplementedError(f'--task {opt.task} not in ("train", "val", "test", "speed", "study")') -if __name__ == '__main__': +if __name__ == "__main__": opt = parse_opt() main(opt) diff --git a/train.py b/train.py index 378ff4bd30ea..73297d204393 100644 --- a/train.py +++ b/train.py @@ -1,7 +1,6 @@ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ -Train a YOLOv5 model on a custom dataset. -Models and datasets download automatically from the latest YOLOv5 release. +Train a YOLOv5 model on a custom dataset. Models and datasets download automatically from the latest YOLOv5 release. Usage - Single-GPU training: $ python train.py --data coco128.yaml --weights yolov5s.pt --img 640 # from pretrained (recommended) @@ -53,56 +52,97 @@ from utils.callbacks import Callbacks from utils.dataloaders import create_dataloader from utils.downloads import attempt_download, is_url -from utils.general import (LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_info, - check_git_status, check_img_size, check_requirements, check_suffix, check_yaml, colorstr, - get_latest_run, increment_path, init_seeds, intersect_dicts, labels_to_class_weights, - labels_to_image_weights, methods, one_cycle, print_args, print_mutation, strip_optimizer, - yaml_save) +from utils.general import ( + LOGGER, + TQDM_BAR_FORMAT, + check_amp, + check_dataset, + check_file, + check_git_info, + check_git_status, + check_img_size, + check_requirements, + check_suffix, + check_yaml, + colorstr, + get_latest_run, + increment_path, + init_seeds, + intersect_dicts, + labels_to_class_weights, + labels_to_image_weights, + methods, + one_cycle, + print_args, + print_mutation, + strip_optimizer, + yaml_save, +) from utils.loggers import LOGGERS, Loggers from utils.loggers.comet.comet_utils import check_comet_resume from utils.loss import ComputeLoss from utils.metrics import fitness from utils.plots import plot_evolve -from utils.torch_utils import (EarlyStopping, ModelEMA, de_parallel, select_device, smart_DDP, smart_optimizer, - smart_resume, torch_distributed_zero_first) - -LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html -RANK = int(os.getenv('RANK', -1)) -WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) +from utils.torch_utils import ( + EarlyStopping, + ModelEMA, + de_parallel, + select_device, + smart_DDP, + smart_optimizer, + smart_resume, + torch_distributed_zero_first, +) + +LOCAL_RANK = int(os.getenv("LOCAL_RANK", -1)) # https://pytorch.org/docs/stable/elastic/run.html +RANK = int(os.getenv("RANK", -1)) +WORLD_SIZE = int(os.getenv("WORLD_SIZE", 1)) GIT_INFO = check_git_info() def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictionary - save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, 
workers, freeze = \ - Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \ - opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze - callbacks.run('on_pretrain_routine_start') + save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze = ( + Path(opt.save_dir), + opt.epochs, + opt.batch_size, + opt.weights, + opt.single_cls, + opt.evolve, + opt.data, + opt.cfg, + opt.resume, + opt.noval, + opt.nosave, + opt.workers, + opt.freeze, + ) + callbacks.run("on_pretrain_routine_start") # Directories - w = save_dir / 'weights' # weights dir + w = save_dir / "weights" # weights dir (w.parent if evolve else w).mkdir(parents=True, exist_ok=True) # make dir - last, best = w / 'last.pt', w / 'best.pt' + last, best = w / "last.pt", w / "best.pt" # Hyperparameters if isinstance(hyp, str): - with open(hyp, errors='ignore') as f: + with open(hyp, errors="ignore") as f: hyp = yaml.safe_load(f) # load hyps dict - LOGGER.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items())) + LOGGER.info(colorstr("hyperparameters: ") + ", ".join(f"{k}={v}" for k, v in hyp.items())) opt.hyp = hyp.copy() # for saving hyps to checkpoints # Save run settings if not evolve: - yaml_save(save_dir / 'hyp.yaml', hyp) - yaml_save(save_dir / 'opt.yaml', vars(opt)) + yaml_save(save_dir / "hyp.yaml", hyp) + yaml_save(save_dir / "opt.yaml", vars(opt)) # Loggers data_dict = None if RANK in {-1, 0}: include_loggers = list(LOGGERS) - if getattr(opt, 'ndjson_console', False): - include_loggers.append('ndjson_console') - if getattr(opt, 'ndjson_file', False): - include_loggers.append('ndjson_file') + if getattr(opt, "ndjson_console", False): + include_loggers.append("ndjson_console") + if getattr(opt, "ndjson_file", False): + include_loggers.append("ndjson_file") loggers = Loggers( save_dir=save_dir, @@ -124,39 +164,39 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio # Config plots = not evolve and not opt.noplots # create plots - cuda = device.type != 'cpu' + cuda = device.type != "cpu" init_seeds(opt.seed + 1 + RANK, deterministic=True) with torch_distributed_zero_first(LOCAL_RANK): data_dict = data_dict or check_dataset(data) # check if None - train_path, val_path = data_dict['train'], data_dict['val'] - nc = 1 if single_cls else int(data_dict['nc']) # number of classes - names = {0: 'item'} if single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names - is_coco = isinstance(val_path, str) and val_path.endswith('coco/val2017.txt') # COCO dataset + train_path, val_path = data_dict["train"], data_dict["val"] + nc = 1 if single_cls else int(data_dict["nc"]) # number of classes + names = {0: "item"} if single_cls and len(data_dict["names"]) != 1 else data_dict["names"] # class names + is_coco = isinstance(val_path, str) and val_path.endswith("coco/val2017.txt") # COCO dataset # Model - check_suffix(weights, '.pt') # check weights - pretrained = weights.endswith('.pt') + check_suffix(weights, ".pt") # check weights + pretrained = weights.endswith(".pt") if pretrained: with torch_distributed_zero_first(LOCAL_RANK): weights = attempt_download(weights) # download if not found locally - ckpt = torch.load(weights, map_location='cpu') # load checkpoint to CPU to avoid CUDA memory leak - model = Model(cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create - exclude = ['anchor'] if (cfg or hyp.get('anchors')) and not resume 
else [] # exclude keys - csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 + ckpt = torch.load(weights, map_location="cpu") # load checkpoint to CPU to avoid CUDA memory leak + model = Model(cfg or ckpt["model"].yaml, ch=3, nc=nc, anchors=hyp.get("anchors")).to(device) # create + exclude = ["anchor"] if (cfg or hyp.get("anchors")) and not resume else [] # exclude keys + csd = ckpt["model"].float().state_dict() # checkpoint state_dict as FP32 csd = intersect_dicts(csd, model.state_dict(), exclude=exclude) # intersect model.load_state_dict(csd, strict=False) # load - LOGGER.info(f'Transferred {len(csd)}/{len(model.state_dict())} items from {weights}') # report + LOGGER.info(f"Transferred {len(csd)}/{len(model.state_dict())} items from {weights}") # report else: - model = Model(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create + model = Model(cfg, ch=3, nc=nc, anchors=hyp.get("anchors")).to(device) # create amp = check_amp(model) # check AMP # Freeze - freeze = [f'model.{x}.' for x in (freeze if len(freeze) > 1 else range(freeze[0]))] # layers to freeze + freeze = [f"model.{x}." for x in (freeze if len(freeze) > 1 else range(freeze[0]))] # layers to freeze for k, v in model.named_parameters(): v.requires_grad = True # train all layers # v.register_hook(lambda x: torch.nan_to_num(x)) # NaN to 0 (commented for erratic training results) if any(x in k for x in freeze): - LOGGER.info(f'freezing {k}') + LOGGER.info(f"freezing {k}") v.requires_grad = False # Image size @@ -166,19 +206,19 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio # Batch size if RANK == -1 and batch_size == -1: # single-GPU only, estimate best batch size batch_size = check_train_batch_size(model, imgsz, amp) - loggers.on_params_update({'batch_size': batch_size}) + loggers.on_params_update({"batch_size": batch_size}) # Optimizer nbs = 64 # nominal batch size accumulate = max(round(nbs / batch_size), 1) # accumulate loss before optimizing - hyp['weight_decay'] *= batch_size * accumulate / nbs # scale weight_decay - optimizer = smart_optimizer(model, opt.optimizer, hyp['lr0'], hyp['momentum'], hyp['weight_decay']) + hyp["weight_decay"] *= batch_size * accumulate / nbs # scale weight_decay + optimizer = smart_optimizer(model, opt.optimizer, hyp["lr0"], hyp["momentum"], hyp["weight_decay"]) # Scheduler if opt.cos_lr: - lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf'] + lf = one_cycle(1, hyp["lrf"], epochs) # cosine 1->hyp['lrf'] else: - lf = lambda x: (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear + lf = lambda x: (1 - x / epochs) * (1.0 - hyp["lrf"]) + hyp["lrf"] # linear scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) # plot_lr_scheduler(optimizer, scheduler, epochs) # EMA @@ -194,58 +234,62 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio # DP mode if cuda and RANK == -1 and torch.cuda.device_count() > 1: LOGGER.warning( - 'WARNING ⚠️ DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n' - 'See Multi-GPU Tutorial at https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training to get started.' + "WARNING ⚠️ DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n" + "See Multi-GPU Tutorial at https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training to get started." 
) model = torch.nn.DataParallel(model) # SyncBatchNorm if opt.sync_bn and cuda and RANK != -1: model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device) - LOGGER.info('Using SyncBatchNorm()') + LOGGER.info("Using SyncBatchNorm()") # Trainloader - train_loader, dataset = create_dataloader(train_path, - imgsz, - batch_size // WORLD_SIZE, - gs, - single_cls, - hyp=hyp, - augment=True, - cache=None if opt.cache == 'val' else opt.cache, - rect=opt.rect, - rank=LOCAL_RANK, - workers=workers, - image_weights=opt.image_weights, - quad=opt.quad, - prefix=colorstr('train: '), - shuffle=True, - seed=opt.seed) + train_loader, dataset = create_dataloader( + train_path, + imgsz, + batch_size // WORLD_SIZE, + gs, + single_cls, + hyp=hyp, + augment=True, + cache=None if opt.cache == "val" else opt.cache, + rect=opt.rect, + rank=LOCAL_RANK, + workers=workers, + image_weights=opt.image_weights, + quad=opt.quad, + prefix=colorstr("train: "), + shuffle=True, + seed=opt.seed, + ) labels = np.concatenate(dataset.labels, 0) mlc = int(labels[:, 0].max()) # max label class - assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. Possible class labels are 0-{nc - 1}' + assert mlc < nc, f"Label class {mlc} exceeds nc={nc} in {data}. Possible class labels are 0-{nc - 1}" # Process 0 if RANK in {-1, 0}: - val_loader = create_dataloader(val_path, - imgsz, - batch_size // WORLD_SIZE * 2, - gs, - single_cls, - hyp=hyp, - cache=None if noval else opt.cache, - rect=True, - rank=-1, - workers=workers * 2, - pad=0.5, - prefix=colorstr('val: '))[0] + val_loader = create_dataloader( + val_path, + imgsz, + batch_size // WORLD_SIZE * 2, + gs, + single_cls, + hyp=hyp, + cache=None if noval else opt.cache, + rect=True, + rank=-1, + workers=workers * 2, + pad=0.5, + prefix=colorstr("val: "), + )[0] if not resume: if not opt.noautoanchor: - check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz) # run AutoAnchor + check_anchors(dataset, model=model, thr=hyp["anchor_t"], imgsz=imgsz) # run AutoAnchor model.half().float() # pre-reduce anchor precision - callbacks.run('on_pretrain_routine_end', labels, names) + callbacks.run("on_pretrain_routine_end", labels, names) # DDP mode if cuda and RANK != -1: @@ -253,10 +297,10 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio # Model attributes nl = de_parallel(model).model[-1].nl # number of detection layers (to scale hyps) - hyp['box'] *= 3 / nl # scale to layers - hyp['cls'] *= nc / 80 * 3 / nl # scale to classes and layers - hyp['obj'] *= (imgsz / 640) ** 2 * 3 / nl # scale to image size and layers - hyp['label_smoothing'] = opt.label_smoothing + hyp["box"] *= 3 / nl # scale to layers + hyp["cls"] *= nc / 80 * 3 / nl # scale to classes and layers + hyp["obj"] *= (imgsz / 640) ** 2 * 3 / nl # scale to image size and layers + hyp["label_smoothing"] = opt.label_smoothing model.nc = nc # attach number of classes to model model.hyp = hyp # attach hyperparameters to model model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc # attach class weights @@ -265,7 +309,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio # Start training t0 = time.time() nb = len(train_loader) # number of batches - nw = max(round(hyp['warmup_epochs'] * nb), 100) # number of warmup iterations, max(3 epochs, 100 iterations) + nw = max(round(hyp["warmup_epochs"] * nb), 100) # number of warmup iterations, max(3 epochs, 100 iterations) # nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit 
warmup to < 1/2 of training last_opt_step = -1 maps = np.zeros(nc) # mAP per class @@ -274,13 +318,15 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio scaler = torch.cuda.amp.GradScaler(enabled=amp) stopper, stop = EarlyStopping(patience=opt.patience), False compute_loss = ComputeLoss(model) # init loss class - callbacks.run('on_train_start') - LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n' - f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n' - f"Logging results to {colorstr('bold', save_dir)}\n" - f'Starting training for {epochs} epochs...') + callbacks.run("on_train_start") + LOGGER.info( + f'Image sizes {imgsz} train, {imgsz} val\n' + f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n' + f"Logging results to {colorstr('bold', save_dir)}\n" + f'Starting training for {epochs} epochs...' + ) for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------ - callbacks.run('on_train_epoch_start') + callbacks.run("on_train_epoch_start") model.train() # Update image weights (optional, single-GPU only) @@ -297,12 +343,12 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio if RANK != -1: train_loader.sampler.set_epoch(epoch) pbar = enumerate(train_loader) - LOGGER.info(('\n' + '%11s' * 7) % ('Epoch', 'GPU_mem', 'box_loss', 'obj_loss', 'cls_loss', 'Instances', 'Size')) + LOGGER.info(("\n" + "%11s" * 7) % ("Epoch", "GPU_mem", "box_loss", "obj_loss", "cls_loss", "Instances", "Size")) if RANK in {-1, 0}: pbar = tqdm(pbar, total=nb, bar_format=TQDM_BAR_FORMAT) # progress bar optimizer.zero_grad() for i, (imgs, targets, paths, _) in pbar: # batch ------------------------------------------------------------- - callbacks.run('on_train_batch_start') + callbacks.run("on_train_batch_start") ni = i + nb * epoch # number integrated batches (since train start) imgs = imgs.to(device, non_blocking=True).float() / 255 # uint8 to float32, 0-255 to 0.0-1.0 @@ -313,9 +359,9 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round()) for j, x in enumerate(optimizer.param_groups): # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0 - x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 0 else 0.0, x['initial_lr'] * lf(epoch)]) - if 'momentum' in x: - x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']]) + x["lr"] = np.interp(ni, xi, [hyp["warmup_bias_lr"] if j == 0 else 0.0, x["initial_lr"] * lf(epoch)]) + if "momentum" in x: + x["momentum"] = np.interp(ni, xi, [hyp["warmup_momentum"], hyp["momentum"]]) # Multi-scale if opt.multi_scale: @@ -323,7 +369,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio sf = sz / max(imgs.shape[2:]) # scale factor if sf != 1: ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple) - imgs = nn.functional.interpolate(imgs, size=ns, mode='bilinear', align_corners=False) + imgs = nn.functional.interpolate(imgs, size=ns, mode="bilinear", align_corners=False) # Forward with torch.cuda.amp.autocast(amp): @@ -332,7 +378,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio if RANK != -1: loss *= WORLD_SIZE # gradient averaged between devices in DDP mode if opt.quad: - loss *= 4. 
+ loss *= 4.0 # Backward scaler.scale(loss).backward() @@ -351,35 +397,39 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio # Log if RANK in {-1, 0}: mloss = (mloss * i + loss_items) / (i + 1) # update mean losses - mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G' # (GB) - pbar.set_description(('%11s' * 2 + '%11.4g' * 5) % - (f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1])) - callbacks.run('on_train_batch_end', model, ni, imgs, targets, paths, list(mloss)) + mem = f"{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G" # (GB) + pbar.set_description( + ("%11s" * 2 + "%11.4g" * 5) + % (f"{epoch}/{epochs - 1}", mem, *mloss, targets.shape[0], imgs.shape[-1]) + ) + callbacks.run("on_train_batch_end", model, ni, imgs, targets, paths, list(mloss)) if callbacks.stop_training: return # end batch ------------------------------------------------------------------------------------------------ # Scheduler - lr = [x['lr'] for x in optimizer.param_groups] # for loggers + lr = [x["lr"] for x in optimizer.param_groups] # for loggers scheduler.step() if RANK in {-1, 0}: # mAP - callbacks.run('on_train_epoch_end', epoch=epoch) - ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride', 'class_weights']) + callbacks.run("on_train_epoch_end", epoch=epoch) + ema.update_attr(model, include=["yaml", "nc", "hyp", "names", "stride", "class_weights"]) final_epoch = (epoch + 1 == epochs) or stopper.possible_stop if not noval or final_epoch: # Calculate mAP - results, maps, _ = validate.run(data_dict, - batch_size=batch_size // WORLD_SIZE * 2, - imgsz=imgsz, - half=amp, - model=ema.ema, - single_cls=single_cls, - dataloader=val_loader, - save_dir=save_dir, - plots=False, - callbacks=callbacks, - compute_loss=compute_loss) + results, maps, _ = validate.run( + data_dict, + batch_size=batch_size // WORLD_SIZE * 2, + imgsz=imgsz, + half=amp, + model=ema.ema, + single_cls=single_cls, + dataloader=val_loader, + save_dir=save_dir, + plots=False, + callbacks=callbacks, + compute_loss=compute_loss, + ) # Update best mAP fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95] @@ -387,29 +437,30 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio if fi > best_fitness: best_fitness = fi log_vals = list(mloss) + list(results) + lr - callbacks.run('on_fit_epoch_end', log_vals, epoch, best_fitness, fi) + callbacks.run("on_fit_epoch_end", log_vals, epoch, best_fitness, fi) # Save model if (not nosave) or (final_epoch and not evolve): # if save ckpt = { - 'epoch': epoch, - 'best_fitness': best_fitness, - 'model': deepcopy(de_parallel(model)).half(), - 'ema': deepcopy(ema.ema).half(), - 'updates': ema.updates, - 'optimizer': optimizer.state_dict(), - 'opt': vars(opt), - 'git': GIT_INFO, # {remote, branch, commit} if a git repo - 'date': datetime.now().isoformat()} + "epoch": epoch, + "best_fitness": best_fitness, + "model": deepcopy(de_parallel(model)).half(), + "ema": deepcopy(ema.ema).half(), + "updates": ema.updates, + "optimizer": optimizer.state_dict(), + "opt": vars(opt), + "git": GIT_INFO, # {remote, branch, commit} if a git repo + "date": datetime.now().isoformat(), + } # Save last, best and delete torch.save(ckpt, last) if best_fitness == fi: torch.save(ckpt, best) if opt.save_period > 0 and epoch % opt.save_period == 0: - torch.save(ckpt, w / f'epoch{epoch}.pt') + torch.save(ckpt, w / f"epoch{epoch}.pt") del ckpt - 
callbacks.run('on_model_save', last, epoch, final_epoch, best_fitness, fi) + callbacks.run("on_model_save", last, epoch, final_epoch, best_fitness, fi) # EarlyStopping if RANK != -1: # if DDP training @@ -423,12 +474,12 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio # end epoch ---------------------------------------------------------------------------------------------------- # end training ----------------------------------------------------------------------------------------------------- if RANK in {-1, 0}: - LOGGER.info(f'\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.') + LOGGER.info(f"\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.") for f in last, best: if f.exists(): strip_optimizer(f) # strip optimizers if f is best: - LOGGER.info(f'\nValidating {f}...') + LOGGER.info(f"\nValidating {f}...") results, _, _ = validate.run( data_dict, batch_size=batch_size // WORLD_SIZE * 2, @@ -442,11 +493,12 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio verbose=True, plots=plots, callbacks=callbacks, - compute_loss=compute_loss) # val best model with plots + compute_loss=compute_loss, + ) # val best model with plots if is_coco: - callbacks.run('on_fit_epoch_end', list(mloss) + list(results) + lr, epoch, best_fitness, fi) + callbacks.run("on_fit_epoch_end", list(mloss) + list(results) + lr, epoch, best_fitness, fi) - callbacks.run('on_train_end', last, best, epoch, results) + callbacks.run("on_train_end", last, best, epoch, results) torch.cuda.empty_cache() return results @@ -454,55 +506,54 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio def parse_opt(known=False): parser = argparse.ArgumentParser() - parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='initial weights path') - parser.add_argument('--cfg', type=str, default='', help='model.yaml path') - parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') - parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path') - parser.add_argument('--epochs', type=int, default=100, help='total training epochs') - parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch') - parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)') - parser.add_argument('--rect', action='store_true', help='rectangular training') - parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training') - parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') - parser.add_argument('--noval', action='store_true', help='only validate final epoch') - parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor') - parser.add_argument('--noplots', action='store_true', help='save no plot files') - parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations') - parser.add_argument('--evolve_population', - type=str, - default=ROOT / 'data/hyps', - help='location for loading population') - parser.add_argument('--resume_evolve', type=str, default=None, help='resume evolve from last generation') - parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') - parser.add_argument('--cache', type=str, nargs='?', 
const='ram', help='image --cache ram/disk') - parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training') - parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') - parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%') - parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class') - parser.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW'], default='SGD', help='optimizer') - parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode') - parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') - parser.add_argument('--project', default=ROOT / 'runs/train', help='save to project/name') - parser.add_argument('--name', default='exp', help='save to project/name') - parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') - parser.add_argument('--quad', action='store_true', help='quad dataloader') - parser.add_argument('--cos-lr', action='store_true', help='cosine LR scheduler') - parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon') - parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)') - parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2') - parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)') - parser.add_argument('--seed', type=int, default=0, help='Global training seed') - parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify') + parser.add_argument("--weights", type=str, default=ROOT / "yolov5s.pt", help="initial weights path") + parser.add_argument("--cfg", type=str, default="", help="model.yaml path") + parser.add_argument("--data", type=str, default=ROOT / "data/coco128.yaml", help="dataset.yaml path") + parser.add_argument("--hyp", type=str, default=ROOT / "data/hyps/hyp.scratch-low.yaml", help="hyperparameters path") + parser.add_argument("--epochs", type=int, default=100, help="total training epochs") + parser.add_argument("--batch-size", type=int, default=16, help="total batch size for all GPUs, -1 for autobatch") + parser.add_argument("--imgsz", "--img", "--img-size", type=int, default=640, help="train, val image size (pixels)") + parser.add_argument("--rect", action="store_true", help="rectangular training") + parser.add_argument("--resume", nargs="?", const=True, default=False, help="resume most recent training") + parser.add_argument("--nosave", action="store_true", help="only save final checkpoint") + parser.add_argument("--noval", action="store_true", help="only validate final epoch") + parser.add_argument("--noautoanchor", action="store_true", help="disable AutoAnchor") + parser.add_argument("--noplots", action="store_true", help="save no plot files") + parser.add_argument("--evolve", type=int, nargs="?", const=300, help="evolve hyperparameters for x generations") + parser.add_argument( + "--evolve_population", type=str, default=ROOT / "data/hyps", help="location for loading population" + ) + parser.add_argument("--resume_evolve", type=str, default=None, help="resume evolve from last generation") + parser.add_argument("--bucket", type=str, default="", help="gsutil 
bucket") + parser.add_argument("--cache", type=str, nargs="?", const="ram", help="image --cache ram/disk") + parser.add_argument("--image-weights", action="store_true", help="use weighted image selection for training") + parser.add_argument("--device", default="", help="cuda device, i.e. 0 or 0,1,2,3 or cpu") + parser.add_argument("--multi-scale", action="store_true", help="vary img-size +/- 50%%") + parser.add_argument("--single-cls", action="store_true", help="train multi-class data as single-class") + parser.add_argument("--optimizer", type=str, choices=["SGD", "Adam", "AdamW"], default="SGD", help="optimizer") + parser.add_argument("--sync-bn", action="store_true", help="use SyncBatchNorm, only available in DDP mode") + parser.add_argument("--workers", type=int, default=8, help="max dataloader workers (per RANK in DDP mode)") + parser.add_argument("--project", default=ROOT / "runs/train", help="save to project/name") + parser.add_argument("--name", default="exp", help="save to project/name") + parser.add_argument("--exist-ok", action="store_true", help="existing project/name ok, do not increment") + parser.add_argument("--quad", action="store_true", help="quad dataloader") + parser.add_argument("--cos-lr", action="store_true", help="cosine LR scheduler") + parser.add_argument("--label-smoothing", type=float, default=0.0, help="Label smoothing epsilon") + parser.add_argument("--patience", type=int, default=100, help="EarlyStopping patience (epochs without improvement)") + parser.add_argument("--freeze", nargs="+", type=int, default=[0], help="Freeze layers: backbone=10, first3=0 1 2") + parser.add_argument("--save-period", type=int, default=-1, help="Save checkpoint every x epochs (disabled if < 1)") + parser.add_argument("--seed", type=int, default=0, help="Global training seed") + parser.add_argument("--local_rank", type=int, default=-1, help="Automatic DDP Multi-GPU argument, do not modify") # Logger arguments - parser.add_argument('--entity', default=None, help='Entity') - parser.add_argument('--upload_dataset', nargs='?', const=True, default=False, help='Upload data, "val" option') - parser.add_argument('--bbox_interval', type=int, default=-1, help='Set bounding-box image logging interval') - parser.add_argument('--artifact_alias', type=str, default='latest', help='Version of dataset artifact to use') + parser.add_argument("--entity", default=None, help="Entity") + parser.add_argument("--upload_dataset", nargs="?", const=True, default=False, help='Upload data, "val" option') + parser.add_argument("--bbox_interval", type=int, default=-1, help="Set bounding-box image logging interval") + parser.add_argument("--artifact_alias", type=str, default="latest", help="Version of dataset artifact to use") # NDJSON logging - parser.add_argument('--ndjson-console', action='store_true', help='Log ndjson to console') - parser.add_argument('--ndjson-file', action='store_true', help='Log ndjson to file') + parser.add_argument("--ndjson-console", action="store_true", help="Log ndjson to console") + parser.add_argument("--ndjson-file", action="store_true", help="Log ndjson to file") return parser.parse_known_args()[0] if known else parser.parse_args() @@ -512,47 +563,53 @@ def main(opt, callbacks=Callbacks()): if RANK in {-1, 0}: print_args(vars(opt)) check_git_status() - check_requirements(ROOT / 'requirements.txt') + check_requirements(ROOT / "requirements.txt") # Resume (from specified or most recent last.pt) if opt.resume and not check_comet_resume(opt) and not opt.evolve: last = 
Path(check_file(opt.resume) if isinstance(opt.resume, str) else get_latest_run()) - opt_yaml = last.parent.parent / 'opt.yaml' # train options yaml + opt_yaml = last.parent.parent / "opt.yaml" # train options yaml opt_data = opt.data # original dataset if opt_yaml.is_file(): - with open(opt_yaml, errors='ignore') as f: + with open(opt_yaml, errors="ignore") as f: d = yaml.safe_load(f) else: - d = torch.load(last, map_location='cpu')['opt'] + d = torch.load(last, map_location="cpu")["opt"] opt = argparse.Namespace(**d) # replace - opt.cfg, opt.weights, opt.resume = '', str(last), True # reinstate + opt.cfg, opt.weights, opt.resume = "", str(last), True # reinstate if is_url(opt_data): opt.data = check_file(opt_data) # avoid HUB resume auth timeout else: - opt.data, opt.cfg, opt.hyp, opt.weights, opt.project = \ - check_file(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp), str(opt.weights), str(opt.project) # checks - assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified' + opt.data, opt.cfg, opt.hyp, opt.weights, opt.project = ( + check_file(opt.data), + check_yaml(opt.cfg), + check_yaml(opt.hyp), + str(opt.weights), + str(opt.project), + ) # checks + assert len(opt.cfg) or len(opt.weights), "either --cfg or --weights must be specified" if opt.evolve: - if opt.project == str(ROOT / 'runs/train'): # if default project name, rename to runs/evolve - opt.project = str(ROOT / 'runs/evolve') + if opt.project == str(ROOT / "runs/train"): # if default project name, rename to runs/evolve + opt.project = str(ROOT / "runs/evolve") opt.exist_ok, opt.resume = opt.resume, False # pass resume to exist_ok and disable resume - if opt.name == 'cfg': + if opt.name == "cfg": opt.name = Path(opt.cfg).stem # use model.yaml as name opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # DDP mode device = select_device(opt.device, batch_size=opt.batch_size) if LOCAL_RANK != -1: - msg = 'is not compatible with YOLOv5 Multi-GPU DDP training' - assert not opt.image_weights, f'--image-weights {msg}' - assert not opt.evolve, f'--evolve {msg}' - assert opt.batch_size != -1, f'AutoBatch with --batch-size -1 {msg}, please pass a valid --batch-size' - assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE' - assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' + msg = "is not compatible with YOLOv5 Multi-GPU DDP training" + assert not opt.image_weights, f"--image-weights {msg}" + assert not opt.evolve, f"--evolve {msg}" + assert opt.batch_size != -1, f"AutoBatch with --batch-size -1 {msg}, please pass a valid --batch-size" + assert opt.batch_size % WORLD_SIZE == 0, f"--batch-size {opt.batch_size} must be multiple of WORLD_SIZE" + assert torch.cuda.device_count() > LOCAL_RANK, "insufficient CUDA devices for DDP command" torch.cuda.set_device(LOCAL_RANK) - device = torch.device('cuda', LOCAL_RANK) - dist.init_process_group(backend='nccl' if dist.is_nccl_available() else 'gloo', - timeout=timedelta(seconds=10800)) + device = torch.device("cuda", LOCAL_RANK) + dist.init_process_group( + backend="nccl" if dist.is_nccl_available() else "gloo", timeout=timedelta(seconds=10800) + ) # Train if not opt.evolve: @@ -562,35 +619,36 @@ def main(opt, callbacks=Callbacks()): else: # Hyperparameter evolution metadata (including this hyperparameter True-False, lower_limit, upper_limit) meta = { - 'lr0': (False, 1e-5, 1e-1), # initial learning rate (SGD=1E-2, Adam=1E-3) - 'lrf': (False, 
0.01, 1.0),  # final OneCycleLR learning rate (lr0 * lrf)
-        'momentum': (False, 0.6, 0.98),  # SGD momentum/Adam beta1
-        'weight_decay': (False, 0.0, 0.001),  # optimizer weight decay
-        'warmup_epochs': (False, 0.0, 5.0),  # warmup epochs (fractions ok)
-        'warmup_momentum': (False, 0.0, 0.95),  # warmup initial momentum
-        'warmup_bias_lr': (False, 0.0, 0.2),  # warmup initial bias lr
-        'box': (False, 0.02, 0.2),  # box loss gain
-        'cls': (False, 0.2, 4.0),  # cls loss gain
-        'cls_pw': (False, 0.5, 2.0),  # cls BCELoss positive_weight
-        'obj': (False, 0.2, 4.0),  # obj loss gain (scale with pixels)
-        'obj_pw': (False, 0.5, 2.0),  # obj BCELoss positive_weight
-        'iou_t': (False, 0.1, 0.7),  # IoU training threshold
-        'anchor_t': (False, 2.0, 8.0),  # anchor-multiple threshold
-        'anchors': (False, 2.0, 10.0),  # anchors per output grid (0 to ignore)
-        'fl_gamma': (False, 0.0, 2.0),  # focal loss gamma (efficientDet default gamma=1.5)
-        'hsv_h': (True, 0.0, 0.1),  # image HSV-Hue augmentation (fraction)
-        'hsv_s': (True, 0.0, 0.9),  # image HSV-Saturation augmentation (fraction)
-        'hsv_v': (True, 0.0, 0.9),  # image HSV-Value augmentation (fraction)
-        'degrees': (True, 0.0, 45.0),  # image rotation (+/- deg)
-        'translate': (True, 0.0, 0.9),  # image translation (+/- fraction)
-        'scale': (True, 0.0, 0.9),  # image scale (+/- gain)
-        'shear': (True, 0.0, 10.0),  # image shear (+/- deg)
-        'perspective': (True, 0.0, 0.001),  # image perspective (+/- fraction), range 0-0.001
-        'flipud': (True, 0.0, 1.0),  # image flip up-down (probability)
-        'fliplr': (True, 0.0, 1.0),  # image flip left-right (probability)
-        'mosaic': (True, 0.0, 1.0),  # image mosaic (probability)
-        'mixup': (True, 0.0, 1.0),  # image mixup (probability)
-        'copy_paste': (True, 0.0, 1.0)}  # segment copy-paste (probability)
+        "lr0": (False, 1e-5, 1e-1),  # initial learning rate (SGD=1E-2, Adam=1E-3)
+        "lrf": (False, 0.01, 1.0),  # final OneCycleLR learning rate (lr0 * lrf)
+        "momentum": (False, 0.6, 0.98),  # SGD momentum/Adam beta1
+        "weight_decay": (False, 0.0, 0.001),  # optimizer weight decay
+        "warmup_epochs": (False, 0.0, 5.0),  # warmup epochs (fractions ok)
+        "warmup_momentum": (False, 0.0, 0.95),  # warmup initial momentum
+        "warmup_bias_lr": (False, 0.0, 0.2),  # warmup initial bias lr
+        "box": (False, 0.02, 0.2),  # box loss gain
+        "cls": (False, 0.2, 4.0),  # cls loss gain
+        "cls_pw": (False, 0.5, 2.0),  # cls BCELoss positive_weight
+        "obj": (False, 0.2, 4.0),  # obj loss gain (scale with pixels)
+        "obj_pw": (False, 0.5, 2.0),  # obj BCELoss positive_weight
+        "iou_t": (False, 0.1, 0.7),  # IoU training threshold
+        "anchor_t": (False, 2.0, 8.0),  # anchor-multiple threshold
+        "anchors": (False, 2.0, 10.0),  # anchors per output grid (0 to ignore)
+        "fl_gamma": (False, 0.0, 2.0),  # focal loss gamma (efficientDet default gamma=1.5)
+        "hsv_h": (True, 0.0, 0.1),  # image HSV-Hue augmentation (fraction)
+        "hsv_s": (True, 0.0, 0.9),  # image HSV-Saturation augmentation (fraction)
+        "hsv_v": (True, 0.0, 0.9),  # image HSV-Value augmentation (fraction)
+        "degrees": (True, 0.0, 45.0),  # image rotation (+/- deg)
+        "translate": (True, 0.0, 0.9),  # image translation (+/- fraction)
+        "scale": (True, 0.0, 0.9),  # image scale (+/- gain)
+        "shear": (True, 0.0, 10.0),  # image shear (+/- deg)
+        "perspective": (True, 0.0, 0.001),  # image perspective (+/- fraction), range 0-0.001
+        "flipud": (True, 0.0, 1.0),  # image flip up-down (probability)
+        "fliplr": (True, 0.0, 1.0),  # image flip left-right (probability)
+        "mosaic": (True, 0.0, 1.0),  # image mosaic (probability)
+        "mixup": (True, 
0.0, 1.0), # image mixup (probability) + "copy_paste": (True, 0.0, 1.0), + } # segment copy-paste (probability) # GA configs pop_size = 50 @@ -603,22 +661,25 @@ def main(opt, callbacks=Callbacks()): tournament_size_min = 2 tournament_size_max = 10 - with open(opt.hyp, errors='ignore') as f: + with open(opt.hyp, errors="ignore") as f: hyp = yaml.safe_load(f) # load hyps dict - if 'anchors' not in hyp: # anchors commented in hyp.yaml - hyp['anchors'] = 3 + if "anchors" not in hyp: # anchors commented in hyp.yaml + hyp["anchors"] = 3 if opt.noautoanchor: - del hyp['anchors'], meta['anchors'] + del hyp["anchors"], meta["anchors"] opt.noval, opt.nosave, save_dir = True, True, Path(opt.save_dir) # only val/save final epoch # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices - evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv' + evolve_yaml, evolve_csv = save_dir / "hyp_evolve.yaml", save_dir / "evolve.csv" if opt.bucket: # download evolve.csv if exists - subprocess.run([ - 'gsutil', - 'cp', - f'gs://{opt.bucket}/evolve.csv', - str(evolve_csv), ]) + subprocess.run( + [ + "gsutil", + "cp", + f"gs://{opt.bucket}/evolve.csv", + str(evolve_csv), + ] + ) # Delete the items in meta dictionary whose first value is False del_ = [] @@ -644,8 +705,8 @@ def main(opt, callbacks=Callbacks()): # If resuming evolution from a previous checkpoint if opt.resume_evolve is not None: - assert os.path.isfile(ROOT / opt.resume_evolve), 'evolve population path is wrong!' - with open(ROOT / opt.resume_evolve, errors='ignore') as f: + assert os.path.isfile(ROOT / opt.resume_evolve), "evolve population path is wrong!" + with open(ROOT / opt.resume_evolve, errors="ignore") as f: evolve_population = yaml.safe_load(f) for value in evolve_population.values(): value = np.array([value[k] for k in hyp_GA.keys()]) @@ -653,7 +714,7 @@ def main(opt, callbacks=Callbacks()): # If not resuming from a previous checkpoint, generate initial values from .yaml files in opt.evolve_population else: - yaml_files = [f for f in os.listdir(opt.evolve_population) if f.endswith('.yaml')] + yaml_files = [f for f in os.listdir(opt.evolve_population) if f.endswith(".yaml")] for file_name in yaml_files: with open(os.path.join(opt.evolve_population, file_name)) as yaml_file: value = yaml.safe_load(yaml_file) @@ -661,27 +722,28 @@ def main(opt, callbacks=Callbacks()): initial_values.append(list(value)) # Generate random values within the search space for the rest of the population - if (initial_values is None): + if initial_values is None: population = [generate_individual(gene_ranges, len(hyp_GA)) for i in range(pop_size)] else: - if (pop_size > 1): + if pop_size > 1: population = [ - generate_individual(gene_ranges, len(hyp_GA)) for i in range(pop_size - len(initial_values))] + generate_individual(gene_ranges, len(hyp_GA)) for i in range(pop_size - len(initial_values)) + ] for initial_value in initial_values: population = [initial_value] + population # Run the genetic algorithm for a fixed number of generations list_keys = list(hyp_GA.keys()) for generation in range(opt.evolve): - if (generation >= 1): + if generation >= 1: save_dict = {} for i in range(len(population)): little_dict = {} for j in range(len(population[i])): little_dict[list_keys[j]] = float(population[i][j]) - save_dict['gen' + str(generation) + 'number' + str(i)] = little_dict + save_dict["gen" + str(generation) + "number" + str(i)] = little_dict - with open(save_dir / 'evolve_population.yaml', 'w') as outfile: + with 
open(save_dir / "evolve_population.yaml", "w") as outfile: yaml.dump(save_dict, outfile, default_flow_style=False) # Adaptive elite size @@ -695,8 +757,15 @@ def main(opt, callbacks=Callbacks()): results = train(hyp.copy(), opt, device, callbacks) callbacks = Callbacks() # Write mutation results - keys = ('metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', - 'val/box_loss', 'val/obj_loss', 'val/cls_loss') + keys = ( + "metrics/precision", + "metrics/recall", + "metrics/mAP_0.5", + "metrics/mAP_0.5:0.95", + "val/box_loss", + "val/obj_loss", + "val/cls_loss", + ) print_mutation(keys, results, hyp.copy(), save_dir, opt.bucket) fitness_scores.append(results[2]) @@ -704,8 +773,10 @@ def main(opt, callbacks=Callbacks()): selected_indices = [] for i in range(pop_size - elite_size): # Adaptive tournament size - tournament_size = max(max(2, tournament_size_min), - int(min(tournament_size_max, pop_size) - (generation / (opt.evolve / 10)))) + tournament_size = max( + max(2, tournament_size_min), + int(min(tournament_size_max, pop_size) - (generation / (opt.evolve / 10))), + ) # Perform tournament selection to choose the best individual tournament_indices = random.sample(range(pop_size), tournament_size) tournament_fitness = [fitness_scores[j] for j in tournament_indices] @@ -721,16 +792,18 @@ def main(opt, callbacks=Callbacks()): parent1_index = selected_indices[random.randint(0, pop_size - 1)] parent2_index = selected_indices[random.randint(0, pop_size - 1)] # Adaptive crossover rate - crossover_rate = max(crossover_rate_min, - min(crossover_rate_max, crossover_rate_max - (generation / opt.evolve))) + crossover_rate = max( + crossover_rate_min, min(crossover_rate_max, crossover_rate_max - (generation / opt.evolve)) + ) if random.uniform(0, 1) < crossover_rate: crossover_point = random.randint(1, len(hyp_GA) - 1) child = population[parent1_index][:crossover_point] + population[parent2_index][crossover_point:] else: child = population[parent1_index] # Adaptive mutation rate - mutation_rate = max(mutation_rate_min, - min(mutation_rate_max, mutation_rate_max - (generation / opt.evolve))) + mutation_rate = max( + mutation_rate_min, min(mutation_rate_max, mutation_rate_max - (generation / opt.evolve)) + ) for j in range(len(hyp_GA)): if random.uniform(0, 1) < mutation_rate: child[j] += random.uniform(-0.1, 0.1) @@ -741,12 +814,14 @@ def main(opt, callbacks=Callbacks()): # Print the best solution found best_index = fitness_scores.index(max(fitness_scores)) best_individual = population[best_index] - print('Best solution found:', best_individual) + print("Best solution found:", best_individual) # Plot results plot_evolve(evolve_csv) - LOGGER.info(f'Hyperparameter evolution finished {opt.evolve} generations\n' - f"Results saved to {colorstr('bold', save_dir)}\n" - f'Usage example: $ python train.py --hyp {evolve_yaml}') + LOGGER.info( + f'Hyperparameter evolution finished {opt.evolve} generations\n' + f"Results saved to {colorstr('bold', save_dir)}\n" + f'Usage example: $ python train.py --hyp {evolve_yaml}' + ) def generate_individual(input_ranges, individual_length): @@ -766,6 +841,6 @@ def run(**kwargs): return opt -if __name__ == '__main__': +if __name__ == "__main__": opt = parse_opt() main(opt) diff --git a/utils/__init__.py b/utils/__init__.py index 4c7379c87466..eff756e2b90e 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -1,21 +1,19 @@ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license -""" -utils/initialization -""" +"""utils/initialization.""" import contextlib 
import platform import threading -def emojis(str=''): +def emojis(str=""): # Return platform-dependent emoji-safe version of string - return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str + return str.encode().decode("ascii", "ignore") if platform.system() == "Windows" else str class TryExcept(contextlib.ContextDecorator): # YOLOv5 TryExcept class. Usage: @TryExcept() decorator or 'with TryExcept():' context manager - def __init__(self, msg=''): + def __init__(self, msg=""): self.msg = msg def __enter__(self): @@ -43,13 +41,13 @@ def join_threads(verbose=False): for t in threading.enumerate(): if t is not main_thread: if verbose: - print(f'Joining thread {t.name}') + print(f"Joining thread {t.name}") t.join() def notebook_init(verbose=True): # Check system software and hardware - print('Checking setup...') + print("Checking setup...") import os import shutil @@ -63,24 +61,25 @@ def notebook_init(verbose=True): import psutil - if check_requirements('wandb', install=False): - os.system('pip uninstall -y wandb') # eliminate unexpected account creation prompt with infinite hang + if check_requirements("wandb", install=False): + os.system("pip uninstall -y wandb") # eliminate unexpected account creation prompt with infinite hang if is_colab(): - shutil.rmtree('/content/sample_data', ignore_errors=True) # remove colab /sample_data directory + shutil.rmtree("/content/sample_data", ignore_errors=True) # remove colab /sample_data directory # System info display = None if verbose: gb = 1 << 30 # bytes to GiB (1024 ** 3) ram = psutil.virtual_memory().total - total, used, free = shutil.disk_usage('/') + total, used, free = shutil.disk_usage("/") with contextlib.suppress(Exception): # clear display if ipython is installed from IPython import display + display.clear_output() - s = f'({os.cpu_count()} CPUs, {ram / gb:.1f} GB RAM, {(total - free) / gb:.1f}/{total / gb:.1f} GB disk)' + s = f"({os.cpu_count()} CPUs, {ram / gb:.1f} GB RAM, {(total - free) / gb:.1f}/{total / gb:.1f} GB disk)" else: - s = '' + s = "" select_device(newline=False) - print(emojis(f'Setup complete ✅ {s}')) + print(emojis(f"Setup complete ✅ {s}")) return display diff --git a/utils/activations.py b/utils/activations.py index e4d4bbde5ec8..616002f06a73 100644 --- a/utils/activations.py +++ b/utils/activations.py @@ -1,7 +1,5 @@ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license -""" -Activation functions -""" +"""Activation functions.""" import torch import torch.nn as nn @@ -33,7 +31,6 @@ def forward(x): class MemoryEfficientMish(nn.Module): # Mish activation memory-efficient class F(torch.autograd.Function): - @staticmethod def forward(ctx, x): ctx.save_for_backward(x) @@ -62,7 +59,7 @@ def forward(self, x): class AconC(nn.Module): - r""" ACON activation (activate or not) + r"""ACON activation (activate or not) AconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is a learnable parameter according to "Activate or Not: Learning Customized Activation" . """ @@ -79,7 +76,7 @@ def forward(self, x): class MetaAconC(nn.Module): - r""" ACON activation (activate or not) + r"""ACON activation (activate or not) MetaAconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is generated by a small network according to "Activate or Not: Learning Customized Activation" . 
""" diff --git a/utils/augmentations.py b/utils/augmentations.py index 1e609303e209..b3b9524320d0 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -1,7 +1,5 @@ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license -""" -Image augmentation functions -""" +"""Image augmentation functions.""" import math import random @@ -23,10 +21,11 @@ class Albumentations: # YOLOv5 Albumentations class (optional, only used if package is installed) def __init__(self, size=640): self.transform = None - prefix = colorstr('albumentations: ') + prefix = colorstr("albumentations: ") try: import albumentations as A - check_version(A.__version__, '1.0.3', hard=True) # version requirement + + check_version(A.__version__, "1.0.3", hard=True) # version requirement T = [ A.RandomResizedCrop(height=size, width=size, scale=(0.8, 1.0), ratio=(0.9, 1.11), p=0.0), @@ -36,19 +35,20 @@ def __init__(self, size=640): A.CLAHE(p=0.01), A.RandomBrightnessContrast(p=0.0), A.RandomGamma(p=0.0), - A.ImageCompression(quality_lower=75, p=0.0)] # transforms - self.transform = A.Compose(T, bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels'])) + A.ImageCompression(quality_lower=75, p=0.0), + ] # transforms + self.transform = A.Compose(T, bbox_params=A.BboxParams(format="yolo", label_fields=["class_labels"])) - LOGGER.info(prefix + ', '.join(f'{x}'.replace('always_apply=False, ', '') for x in T if x.p)) + LOGGER.info(prefix + ", ".join(f"{x}".replace("always_apply=False, ", "") for x in T if x.p)) except ImportError: # package not installed, skip pass except Exception as e: - LOGGER.info(f'{prefix}{e}') + LOGGER.info(f"{prefix}{e}") def __call__(self, im, labels, p=1.0): if self.transform and random.random() < p: new = self.transform(image=im, bboxes=labels[:, 1:], class_labels=labels[:, 0]) # transformed - im, labels = new['image'], np.array([[c, *b] for c, b in zip(new['class_labels'], new['bboxes'])]) + im, labels = new["image"], np.array([[c, *b] for c, b in zip(new["class_labels"], new["bboxes"])]) return im, labels @@ -97,7 +97,7 @@ def replicate(im, labels): boxes = labels[:, 1:].astype(int) x1, y1, x2, y2 = boxes.T s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels) - for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices + for i in s.argsort()[: round(s.size * 0.5)]: # smallest indices x1b, y1b, x2b, y2b = boxes[i] bh, bw = y2b - y1b, x2b - x1b yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y @@ -141,15 +141,9 @@ def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleF return im, ratio, (dw, dh) -def random_perspective(im, - targets=(), - segments=(), - degrees=10, - translate=.1, - scale=.1, - shear=10, - perspective=0.0, - border=(0, 0)): +def random_perspective( + im, targets=(), segments=(), degrees=10, translate=0.1, scale=0.1, shear=10, perspective=0.0, border=(0, 0) +): # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(0.1, 0.1), scale=(0.9, 1.1), shear=(-10, 10)) # targets = [cls, xyxy] @@ -303,50 +297,52 @@ def box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16): def classify_albumentations( - augment=True, - size=224, - scale=(0.08, 1.0), - ratio=(0.75, 1.0 / 0.75), # 0.75, 1.33 - hflip=0.5, - vflip=0.0, - jitter=0.4, - mean=IMAGENET_MEAN, - std=IMAGENET_STD, - auto_aug=False): + augment=True, + size=224, + scale=(0.08, 1.0), + ratio=(0.75, 1.0 / 0.75), # 0.75, 1.33 + hflip=0.5, + vflip=0.0, + jitter=0.4, + mean=IMAGENET_MEAN, + std=IMAGENET_STD, + auto_aug=False, +): # YOLOv5 
classification Albumentations (optional, only used if package is installed)
-    prefix = colorstr('albumentations: ')
+    prefix = colorstr("albumentations: ")
     try:
         import albumentations as A
         from albumentations.pytorch import ToTensorV2
-        check_version(A.__version__, '1.0.3', hard=True)  # version requirement
+
+        check_version(A.__version__, "1.0.3", hard=True)  # version requirement
         if augment:  # Resize and crop
             T = [A.RandomResizedCrop(height=size, width=size, scale=scale, ratio=ratio)]
             if auto_aug:
                 # TODO: implement AugMix, AutoAug & RandAug in albumentation
-                LOGGER.info(f'{prefix}auto augmentations are currently not supported')
+                LOGGER.info(f"{prefix}auto augmentations are currently not supported")
             else:
                 if hflip > 0:
                     T += [A.HorizontalFlip(p=hflip)]
                 if vflip > 0:
                     T += [A.VerticalFlip(p=vflip)]
                 if jitter > 0:
-                    color_jitter = (float(jitter), ) * 3  # repeat value for brightness, contrast, satuaration, 0 hue
+                    color_jitter = (float(jitter),) * 3  # repeat value for brightness, contrast, saturation, 0 hue
                     T += [A.ColorJitter(*color_jitter, 0)]
         else:  # Use fixed crop for eval set (reproducibility)
             T = [A.SmallestMaxSize(max_size=size), A.CenterCrop(height=size, width=size)]
         T += [A.Normalize(mean=mean, std=std), ToTensorV2()]  # Normalize and convert to Tensor
-        LOGGER.info(prefix + ', '.join(f'{x}'.replace('always_apply=False, ', '') for x in T if x.p))
+        LOGGER.info(prefix + ", ".join(f"{x}".replace("always_apply=False, ", "") for x in T if x.p))
         return A.Compose(T)

     except ImportError:  # package not installed, skip
-        LOGGER.warning(f'{prefix}⚠️ not found, install with `pip install albumentations` (recommended)')
+        LOGGER.warning(f"{prefix}⚠️ not found, install with `pip install albumentations` (recommended)")
     except Exception as e:
-        LOGGER.info(f'{prefix}{e}')
+        LOGGER.info(f"{prefix}{e}")


 def classify_transforms(size=224):
     # Transforms to apply if albumentations not installed
-    assert isinstance(size, int), f'ERROR: classify_transforms size {size} must be integer, not (list, tuple)'
+    assert isinstance(size, int), f"ERROR: classify_transforms size {size} must be integer, not (list, tuple)"
     # T.Compose([T.ToTensor(), T.Resize(size), T.CenterCrop(size), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)])
     return T.Compose([CenterCrop(size), ToTensor(), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)])
@@ -366,7 +362,7 @@ def __call__(self, im):  # im = np.array HWC
         hs, ws = (math.ceil(x / self.stride) * self.stride for x in (h, w)) if self.auto else self.h, self.w
         top, left = round((hs - h) / 2 - 0.1), round((ws - w) / 2 - 0.1)
         im_out = np.full((self.h, self.w, 3), 114, dtype=im.dtype)
-        im_out[top:top + h, left:left + w] = cv2.resize(im, (w, h), interpolation=cv2.INTER_LINEAR)
+        im_out[top : top + h, left : left + w] = cv2.resize(im, (w, h), interpolation=cv2.INTER_LINEAR)
         return im_out
@@ -380,7 +376,7 @@ def __call__(self, im):  # im = np.array HWC
         imh, imw = im.shape[:2]
         m = min(imh, imw)  # min dimension
         top, left = (imh - m) // 2, (imw - m) // 2
-        return cv2.resize(im[top:top + m, left:left + m], (self.w, self.h), interpolation=cv2.INTER_LINEAR)
+        return cv2.resize(im[top : top + m, left : left + m], (self.w, self.h), interpolation=cv2.INTER_LINEAR)


 class ToTensor:
diff --git a/utils/autoanchor.py b/utils/autoanchor.py
index 4c11ab3decec..89e4d97fdcd5 100644
--- a/utils/autoanchor.py
+++ b/utils/autoanchor.py
@@ -1,7 +1,5 @@
 # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
-"""
-AutoAnchor utils
-"""
+"""AutoAnchor utils."""

 import random
@@ -13,7 +11,7 @@
 from utils import TryExcept
 from utils.general import
LOGGER, TQDM_BAR_FORMAT, colorstr -PREFIX = colorstr('AutoAnchor: ') +PREFIX = colorstr("AutoAnchor: ") def check_anchor_order(m): @@ -22,14 +20,14 @@ def check_anchor_order(m): da = a[-1] - a[0] # delta a ds = m.stride[-1] - m.stride[0] # delta s if da and (da.sign() != ds.sign()): # same order - LOGGER.info(f'{PREFIX}Reversing anchor order') + LOGGER.info(f"{PREFIX}Reversing anchor order") m.anchors[:] = m.anchors.flip(0) -@TryExcept(f'{PREFIX}ERROR') +@TryExcept(f"{PREFIX}ERROR") def check_anchors(dataset, model, thr=4.0, imgsz=640): # Check anchor fit to data, recompute if necessary - m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect() + m = model.module.model[-1] if hasattr(model, "module") else model.model[-1] # Detect() shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True) scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1)) # augment scale wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float() # wh @@ -45,11 +43,11 @@ def metric(k): # compute metric stride = m.stride.to(m.anchors.device).view(-1, 1, 1) # model strides anchors = m.anchors.clone() * stride # current anchors bpr, aat = metric(anchors.cpu().view(-1, 2)) - s = f'\n{PREFIX}{aat:.2f} anchors/target, {bpr:.3f} Best Possible Recall (BPR). ' + s = f"\n{PREFIX}{aat:.2f} anchors/target, {bpr:.3f} Best Possible Recall (BPR). " if bpr > 0.98: # threshold to recompute - LOGGER.info(f'{s}Current anchors are a good fit to dataset ✅') + LOGGER.info(f"{s}Current anchors are a good fit to dataset ✅") else: - LOGGER.info(f'{s}Anchors are a poor fit to dataset ⚠️, attempting to improve...') + LOGGER.info(f"{s}Anchors are a poor fit to dataset ⚠️, attempting to improve...") na = m.anchors.numel() // 2 # number of anchors anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False) new_bpr = metric(anchors)[0] @@ -58,28 +56,29 @@ def metric(k): # compute metric m.anchors[:] = anchors.clone().view_as(m.anchors) check_anchor_order(m) # must be in pixel-space (not grid-space) m.anchors /= stride - s = f'{PREFIX}Done ✅ (optional: update model *.yaml to use these anchors in the future)' + s = f"{PREFIX}Done ✅ (optional: update model *.yaml to use these anchors in the future)" else: - s = f'{PREFIX}Done ⚠️ (original anchors better than new anchors, proceeding with original anchors)' + s = f"{PREFIX}Done ⚠️ (original anchors better than new anchors, proceeding with original anchors)" LOGGER.info(s) -def kmean_anchors(dataset='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True): - """ Creates kmeans-evolved anchors from training dataset +def kmean_anchors(dataset="./data/coco128.yaml", n=9, img_size=640, thr=4.0, gen=1000, verbose=True): + """ + Creates kmeans-evolved anchors from training dataset. 
- Arguments: - dataset: path to data.yaml, or a loaded dataset - n: number of anchors - img_size: image size used for training - thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0 - gen: generations to evolve anchors using genetic algorithm - verbose: print all results + Arguments: + dataset: path to data.yaml, or a loaded dataset + n: number of anchors + img_size: image size used for training + thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0 + gen: generations to evolve anchors using genetic algorithm + verbose: print all results - Return: - k: kmeans evolved anchors + Return: + k: kmeans evolved anchors - Usage: - from utils.autoanchor import *; _ = kmean_anchors() + Usage: + from utils.autoanchor import *; _ = kmean_anchors() """ from scipy.cluster.vq import kmeans @@ -100,20 +99,23 @@ def print_results(k, verbose=True): k = k[np.argsort(k.prod(1))] # sort small to large x, best = metric(k, wh0) bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n # best possible recall, anch > thr - s = f'{PREFIX}thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr\n' \ - f'{PREFIX}n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, ' \ - f'past_thr={x[x > thr].mean():.3f}-mean: ' + s = ( + f"{PREFIX}thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr\n" + f"{PREFIX}n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, " + f"past_thr={x[x > thr].mean():.3f}-mean: " + ) for x in k: - s += '%i,%i, ' % (round(x[0]), round(x[1])) + s += "%i,%i, " % (round(x[0]), round(x[1])) if verbose: LOGGER.info(s[:-2]) return k if isinstance(dataset, str): # *.yaml file - with open(dataset, errors='ignore') as f: + with open(dataset, errors="ignore") as f: data_dict = yaml.safe_load(f) # model dict from utils.dataloaders import LoadImagesAndLabels - dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True) + + dataset = LoadImagesAndLabels(data_dict["train"], augment=True, rect=True) # Get label wh shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True) @@ -122,19 +124,19 @@ def print_results(k, verbose=True): # Filter i = (wh0 < 3.0).any(1).sum() if i: - LOGGER.info(f'{PREFIX}WARNING ⚠️ Extremely small objects found: {i} of {len(wh0)} labels are <3 pixels in size') + LOGGER.info(f"{PREFIX}WARNING ⚠️ Extremely small objects found: {i} of {len(wh0)} labels are <3 pixels in size") wh = wh0[(wh0 >= 2.0).any(1)].astype(np.float32) # filter > 2 pixels # wh = wh * (npr.rand(wh.shape[0], 1) * 0.9 + 0.1) # multiply by random scale 0-1 # Kmeans init try: - LOGGER.info(f'{PREFIX}Running kmeans for {n} anchors on {len(wh)} points...') + LOGGER.info(f"{PREFIX}Running kmeans for {n} anchors on {len(wh)} points...") assert n <= len(wh) # apply overdetermined constraint s = wh.std(0) # sigmas for whitening k = kmeans(wh / s, n, iter=30)[0] * s # points assert n == len(k) # kmeans may return fewer points than requested if wh is insufficient or too similar except Exception: - LOGGER.warning(f'{PREFIX}WARNING ⚠️ switching strategies from kmeans to random init') + LOGGER.warning(f"{PREFIX}WARNING ⚠️ switching strategies from kmeans to random init") k = np.sort(npr.rand(n * 2)).reshape(n, 2) * img_size # random init wh, wh0 = (torch.tensor(x, dtype=torch.float32) for x in (wh, wh0)) k = print_results(k, verbose=False) @@ -162,7 +164,7 @@ def print_results(k, verbose=True): fg = 
anchor_fitness(kg) if fg > f: f, k = fg, kg.copy() - pbar.desc = f'{PREFIX}Evolving anchors with Genetic Algorithm: fitness = {f:.4f}' + pbar.desc = f"{PREFIX}Evolving anchors with Genetic Algorithm: fitness = {f:.4f}" if verbose: print_results(k, verbose) diff --git a/utils/autobatch.py b/utils/autobatch.py index aa763b888462..396dbed1dda4 100644 --- a/utils/autobatch.py +++ b/utils/autobatch.py @@ -1,7 +1,5 @@ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license -""" -Auto-batch utils -""" +"""Auto-batch utils.""" from copy import deepcopy @@ -27,14 +25,14 @@ def autobatch(model, imgsz=640, fraction=0.8, batch_size=16): # print(autobatch(model)) # Check device - prefix = colorstr('AutoBatch: ') - LOGGER.info(f'{prefix}Computing optimal batch size for --imgsz {imgsz}') + prefix = colorstr("AutoBatch: ") + LOGGER.info(f"{prefix}Computing optimal batch size for --imgsz {imgsz}") device = next(model.parameters()).device # get model device - if device.type == 'cpu': - LOGGER.info(f'{prefix}CUDA not detected, using default CPU batch-size {batch_size}') + if device.type == "cpu": + LOGGER.info(f"{prefix}CUDA not detected, using default CPU batch-size {batch_size}") return batch_size if torch.backends.cudnn.benchmark: - LOGGER.info(f'{prefix} ⚠️ Requires torch.backends.cudnn.benchmark=False, using default batch-size {batch_size}') + LOGGER.info(f"{prefix} ⚠️ Requires torch.backends.cudnn.benchmark=False, using default batch-size {batch_size}") return batch_size # Inspect CUDA memory @@ -45,7 +43,7 @@ def autobatch(model, imgsz=640, fraction=0.8, batch_size=16): r = torch.cuda.memory_reserved(device) / gb # GiB reserved a = torch.cuda.memory_allocated(device) / gb # GiB allocated f = t - (r + a) # GiB free - LOGGER.info(f'{prefix}{d} ({properties.name}) {t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free') + LOGGER.info(f"{prefix}{d} ({properties.name}) {t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free") # Profile batch sizes batch_sizes = [1, 2, 4, 8, 16] @@ -53,11 +51,11 @@ def autobatch(model, imgsz=640, fraction=0.8, batch_size=16): img = [torch.empty(b, 3, imgsz, imgsz) for b in batch_sizes] results = profile(img, model, n=3, device=device) except Exception as e: - LOGGER.warning(f'{prefix}{e}') + LOGGER.warning(f"{prefix}{e}") # Fit a solution y = [x[2] for x in results if x] # memory [2] - p = np.polyfit(batch_sizes[:len(y)], y, deg=1) # first degree polynomial fit + p = np.polyfit(batch_sizes[: len(y)], y, deg=1) # first degree polynomial fit b = int((f * fraction - p[1]) / p[0]) # y intercept (optimal batch size) if None in results: # some sizes failed i = results.index(None) # first fail index @@ -65,8 +63,8 @@ def autobatch(model, imgsz=640, fraction=0.8, batch_size=16): b = batch_sizes[max(i - 1, 0)] # select prior safe point if b < 1 or b > 1024: # b outside of safe range b = batch_size - LOGGER.warning(f'{prefix}WARNING ⚠️ CUDA anomaly detected, recommend restart environment and retry command.') + LOGGER.warning(f"{prefix}WARNING ⚠️ CUDA anomaly detected, recommend restart environment and retry command.") fraction = (np.polyval(p, b) + r + a) / t # actual fraction predicted - LOGGER.info(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%) ✅') + LOGGER.info(f"{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%) ✅") return b diff --git a/utils/aws/resume.py b/utils/aws/resume.py index b21731c979a1..4525ba96749a 100644 --- a/utils/aws/resume.py +++ b/utils/aws/resume.py @@ -14,27 
+14,27 @@ sys.path.append(str(ROOT))  # add ROOT to PATH
 port = 0  # --master_port
-path = Path('').resolve()
-for last in path.rglob('*/**/last.pt'):
+path = Path("").resolve()
+for last in path.rglob("*/**/last.pt"):
     ckpt = torch.load(last)
-    if ckpt['optimizer'] is None:
+    if ckpt["optimizer"] is None:
         continue

     # Load opt.yaml
-    with open(last.parent.parent / 'opt.yaml', errors='ignore') as f:
+    with open(last.parent.parent / "opt.yaml", errors="ignore") as f:
         opt = yaml.safe_load(f)

     # Get device count
-    d = opt['device'].split(',')  # devices
+    d = opt["device"].split(",")  # devices
     nd = len(d)  # number of devices
     ddp = nd > 1 or (nd == 0 and torch.cuda.device_count() > 1)  # distributed data parallel

     if ddp:  # multi-GPU
         port += 1
-        cmd = f'python -m torch.distributed.run --nproc_per_node {nd} --master_port {port} train.py --resume {last}'
+        cmd = f"python -m torch.distributed.run --nproc_per_node {nd} --master_port {port} train.py --resume {last}"
     else:  # single-GPU
-        cmd = f'python train.py --resume {last}'
+        cmd = f"python train.py --resume {last}"

-    cmd += ' > /dev/null 2>&1 &'  # redirect output to dev/null and run in daemon thread
+    cmd += " > /dev/null 2>&1 &"  # redirect output to dev/null and run in daemon thread
     print(cmd)
     os.system(cmd)
diff --git a/utils/callbacks.py b/utils/callbacks.py
index c90fa824cdb4..ab7befdb0b3b 100644
--- a/utils/callbacks.py
+++ b/utils/callbacks.py
@@ -1,43 +1,40 @@
 # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
-"""
-Callback utils
-"""
+"""Callback utils."""

 import threading


 class Callbacks:
-    """"
-    Handles all registered callbacks for YOLOv5 Hooks
-    """
+    """Handles all registered callbacks for YOLOv5 Hooks."""

     def __init__(self):
         # Define the available callbacks
         self._callbacks = {
-            'on_pretrain_routine_start': [],
-            'on_pretrain_routine_end': [],
-            'on_train_start': [],
-            'on_train_epoch_start': [],
-            'on_train_batch_start': [],
-            'optimizer_step': [],
-            'on_before_zero_grad': [],
-            'on_train_batch_end': [],
-            'on_train_epoch_end': [],
-            'on_val_start': [],
-            'on_val_batch_start': [],
-            'on_val_image_end': [],
-            'on_val_batch_end': [],
-            'on_val_end': [],
-            'on_fit_epoch_end': [],  # fit = train + val
-            'on_model_save': [],
-            'on_train_end': [],
-            'on_params_update': [],
-            'teardown': [], }
+            "on_pretrain_routine_start": [],
+            "on_pretrain_routine_end": [],
+            "on_train_start": [],
+            "on_train_epoch_start": [],
+            "on_train_batch_start": [],
+            "optimizer_step": [],
+            "on_before_zero_grad": [],
+            "on_train_batch_end": [],
+            "on_train_epoch_end": [],
+            "on_val_start": [],
+            "on_val_batch_start": [],
+            "on_val_image_end": [],
+            "on_val_batch_end": [],
+            "on_val_end": [],
+            "on_fit_epoch_end": [],  # fit = train + val
+            "on_model_save": [],
+            "on_train_end": [],
+            "on_params_update": [],
+            "teardown": [],
+        }
         self.stop_training = False  # set True to interrupt training

-    def register_action(self, hook, name='', callback=None):
+    def register_action(self, hook, name="", callback=None):
         """
-        Register a new action to a callback hook
+        Register a new action to a callback hook.

         Args:
             hook: The callback hook name to register the action to
@@ -46,11 +43,11 @@ def register_action(self, hook, name='', callback=None):
         """
         assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}"
         assert callable(callback), f"callback '{callback}' is not callable"
-        self._callbacks[hook].append({'name': name, 'callback': callback})
+        self._callbacks[hook].append({"name": name, "callback": callback})

     def get_registered_actions(self, hook=None):
-        """"
-        Returns all the registered actions by callback hook
+        """
+        Returns all the registered actions by callback hook.

         Args:
             hook: The name of the hook to check, defaults to all
@@ -59,7 +56,7 @@ def run(self, hook, *args, thread=False, **kwargs):
         """
-        Loop through the registered actions and fire all callbacks on main thread
+        Loop through the registered actions and fire all callbacks on main thread.

         Args:
             hook: The name of the hook to check, defaults to all
@@ -71,6 +68,6 @@ def run(self, hook, *args, thread=False, **kwargs):
         assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}"
         for logger in self._callbacks[hook]:
             if thread:
-                threading.Thread(target=logger['callback'], args=args, kwargs=kwargs, daemon=True).start()
+                threading.Thread(target=logger["callback"], args=args, kwargs=kwargs, daemon=True).start()
             else:
-                logger['callback'](*args, **kwargs)
+                logger["callback"](*args, **kwargs)
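A minimal usage sketch of the registry above (the callback and its argument values are hypothetical; hook names must come from the `_callbacks` dict):

    from utils.callbacks import Callbacks

    def log_fitness(epoch, fitness):  # run() forwards its *args/**kwargs to each registered callback
        print(f"epoch {epoch}: fitness={fitness:.4f}")

    callbacks = Callbacks()
    callbacks.register_action("on_fit_epoch_end", name="log_fitness", callback=log_fitness)
    callbacks.run("on_fit_epoch_end", 3, 0.271)               # fires on the main thread
    callbacks.run("on_fit_epoch_end", 3, 0.271, thread=True)  # fires in a daemon thread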
diff --git a/utils/dataloaders.py b/utils/dataloaders.py
index d422ef0711cb..c821e917ed38 100644
--- a/utils/dataloaders.py
+++ b/utils/dataloaders.py
@@ -1,7 +1,5 @@
 # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
-"""
-Dataloaders and dataset utils
-"""
+"""Dataloaders and dataset utils."""

 import contextlib
 import glob
@@ -28,25 +26,49 @@
 from torch.utils.data import DataLoader, Dataset, dataloader, distributed
 from tqdm import tqdm

-from utils.augmentations import (Albumentations, augment_hsv, classify_albumentations, classify_transforms, copy_paste,
-                                 letterbox, mixup, random_perspective)
-from utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, check_dataset, check_requirements,
-                           check_yaml, clean_str, cv2, is_colab, is_kaggle, segments2boxes, unzip_file, xyn2xy,
-                           xywh2xyxy, xywhn2xyxy, xyxy2xywhn)
+from utils.augmentations import (
+    Albumentations,
+    augment_hsv,
+    classify_albumentations,
+    classify_transforms,
+    copy_paste,
+    letterbox,
+    mixup,
+    random_perspective,
+)
+from utils.general import (
+    DATASETS_DIR,
+    LOGGER,
+    NUM_THREADS,
+    TQDM_BAR_FORMAT,
+    check_dataset,
+    check_requirements,
+    check_yaml,
+    clean_str,
+    cv2,
+    is_colab,
+    is_kaggle,
+    segments2boxes,
+    unzip_file,
+    xyn2xy,
+    xywh2xyxy,
+    xywhn2xyxy,
+    xyxy2xywhn,
+)
 from utils.torch_utils import torch_distributed_zero_first

 # Parameters
-HELP_URL = 'See https://docs.ultralytics.com/yolov5/tutorials/train_custom_data'
-IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp', 'pfm'  # include image suffixes
-VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv'  # include video suffixes
-LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1))  # https://pytorch.org/docs/stable/elastic/run.html
-RANK = int(os.getenv('RANK', -1))
-WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1))
-PIN_MEMORY = str(os.getenv('PIN_MEMORY', True)).lower() == 'true'  # global pin_memory for dataloaders
+HELP_URL = "See https://docs.ultralytics.com/yolov5/tutorials/train_custom_data"
+IMG_FORMATS = "bmp", "dng", "jpeg", "jpg",
"mpo", "png", "tif", "tiff", "webp", "pfm" # include image suffixes +VID_FORMATS = "asf", "avi", "gif", "m4v", "mkv", "mov", "mp4", "mpeg", "mpg", "ts", "wmv" # include video suffixes +LOCAL_RANK = int(os.getenv("LOCAL_RANK", -1)) # https://pytorch.org/docs/stable/elastic/run.html +RANK = int(os.getenv("RANK", -1)) +WORLD_SIZE = int(os.getenv("WORLD_SIZE", 1)) +PIN_MEMORY = str(os.getenv("PIN_MEMORY", True)).lower() == "true" # global pin_memory for dataloaders # Get orientation exif tag for orientation in ExifTags.TAGS.keys(): - if ExifTags.TAGS[orientation] == 'Orientation': + if ExifTags.TAGS[orientation] == "Orientation": break @@ -54,7 +76,7 @@ def get_hash(paths): # Returns a single hash value of a list of paths (files or dirs) size = sum(os.path.getsize(p) for p in paths if os.path.exists(p)) # sizes h = hashlib.sha256(str(size).encode()) # hash sizes - h.update(''.join(paths).encode()) # hash paths + h.update("".join(paths).encode()) # hash paths return h.hexdigest() # return hash @@ -86,17 +108,18 @@ def exif_transpose(image): 5: Image.TRANSPOSE, 6: Image.ROTATE_270, 7: Image.TRANSVERSE, - 8: Image.ROTATE_90}.get(orientation) + 8: Image.ROTATE_90, + }.get(orientation) if method is not None: image = image.transpose(method) del exif[0x0112] - image.info['exif'] = exif.tobytes() + image.info["exif"] = exif.tobytes() return image def seed_worker(worker_id): # Set dataloader worker seed https://pytorch.org/docs/stable/notes/randomness.html#dataloader - worker_seed = torch.initial_seed() % 2 ** 32 + worker_seed = torch.initial_seed() % 2**32 np.random.seed(worker_seed) random.seed(worker_seed) @@ -104,7 +127,6 @@ def seed_worker(worker_id): # Inherit from DistributedSampler and override iterator # https://github.com/pytorch/pytorch/blob/master/torch/utils/data/distributed.py class SmartDistributedSampler(distributed.DistributedSampler): - def __iter__(self): # deterministically shuffle based on epoch and seed g = torch.Generator() @@ -118,7 +140,7 @@ def __iter__(self): idx = idx.tolist() if self.drop_last: - idx = idx[:self.num_samples] + idx = idx[: self.num_samples] else: padding_size = self.num_samples - len(idx) if padding_size <= len(idx): @@ -129,25 +151,27 @@ def __iter__(self): return iter(idx) -def create_dataloader(path, - imgsz, - batch_size, - stride, - single_cls=False, - hyp=None, - augment=False, - cache=False, - pad=0.0, - rect=False, - rank=-1, - workers=8, - image_weights=False, - quad=False, - prefix='', - shuffle=False, - seed=0): +def create_dataloader( + path, + imgsz, + batch_size, + stride, + single_cls=False, + hyp=None, + augment=False, + cache=False, + pad=0.0, + rect=False, + rank=-1, + workers=8, + image_weights=False, + quad=False, + prefix="", + shuffle=False, + seed=0, +): if rect and shuffle: - LOGGER.warning('WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False') + LOGGER.warning("WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False") shuffle = False with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP dataset = LoadImagesAndLabels( @@ -163,7 +187,8 @@ def create_dataloader(path, pad=pad, image_weights=image_weights, prefix=prefix, - rank=rank) + rank=rank, + ) batch_size = min(batch_size, len(dataset)) nd = torch.cuda.device_count() # number of CUDA devices @@ -172,26 +197,29 @@ def create_dataloader(path, loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates generator = torch.Generator() 
generator.manual_seed(6148914691236517205 + seed + RANK) - return loader(dataset, - batch_size=batch_size, - shuffle=shuffle and sampler is None, - num_workers=nw, - sampler=sampler, - pin_memory=PIN_MEMORY, - collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn, - worker_init_fn=seed_worker, - generator=generator), dataset + return loader( + dataset, + batch_size=batch_size, + shuffle=shuffle and sampler is None, + num_workers=nw, + sampler=sampler, + pin_memory=PIN_MEMORY, + collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn, + worker_init_fn=seed_worker, + generator=generator, + ), dataset class InfiniteDataLoader(dataloader.DataLoader): - """ Dataloader that reuses workers + """ + Dataloader that reuses workers. Uses same syntax as vanilla DataLoader """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler)) + object.__setattr__(self, "batch_sampler", _RepeatSampler(self.batch_sampler)) self.iterator = super().__iter__() def __len__(self): @@ -203,7 +231,8 @@ def __iter__(self): class _RepeatSampler: - """ Sampler that repeats forever + """ + Sampler that repeats forever. Args: sampler (Sampler) @@ -221,7 +250,7 @@ class LoadScreenshots: # YOLOv5 screenshot dataloader, i.e. `python detect.py --source "screen 0 100 100 512 256"` def __init__(self, source, img_size=640, stride=32, auto=True, transforms=None): # source = [screen_number left top width height] (pixels) - check_requirements('mss') + check_requirements("mss") import mss source, *params = source.split() @@ -236,17 +265,17 @@ def __init__(self, source, img_size=640, stride=32, auto=True, transforms=None): self.stride = stride self.transforms = transforms self.auto = auto - self.mode = 'stream' + self.mode = "stream" self.frame = 0 self.sct = mss.mss() # Parse monitor shape monitor = self.sct.monitors[self.screen] - self.top = monitor['top'] if top is None else (monitor['top'] + top) - self.left = monitor['left'] if left is None else (monitor['left'] + left) - self.width = width or monitor['width'] - self.height = height or monitor['height'] - self.monitor = {'left': self.left, 'top': self.top, 'width': self.width, 'height': self.height} + self.top = monitor["top"] if top is None else (monitor["top"] + top) + self.left = monitor["left"] if left is None else (monitor["left"] + left) + self.width = width or monitor["width"] + self.height = height or monitor["height"] + self.monitor = {"left": self.left, "top": self.top, "width": self.width, "height": self.height} def __iter__(self): return self @@ -254,7 +283,7 @@ def __iter__(self): def __next__(self): # mss screen capture: get raw pixels from the screen as np array im0 = np.array(self.sct.grab(self.monitor))[:, :, :3] # [:, :, :3] BGRA to BGR - s = f'screen {self.screen} (LTWH): {self.left},{self.top},{self.width},{self.height}: ' + s = f"screen {self.screen} (LTWH): {self.left},{self.top},{self.width},{self.height}: " if self.transforms: im = self.transforms(im0) # transforms @@ -269,22 +298,22 @@ def __next__(self): class LoadImages: # YOLOv5 image/video dataloader, i.e. 
`python detect.py --source image.jpg/vid.mp4` def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None, vid_stride=1): - if isinstance(path, str) and Path(path).suffix == '.txt': # *.txt file with img/vid/dir on each line + if isinstance(path, str) and Path(path).suffix == ".txt": # *.txt file with img/vid/dir on each line path = Path(path).read_text().rsplit() files = [] for p in sorted(path) if isinstance(path, (list, tuple)) else [path]: p = str(Path(p).resolve()) - if '*' in p: + if "*" in p: files.extend(sorted(glob.glob(p, recursive=True))) # glob elif os.path.isdir(p): - files.extend(sorted(glob.glob(os.path.join(p, '*.*')))) # dir + files.extend(sorted(glob.glob(os.path.join(p, "*.*")))) # dir elif os.path.isfile(p): files.append(p) # files else: - raise FileNotFoundError(f'{p} does not exist') + raise FileNotFoundError(f"{p} does not exist") - images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS] - videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS] + images = [x for x in files if x.split(".")[-1].lower() in IMG_FORMATS] + videos = [x for x in files if x.split(".")[-1].lower() in VID_FORMATS] ni, nv = len(images), len(videos) self.img_size = img_size @@ -292,7 +321,7 @@ def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None, vi self.files = images + videos self.nf = ni + nv # number of files self.video_flag = [False] * ni + [True] * nv - self.mode = 'image' + self.mode = "image" self.auto = auto self.transforms = transforms # optional self.vid_stride = vid_stride # video frame-rate stride @@ -300,8 +329,10 @@ def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None, vi self._new_video(videos[0]) # new video else: self.cap = None - assert self.nf > 0, f'No images or videos found in {p}. ' \ - f'Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}' + assert self.nf > 0, ( + f"No images or videos found in {p}. " + f"Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}" + ) def __iter__(self): self.count = 0 @@ -314,7 +345,7 @@ def __next__(self): if self.video_flag[self.count]: # Read video - self.mode = 'video' + self.mode = "video" for _ in range(self.vid_stride): self.cap.grab() ret_val, im0 = self.cap.retrieve() @@ -329,14 +360,14 @@ def __next__(self): self.frame += 1 # im0 = self._cv2_rotate(im0) # for use if cv2 autorotation is False - s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: ' + s = f"video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: " else: # Read image self.count += 1 im0 = cv2.imread(path) # BGR - assert im0 is not None, f'Image Not Found {path}' - s = f'image {self.count}/{self.nf} {path}: ' + assert im0 is not None, f"Image Not Found {path}" + s = f"image {self.count}/{self.nf} {path}: " if self.transforms: im = self.transforms(im0) # transforms @@ -371,9 +402,9 @@ def __len__(self): class LoadStreams: # YOLOv5 streamloader, i.e. 
`python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams` - def __init__(self, sources='file.streams', img_size=640, stride=32, auto=True, transforms=None, vid_stride=1): + def __init__(self, sources="file.streams", img_size=640, stride=32, auto=True, transforms=None, vid_stride=1): torch.backends.cudnn.benchmark = True # faster for fixed-size inference - self.mode = 'stream' + self.mode = "stream" self.img_size = img_size self.stride = stride self.vid_stride = vid_stride # video frame-rate stride @@ -383,29 +414,30 @@ def __init__(self, sources='file.streams', img_size=640, stride=32, auto=True, t self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n for i, s in enumerate(sources): # index, source # Start thread to read frames from video stream - st = f'{i + 1}/{n}: {s}... ' - if urlparse(s).hostname in ('www.youtube.com', 'youtube.com', 'youtu.be'): # if source is YouTube video + st = f"{i + 1}/{n}: {s}... " + if urlparse(s).hostname in ("www.youtube.com", "youtube.com", "youtu.be"): # if source is YouTube video # YouTube format i.e. 'https://www.youtube.com/watch?v=Zgi9g1ksQHc' or 'https://youtu.be/LNwODJXcvt4' - check_requirements(('pafy', 'youtube_dl==2020.12.2')) + check_requirements(("pafy", "youtube_dl==2020.12.2")) import pafy - s = pafy.new(s).getbest(preftype='mp4').url # YouTube URL + + s = pafy.new(s).getbest(preftype="mp4").url # YouTube URL s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam if s == 0: - assert not is_colab(), '--source 0 webcam unsupported on Colab. Rerun command in a local environment.' - assert not is_kaggle(), '--source 0 webcam unsupported on Kaggle. Rerun command in a local environment.' + assert not is_colab(), "--source 0 webcam unsupported on Colab. Rerun command in a local environment." + assert not is_kaggle(), "--source 0 webcam unsupported on Kaggle. Rerun command in a local environment." cap = cv2.VideoCapture(s) - assert cap.isOpened(), f'{st}Failed to open {s}' + assert cap.isOpened(), f"{st}Failed to open {s}" w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) fps = cap.get(cv2.CAP_PROP_FPS) # warning: may return 0 or nan - self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback + self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float("inf") # infinite stream fallback self.fps[i] = max((fps if math.isfinite(fps) else 0) % 100, 0) or 30 # 30 FPS fallback _, self.imgs[i] = cap.read() # guarantee first frame self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True) - LOGGER.info(f'{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)') + LOGGER.info(f"{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)") self.threads[i].start() - LOGGER.info('') # newline + LOGGER.info("") # newline # check for common shapes s = np.stack([letterbox(x, img_size, stride=stride, auto=auto)[0].shape for x in self.imgs]) @@ -413,7 +445,7 @@ def __init__(self, sources='file.streams', img_size=640, stride=32, auto=True, t self.auto = auto and self.rect self.transforms = transforms # optional if not self.rect: - LOGGER.warning('WARNING ⚠️ Stream shapes differ. For optimal performance supply similarly-shaped streams.') + LOGGER.warning("WARNING ⚠️ Stream shapes differ. 
For optimal performance supply similarly-shaped streams.") def update(self, i, cap, stream): # Read stream `i` frames in daemon thread @@ -426,7 +458,7 @@ def update(self, i, cap, stream): if success: self.imgs[i] = im else: - LOGGER.warning('WARNING ⚠️ Video stream unresponsive, please check your IP camera connection.') + LOGGER.warning("WARNING ⚠️ Video stream unresponsive, please check your IP camera connection.") self.imgs[i] = np.zeros_like(self.imgs[i]) cap.open(stream) # re-open stream if signal was lost time.sleep(0.0) # wait time @@ -437,7 +469,7 @@ def __iter__(self): def __next__(self): self.count += 1 - if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'): # q to quit + if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord("q"): # q to quit cv2.destroyAllWindows() raise StopIteration @@ -449,7 +481,7 @@ def __next__(self): im = im[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW im = np.ascontiguousarray(im) # contiguous - return self.sources, im, im0, None, '' + return self.sources, im, im0, None, "" def __len__(self): return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years @@ -457,8 +489,8 @@ def __len__(self): def img2label_paths(img_paths): # Define label paths as a function of image paths - sa, sb = f'{os.sep}images{os.sep}', f'{os.sep}labels{os.sep}' # /images/, /labels/ substrings - return [sb.join(x.rsplit(sa, 1)).rsplit('.', 1)[0] + '.txt' for x in img_paths] + sa, sb = f"{os.sep}images{os.sep}", f"{os.sep}labels{os.sep}" # /images/, /labels/ substrings + return [sb.join(x.rsplit(sa, 1)).rsplit(".", 1)[0] + ".txt" for x in img_paths] class LoadImagesAndLabels(Dataset): @@ -466,22 +498,24 @@ class LoadImagesAndLabels(Dataset): cache_version = 0.6 # dataset labels *.cache version rand_interp_methods = [cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_LANCZOS4] - def __init__(self, - path, - img_size=640, - batch_size=16, - augment=False, - hyp=None, - rect=False, - image_weights=False, - cache_images=False, - single_cls=False, - stride=32, - pad=0.0, - min_items=0, - prefix='', - rank=-1, - seed=0): + def __init__( + self, + path, + img_size=640, + batch_size=16, + augment=False, + hyp=None, + rect=False, + image_weights=False, + cache_images=False, + single_cls=False, + stride=32, + pad=0.0, + min_items=0, + prefix="", + rank=-1, + seed=0, + ): self.img_size = img_size self.augment = augment self.hyp = hyp @@ -498,46 +532,46 @@ def __init__(self, for p in path if isinstance(path, list) else [path]: p = Path(p) # os-agnostic if p.is_dir(): # dir - f += glob.glob(str(p / '**' / '*.*'), recursive=True) + f += glob.glob(str(p / "**" / "*.*"), recursive=True) # f = list(p.rglob('*.*')) # pathlib elif p.is_file(): # file with open(p) as t: t = t.read().strip().splitlines() parent = str(p.parent) + os.sep - f += [x.replace('./', parent, 1) if x.startswith('./') else x for x in t] # to global path + f += [x.replace("./", parent, 1) if x.startswith("./") else x for x in t] # to global path # f += [p.parent / x.lstrip(os.sep) for x in t] # to global path (pathlib) else: - raise FileNotFoundError(f'{prefix}{p} does not exist') - self.im_files = sorted(x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS) + raise FileNotFoundError(f"{prefix}{p} does not exist") + self.im_files = sorted(x.replace("/", os.sep) for x in f if x.split(".")[-1].lower() in IMG_FORMATS) # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS]) # pathlib - 
assert self.im_files, f'{prefix}No images found' + assert self.im_files, f"{prefix}No images found" except Exception as e: - raise Exception(f'{prefix}Error loading data from {path}: {e}\n{HELP_URL}') from e + raise Exception(f"{prefix}Error loading data from {path}: {e}\n{HELP_URL}") from e # Check cache self.label_files = img2label_paths(self.im_files) # labels - cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') + cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix(".cache") try: cache, exists = np.load(cache_path, allow_pickle=True).item(), True # load dict - assert cache['version'] == self.cache_version # matches current version - assert cache['hash'] == get_hash(self.label_files + self.im_files) # identical hash + assert cache["version"] == self.cache_version # matches current version + assert cache["hash"] == get_hash(self.label_files + self.im_files) # identical hash except Exception: cache, exists = self.cache_labels(cache_path, prefix), False # run cache ops # Display cache - nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupt, total + nf, nm, ne, nc, n = cache.pop("results") # found, missing, empty, corrupt, total if exists and LOCAL_RANK in {-1, 0}: - d = f'Scanning {cache_path}... {nf} images, {nm + ne} backgrounds, {nc} corrupt' + d = f"Scanning {cache_path}... {nf} images, {nm + ne} backgrounds, {nc} corrupt" tqdm(None, desc=prefix + d, total=n, initial=n, bar_format=TQDM_BAR_FORMAT) # display cache results - if cache['msgs']: - LOGGER.info('\n'.join(cache['msgs'])) # display warnings - assert nf > 0 or not augment, f'{prefix}No labels found in {cache_path}, can not start training. {HELP_URL}' + if cache["msgs"]: + LOGGER.info("\n".join(cache["msgs"])) # display warnings + assert nf > 0 or not augment, f"{prefix}No labels found in {cache_path}, can not start training. {HELP_URL}" # Read cache - [cache.pop(k) for k in ('hash', 'version', 'msgs')] # remove items + [cache.pop(k) for k in ("hash", "version", "msgs")] # remove items labels, shapes, self.segments = zip(*cache.values()) nl = len(np.concatenate(labels, 0)) # number of labels - assert nl > 0 or not augment, f'{prefix}All labels empty in {cache_path}, can not start training. {HELP_URL}' + assert nl > 0 or not augment, f"{prefix}All labels empty in {cache_path}, can not start training. 
{HELP_URL}" self.labels = list(labels) self.shapes = np.array(shapes) self.im_files = list(cache.keys()) # update @@ -546,7 +580,7 @@ def __init__(self, # Filter images if min_items: include = np.array([len(x) >= min_items for x in self.labels]).nonzero()[0].astype(int) - LOGGER.info(f'{prefix}{n - len(include)}/{n} images filtered from dataset') + LOGGER.info(f"{prefix}{n - len(include)}/{n} images filtered from dataset") self.im_files = [self.im_files[i] for i in include] self.label_files = [self.label_files[i] for i in include] self.labels = [self.labels[i] for i in include] @@ -603,52 +637,56 @@ def __init__(self, self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride # Cache images into RAM/disk for faster training - if cache_images == 'ram' and not self.check_cache_ram(prefix=prefix): + if cache_images == "ram" and not self.check_cache_ram(prefix=prefix): cache_images = False self.ims = [None] * n - self.npy_files = [Path(f).with_suffix('.npy') for f in self.im_files] + self.npy_files = [Path(f).with_suffix(".npy") for f in self.im_files] if cache_images: b, gb = 0, 1 << 30 # bytes of cached images, bytes per gigabytes self.im_hw0, self.im_hw = [None] * n, [None] * n - fcn = self.cache_images_to_disk if cache_images == 'disk' else self.load_image + fcn = self.cache_images_to_disk if cache_images == "disk" else self.load_image results = ThreadPool(NUM_THREADS).imap(lambda i: (i, fcn(i)), self.indices) pbar = tqdm(results, total=len(self.indices), bar_format=TQDM_BAR_FORMAT, disable=LOCAL_RANK > 0) for i, x in pbar: - if cache_images == 'disk': + if cache_images == "disk": b += self.npy_files[i].stat().st_size else: # 'ram' self.ims[i], self.im_hw0[i], self.im_hw[i] = x # im, hw_orig, hw_resized = load_image(self, i) b += self.ims[i].nbytes * WORLD_SIZE - pbar.desc = f'{prefix}Caching images ({b / gb:.1f}GB {cache_images})' + pbar.desc = f"{prefix}Caching images ({b / gb:.1f}GB {cache_images})" pbar.close() - def check_cache_ram(self, safety_margin=0.1, prefix=''): + def check_cache_ram(self, safety_margin=0.1, prefix=""): # Check image caching requirements vs available memory b, gb = 0, 1 << 30 # bytes of cached images, bytes per gigabytes n = min(self.n, 30) # extrapolate from 30 random images for _ in range(n): im = cv2.imread(random.choice(self.im_files)) # sample image ratio = self.img_size / max(im.shape[0], im.shape[1]) # max(h, w) # ratio - b += im.nbytes * ratio ** 2 + b += im.nbytes * ratio**2 mem_required = b * self.n / n # GB required to cache dataset into RAM mem = psutil.virtual_memory() cache = mem_required * (1 + safety_margin) < mem.available # to cache or not to cache, that is the question if not cache: - LOGGER.info(f'{prefix}{mem_required / gb:.1f}GB RAM required, ' - f'{mem.available / gb:.1f}/{mem.total / gb:.1f}GB available, ' - f"{'caching images ✅' if cache else 'not caching images ⚠️'}") + LOGGER.info( + f'{prefix}{mem_required / gb:.1f}GB RAM required, ' + f'{mem.available / gb:.1f}/{mem.total / gb:.1f}GB available, ' + f"{'caching images ✅' if cache else 'not caching images ⚠️'}" + ) return cache - def cache_labels(self, path=Path('./labels.cache'), prefix=''): + def cache_labels(self, path=Path("./labels.cache"), prefix=""): # Cache dataset labels, check images and read shapes x = {} # dict nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages - desc = f'{prefix}Scanning {path.parent / path.stem}...' + desc = f"{prefix}Scanning {path.parent / path.stem}..." 
with Pool(NUM_THREADS) as pool: - pbar = tqdm(pool.imap(verify_image_label, zip(self.im_files, self.label_files, repeat(prefix))), - desc=desc, - total=len(self.im_files), - bar_format=TQDM_BAR_FORMAT) + pbar = tqdm( + pool.imap(verify_image_label, zip(self.im_files, self.label_files, repeat(prefix))), + desc=desc, + total=len(self.im_files), + bar_format=TQDM_BAR_FORMAT, + ) for im_file, lb, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar: nm += nm_f nf += nf_f @@ -658,23 +696,23 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''): x[im_file] = [lb, shape, segments] if msg: msgs.append(msg) - pbar.desc = f'{desc} {nf} images, {nm + ne} backgrounds, {nc} corrupt' + pbar.desc = f"{desc} {nf} images, {nm + ne} backgrounds, {nc} corrupt" pbar.close() if msgs: - LOGGER.info('\n'.join(msgs)) + LOGGER.info("\n".join(msgs)) if nf == 0: - LOGGER.warning(f'{prefix}WARNING ⚠️ No labels found in {path}. {HELP_URL}') - x['hash'] = get_hash(self.label_files + self.im_files) - x['results'] = nf, nm, ne, nc, len(self.im_files) - x['msgs'] = msgs # warnings - x['version'] = self.cache_version # cache version + LOGGER.warning(f"{prefix}WARNING ⚠️ No labels found in {path}. {HELP_URL}") + x["hash"] = get_hash(self.label_files + self.im_files) + x["results"] = nf, nm, ne, nc, len(self.im_files) + x["msgs"] = msgs # warnings + x["version"] = self.cache_version # cache version try: np.save(path, x) # save cache for next time - path.with_suffix('.cache.npy').rename(path) # remove .npy suffix - LOGGER.info(f'{prefix}New cache created: {path}') + path.with_suffix(".cache.npy").rename(path) # remove .npy suffix + LOGGER.info(f"{prefix}New cache created: {path}") except Exception as e: - LOGGER.warning(f'{prefix}WARNING ⚠️ Cache directory {path.parent} is not writeable: {e}') # not writeable + LOGGER.warning(f"{prefix}WARNING ⚠️ Cache directory {path.parent} is not writeable: {e}") # not writeable return x def __len__(self): @@ -690,14 +728,14 @@ def __getitem__(self, index): index = self.indices[index] # linear, shuffled, or image_weights hyp = self.hyp - mosaic = self.mosaic and random.random() < hyp['mosaic'] + mosaic = self.mosaic and random.random() < hyp["mosaic"] if mosaic: # Load mosaic img, labels = self.load_mosaic(index) shapes = None # MixUp augmentation - if random.random() < hyp['mixup']: + if random.random() < hyp["mixup"]: img, labels = mixup(img, labels, *self.load_mosaic(random.choice(self.indices))) else: @@ -714,17 +752,19 @@ def __getitem__(self, index): labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1]) if self.augment: - img, labels = random_perspective(img, - labels, - degrees=hyp['degrees'], - translate=hyp['translate'], - scale=hyp['scale'], - shear=hyp['shear'], - perspective=hyp['perspective']) + img, labels = random_perspective( + img, + labels, + degrees=hyp["degrees"], + translate=hyp["translate"], + scale=hyp["scale"], + shear=hyp["shear"], + perspective=hyp["perspective"], + ) nl = len(labels) # number of labels if nl: - labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1E-3) + labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1e-3) if self.augment: # Albumentations @@ -732,16 +772,16 @@ def __getitem__(self, index): nl = len(labels) # update after albumentations # HSV color-space - augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v']) + augment_hsv(img, hgain=hyp["hsv_h"], sgain=hyp["hsv_s"], vgain=hyp["hsv_v"]) # 
Flip up-down - if random.random() < hyp['flipud']: + if random.random() < hyp["flipud"]: img = np.flipud(img) if nl: labels[:, 2] = 1 - labels[:, 2] # Flip left-right - if random.random() < hyp['fliplr']: + if random.random() < hyp["fliplr"]: img = np.fliplr(img) if nl: labels[:, 1] = 1 - labels[:, 1] @@ -762,13 +802,17 @@ def __getitem__(self, index): def load_image(self, i): # Loads 1 image from dataset index 'i', returns (im, original hw, resized hw) - im, f, fn = self.ims[i], self.im_files[i], self.npy_files[i], + im, f, fn = ( + self.ims[i], + self.im_files[i], + self.npy_files[i], + ) if im is None: # not cached in RAM if fn.exists(): # load npy im = np.load(fn) else: # read image im = cv2.imread(f) # BGR - assert im is not None, f'Image Not Found {f}' + assert im is not None, f"Image Not Found {f}" h0, w0 = im.shape[:2] # orig hw r = self.img_size / max(h0, w0) # ratio if r != 1: # if sizes are not equal @@ -828,16 +872,18 @@ def load_mosaic(self, index): # img4, labels4 = replicate(img4, labels4) # replicate # Augment - img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp['copy_paste']) - img4, labels4 = random_perspective(img4, - labels4, - segments4, - degrees=self.hyp['degrees'], - translate=self.hyp['translate'], - scale=self.hyp['scale'], - shear=self.hyp['shear'], - perspective=self.hyp['perspective'], - border=self.mosaic_border) # border to remove + img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp["copy_paste"]) + img4, labels4 = random_perspective( + img4, + labels4, + segments4, + degrees=self.hyp["degrees"], + translate=self.hyp["translate"], + scale=self.hyp["scale"], + shear=self.hyp["shear"], + perspective=self.hyp["perspective"], + border=self.mosaic_border, + ) # border to remove return img4, labels4 @@ -886,12 +932,12 @@ def load_mosaic9(self, index): segments9.extend(segments) # Image - img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax] + img9[y1:y2, x1:x2] = img[y1 - pady :, x1 - padx :] # img9[ymin:ymax, xmin:xmax] hp, wp = h, w # height, width previous # Offset yc, xc = (int(random.uniform(0, s)) for _ in self.mosaic_border) # mosaic center x, y - img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s] + img9 = img9[yc : yc + 2 * s, xc : xc + 2 * s] # Concat/clip labels labels9 = np.concatenate(labels9, 0) @@ -905,16 +951,18 @@ def load_mosaic9(self, index): # img9, labels9 = replicate(img9, labels9) # replicate # Augment - img9, labels9, segments9 = copy_paste(img9, labels9, segments9, p=self.hyp['copy_paste']) - img9, labels9 = random_perspective(img9, - labels9, - segments9, - degrees=self.hyp['degrees'], - translate=self.hyp['translate'], - scale=self.hyp['scale'], - shear=self.hyp['shear'], - perspective=self.hyp['perspective'], - border=self.mosaic_border) # border to remove + img9, labels9, segments9 = copy_paste(img9, labels9, segments9, p=self.hyp["copy_paste"]) + img9, labels9 = random_perspective( + img9, + labels9, + segments9, + degrees=self.hyp["degrees"], + translate=self.hyp["translate"], + scale=self.hyp["scale"], + shear=self.hyp["shear"], + perspective=self.hyp["perspective"], + border=self.mosaic_border, + ) # border to remove return img9, labels9 @@ -937,8 +985,9 @@ def collate_fn4(batch): for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW i *= 4 if random.random() < 0.5: - im1 = F.interpolate(im[i].unsqueeze(0).float(), scale_factor=2.0, mode='bilinear', - align_corners=False)[0].type(im[i].type()) + im1 = F.interpolate(im[i].unsqueeze(0).float(), scale_factor=2.0, 
mode="bilinear", align_corners=False)[ + 0 + ].type(im[i].type()) lb = label[i] else: im1 = torch.cat((torch.cat((im[i], im[i + 1]), 1), torch.cat((im[i + 2], im[i + 3]), 1)), 2) @@ -953,21 +1002,21 @@ def collate_fn4(batch): # Ancillary functions -------------------------------------------------------------------------------------------------- -def flatten_recursive(path=DATASETS_DIR / 'coco128'): +def flatten_recursive(path=DATASETS_DIR / "coco128"): # Flatten a recursive directory by bringing all files to top level - new_path = Path(f'{str(path)}_flat') + new_path = Path(f"{str(path)}_flat") if os.path.exists(new_path): shutil.rmtree(new_path) # delete output folder os.makedirs(new_path) # make new output folder - for file in tqdm(glob.glob(f'{str(Path(path))}/**/*.*', recursive=True)): + for file in tqdm(glob.glob(f"{str(Path(path))}/**/*.*", recursive=True)): shutil.copyfile(file, new_path / Path(file).name) -def extract_boxes(path=DATASETS_DIR / 'coco128'): # from utils.dataloaders import *; extract_boxes() +def extract_boxes(path=DATASETS_DIR / "coco128"): # from utils.dataloaders import *; extract_boxes() # Convert detection dataset into classification dataset, with one directory per class path = Path(path) # images dir - shutil.rmtree(path / 'classification') if (path / 'classification').is_dir() else None # remove existing - files = list(path.rglob('*.*')) + shutil.rmtree(path / "classification") if (path / "classification").is_dir() else None # remove existing + files = list(path.rglob("*.*")) n = len(files) # number of files for im_file in tqdm(files, total=n): if im_file.suffix[1:] in IMG_FORMATS: @@ -983,7 +1032,7 @@ def extract_boxes(path=DATASETS_DIR / 'coco128'): # from utils.dataloaders impo for j, x in enumerate(lb): c = int(x[0]) # class - f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename + f = (path / "classifier") / f"{c}" / f"{path.stem}_{im_file.stem}_{j}.jpg" # new filename if not f.parent.is_dir(): f.parent.mkdir(parents=True) @@ -994,11 +1043,11 @@ def extract_boxes(path=DATASETS_DIR / 'coco128'): # from utils.dataloaders impo b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image b[[1, 3]] = np.clip(b[[1, 3]], 0, h) - assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}' + assert cv2.imwrite(str(f), im[b[1] : b[3], b[0] : b[2]]), f"box failure in {f}" -def autosplit(path=DATASETS_DIR / 'coco128/images', weights=(0.9, 0.1, 0.0), annotated_only=False): - """ Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files +def autosplit(path=DATASETS_DIR / "coco128/images", weights=(0.9, 0.1, 0.0), annotated_only=False): + """Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files Usage: from utils.dataloaders import *; autosplit() Arguments path: Path to images directory @@ -1006,40 +1055,40 @@ def autosplit(path=DATASETS_DIR / 'coco128/images', weights=(0.9, 0.1, 0.0), ann annotated_only: Only use images with an annotated txt file """ path = Path(path) # images dir - files = sorted(x for x in path.rglob('*.*') if x.suffix[1:].lower() in IMG_FORMATS) # image files only + files = sorted(x for x in path.rglob("*.*") if x.suffix[1:].lower() in IMG_FORMATS) # image files only n = len(files) # number of files random.seed(0) # for reproducibility indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split - txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files + txt = ["autosplit_train.txt", 
"autosplit_val.txt", "autosplit_test.txt"] # 3 txt files for x in txt: if (path.parent / x).exists(): (path.parent / x).unlink() # remove existing - print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only) + print(f"Autosplitting images from {path}" + ", using *.txt labeled images only" * annotated_only) for i, img in tqdm(zip(indices, files), total=n): if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label - with open(path.parent / txt[i], 'a') as f: - f.write(f'./{img.relative_to(path.parent).as_posix()}' + '\n') # add image to txt file + with open(path.parent / txt[i], "a") as f: + f.write(f"./{img.relative_to(path.parent).as_posix()}" + "\n") # add image to txt file def verify_image_label(args): # Verify one image-label pair im_file, lb_file, prefix = args - nm, nf, ne, nc, msg, segments = 0, 0, 0, 0, '', [] # number (missing, found, empty, corrupt), message, segments + nm, nf, ne, nc, msg, segments = 0, 0, 0, 0, "", [] # number (missing, found, empty, corrupt), message, segments try: # verify images im = Image.open(im_file) im.verify() # PIL verify shape = exif_size(im) # image size - assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels' - assert im.format.lower() in IMG_FORMATS, f'invalid image format {im.format}' - if im.format.lower() in ('jpg', 'jpeg'): - with open(im_file, 'rb') as f: + assert (shape[0] > 9) & (shape[1] > 9), f"image size {shape} <10 pixels" + assert im.format.lower() in IMG_FORMATS, f"invalid image format {im.format}" + if im.format.lower() in ("jpg", "jpeg"): + with open(im_file, "rb") as f: f.seek(-2, 2) - if f.read() != b'\xff\xd9': # corrupt JPEG - ImageOps.exif_transpose(Image.open(im_file)).save(im_file, 'JPEG', subsampling=0, quality=100) - msg = f'{prefix}WARNING ⚠️ {im_file}: corrupt JPEG restored and saved' + if f.read() != b"\xff\xd9": # corrupt JPEG + ImageOps.exif_transpose(Image.open(im_file)).save(im_file, "JPEG", subsampling=0, quality=100) + msg = f"{prefix}WARNING ⚠️ {im_file}: corrupt JPEG restored and saved" # verify labels if os.path.isfile(lb_file): @@ -1053,15 +1102,15 @@ def verify_image_label(args): lb = np.array(lb, dtype=np.float32) nl = len(lb) if nl: - assert lb.shape[1] == 5, f'labels require 5 columns, {lb.shape[1]} columns detected' - assert (lb >= 0).all(), f'negative label values {lb[lb < 0]}' - assert (lb[:, 1:] <= 1).all(), f'non-normalized or out of bounds coordinates {lb[:, 1:][lb[:, 1:] > 1]}' + assert lb.shape[1] == 5, f"labels require 5 columns, {lb.shape[1]} columns detected" + assert (lb >= 0).all(), f"negative label values {lb[lb < 0]}" + assert (lb[:, 1:] <= 1).all(), f"non-normalized or out of bounds coordinates {lb[:, 1:][lb[:, 1:] > 1]}" _, i = np.unique(lb, axis=0, return_index=True) if len(i) < nl: # duplicate row check lb = lb[i] # remove duplicates if segments: segments = [segments[x] for x in i] - msg = f'{prefix}WARNING ⚠️ {im_file}: {nl - len(i)} duplicate labels removed' + msg = f"{prefix}WARNING ⚠️ {im_file}: {nl - len(i)} duplicate labels removed" else: ne = 1 # label empty lb = np.zeros((0, 5), dtype=np.float32) @@ -1071,12 +1120,13 @@ def verify_image_label(args): return im_file, lb, shape, segments, nm, nf, ne, nc, msg except Exception as e: nc = 1 - msg = f'{prefix}WARNING ⚠️ {im_file}: ignoring corrupt image/label: {e}' + msg = f"{prefix}WARNING ⚠️ {im_file}: ignoring corrupt image/label: {e}" return [None, None, None, None, nm, nf, ne, nc, msg] -class HUBDatasetStats(): - """ Class for generating HUB dataset 
JSON and `-hub` dataset directory +class HUBDatasetStats: + """ + Class for generating HUB dataset JSON and `-hub` dataset directory. Arguments path: Path to data.yaml or data.zip (with data.yaml inside data.zip) @@ -1090,43 +1140,43 @@ class HUBDatasetStats(): stats.process_images() """ - def __init__(self, path='coco128.yaml', autodownload=False): + def __init__(self, path="coco128.yaml", autodownload=False): # Initialize class zipped, data_dir, yaml_path = self._unzip(Path(path)) try: - with open(check_yaml(yaml_path), errors='ignore') as f: + with open(check_yaml(yaml_path), errors="ignore") as f: data = yaml.safe_load(f) # data dict if zipped: - data['path'] = data_dir + data["path"] = data_dir except Exception as e: - raise Exception('error/HUB/dataset_stats/yaml_load') from e + raise Exception("error/HUB/dataset_stats/yaml_load") from e check_dataset(data, autodownload) # download dataset if missing - self.hub_dir = Path(data['path'] + '-hub') - self.im_dir = self.hub_dir / 'images' + self.hub_dir = Path(data["path"] + "-hub") + self.im_dir = self.hub_dir / "images" self.im_dir.mkdir(parents=True, exist_ok=True) # makes /images - self.stats = {'nc': data['nc'], 'names': list(data['names'].values())} # statistics dictionary + self.stats = {"nc": data["nc"], "names": list(data["names"].values())} # statistics dictionary self.data = data @staticmethod def _find_yaml(dir): # Return data.yaml file - files = list(dir.glob('*.yaml')) or list(dir.rglob('*.yaml')) # try root level first and then recursive - assert files, f'No *.yaml file found in {dir}' + files = list(dir.glob("*.yaml")) or list(dir.rglob("*.yaml")) # try root level first and then recursive + assert files, f"No *.yaml file found in {dir}" if len(files) > 1: files = [f for f in files if f.stem == dir.stem] # prefer *.yaml files that match dir name - assert files, f'Multiple *.yaml files found in {dir}, only 1 *.yaml file allowed' - assert len(files) == 1, f'Multiple *.yaml files found: {files}, only 1 *.yaml file allowed in {dir}' + assert files, f"Multiple *.yaml files found in {dir}, only 1 *.yaml file allowed" + assert len(files) == 1, f"Multiple *.yaml files found: {files}, only 1 *.yaml file allowed in {dir}" return files[0] def _unzip(self, path): # Unzip data.zip - if not str(path).endswith('.zip'): # path is data.yaml + if not str(path).endswith(".zip"): # path is data.yaml return False, None, path - assert Path(path).is_file(), f'Error unzipping {path}, file not found' + assert Path(path).is_file(), f"Error unzipping {path}, file not found" unzip_file(path, path=path.parent) - dir = path.with_suffix('') # dataset directory == zip name - assert dir.is_dir(), f'Error unzipping {path}, {dir} not found. path/to/abc.zip MUST unzip to path/to/abc/' + dir = path.with_suffix("") # dataset directory == zip name + assert dir.is_dir(), f"Error unzipping {path}, {dir} not found. 
path/to/abc.zip MUST unzip to path/to/abc/" return True, str(dir), self._find_yaml(dir) # zipped, data_dir, yaml_path def _hub_ops(self, f, max_dim=1920): @@ -1137,9 +1187,9 @@ def _hub_ops(self, f, max_dim=1920): r = max_dim / max(im.height, im.width) # ratio if r < 1.0: # image too large im = im.resize((int(im.width * r), int(im.height * r))) - im.save(f_new, 'JPEG', quality=50, optimize=True) # save + im.save(f_new, "JPEG", quality=50, optimize=True) # save except Exception as e: # use OpenCV - LOGGER.info(f'WARNING ⚠️ HUB ops PIL failure {f}: {e}') + LOGGER.info(f"WARNING ⚠️ HUB ops PIL failure {f}: {e}") im = cv2.imread(f) im_height, im_width = im.shape[:2] r = max_dim / max(im_height, im_width) # ratio @@ -1153,30 +1203,32 @@ def _round(labels): # Update labels to integer class and 6 decimal place floats return [[int(c), *(round(x, 4) for x in points)] for c, *points in labels] - for split in 'train', 'val', 'test': + for split in "train", "val", "test": if self.data.get(split) is None: self.stats[split] = None # i.e. no test set continue dataset = LoadImagesAndLabels(self.data[split]) # load dataset - x = np.array([ - np.bincount(label[:, 0].astype(int), minlength=self.data['nc']) - for label in tqdm(dataset.labels, total=dataset.n, desc='Statistics')]) # shape(128x80) + x = np.array( + [ + np.bincount(label[:, 0].astype(int), minlength=self.data["nc"]) + for label in tqdm(dataset.labels, total=dataset.n, desc="Statistics") + ] + ) # shape(128x80) self.stats[split] = { - 'instance_stats': { - 'total': int(x.sum()), - 'per_class': x.sum(0).tolist()}, - 'image_stats': { - 'total': dataset.n, - 'unlabelled': int(np.all(x == 0, 1).sum()), - 'per_class': (x > 0).sum(0).tolist()}, - 'labels': [{ - str(Path(k).name): _round(v.tolist())} for k, v in zip(dataset.im_files, dataset.labels)]} + "instance_stats": {"total": int(x.sum()), "per_class": x.sum(0).tolist()}, + "image_stats": { + "total": dataset.n, + "unlabelled": int(np.all(x == 0, 1).sum()), + "per_class": (x > 0).sum(0).tolist(), + }, + "labels": [{str(Path(k).name): _round(v.tolist())} for k, v in zip(dataset.im_files, dataset.labels)], + } # Save, print and return if save: - stats_path = self.hub_dir / 'stats.json' - print(f'Saving {stats_path.resolve()}...') - with open(stats_path, 'w') as f: + stats_path = self.hub_dir / "stats.json" + print(f"Saving {stats_path.resolve()}...") + with open(stats_path, "w") as f: json.dump(self.stats, f) # save stats.json if verbose: print(json.dumps(self.stats, indent=2, sort_keys=False)) @@ -1184,14 +1236,14 @@ def _round(labels): def process_images(self): # Compress images for Ultralytics HUB - for split in 'train', 'val', 'test': + for split in "train", "val", "test": if self.data.get(split) is None: continue dataset = LoadImagesAndLabels(self.data[split]) # load dataset - desc = f'{split} images' + desc = f"{split} images" for _ in tqdm(ThreadPool(NUM_THREADS).imap(self._hub_ops, dataset.im_files), total=dataset.n, desc=desc): pass - print(f'Done. All images saved to {self.im_dir}') + print(f"Done. All images saved to {self.im_dir}") return self.im_dir @@ -1199,6 +1251,7 @@ def process_images(self): class ClassificationDataset(torchvision.datasets.ImageFolder): """ YOLOv5 Classification Dataset. 
+ Arguments root: Dataset path transform: torchvision transforms, used by default @@ -1209,9 +1262,9 @@ def __init__(self, root, augment, imgsz, cache=False): super().__init__(root=root) self.torch_transforms = classify_transforms(imgsz) self.album_transforms = classify_albumentations(augment, imgsz) if augment else None - self.cache_ram = cache is True or cache == 'ram' - self.cache_disk = cache == 'disk' - self.samples = [list(x) + [Path(x[0]).with_suffix('.npy'), None] for x in self.samples] # file, index, npy, im + self.cache_ram = cache is True or cache == "ram" + self.cache_disk = cache == "disk" + self.samples = [list(x) + [Path(x[0]).with_suffix(".npy"), None] for x in self.samples] # file, index, npy, im def __getitem__(self, i): f, j, fn, im = self.samples[i] # filename, index, filename.with_suffix('.npy'), image @@ -1224,20 +1277,15 @@ def __getitem__(self, i): else: # read image im = cv2.imread(f) # BGR if self.album_transforms: - sample = self.album_transforms(image=cv2.cvtColor(im, cv2.COLOR_BGR2RGB))['image'] + sample = self.album_transforms(image=cv2.cvtColor(im, cv2.COLOR_BGR2RGB))["image"] else: sample = self.torch_transforms(im) return sample, j -def create_classification_dataloader(path, - imgsz=224, - batch_size=16, - augment=True, - cache=False, - rank=-1, - workers=8, - shuffle=True): +def create_classification_dataloader( + path, imgsz=224, batch_size=16, augment=True, cache=False, rank=-1, workers=8, shuffle=True +): # Returns Dataloader object to be used with YOLOv5 Classifier with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP dataset = ClassificationDataset(root=path, imgsz=imgsz, augment=augment, cache=cache) @@ -1247,11 +1295,13 @@ def create_classification_dataloader(path, sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) generator = torch.Generator() generator.manual_seed(6148914691236517205 + RANK) - return InfiniteDataLoader(dataset, - batch_size=batch_size, - shuffle=shuffle and sampler is None, - num_workers=nw, - sampler=sampler, - pin_memory=PIN_MEMORY, - worker_init_fn=seed_worker, - generator=generator) # or DataLoader(persistent_workers=True) + return InfiniteDataLoader( + dataset, + batch_size=batch_size, + shuffle=shuffle and sampler is None, + num_workers=nw, + sampler=sampler, + pin_memory=PIN_MEMORY, + worker_init_fn=seed_worker, + generator=generator, + ) # or DataLoader(persistent_workers=True) diff --git a/utils/downloads.py b/utils/downloads.py index 9298259d4ab1..ee700acb618b 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -1,7 +1,5 @@ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license -""" -Download utils -""" +"""Download utils.""" import logging import subprocess @@ -23,89 +21,90 @@ def is_url(url, check=True): return False -def gsutil_getsize(url=''): +def gsutil_getsize(url=""): # gs://bucket/file size https://cloud.google.com/storage/docs/gsutil/commands/du - output = subprocess.check_output(['gsutil', 'du', url], shell=True, encoding='utf-8') + output = subprocess.check_output(["gsutil", "du", url], shell=True, encoding="utf-8") if output: return int(output.split()[0]) return 0 -def url_getsize(url='https://ultralytics.com/images/bus.jpg'): +def url_getsize(url="https://ultralytics.com/images/bus.jpg"): # Return downloadable file size in bytes response = requests.head(url, allow_redirects=True) - return int(response.headers.get('content-length', -1)) + return int(response.headers.get("content-length", -1)) def curl_download(url, filename, *, silent: bool = 
False) -> bool: - """ - Download a file from a url to a filename using curl. - """ - silent_option = 'sS' if silent else '' # silent - proc = subprocess.run([ - 'curl', - '-#', - f'-{silent_option}L', - url, - '--output', - filename, - '--retry', - '9', - '-C', - '-', ]) + """Download a file from a url to a filename using curl.""" + silent_option = "sS" if silent else "" # silent + proc = subprocess.run( + [ + "curl", + "-#", + f"-{silent_option}L", + url, + "--output", + filename, + "--retry", + "9", + "-C", + "-", + ] + ) return proc.returncode == 0 -def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''): +def safe_download(file, url, url2=None, min_bytes=1e0, error_msg=""): # Attempts to download file from url or url2, checks and removes incomplete downloads < min_bytes from utils.general import LOGGER file = Path(file) assert_msg = f"Downloaded file '{file}' does not exist or size is < min_bytes={min_bytes}" try: # url1 - LOGGER.info(f'Downloading {url} to {file}...') + LOGGER.info(f"Downloading {url} to {file}...") torch.hub.download_url_to_file(url, str(file), progress=LOGGER.level <= logging.INFO) assert file.exists() and file.stat().st_size > min_bytes, assert_msg # check except Exception as e: # url2 if file.exists(): file.unlink() # remove partial downloads - LOGGER.info(f'ERROR: {e}\nRe-attempting {url2 or url} to {file}...') + LOGGER.info(f"ERROR: {e}\nRe-attempting {url2 or url} to {file}...") # curl download, retry and resume on fail curl_download(url2 or url, file) finally: if not file.exists() or file.stat().st_size < min_bytes: # check if file.exists(): file.unlink() # remove partial downloads - LOGGER.info(f'ERROR: {assert_msg}\n{error_msg}') - LOGGER.info('') + LOGGER.info(f"ERROR: {assert_msg}\n{error_msg}") + LOGGER.info("") -def attempt_download(file, repo='ultralytics/yolov5', release='v7.0'): +def attempt_download(file, repo="ultralytics/yolov5", release="v7.0"): # Attempt file download from GitHub release assets if not found locally. release = 'latest', 'v7.0', etc. from utils.general import LOGGER - def github_assets(repository, version='latest'): + def github_assets(repository, version="latest"): # Return GitHub repo tag (i.e. 'v7.0') and assets (i.e. ['yolov5s.pt', 'yolov5m.pt', ...]) - if version != 'latest': - version = f'tags/{version}' # i.e. tags/v7.0 - response = requests.get(f'https://api.github.com/repos/{repository}/releases/{version}').json() # github api - return response['tag_name'], [x['name'] for x in response['assets']] # tag, assets + if version != "latest": + version = f"tags/{version}" # i.e. tags/v7.0 + response = requests.get(f"https://api.github.com/repos/{repository}/releases/{version}").json() # github api + return response["tag_name"], [x["name"] for x in response["assets"]] # tag, assets - file = Path(str(file).strip().replace("'", '')) + file = Path(str(file).strip().replace("'", "")) if not file.exists(): # URL specified name = Path(urllib.parse.unquote(str(file))).name # decode '%2F' to '/' etc. - if str(file).startswith(('http:/', 'https:/')): # download - url = str(file).replace(':/', '://') # Pathlib turns :// -> :/ - file = name.split('?')[0] # parse authentication https://url.com/file.txt?auth... + if str(file).startswith(("http:/", "https:/")): # download + url = str(file).replace(":/", "://") # Pathlib turns :// -> :/ + file = name.split("?")[0] # parse authentication https://url.com/file.txt?auth... 
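[Editor's note] The curl_download/safe_download pair reformatted above implements a download-then-validate fallback: torch.hub first, then curl with `--retry 9 -C -` (retry transient failures, resume a partial file), then a final size check that deletes anything under min_bytes. A minimal self-contained sketch of that pattern, assuming only the curl flags shown in the hunk; the fetch name and the threshold default are illustrative, not part of the patch:

    import subprocess
    from pathlib import Path

    def fetch(url: str, dest: str, min_bytes: int = 100_000) -> bool:
        # '-L' follows redirects, '--retry 9' retries transient failures,
        # '-C -' resumes an interrupted download instead of restarting it
        proc = subprocess.run(["curl", "-L", url, "--output", dest, "--retry", "9", "-C", "-"])
        f = Path(dest)
        ok = proc.returncode == 0 and f.exists() and f.stat().st_size > min_bytes
        if not ok and f.exists():
            f.unlink()  # remove partial/corrupt files so the next attempt starts clean
        return ok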
if Path(file).is_file(): - LOGGER.info(f'Found {url} locally at {file}') # file already exists + LOGGER.info(f"Found {url} locally at {file}") # file already exists else: - safe_download(file=file, url=url, min_bytes=1E5) + safe_download(file=file, url=url, min_bytes=1e5) return file # GitHub assets - assets = [f'yolov5{size}{suffix}.pt' for size in 'nsmlx' for suffix in ('', '6', '-cls', '-seg')] # default + assets = [f"yolov5{size}{suffix}.pt" for size in "nsmlx" for suffix in ("", "6", "-cls", "-seg")] # default try: tag, assets = github_assets(repo, release) except Exception: @@ -113,15 +112,17 @@ def github_assets(repository, version='latest'): tag, assets = github_assets(repo) # latest release except Exception: try: - tag = subprocess.check_output('git tag', shell=True, stderr=subprocess.STDOUT).decode().split()[-1] + tag = subprocess.check_output("git tag", shell=True, stderr=subprocess.STDOUT).decode().split()[-1] except Exception: tag = release if name in assets: file.parent.mkdir(parents=True, exist_ok=True) # make parent dir (if required) - safe_download(file, - url=f'https://github.com/{repo}/releases/download/{tag}/{name}', - min_bytes=1E5, - error_msg=f'{file} missing, try downloading from https://github.com/{repo}/releases/{tag}') + safe_download( + file, + url=f"https://github.com/{repo}/releases/download/{tag}/{name}", + min_bytes=1e5, + error_msg=f"{file} missing, try downloading from https://github.com/{repo}/releases/{tag}", + ) return str(file) diff --git a/utils/flask_rest_api/example_request.py b/utils/flask_rest_api/example_request.py index 256ad1319c82..7b850051cca0 100644 --- a/utils/flask_rest_api/example_request.py +++ b/utils/flask_rest_api/example_request.py @@ -1,19 +1,17 @@ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license -""" -Perform test request -""" +"""Perform test request.""" import pprint import requests -DETECTION_URL = 'http://localhost:5000/v1/object-detection/yolov5s' -IMAGE = 'zidane.jpg' +DETECTION_URL = "http://localhost:5000/v1/object-detection/yolov5s" +IMAGE = "zidane.jpg" # Read image -with open(IMAGE, 'rb') as f: +with open(IMAGE, "rb") as f: image_data = f.read() -response = requests.post(DETECTION_URL, files={'image': image_data}).json() +response = requests.post(DETECTION_URL, files={"image": image_data}).json() pprint.pprint(response) diff --git a/utils/flask_rest_api/restapi.py b/utils/flask_rest_api/restapi.py index ae4756b276e4..e62c7ebd709f 100644 --- a/utils/flask_rest_api/restapi.py +++ b/utils/flask_rest_api/restapi.py @@ -1,7 +1,5 @@ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license -""" -Run a Flask REST API exposing one or more YOLOv5s models -""" +"""Run a Flask REST API exposing one or more YOLOv5s models.""" import argparse import io @@ -13,36 +11,36 @@ app = Flask(__name__) models = {} -DETECTION_URL = '/v1/object-detection/' +DETECTION_URL = "/v1/object-detection/" -@app.route(DETECTION_URL, methods=['POST']) +@app.route(DETECTION_URL, methods=["POST"]) def predict(model): - if request.method != 'POST': + if request.method != "POST": return - if request.files.get('image'): + if request.files.get("image"): # Method 1 # with request.files["image"] as f: # im = Image.open(io.BytesIO(f.read())) # Method 2 - im_file = request.files['image'] + im_file = request.files["image"] im_bytes = im_file.read() im = Image.open(io.BytesIO(im_bytes)) if model in models: results = models[model](im, size=640) # reduce size=320 for faster inference - return results.pandas().xyxy[0].to_json(orient='records') + return 
results.pandas().xyxy[0].to_json(orient="records") -if __name__ == '__main__': - parser = argparse.ArgumentParser(description='Flask API exposing YOLOv5 model') - parser.add_argument('--port', default=5000, type=int, help='port number') - parser.add_argument('--model', nargs='+', default=['yolov5s'], help='model(s) to run, i.e. --model yolov5n yolov5s') +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Flask API exposing YOLOv5 model") + parser.add_argument("--port", default=5000, type=int, help="port number") + parser.add_argument("--model", nargs="+", default=["yolov5s"], help="model(s) to run, i.e. --model yolov5n yolov5s") opt = parser.parse_args() for m in opt.model: - models[m] = torch.hub.load('ultralytics/yolov5', m, force_reload=True, skip_validation=True) + models[m] = torch.hub.load("ultralytics/yolov5", m, force_reload=True, skip_validation=True) - app.run(host='0.0.0.0', port=opt.port) # debug=True causes Restarting with stat + app.run(host="0.0.0.0", port=opt.port) # debug=True causes Restarting with stat diff --git a/utils/general.py b/utils/general.py index 73925ce5fb95..47ab656e5a3f 100644 --- a/utils/general.py +++ b/utils/general.py @@ -1,7 +1,5 @@ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license -""" -General utils -""" +"""General utils.""" import contextlib import glob @@ -40,9 +38,9 @@ try: import ultralytics - assert hasattr(ultralytics, '__version__') # verify package is not directory + assert hasattr(ultralytics, "__version__") # verify package is not directory except (ImportError, AssertionError): - os.system('pip install -U ultralytics') + os.system("pip install -U ultralytics") import ultralytics from ultralytics.utils.checks import check_requirements @@ -53,67 +51,67 @@ FILE = Path(__file__).resolve() ROOT = FILE.parents[1] # YOLOv5 root directory -RANK = int(os.getenv('RANK', -1)) +RANK = int(os.getenv("RANK", -1)) # Settings NUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads -DATASETS_DIR = Path(os.getenv('YOLOv5_DATASETS_DIR', ROOT.parent / 'datasets')) # global datasets directory -AUTOINSTALL = str(os.getenv('YOLOv5_AUTOINSTALL', True)).lower() == 'true' # global auto-install mode -VERBOSE = str(os.getenv('YOLOv5_VERBOSE', True)).lower() == 'true' # global verbose mode -TQDM_BAR_FORMAT = '{l_bar}{bar:10}{r_bar}' # tqdm bar format -FONT = 'Arial.ttf' # https://ultralytics.com/assets/Arial.ttf - -torch.set_printoptions(linewidth=320, precision=5, profile='long') -np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5 +DATASETS_DIR = Path(os.getenv("YOLOv5_DATASETS_DIR", ROOT.parent / "datasets")) # global datasets directory +AUTOINSTALL = str(os.getenv("YOLOv5_AUTOINSTALL", True)).lower() == "true" # global auto-install mode +VERBOSE = str(os.getenv("YOLOv5_VERBOSE", True)).lower() == "true" # global verbose mode +TQDM_BAR_FORMAT = "{l_bar}{bar:10}{r_bar}" # tqdm bar format +FONT = "Arial.ttf" # https://ultralytics.com/assets/Arial.ttf + +torch.set_printoptions(linewidth=320, precision=5, profile="long") +np.set_printoptions(linewidth=320, formatter={"float_kind": "{:11.5g}".format}) # format short g, %precision=5 pd.options.display.max_columns = 10 cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader) -os.environ['NUMEXPR_MAX_THREADS'] = str(NUM_THREADS) # NumExpr max threads -os.environ['OMP_NUM_THREADS'] = '1' if platform.system() == 'darwin' else str(NUM_THREADS) # OpenMP (PyTorch and SciPy) 
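[Editor's note] The settings block being reformatted here pins thread counts through environment variables, which only take effect if set before the libraries that read them are imported. A minimal sketch of the same setup, assuming the NUM_THREADS formula from the hunk; the cpu_count guard is added for portability, and note that platform.system() returns the capitalized 'Darwin' on macOS, so the lowercase comparison in the hunk never matches:

    import os
    import platform

    cpus = os.cpu_count() or 1  # cpu_count() may return None on some platforms
    NUM_THREADS = min(8, max(1, cpus - 1))  # leave one core free, cap at 8

    os.environ["NUMEXPR_MAX_THREADS"] = str(NUM_THREADS)  # NumExpr pool size
    os.environ["OMP_NUM_THREADS"] = "1" if platform.system() == "Darwin" else str(NUM_THREADS)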
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # suppress verbose TF compiler warnings in Colab +os.environ["NUMEXPR_MAX_THREADS"] = str(NUM_THREADS) # NumExpr max threads +os.environ["OMP_NUM_THREADS"] = "1" if platform.system() == "darwin" else str(NUM_THREADS) # OpenMP (PyTorch and SciPy) +os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2" # suppress verbose TF compiler warnings in Colab -def is_ascii(s=''): +def is_ascii(s=""): # Is string composed of all ASCII (no UTF) characters? (note str().isascii() introduced in python 3.7) s = str(s) # convert list, tuple, None, etc. to str - return len(s.encode().decode('ascii', 'ignore')) == len(s) + return len(s.encode().decode("ascii", "ignore")) == len(s) -def is_chinese(s='人工智能'): +def is_chinese(s="人工智能"): # Is string composed of any Chinese characters? - return bool(re.search('[\u4e00-\u9fff]', str(s))) + return bool(re.search("[\u4e00-\u9fff]", str(s))) def is_colab(): # Is environment a Google Colab instance? - return 'google.colab' in sys.modules + return "google.colab" in sys.modules def is_jupyter(): """ - Check if the current script is running inside a Jupyter Notebook. - Verified on Colab, Jupyterlab, Kaggle, Paperspace. + Check if the current script is running inside a Jupyter Notebook. Verified on Colab, Jupyterlab, Kaggle, Paperspace. Returns: bool: True if running inside a Jupyter Notebook, False otherwise. """ with contextlib.suppress(Exception): from IPython import get_ipython + return get_ipython() is not None return False def is_kaggle(): # Is environment a Kaggle Notebook? - return os.environ.get('PWD') == '/kaggle/working' and os.environ.get('KAGGLE_URL_BASE') == 'https://www.kaggle.com' + return os.environ.get("PWD") == "/kaggle/working" and os.environ.get("KAGGLE_URL_BASE") == "https://www.kaggle.com" def is_docker() -> bool: """Check if the process runs inside a docker container.""" - if Path('/.dockerenv').exists(): + if Path("/.dockerenv").exists(): return True try: # check if docker is in control groups - with open('/proc/self/cgroup') as file: - return any('docker' in line for line in file) + with open("/proc/self/cgroup") as file: + return any("docker" in line for line in file) except OSError: return False @@ -122,9 +120,9 @@ def is_writeable(dir, test=False): # Return True if directory has write permissions, test opening a file with write permissions if test=True if not test: return os.access(dir, os.W_OK) # possible issues on Windows - file = Path(dir) / 'tmp.txt' + file = Path(dir) / "tmp.txt" try: - with open(file, 'w'): # open file with write permissions + with open(file, "w"): # open file with write permissions pass file.unlink() # remove file return True @@ -132,47 +130,52 @@ def is_writeable(dir, test=False): return False -LOGGING_NAME = 'yolov5' +LOGGING_NAME = "yolov5" def set_logging(name=LOGGING_NAME, verbose=True): # sets up logging for the given name - rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings + rank = int(os.getenv("RANK", -1)) # rank in world for Multi-GPU trainings level = logging.INFO if verbose and rank in {-1, 0} else logging.ERROR - logging.config.dictConfig({ - 'version': 1, - 'disable_existing_loggers': False, - 'formatters': { - name: { - 'format': '%(message)s'}}, - 'handlers': { - name: { - 'class': 'logging.StreamHandler', - 'formatter': name, - 'level': level, }}, - 'loggers': { - name: { - 'level': level, - 'handlers': [name], - 'propagate': False, }}}) + logging.config.dictConfig( + { + "version": 1, + "disable_existing_loggers": False, + "formatters": {name: 
{"format": "%(message)s"}}, + "handlers": { + name: { + "class": "logging.StreamHandler", + "formatter": name, + "level": level, + } + }, + "loggers": { + name: { + "level": level, + "handlers": [name], + "propagate": False, + } + }, + } + ) set_logging(LOGGING_NAME) # run before defining LOGGER LOGGER = logging.getLogger(LOGGING_NAME) # define globally (used in train.py, val.py, detect.py, etc.) -if platform.system() == 'Windows': +if platform.system() == "Windows": for fn in LOGGER.info, LOGGER.warning: setattr(LOGGER, fn.__name__, lambda x: fn(emojis(x))) # emoji safe logging -def user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'): +def user_config_dir(dir="Ultralytics", env_var="YOLOV5_CONFIG_DIR"): # Return path of user configuration directory. Prefer environment variable if exists. Make dir if required. env = os.getenv(env_var) if env: path = Path(env) # use environment variable else: - cfg = {'Windows': 'AppData/Roaming', 'Linux': '.config', 'Darwin': 'Library/Application Support'} # 3 OS dirs - path = Path.home() / cfg.get(platform.system(), '') # OS-specific config dir - path = (path if is_writeable(path) else Path('/tmp')) / dir # GCP and AWS lambda fix, only /tmp is writeable + cfg = {"Windows": "AppData/Roaming", "Linux": ".config", "Darwin": "Library/Application Support"} # 3 OS dirs + path = Path.home() / cfg.get(platform.system(), "") # OS-specific config dir + path = (path if is_writeable(path) else Path("/tmp")) / dir # GCP and AWS lambda fix, only /tmp is writeable path.mkdir(exist_ok=True) # make if required return path @@ -185,7 +188,7 @@ class Profile(contextlib.ContextDecorator): def __init__(self, t=0.0, device: torch.device = None): self.t = t self.device = device - self.cuda = True if (device and str(device)[:4] == 'cuda') else False + self.cuda = True if (device and str(device)[:4] == "cuda") else False def __enter__(self): self.start = self.time() @@ -203,7 +206,7 @@ def time(self): class Timeout(contextlib.ContextDecorator): # YOLOv5 Timeout class. 
Usage: @Timeout(seconds) decorator or 'with Timeout(seconds):' context manager - def __init__(self, seconds, *, timeout_msg='', suppress_timeout_errors=True): + def __init__(self, seconds, *, timeout_msg="", suppress_timeout_errors=True): self.seconds = int(seconds) self.timeout_message = timeout_msg self.suppress = bool(suppress_timeout_errors) @@ -212,12 +215,12 @@ def _timeout_handler(self, signum, frame): raise TimeoutError(self.timeout_message) def __enter__(self): - if platform.system() != 'Windows': # not supported on Windows + if platform.system() != "Windows": # not supported on Windows signal.signal(signal.SIGALRM, self._timeout_handler) # Set handler for SIGALRM signal.alarm(self.seconds) # start countdown for SIGALRM to be raised def __exit__(self, exc_type, exc_val, exc_tb): - if platform.system() != 'Windows': + if platform.system() != "Windows": signal.alarm(0) # Cancel SIGALRM if it's scheduled if self.suppress and exc_type is TimeoutError: # Suppress TimeoutError return True @@ -238,7 +241,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): def methods(instance): # Get class/instance methods - return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith('__')] + return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith("__")] def print_args(args: Optional[dict] = None, show_file=True, show_func=False): @@ -249,11 +252,11 @@ def print_args(args: Optional[dict] = None, show_file=True, show_func=False): args, _, _, frm = inspect.getargvalues(x) args = {k: v for k, v in frm.items() if k in args} try: - file = Path(file).resolve().relative_to(ROOT).with_suffix('') + file = Path(file).resolve().relative_to(ROOT).with_suffix("") except ValueError: file = Path(file).stem - s = (f'{file}: ' if show_file else '') + (f'{func}: ' if show_func else '') - LOGGER.info(colorstr(s) + ', '.join(f'{k}={v}' for k, v in args.items())) + s = (f"{file}: " if show_file else "") + (f"{func}: " if show_func else "") + LOGGER.info(colorstr(s) + ", ".join(f"{k}={v}" for k, v in args.items())) def init_seeds(seed=0, deterministic=False): @@ -264,11 +267,11 @@ def init_seeds(seed=0, deterministic=False): torch.cuda.manual_seed(seed) torch.cuda.manual_seed_all(seed) # for Multi-GPU, exception safe # torch.backends.cudnn.benchmark = True # AutoBatch problem https://github.com/ultralytics/yolov5/issues/9287 - if deterministic and check_version(torch.__version__, '1.12.0'): # https://github.com/ultralytics/yolov5/pull/8213 + if deterministic and check_version(torch.__version__, "1.12.0"): # https://github.com/ultralytics/yolov5/pull/8213 torch.use_deterministic_algorithms(True) torch.backends.cudnn.deterministic = True - os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8' - os.environ['PYTHONHASHSEED'] = str(seed) + os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8" + os.environ["PYTHONHASHSEED"] = str(seed) def intersect_dicts(da, db, exclude=()): @@ -282,22 +285,22 @@ def get_default_args(func): return {k: v.default for k, v in signature.parameters.items() if v.default is not inspect.Parameter.empty} -def get_latest_run(search_dir='.'): +def get_latest_run(search_dir="."): # Return path to most recent 'last.pt' in /runs (i.e. 
to --resume from) - last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True) - return max(last_list, key=os.path.getctime) if last_list else '' + last_list = glob.glob(f"{search_dir}/**/last*.pt", recursive=True) + return max(last_list, key=os.path.getctime) if last_list else "" def file_age(path=__file__): # Return days since last file update - dt = (datetime.now() - datetime.fromtimestamp(Path(path).stat().st_mtime)) # delta + dt = datetime.now() - datetime.fromtimestamp(Path(path).stat().st_mtime) # delta return dt.days # + dt.seconds / 86400 # fractional days def file_date(path=__file__): # Return human-readable file modification date, i.e. '2021-3-26' t = datetime.fromtimestamp(Path(path).stat().st_mtime) - return f'{t.year}-{t.month}-{t.day}' + return f"{t.year}-{t.month}-{t.day}" def file_size(path): @@ -307,7 +310,7 @@ def file_size(path): if path.is_file(): return path.stat().st_size / mb elif path.is_dir(): - return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / mb + return sum(f.stat().st_size for f in path.glob("**/*") if f.is_file()) / mb else: return 0.0 @@ -319,7 +322,7 @@ def check_online(): def run_once(): # Check once try: - socket.create_connection(('1.1.1.1', 443), 5) # check host accessibility + socket.create_connection(("1.1.1.1", 443), 5) # check host accessibility return True except OSError: return False @@ -330,68 +333,69 @@ def run_once(): def git_describe(path=ROOT): # path must be a directory # Return human-readable git description, i.e. v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe try: - assert (Path(path) / '.git').is_dir() - return check_output(f'git -C {path} describe --tags --long --always', shell=True).decode()[:-1] + assert (Path(path) / ".git").is_dir() + return check_output(f"git -C {path} describe --tags --long --always", shell=True).decode()[:-1] except Exception: - return '' + return "" @TryExcept() @WorkingDirectory(ROOT) -def check_git_status(repo='ultralytics/yolov5', branch='master'): +def check_git_status(repo="ultralytics/yolov5", branch="master"): # YOLOv5 status check, recommend 'git pull' if code is out of date - url = f'https://github.com/{repo}' - msg = f', for updates see {url}' - s = colorstr('github: ') # string - assert Path('.git').exists(), s + 'skipping check (not a git repository)' + msg - assert check_online(), s + 'skipping check (offline)' + msg + url = f"https://github.com/{repo}" + msg = f", for updates see {url}" + s = colorstr("github: ") # string + assert Path(".git").exists(), s + "skipping check (not a git repository)" + msg + assert check_online(), s + "skipping check (offline)" + msg - splits = re.split(pattern=r'\s', string=check_output('git remote -v', shell=True).decode()) + splits = re.split(pattern=r"\s", string=check_output("git remote -v", shell=True).decode()) matches = [repo in s for s in splits] if any(matches): remote = splits[matches.index(True) - 1] else: - remote = 'ultralytics' - check_output(f'git remote add {remote} {url}', shell=True) - check_output(f'git fetch {remote}', shell=True, timeout=5) # git fetch - local_branch = check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out - n = int(check_output(f'git rev-list {local_branch}..{remote}/{branch} --count', shell=True)) # commits behind + remote = "ultralytics" + check_output(f"git remote add {remote} {url}", shell=True) + check_output(f"git fetch {remote}", shell=True, timeout=5) # git fetch + local_branch = check_output("git rev-parse --abbrev-ref HEAD", 
shell=True).decode().strip() # checked out + n = int(check_output(f"git rev-list {local_branch}..{remote}/{branch} --count", shell=True)) # commits behind if n > 0: - pull = 'git pull' if remote == 'origin' else f'git pull {remote} {branch}' + pull = "git pull" if remote == "origin" else f"git pull {remote} {branch}" s += f"⚠️ YOLOv5 is out of date by {n} commit{'s' * (n > 1)}. Use '{pull}' or 'git clone {url}' to update." else: - s += f'up to date with {url} ✅' + s += f"up to date with {url} ✅" LOGGER.info(s) @WorkingDirectory(ROOT) -def check_git_info(path='.'): +def check_git_info(path="."): # YOLOv5 git info check, return {remote, branch, commit} - check_requirements('gitpython') + check_requirements("gitpython") import git + try: repo = git.Repo(path) - remote = repo.remotes.origin.url.replace('.git', '') # i.e. 'https://github.com/ultralytics/yolov5' + remote = repo.remotes.origin.url.replace(".git", "") # i.e. 'https://github.com/ultralytics/yolov5' commit = repo.head.commit.hexsha # i.e. '3134699c73af83aac2a481435550b968d5792c0d' try: branch = repo.active_branch.name # i.e. 'main' except TypeError: # not on any branch branch = None # i.e. 'detached HEAD' state - return {'remote': remote, 'branch': branch, 'commit': commit} + return {"remote": remote, "branch": branch, "commit": commit} except git.exc.InvalidGitRepositoryError: # path is not a git dir - return {'remote': None, 'branch': None, 'commit': None} + return {"remote": None, "branch": None, "commit": None} -def check_python(minimum='3.8.0'): +def check_python(minimum="3.8.0"): # Check current python version vs. required python version - check_version(platform.python_version(), minimum, name='Python ', hard=True) + check_version(platform.python_version(), minimum, name="Python ", hard=True) -def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False, verbose=False): +def check_version(current="0.0.0", minimum="0.0.0", name="version ", pinned=False, hard=False, verbose=False): # Check version vs. 
required version current, minimum = (pkg.parse_version(x) for x in (current, minimum)) result = (current == minimum) if pinned else (current >= minimum) # bool - s = f'WARNING ⚠️ {name}{minimum} is required by YOLOv5, but {name}{current} is currently installed' # string + s = f"WARNING ⚠️ {name}{minimum} is required by YOLOv5, but {name}{current} is currently installed" # string if hard: assert result, emojis(s) # assert min requirements met if verbose and not result: @@ -407,7 +411,7 @@ def check_img_size(imgsz, s=32, floor=0): imgsz = list(imgsz) # convert to list if tuple new_size = [max(make_divisible(x, int(s)), floor) for x in imgsz] if new_size != imgsz: - LOGGER.warning(f'WARNING ⚠️ --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}') + LOGGER.warning(f"WARNING ⚠️ --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}") return new_size @@ -416,18 +420,18 @@ def check_imshow(warn=False): try: assert not is_jupyter() assert not is_docker() - cv2.imshow('test', np.zeros((1, 1, 3))) + cv2.imshow("test", np.zeros((1, 1, 3))) cv2.waitKey(1) cv2.destroyAllWindows() cv2.waitKey(1) return True except Exception as e: if warn: - LOGGER.warning(f'WARNING ⚠️ Environment does not support cv2.imshow() or PIL Image.show()\n{e}') + LOGGER.warning(f"WARNING ⚠️ Environment does not support cv2.imshow() or PIL Image.show()\n{e}") return False -def check_suffix(file='yolov5s.pt', suffix=('.pt', ), msg=''): +def check_suffix(file="yolov5s.pt", suffix=(".pt",), msg=""): # Check file(s) for acceptable suffix if file and suffix: if isinstance(suffix, str): @@ -435,38 +439,40 @@ def check_suffix(file='yolov5s.pt', suffix=('.pt', ), msg=''): for f in file if isinstance(file, (list, tuple)) else [file]: s = Path(f).suffix.lower() # file suffix if len(s): - assert s in suffix, f'{msg}{f} acceptable suffix is {suffix}' + assert s in suffix, f"{msg}{f} acceptable suffix is {suffix}" -def check_yaml(file, suffix=('.yaml', '.yml')): +def check_yaml(file, suffix=(".yaml", ".yml")): # Search/download YAML file (if necessary) and return path, checking suffix return check_file(file, suffix) -def check_file(file, suffix=''): +def check_file(file, suffix=""): # Search/download file (if necessary) and return path check_suffix(file, suffix) # optional file = str(file) # convert to str() if os.path.isfile(file) or not file: # exists return file - elif file.startswith(('http:/', 'https:/')): # download + elif file.startswith(("http:/", "https:/")): # download url = file # warning: Pathlib turns :// -> :/ - file = Path(urllib.parse.unquote(file).split('?')[0]).name # '%2F' to '/', split https://url.com/file.txt?auth + file = Path(urllib.parse.unquote(file).split("?")[0]).name # '%2F' to '/', split https://url.com/file.txt?auth if os.path.isfile(file): - LOGGER.info(f'Found {url} locally at {file}') # file already exists + LOGGER.info(f"Found {url} locally at {file}") # file already exists else: - LOGGER.info(f'Downloading {url} to {file}...') + LOGGER.info(f"Downloading {url} to {file}...") torch.hub.download_url_to_file(url, file) - assert Path(file).exists() and Path(file).stat().st_size > 0, f'File download failed: {url}' # check + assert Path(file).exists() and Path(file).stat().st_size > 0, f"File download failed: {url}" # check return file - elif file.startswith('clearml://'): # ClearML Dataset ID - assert 'clearml' in sys.modules, "ClearML is not installed, so cannot use ClearML dataset. Try running 'pip install clearml'." 
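[Editor's note] check_version, reformatted at the top of this hunk, reduces to a parse-and-compare on packaging version objects; `pkg` here is presumably `pkg_resources` imported under that alias, as elsewhere in YOLOv5. A minimal sketch of the comparison it performs, with an illustrative function name and example values:

    from pkg_resources import parse_version

    def version_ok(current: str, minimum: str, pinned: bool = False) -> bool:
        c, m = parse_version(current), parse_version(minimum)
        return c == m if pinned else c >= m  # pinned requires an exact match

    assert version_ok("2.0.1", "1.12.0")                # meets the minimum
    assert version_ok("1.12.0", "1.12.0", pinned=True)  # exact match required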
+ elif file.startswith("clearml://"): # ClearML Dataset ID + assert ( + "clearml" in sys.modules + ), "ClearML is not installed, so cannot use ClearML dataset. Try running 'pip install clearml'." return file else: # search files = [] - for d in 'data', 'models', 'utils': # search directories - files.extend(glob.glob(str(ROOT / d / '**' / file), recursive=True)) # find file - assert len(files), f'File not found: {file}' # assert file was found + for d in "data", "models", "utils": # search directories + files.extend(glob.glob(str(ROOT / d / "**" / file), recursive=True)) # find file + assert len(files), f"File not found: {file}" # assert file was found assert len(files) == 1, f"Multiple files match '{file}', specify exact path: {files}" # assert unique return files[0] # return file @@ -476,8 +482,8 @@ def check_font(font=FONT, progress=False): font = Path(font) file = CONFIG_DIR / font.name if not font.exists() and not file.exists(): - url = f'https://ultralytics.com/assets/{font.name}' - LOGGER.info(f'Downloading {url} to {file}...') + url = f"https://ultralytics.com/assets/{font.name}" + LOGGER.info(f"Downloading {url} to {file}...") torch.hub.download_url_to_file(url, str(file), progress=progress) @@ -485,10 +491,10 @@ def check_dataset(data, autodownload=True): # Download, check and/or unzip dataset if not found locally # Download (optional) - extract_dir = '' + extract_dir = "" if isinstance(data, (str, Path)) and (is_zipfile(data) or is_tarfile(data)): - download(data, dir=f'{DATASETS_DIR}/{Path(data).stem}', unzip=True, delete=False, curl=False, threads=1) - data = next((DATASETS_DIR / Path(data).stem).rglob('*.yaml')) + download(data, dir=f"{DATASETS_DIR}/{Path(data).stem}", unzip=True, delete=False, curl=False, threads=1) + data = next((DATASETS_DIR / Path(data).stem).rglob("*.yaml")) extract_dir, autodownload = data.parent, False # Read yaml (optional) @@ -496,54 +502,54 @@ def check_dataset(data, autodownload=True): data = yaml_load(data) # dictionary # Checks - for k in 'train', 'val', 'names': + for k in "train", "val", "names": assert k in data, emojis(f"data.yaml '{k}:' field missing ❌") - if isinstance(data['names'], (list, tuple)): # old array format - data['names'] = dict(enumerate(data['names'])) # convert to dict - assert all(isinstance(k, int) for k in data['names'].keys()), 'data.yaml names keys must be integers, i.e. 2: car' - data['nc'] = len(data['names']) + if isinstance(data["names"], (list, tuple)): # old array format + data["names"] = dict(enumerate(data["names"])) # convert to dict + assert all(isinstance(k, int) for k in data["names"].keys()), "data.yaml names keys must be integers, i.e. 2: car" + data["nc"] = len(data["names"]) # Resolve paths - path = Path(extract_dir or data.get('path') or '') # optional 'path' default to '.' + path = Path(extract_dir or data.get("path") or "") # optional 'path' default to '.' 
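[Editor's note] The check_dataset block around this point resolves every split entry in data.yaml to an absolute path by prepending the dataset root, which itself resolves against the repo ROOT when relative. A minimal sketch of that rule under an assumed ROOT and a toy data dict, both illustrative:

    from pathlib import Path

    ROOT = Path("/workspace/yolov5")  # hypothetical repo root
    data = {"path": "../datasets/coco128", "train": "images/train2017", "val": ["images/val2017"]}

    root = Path(data["path"])
    if not root.is_absolute():
        root = (ROOT / root).resolve()  # relative 'path' resolves against the repo root

    for k in ("train", "val", "test"):
        if data.get(k):  # prepend the root to a str entry, or to each entry of a list
            v = data[k]
            data[k] = str((root / v).resolve()) if isinstance(v, str) else [str((root / x).resolve()) for x in v]

    print(data["train"])  # /workspace/datasets/coco128/images/train2017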
if not path.is_absolute(): path = (ROOT / path).resolve() - data['path'] = path # download scripts - for k in 'train', 'val', 'test': + data["path"] = path # download scripts + for k in "train", "val", "test": if data.get(k): # prepend path if isinstance(data[k], str): x = (path / data[k]).resolve() - if not x.exists() and data[k].startswith('../'): + if not x.exists() and data[k].startswith("../"): x = (path / data[k][3:]).resolve() data[k] = str(x) else: data[k] = [str((path / x).resolve()) for x in data[k]] # Parse yaml - train, val, test, s = (data.get(x) for x in ('train', 'val', 'test', 'download')) + train, val, test, s = (data.get(x) for x in ("train", "val", "test", "download")) if val: val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path if not all(x.exists() for x in val): - LOGGER.info('\nDataset not found ⚠️, missing paths %s' % [str(x) for x in val if not x.exists()]) + LOGGER.info("\nDataset not found ⚠️, missing paths %s" % [str(x) for x in val if not x.exists()]) if not s or not autodownload: - raise Exception('Dataset not found ❌') + raise Exception("Dataset not found ❌") t = time.time() - if s.startswith('http') and s.endswith('.zip'): # URL + if s.startswith("http") and s.endswith(".zip"): # URL f = Path(s).name # filename - LOGGER.info(f'Downloading {s} to {f}...') + LOGGER.info(f"Downloading {s} to {f}...") torch.hub.download_url_to_file(s, f) Path(DATASETS_DIR).mkdir(parents=True, exist_ok=True) # create root unzip_file(f, path=DATASETS_DIR) # unzip Path(f).unlink() # remove zip r = None # success - elif s.startswith('bash '): # bash script - LOGGER.info(f'Running {s} ...') + elif s.startswith("bash "): # bash script + LOGGER.info(f"Running {s} ...") r = subprocess.run(s, shell=True) else: # python script - r = exec(s, {'yaml': data}) # return None - dt = f'({round(time.time() - t, 1)}s)' - s = f"success ✅ {dt}, saved to {colorstr('bold', DATASETS_DIR)}" if r in (0, None) else f'failure {dt} ❌' - LOGGER.info(f'Dataset download {s}') - check_font('Arial.ttf' if is_ascii(data['names']) else 'Arial.Unicode.ttf', progress=True) # download fonts + r = exec(s, {"yaml": data}) # return None + dt = f"({round(time.time() - t, 1)}s)" + s = f"success ✅ {dt}, saved to {colorstr('bold', DATASETS_DIR)}" if r in (0, None) else f"failure {dt} ❌" + LOGGER.info(f"Dataset download {s}") + check_font("Arial.ttf" if is_ascii(data["names"]) else "Arial.Unicode.ttf", progress=True) # download fonts return data # dictionary @@ -559,35 +565,35 @@ def amp_allclose(model, im): b = m(im).xywhn[0] # AMP inference return a.shape == b.shape and torch.allclose(a, b, atol=0.1) # close to 10% absolute tolerance - prefix = colorstr('AMP: ') + prefix = colorstr("AMP: ") device = next(model.parameters()).device # get model device - if device.type in ('cpu', 'mps'): + if device.type in ("cpu", "mps"): return False # AMP only used on CUDA devices - f = ROOT / 'data' / 'images' / 'bus.jpg' # image to check - im = f if f.exists() else 'https://ultralytics.com/images/bus.jpg' if check_online() else np.ones((640, 640, 3)) + f = ROOT / "data" / "images" / "bus.jpg" # image to check + im = f if f.exists() else "https://ultralytics.com/images/bus.jpg" if check_online() else np.ones((640, 640, 3)) try: - assert amp_allclose(deepcopy(model), im) or amp_allclose(DetectMultiBackend('yolov5n.pt', device), im) - LOGGER.info(f'{prefix}checks passed ✅') + assert amp_allclose(deepcopy(model), im) or amp_allclose(DetectMultiBackend("yolov5n.pt", device), im) + 
LOGGER.info(f"{prefix}checks passed ✅") return True except Exception: - help_url = 'https://github.com/ultralytics/yolov5/issues/7908' - LOGGER.warning(f'{prefix}checks failed ❌, disabling Automatic Mixed Precision. See {help_url}') + help_url = "https://github.com/ultralytics/yolov5/issues/7908" + LOGGER.warning(f"{prefix}checks failed ❌, disabling Automatic Mixed Precision. See {help_url}") return False -def yaml_load(file='data.yaml'): +def yaml_load(file="data.yaml"): # Single-line safe yaml loading - with open(file, errors='ignore') as f: + with open(file, errors="ignore") as f: return yaml.safe_load(f) -def yaml_save(file='data.yaml', data={}): +def yaml_save(file="data.yaml", data={}): # Single-line safe yaml saving - with open(file, 'w') as f: + with open(file, "w") as f: yaml.safe_dump({k: str(v) if isinstance(v, Path) else v for k, v in data.items()}, f, sort_keys=False) -def unzip_file(file, path=None, exclude=('.DS_Store', '__MACOSX')): +def unzip_file(file, path=None, exclude=(".DS_Store", "__MACOSX")): # Unzip a *.zip file to path/, excluding files containing strings in exclude list if path is None: path = Path(file).parent # default path @@ -599,11 +605,11 @@ def unzip_file(file, path=None, exclude=('.DS_Store', '__MACOSX')): def url2file(url): # Convert URL to filename, i.e. https://url.com/file.txt?auth -> file.txt - url = str(Path(url)).replace(':/', '://') # Pathlib turns :// -> :/ - return Path(urllib.parse.unquote(url)).name.split('?')[0] # '%2F' to '/', split https://url.com/file.txt?auth + url = str(Path(url)).replace(":/", "://") # Pathlib turns :// -> :/ + return Path(urllib.parse.unquote(url)).name.split("?")[0] # '%2F' to '/', split https://url.com/file.txt?auth -def download(url, dir='.', unzip=True, delete=True, curl=False, threads=1, retry=3): +def download(url, dir=".", unzip=True, delete=True, curl=False, threads=1, retry=3): # Multithreaded file download and unzip function, used in data.yaml for autodownload def download_one(url, dir): # Download 1 file @@ -612,7 +618,7 @@ def download_one(url, dir): f = Path(url) # filename else: # does not exist f = dir / Path(url).name - LOGGER.info(f'Downloading {url} to {f}...') + LOGGER.info(f"Downloading {url} to {f}...") for i in range(retry + 1): if curl: success = curl_download(url, f, silent=(threads > 1)) @@ -622,18 +628,18 @@ def download_one(url, dir): if success: break elif i < retry: - LOGGER.warning(f'⚠️ Download failure, retrying {i + 1}/{retry} {url}...') + LOGGER.warning(f"⚠️ Download failure, retrying {i + 1}/{retry} {url}...") else: - LOGGER.warning(f'❌ Failed to download {url}...') + LOGGER.warning(f"❌ Failed to download {url}...") - if unzip and success and (f.suffix == '.gz' or is_zipfile(f) or is_tarfile(f)): - LOGGER.info(f'Unzipping {f}...') + if unzip and success and (f.suffix == ".gz" or is_zipfile(f) or is_tarfile(f)): + LOGGER.info(f"Unzipping {f}...") if is_zipfile(f): unzip_file(f, dir) # unzip elif is_tarfile(f): - subprocess.run(['tar', 'xf', f, '--directory', f.parent], check=True) # unzip - elif f.suffix == '.gz': - subprocess.run(['tar', 'xfz', f, '--directory', f.parent], check=True) # unzip + subprocess.run(["tar", "xf", f, "--directory", f.parent], check=True) # unzip + elif f.suffix == ".gz": + subprocess.run(["tar", "xfz", f, "--directory", f.parent], check=True) # unzip if delete: f.unlink() # remove zip @@ -658,7 +664,7 @@ def make_divisible(x, divisor): def clean_str(s): # Cleans a string by replacing special characters with underscore _ - return 
re.sub(pattern='[|@#!¡·$€%&()=?¿^*;:,¨´><+]', repl='_', string=s) + return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s) def one_cycle(y1=0.0, y2=1.0, steps=100): @@ -668,28 +674,29 @@ def one_cycle(y1=0.0, y2=1.0, steps=100): def colorstr(*input): # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world') - *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string + *args, string = input if len(input) > 1 else ("blue", "bold", input[0]) # color arguments, string colors = { - 'black': '\033[30m', # basic colors - 'red': '\033[31m', - 'green': '\033[32m', - 'yellow': '\033[33m', - 'blue': '\033[34m', - 'magenta': '\033[35m', - 'cyan': '\033[36m', - 'white': '\033[37m', - 'bright_black': '\033[90m', # bright colors - 'bright_red': '\033[91m', - 'bright_green': '\033[92m', - 'bright_yellow': '\033[93m', - 'bright_blue': '\033[94m', - 'bright_magenta': '\033[95m', - 'bright_cyan': '\033[96m', - 'bright_white': '\033[97m', - 'end': '\033[0m', # misc - 'bold': '\033[1m', - 'underline': '\033[4m'} - return ''.join(colors[x] for x in args) + f'{string}' + colors['end'] + "black": "\033[30m", # basic colors + "red": "\033[31m", + "green": "\033[32m", + "yellow": "\033[33m", + "blue": "\033[34m", + "magenta": "\033[35m", + "cyan": "\033[36m", + "white": "\033[37m", + "bright_black": "\033[90m", # bright colors + "bright_red": "\033[91m", + "bright_green": "\033[92m", + "bright_yellow": "\033[93m", + "bright_blue": "\033[94m", + "bright_magenta": "\033[95m", + "bright_cyan": "\033[96m", + "bright_white": "\033[97m", + "end": "\033[0m", # misc + "bold": "\033[1m", + "underline": "\033[4m", + } + return "".join(colors[x] for x in args) + f"{string}" + colors["end"] def labels_to_class_weights(labels, nc=80): @@ -725,9 +732,87 @@ def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper) # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet return [ - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, - 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, - 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90] + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 27, + 28, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + 64, + 65, + 67, + 70, + 72, + 73, + 74, + 75, + 76, + 77, + 78, + 79, + 80, + 81, + 82, + 84, + 85, + 86, + 87, + 88, + 89, + 90, + ] def xyxy2xywh(x): @@ -784,7 +869,10 @@ def segment2box(segment, width=640, height=640): # Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) 
to (xyxy) x, y = segment.T # segment xy inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height) - x, y, = x[inside], y[inside] + ( + x, + y, + ) = x[inside], y[inside] return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4)) # xyxy @@ -865,30 +953,31 @@ def clip_segments(segments, shape): def non_max_suppression( - prediction, - conf_thres=0.25, - iou_thres=0.45, - classes=None, - agnostic=False, - multi_label=False, - labels=(), - max_det=300, - nm=0, # number of masks + prediction, + conf_thres=0.25, + iou_thres=0.45, + classes=None, + agnostic=False, + multi_label=False, + labels=(), + max_det=300, + nm=0, # number of masks ): - """Non-Maximum Suppression (NMS) on inference results to reject overlapping detections + """ + Non-Maximum Suppression (NMS) on inference results to reject overlapping detections. Returns: list of detections, on (n,6) tensor per image [xyxy, conf, cls] """ # Checks - assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0' - assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0' + assert 0 <= conf_thres <= 1, f"Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0" + assert 0 <= iou_thres <= 1, f"Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0" if isinstance(prediction, (list, tuple)): # YOLOv5 model in validation model, output = (inference_out, loss_out) prediction = prediction[0] # select only inference output device = prediction.device - mps = 'mps' in device.type # Apple MPS + mps = "mps" in device.type # Apple MPS if mps: # MPS not fully supported yet, convert tensors to CPU before NMS prediction = prediction.cpu() bs = prediction.shape[0] # batch size @@ -959,7 +1048,7 @@ def non_max_suppression( boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS i = i[:max_det] # limit detections - if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean) + if merge and (1 < n < 3e3): # Merge NMS (boxes merged using weighted mean) # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4) iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix weights = iou * scores[None] # box weights @@ -971,31 +1060,31 @@ def non_max_suppression( if mps: output[xi] = output[xi].to(device) if (time.time() - t) > time_limit: - LOGGER.warning(f'WARNING ⚠️ NMS time limit {time_limit:.3f}s exceeded') + LOGGER.warning(f"WARNING ⚠️ NMS time limit {time_limit:.3f}s exceeded") break # time limit exceeded return output -def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer() +def strip_optimizer(f="best.pt", s=""): # from utils.general import *; strip_optimizer() # Strip optimizer from 'f' to finalize training, optionally save as 's' - x = torch.load(f, map_location=torch.device('cpu')) - if x.get('ema'): - x['model'] = x['ema'] # replace model with ema - for k in 'optimizer', 'best_fitness', 'ema', 'updates': # keys + x = torch.load(f, map_location=torch.device("cpu")) + if x.get("ema"): + x["model"] = x["ema"] # replace model with ema + for k in "optimizer", "best_fitness", "ema", "updates": # keys x[k] = None - x['epoch'] = -1 - x['model'].half() # to FP16 - for p in x['model'].parameters(): + x["epoch"] = -1 + x["model"].half() # to FP16 + for p in x["model"].parameters(): p.requires_grad = False torch.save(x, s or f) - mb = os.path.getsize(s or f) / 1E6 # filesize + mb = os.path.getsize(s or f) / 1e6 # 
filesize LOGGER.info(f"Optimizer stripped from {f},{f' saved as {s},' if s else ''} {mb:.1f}MB") -def print_mutation(keys, results, hyp, save_dir, bucket, prefix=colorstr('evolve: ')): - evolve_csv = save_dir / 'evolve.csv' - evolve_yaml = save_dir / 'hyp_evolve.yaml' +def print_mutation(keys, results, hyp, save_dir, bucket, prefix=colorstr("evolve: ")): + evolve_csv = save_dir / "evolve.csv" + evolve_yaml = save_dir / "hyp_evolve.yaml" keys = tuple(keys) + tuple(hyp.keys()) # [results + hyps] keys = tuple(x.strip() for x in keys) vals = results + tuple(hyp.values()) @@ -1003,33 +1092,48 @@ def print_mutation(keys, results, hyp, save_dir, bucket, prefix=colorstr('evolve # Download (optional) if bucket: - url = f'gs://{bucket}/evolve.csv' + url = f"gs://{bucket}/evolve.csv" if gsutil_getsize(url) > (evolve_csv.stat().st_size if evolve_csv.exists() else 0): - subprocess.run(['gsutil', 'cp', f'{url}', f'{save_dir}']) # download evolve.csv if larger than local + subprocess.run(["gsutil", "cp", f"{url}", f"{save_dir}"]) # download evolve.csv if larger than local # Log to evolve.csv - s = '' if evolve_csv.exists() else (('%20s,' * n % keys).rstrip(',') + '\n') # add header - with open(evolve_csv, 'a') as f: - f.write(s + ('%20.5g,' * n % vals).rstrip(',') + '\n') + s = "" if evolve_csv.exists() else (("%20s," * n % keys).rstrip(",") + "\n") # add header + with open(evolve_csv, "a") as f: + f.write(s + ("%20.5g," * n % vals).rstrip(",") + "\n") # Save yaml - with open(evolve_yaml, 'w') as f: + with open(evolve_yaml, "w") as f: data = pd.read_csv(evolve_csv, skipinitialspace=True) data = data.rename(columns=lambda x: x.strip()) # strip keys i = np.argmax(fitness(data.values[:, :4])) # generations = len(data) - f.write('# YOLOv5 Hyperparameter Evolution Results\n' + f'# Best generation: {i}\n' + - f'# Last generation: {generations - 1}\n' + '# ' + ', '.join(f'{x.strip():>20s}' for x in keys[:7]) + - '\n' + '# ' + ', '.join(f'{x:>20.5g}' for x in data.values[i, :7]) + '\n\n') + f.write( + "# YOLOv5 Hyperparameter Evolution Results\n" + + f"# Best generation: {i}\n" + + f"# Last generation: {generations - 1}\n" + + "# " + + ", ".join(f"{x.strip():>20s}" for x in keys[:7]) + + "\n" + + "# " + + ", ".join(f"{x:>20.5g}" for x in data.values[i, :7]) + + "\n\n" + ) yaml.safe_dump(data.loc[i][7:].to_dict(), f, sort_keys=False) # Print to screen - LOGGER.info(prefix + f'{generations} generations finished, current result:\n' + prefix + - ', '.join(f'{x.strip():>20s}' for x in keys) + '\n' + prefix + ', '.join(f'{x:20.5g}' - for x in vals) + '\n\n') + LOGGER.info( + prefix + + f"{generations} generations finished, current result:\n" + + prefix + + ", ".join(f"{x.strip():>20s}" for x in keys) + + "\n" + + prefix + + ", ".join(f"{x:20.5g}" for x in vals) + + "\n\n" + ) if bucket: - subprocess.run(['gsutil', 'cp', f'{evolve_csv}', f'{evolve_yaml}', f'gs://{bucket}']) # upload + subprocess.run(["gsutil", "cp", f"{evolve_csv}", f"{evolve_yaml}", f"gs://{bucket}"]) # upload def apply_classifier(x, model, img, im0): @@ -1053,7 +1157,7 @@ def apply_classifier(x, model, img, im0): pred_cls1 = d[:, 5].long() ims = [] for a in d: - cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])] + cutout = im0[i][int(a[1]) : int(a[3]), int(a[0]) : int(a[2])] im = cv2.resize(cutout, (224, 224)) # BGR im = im[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 @@ -1067,15 +1171,15 @@ def apply_classifier(x, model, img, im0): return x -def increment_path(path, exist_ok=False, sep='', mkdir=False): +def increment_path(path, 
exist_ok=False, sep="", mkdir=False): # Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc. path = Path(path) # os-agnostic if path.exists() and not exist_ok: - path, suffix = (path.with_suffix(''), path.suffix) if path.is_file() else (path, '') + path, suffix = (path.with_suffix(""), path.suffix) if path.is_file() else (path, "") # Method 1 for n in range(2, 9999): - p = f'{path}{sep}{n}{suffix}' # increment path + p = f"{path}{sep}{n}{suffix}" # increment path if not os.path.exists(p): # break path = Path(p) @@ -1110,7 +1214,7 @@ def imwrite(filename, img): def imshow(path, im): - imshow_(path.encode('unicode_escape').decode(), im) + imshow_(path.encode("unicode_escape").decode(), im) if Path(inspect.stack()[0].filename).parent.parent.as_posix() in inspect.stack()[-1].filename: diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 381d477d127c..df67e45c8221 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -1,7 +1,5 @@ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license -""" -Logging utils -""" +"""Logging utils.""" import json import os import warnings @@ -16,8 +14,8 @@ from utils.plots import plot_images, plot_labels, plot_results from utils.torch_utils import de_parallel -LOGGERS = ('csv', 'tb', 'wandb', 'clearml', 'comet') # *.csv, TensorBoard, Weights & Biases, ClearML -RANK = int(os.getenv('RANK', -1)) +LOGGERS = ("csv", "tb", "wandb", "clearml", "comet") # *.csv, TensorBoard, Weights & Biases, ClearML +RANK = int(os.getenv("RANK", -1)) try: from torch.utils.tensorboard import SummaryWriter @@ -27,8 +25,8 @@ try: import wandb - assert hasattr(wandb, '__version__') # verify package import not local dir - if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.2') and RANK in {0, -1}: + assert hasattr(wandb, "__version__") # verify package import not local dir + if pkg.parse_version(wandb.__version__) >= pkg.parse_version("0.12.2") and RANK in {0, -1}: try: wandb_login_success = wandb.login(timeout=30) except wandb.errors.UsageError: # known non-TTY terminal issue @@ -41,7 +39,7 @@ try: import clearml - assert hasattr(clearml, '__version__') # verify package import not local dir + assert hasattr(clearml, "__version__") # verify package import not local dir except (ImportError, AssertionError): clearml = None @@ -49,7 +47,7 @@ if RANK in {0, -1}: import comet_ml - assert hasattr(comet_ml, '__version__') # verify package import not local dir + assert hasattr(comet_ml, "__version__") # verify package import not local dir from utils.loggers.comet import CometLogger else: @@ -59,7 +57,11 @@ def _json_default(value): - """Format `value` for JSON serialization (e.g. unwrap tensors). Fall back to strings.""" + """ + Format `value` for JSON serialization (e.g. unwrap tensors). + + Fall back to strings. 
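[Editor's note] A minimal, self-contained sketch of the ndjson behavior this helper enables; the metric names and values below are illustrative, not part of the patch:

    import json
    import torch

    def json_default(value):  # re-sketch of _json_default above
        if isinstance(value, torch.Tensor):
            try:
                value = value.item()  # unwrap 0-dim tensors to Python scalars
            except ValueError:  # multi-element tensors fall through to str()
                pass
        return value if isinstance(value, (int, float)) else str(value)

    print(json.dumps({"epoch": 3, "loss": torch.tensor(0.123)}, default=json_default))
    # e.g. {"epoch": 3, "loss": 0.12300000339746475}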
+ """ if isinstance(value, torch.Tensor): try: value = value.item() @@ -70,7 +72,7 @@ def _json_default(value): return str(value) -class Loggers(): +class Loggers: # YOLOv5 Loggers class def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, include=LOGGERS): self.save_dir = save_dir @@ -81,62 +83,65 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, self.logger = logger # for printing results to console self.include = include self.keys = [ - 'train/box_loss', - 'train/obj_loss', - 'train/cls_loss', # train loss - 'metrics/precision', - 'metrics/recall', - 'metrics/mAP_0.5', - 'metrics/mAP_0.5:0.95', # metrics - 'val/box_loss', - 'val/obj_loss', - 'val/cls_loss', # val loss - 'x/lr0', - 'x/lr1', - 'x/lr2'] # params - self.best_keys = ['best/epoch', 'best/precision', 'best/recall', 'best/mAP_0.5', 'best/mAP_0.5:0.95'] + "train/box_loss", + "train/obj_loss", + "train/cls_loss", # train loss + "metrics/precision", + "metrics/recall", + "metrics/mAP_0.5", + "metrics/mAP_0.5:0.95", # metrics + "val/box_loss", + "val/obj_loss", + "val/cls_loss", # val loss + "x/lr0", + "x/lr1", + "x/lr2", + ] # params + self.best_keys = ["best/epoch", "best/precision", "best/recall", "best/mAP_0.5", "best/mAP_0.5:0.95"] for k in LOGGERS: setattr(self, k, None) # init empty logger dictionary self.csv = True # always log to csv - self.ndjson_console = ('ndjson_console' in self.include) # log ndjson to console - self.ndjson_file = ('ndjson_file' in self.include) # log ndjson to file + self.ndjson_console = "ndjson_console" in self.include # log ndjson to console + self.ndjson_file = "ndjson_file" in self.include # log ndjson to file # Messages if not comet_ml: - prefix = colorstr('Comet: ') + prefix = colorstr("Comet: ") s = f"{prefix}run 'pip install comet_ml' to automatically track and visualize YOLOv5 🚀 runs in Comet" self.logger.info(s) # TensorBoard s = self.save_dir - if 'tb' in self.include and not self.opt.evolve: - prefix = colorstr('TensorBoard: ') + if "tb" in self.include and not self.opt.evolve: + prefix = colorstr("TensorBoard: ") self.logger.info(f"{prefix}Start with 'tensorboard --logdir {s.parent}', view at http://localhost:6006/") self.tb = SummaryWriter(str(s)) # W&B - if wandb and 'wandb' in self.include: + if wandb and "wandb" in self.include: self.opt.hyp = self.hyp # add hyperparameters self.wandb = WandbLogger(self.opt) else: self.wandb = None # ClearML - if clearml and 'clearml' in self.include: + if clearml and "clearml" in self.include: try: self.clearml = ClearmlLogger(self.opt, self.hyp) except Exception: self.clearml = None - prefix = colorstr('ClearML: ') - LOGGER.warning(f'{prefix}WARNING ⚠️ ClearML is installed but not configured, skipping ClearML logging.' - f' See https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration#readme') + prefix = colorstr("ClearML: ") + LOGGER.warning( + f"{prefix}WARNING ⚠️ ClearML is installed but not configured, skipping ClearML logging." 
+ f" See https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration#readme" + ) else: self.clearml = None # Comet - if comet_ml and 'comet' in self.include: - if isinstance(self.opt.resume, str) and self.opt.resume.startswith('comet://'): - run_id = self.opt.resume.split('/')[-1] + if comet_ml and "comet" in self.include: + if isinstance(self.opt.resume, str) and self.opt.resume.startswith("comet://"): + run_id = self.opt.resume.split("/")[-1] self.comet_logger = CometLogger(self.opt, self.hyp, run_id=run_id) else: @@ -170,9 +175,9 @@ def on_pretrain_routine_end(self, labels, names): # Callback runs on pre-train routine end if self.plots: plot_labels(labels, names, self.save_dir) - paths = self.save_dir.glob('*labels*.jpg') # training labels + paths = self.save_dir.glob("*labels*.jpg") # training labels if self.wandb: - self.wandb.log({'Labels': [wandb.Image(str(x), caption=x.name) for x in paths]}) + self.wandb.log({"Labels": [wandb.Image(str(x), caption=x.name) for x in paths]}) if self.comet_logger: self.comet_logger.on_pretrain_routine_end(paths) if self.clearml: @@ -185,16 +190,16 @@ def on_train_batch_end(self, model, ni, imgs, targets, paths, vals): # ni: number integrated batches (since train start) if self.plots: if ni < 3: - f = self.save_dir / f'train_batch{ni}.jpg' # filename + f = self.save_dir / f"train_batch{ni}.jpg" # filename plot_images(imgs, targets, paths, f) if ni == 0 and self.tb and not self.opt.sync_bn: log_tensorboard_graph(self.tb, model, imgsz=(self.opt.imgsz, self.opt.imgsz)) if ni == 10 and (self.wandb or self.clearml): - files = sorted(self.save_dir.glob('train*.jpg')) + files = sorted(self.save_dir.glob("train*.jpg")) if self.wandb: - self.wandb.log({'Mosaics': [wandb.Image(str(f), caption=f.name) for f in files if f.exists()]}) + self.wandb.log({"Mosaics": [wandb.Image(str(f), caption=f.name) for f in files if f.exists()]}) if self.clearml: - self.clearml.log_debug_samples(files, title='Mosaics') + self.clearml.log_debug_samples(files, title="Mosaics") if self.comet_logger: self.comet_logger.on_train_batch_end(log_dict, step=ni) @@ -225,11 +230,11 @@ def on_val_batch_end(self, batch_i, im, targets, paths, shapes, out): def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix): # Callback runs on val end if self.wandb or self.clearml: - files = sorted(self.save_dir.glob('val*.jpg')) + files = sorted(self.save_dir.glob("val*.jpg")) if self.wandb: - self.wandb.log({'Validation': [wandb.Image(str(f), caption=f.name) for f in files]}) + self.wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in files]}) if self.clearml: - self.clearml.log_debug_samples(files, title='Validation') + self.clearml.log_debug_samples(files, title="Validation") if self.comet_logger: self.comet_logger.on_val_end(nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix) @@ -238,18 +243,18 @@ def on_fit_epoch_end(self, vals, epoch, best_fitness, fi): # Callback runs at the end of each fit (train+val) epoch x = dict(zip(self.keys, vals)) if self.csv: - file = self.save_dir / 'results.csv' + file = self.save_dir / "results.csv" n = len(x) + 1 # number of cols - s = '' if file.exists() else (('%20s,' * n % tuple(['epoch'] + self.keys)).rstrip(',') + '\n') # add header - with open(file, 'a') as f: - f.write(s + ('%20.5g,' * n % tuple([epoch] + vals)).rstrip(',') + '\n') + s = "" if file.exists() else (("%20s," * n % tuple(["epoch"] + self.keys)).rstrip(",") + "\n") # add header + with open(file, "a") as f: + f.write(s + ("%20.5g," * n % 
tuple([epoch] + vals)).rstrip(",") + "\n") if self.ndjson_console or self.ndjson_file: json_data = json.dumps(dict(epoch=epoch, **x), default=_json_default) if self.ndjson_console: print(json_data) if self.ndjson_file: - file = self.save_dir / 'results.ndjson' - with open(file, 'a') as f: + file = self.save_dir / "results.ndjson" + with open(file, "a") as f: print(json_data, file=f) if self.tb: @@ -279,9 +284,9 @@ def on_model_save(self, last, epoch, final_epoch, best_fitness, fi): if self.wandb: self.wandb.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi) if self.clearml: - self.clearml.task.update_output_model(model_path=str(last), - model_name='Latest Model', - auto_delete_file=False) + self.clearml.task.update_output_model( + model_path=str(last), model_name="Latest Model", auto_delete_file=False + ) if self.comet_logger: self.comet_logger.on_model_save(last, epoch, final_epoch, best_fitness, fi) @@ -289,31 +294,34 @@ def on_model_save(self, last, epoch, final_epoch, best_fitness, fi): def on_train_end(self, last, best, epoch, results): # Callback runs on training end, i.e. saving best model if self.plots: - plot_results(file=self.save_dir / 'results.csv') # save results.png - files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))] + plot_results(file=self.save_dir / "results.csv") # save results.png + files = ["results.png", "confusion_matrix.png", *(f"{x}_curve.png" for x in ("F1", "PR", "P", "R"))] files = [(self.save_dir / f) for f in files if (self.save_dir / f).exists()] # filter self.logger.info(f"Results saved to {colorstr('bold', self.save_dir)}") if self.tb and not self.clearml: # These images are already captured by ClearML by now, we don't want doubles for f in files: - self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats='HWC') + self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats="HWC") if self.wandb: self.wandb.log(dict(zip(self.keys[3:10], results))) - self.wandb.log({'Results': [wandb.Image(str(f), caption=f.name) for f in files]}) + self.wandb.log({"Results": [wandb.Image(str(f), caption=f.name) for f in files]}) # Calling wandb.log. 
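[Editor's note] The results.csv append pattern used in on_fit_epoch_end() above, restated standalone; the keys and values here are illustrative:

    from pathlib import Path

    file = Path("results.csv")
    keys, vals, epoch = ["train/box_loss", "val/box_loss"], [0.051, 0.047], 0
    n = len(vals) + 1  # number of columns, including the leading epoch
    s = "" if file.exists() else ("%20s," * n % tuple(["epoch"] + keys)).rstrip(",") + "\n"  # header written once
    with open(file, "a") as f:
        f.write(s + ("%20.5g," * n % tuple([epoch] + vals)).rstrip(",") + "\n")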
TODO: Refactor this into WandbLogger.log_model if not self.opt.evolve: - wandb.log_artifact(str(best if best.exists() else last), - type='model', - name=f'run_{self.wandb.wandb_run.id}_model', - aliases=['latest', 'best', 'stripped']) + wandb.log_artifact( + str(best if best.exists() else last), + type="model", + name=f"run_{self.wandb.wandb_run.id}_model", + aliases=["latest", "best", "stripped"], + ) self.wandb.finish_run() if self.clearml and not self.opt.evolve: self.clearml.log_summary(dict(zip(self.keys[3:10], results))) [self.clearml.log_plot(title=f.stem, plot_path=f) for f in files] - self.clearml.log_model(str(best if best.exists() else last), - "Best Model" if best.exists() else "Last Model", epoch) + self.clearml.log_model( + str(best if best.exists() else last), "Best Model" if best.exists() else "Last Model", epoch + ) if self.comet_logger: final_results = dict(zip(self.keys[3:10], results)) @@ -339,38 +347,41 @@ class GenericLogger: include: loggers to include """ - def __init__(self, opt, console_logger, include=('tb', 'wandb', 'clearml')): + def __init__(self, opt, console_logger, include=("tb", "wandb", "clearml")): # init default loggers self.save_dir = Path(opt.save_dir) self.include = include self.console_logger = console_logger - self.csv = self.save_dir / 'results.csv' # CSV logger - if 'tb' in self.include: - prefix = colorstr('TensorBoard: ') + self.csv = self.save_dir / "results.csv" # CSV logger + if "tb" in self.include: + prefix = colorstr("TensorBoard: ") self.console_logger.info( - f"{prefix}Start with 'tensorboard --logdir {self.save_dir.parent}', view at http://localhost:6006/") + f"{prefix}Start with 'tensorboard --logdir {self.save_dir.parent}', view at http://localhost:6006/" + ) self.tb = SummaryWriter(str(self.save_dir)) - if wandb and 'wandb' in self.include: - self.wandb = wandb.init(project=web_project_name(str(opt.project)), - name=None if opt.name == 'exp' else opt.name, - config=opt) + if wandb and "wandb" in self.include: + self.wandb = wandb.init( + project=web_project_name(str(opt.project)), name=None if opt.name == "exp" else opt.name, config=opt + ) else: self.wandb = None - if clearml and 'clearml' in self.include: + if clearml and "clearml" in self.include: try: # Hyp is not available in classification mode - if 'hyp' not in opt: + if "hyp" not in opt: hyp = {} else: hyp = opt.hyp self.clearml = ClearmlLogger(opt, hyp) except Exception: self.clearml = None - prefix = colorstr('ClearML: ') - LOGGER.warning(f'{prefix}WARNING ⚠️ ClearML is installed but not configured, skipping ClearML logging.' - f' See https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml#readme') + prefix = colorstr("ClearML: ") + LOGGER.warning( + f"{prefix}WARNING ⚠️ ClearML is installed but not configured, skipping ClearML logging." 
+ f" See https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml#readme" + ) else: self.clearml = None @@ -379,9 +390,9 @@ def log_metrics(self, metrics, epoch): if self.csv: keys, vals = list(metrics.keys()), list(metrics.values()) n = len(metrics) + 1 # number of cols - s = '' if self.csv.exists() else (('%23s,' * n % tuple(['epoch'] + keys)).rstrip(',') + '\n') # header - with open(self.csv, 'a') as f: - f.write(s + ('%23.5g,' * n % tuple([epoch] + vals)).rstrip(',') + '\n') + s = "" if self.csv.exists() else (("%23s," * n % tuple(["epoch"] + keys)).rstrip(",") + "\n") # header + with open(self.csv, "a") as f: + f.write(s + ("%23.5g," * n % tuple([epoch] + vals)).rstrip(",") + "\n") if self.tb: for k, v in metrics.items(): @@ -393,20 +404,20 @@ def log_metrics(self, metrics, epoch): if self.clearml: self.clearml.log_scalars(metrics, epoch) - def log_images(self, files, name='Images', epoch=0): + def log_images(self, files, name="Images", epoch=0): # Log images to all loggers files = [Path(f) for f in (files if isinstance(files, (tuple, list)) else [files])] # to Path files = [f for f in files if f.exists()] # filter by exists if self.tb: for f in files: - self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats='HWC') + self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats="HWC") if self.wandb: self.wandb.log({name: [wandb.Image(str(f), caption=f.name) for f in files]}, step=epoch) if self.clearml: - if name == 'Results': + if name == "Results": [self.clearml.log_plot(f.stem, f) for f in files] else: self.clearml.log_debug_samples(files, title=name) @@ -419,7 +430,7 @@ def log_graph(self, model, imgsz=(640, 640)): def log_model(self, model_path, epoch=0, metadata={}): # Log model to all loggers if self.wandb: - art = wandb.Artifact(name=f'run_{wandb.run.id}_model', type='model', metadata=metadata) + art = wandb.Artifact(name=f"run_{wandb.run.id}_model", type="model", metadata=metadata) art.add_file(str(model_path)) wandb.log_artifact(art) if self.clearml: @@ -440,15 +451,15 @@ def log_tensorboard_graph(tb, model, imgsz=(640, 640)): imgsz = (imgsz, imgsz) if isinstance(imgsz, int) else imgsz # expand im = torch.zeros((1, 3, *imgsz)).to(p.device).type_as(p) # input image (WARNING: must be zeros, not empty) with warnings.catch_warnings(): - warnings.simplefilter('ignore') # suppress jit trace warning + warnings.simplefilter("ignore") # suppress jit trace warning tb.add_graph(torch.jit.trace(de_parallel(model), im, strict=False), []) except Exception as e: - LOGGER.warning(f'WARNING ⚠️ TensorBoard graph visualization failure {e}') + LOGGER.warning(f"WARNING ⚠️ TensorBoard graph visualization failure {e}") def web_project_name(project): # Convert local project name to web project name - if not project.startswith('runs/train'): + if not project.startswith("runs/train"): return project - suffix = '-Classify' if project.endswith('-cls') else '-Segment' if project.endswith('-seg') else '' - return f'YOLOv5{suffix}' + suffix = "-Classify" if project.endswith("-cls") else "-Segment" if project.endswith("-seg") else "" + return f"YOLOv5{suffix}" diff --git a/utils/loggers/clearml/clearml_utils.py b/utils/loggers/clearml/clearml_utils.py index c7627a261186..8b141d177afd 100644 --- a/utils/loggers/clearml/clearml_utils.py +++ b/utils/loggers/clearml/clearml_utils.py @@ -13,55 +13,63 @@ import clearml from clearml import Dataset, Task - assert hasattr(clearml, '__version__') # verify package import not local dir + assert hasattr(clearml, 
"__version__") # verify package import not local dir except (ImportError, AssertionError): clearml = None def construct_dataset(clearml_info_string): - """Load in a clearml dataset and fill the internal data_dict with its contents. - """ - dataset_id = clearml_info_string.replace('clearml://', '') + """Load in a clearml dataset and fill the internal data_dict with its contents.""" + dataset_id = clearml_info_string.replace("clearml://", "") dataset = Dataset.get(dataset_id=dataset_id) dataset_root_path = Path(dataset.get_local_copy()) # We'll search for the yaml file definition in the dataset - yaml_filenames = list(glob.glob(str(dataset_root_path / '*.yaml')) + glob.glob(str(dataset_root_path / '*.yml'))) + yaml_filenames = list(glob.glob(str(dataset_root_path / "*.yaml")) + glob.glob(str(dataset_root_path / "*.yml"))) if len(yaml_filenames) > 1: - raise ValueError('More than one yaml file was found in the dataset root, cannot determine which one contains ' - 'the dataset definition this way.') + raise ValueError( + "More than one yaml file was found in the dataset root, cannot determine which one contains " + "the dataset definition this way." + ) elif len(yaml_filenames) == 0: - raise ValueError('No yaml definition found in dataset root path, check that there is a correct yaml file ' - 'inside the dataset root path.') + raise ValueError( + "No yaml definition found in dataset root path, check that there is a correct yaml file " + "inside the dataset root path." + ) with open(yaml_filenames[0]) as f: dataset_definition = yaml.safe_load(f) - assert set(dataset_definition.keys()).issuperset( - {'train', 'test', 'val', 'nc', 'names'} + assert set( + dataset_definition.keys() + ).issuperset( + {"train", "test", "val", "nc", "names"} ), "The right keys were not found in the yaml file, make sure it at least has the following keys: ('train', 'test', 'val', 'nc', 'names')" data_dict = dict() - data_dict['train'] = str( - (dataset_root_path / dataset_definition['train']).resolve()) if dataset_definition['train'] else None - data_dict['test'] = str( - (dataset_root_path / dataset_definition['test']).resolve()) if dataset_definition['test'] else None - data_dict['val'] = str( - (dataset_root_path / dataset_definition['val']).resolve()) if dataset_definition['val'] else None - data_dict['nc'] = dataset_definition['nc'] - data_dict['names'] = dataset_definition['names'] + data_dict["train"] = ( + str((dataset_root_path / dataset_definition["train"]).resolve()) if dataset_definition["train"] else None + ) + data_dict["test"] = ( + str((dataset_root_path / dataset_definition["test"]).resolve()) if dataset_definition["test"] else None + ) + data_dict["val"] = ( + str((dataset_root_path / dataset_definition["val"]).resolve()) if dataset_definition["val"] else None + ) + data_dict["nc"] = dataset_definition["nc"] + data_dict["names"] = dataset_definition["names"] return data_dict class ClearmlLogger: - """Log training runs, datasets, models, and predictions to ClearML. + """ + Log training runs, datasets, models, and predictions to ClearML. - This logger sends information to ClearML at app.clear.ml or to your own hosted server. By default, - this information includes hyperparameters, system configuration and metrics, model metrics, code information and - basic data metrics and analyses. + This logger sends information to ClearML at app.clear.ml or to your own hosted server. 
By default, this information + includes hyperparameters, system configuration and metrics, model metrics, code information and basic data metrics + and analyses. - By providing additional command line arguments to train.py, datasets, - models and predictions can also be logged. + By providing additional command line arguments to train.py, datasets, models and predictions can also be logged. """ def __init__(self, opt, hyp): @@ -81,36 +89,36 @@ def __init__(self, opt, hyp): self.max_imgs_to_log_per_epoch = 16 # Get the interval of epochs when bounding box images should be logged # Only for detection task though! - if 'bbox_interval' in opt: + if "bbox_interval" in opt: self.bbox_interval = opt.bbox_interval self.clearml = clearml self.task = None self.data_dict = None if self.clearml: self.task = Task.init( - project_name=opt.project if not str(opt.project).startswith('runs/') else 'YOLOv5', - task_name=opt.name if opt.name != 'exp' else 'Training', - tags=['YOLOv5'], + project_name=opt.project if not str(opt.project).startswith("runs/") else "YOLOv5", + task_name=opt.name if opt.name != "exp" else "Training", + tags=["YOLOv5"], output_uri=True, reuse_last_task_id=opt.exist_ok, - auto_connect_frameworks={ - 'pytorch': False, - 'matplotlib': False} + auto_connect_frameworks={"pytorch": False, "matplotlib": False}, # We disconnect pytorch auto-detection, because we added manual model save points in the code ) # ClearML's hooks will already grab all general parameters # Only the hyperparameters coming from the yaml config file # will have to be added manually! - self.task.connect(hyp, name='Hyperparameters') - self.task.connect(opt, name='Args') + self.task.connect(hyp, name="Hyperparameters") + self.task.connect(opt, name="Args") # Make sure the code is easily remotely runnable by setting the docker image to use by the remote agent - self.task.set_base_docker('ultralytics/yolov5:latest', - docker_arguments='--ipc=host -e="CLEARML_AGENT_SKIP_PYTHON_ENV_INSTALL=1"', - docker_setup_bash_script='pip install clearml') + self.task.set_base_docker( + "ultralytics/yolov5:latest", + docker_arguments='--ipc=host -e="CLEARML_AGENT_SKIP_PYTHON_ENV_INSTALL=1"', + docker_setup_bash_script="pip install clearml", + ) # Get ClearML Dataset Version if requested - if opt.data.startswith('clearml://'): + if opt.data.startswith("clearml://"): # data_dict should have the following keys: # names, nc (number of classes), test, train, val (all three relative paths to ../datasets) self.data_dict = construct_dataset(opt.data) @@ -120,33 +128,32 @@ def __init__(self, opt, hyp): def log_scalars(self, metrics, epoch): """ - Log scalars/metrics to ClearML + Log scalars/metrics to ClearML. arguments: metrics (dict) Metrics in dict format: {"metrics/mAP": 0.8, ...} epoch (int) iteration number for the current set of metrics """ for k, v in metrics.items(): - title, series = k.split('/') + title, series = k.split("/") self.task.get_logger().report_scalar(title, series, v, epoch) def log_model(self, model_path, model_name, epoch=0): """ - Log model weights to ClearML + Log model weights to ClearML. 
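[Editor's note] A hedged sketch of the two ClearML calls this section leans on; the project, task, and dataset names are placeholders, not taken from the patch:

    from clearml import Dataset, Task

    # construct_dataset() above resolves a clearml:// id to a local copy like this
    root = Dataset.get(dataset_id="<dataset_id>").get_local_copy()

    # log_scalars() above splits each "title/series" key for ClearML's reporting API
    task = Task.init(project_name="YOLOv5", task_name="demo")
    for k, v in {"metrics/mAP_0.5": 0.67, "train/box_loss": 0.05}.items():
        title, series = k.split("/")
        task.get_logger().report_scalar(title, series, v, iteration=3)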
arguments: model_path (PosixPath or str) Path to the model weights model_name (str) Name of the model visible in ClearML epoch (int) Iteration / epoch of the model weights """ - self.task.update_output_model(model_path=str(model_path), - name=model_name, - iteration=epoch, - auto_delete_file=False) + self.task.update_output_model( + model_path=str(model_path), name=model_name, iteration=epoch, auto_delete_file=False + ) def log_summary(self, metrics): """ - Log final metrics to a summary table + Log final metrics to a summary table. arguments: metrics (dict) Metrics in dict format: {"metrics/mAP": 0.8, ...} @@ -156,7 +163,7 @@ def log_summary(self, metrics): def log_plot(self, title, plot_path): """ - Log image as plot in the plot section of ClearML + Log image as plot in the plot section of ClearML. arguments: title (str) Title of the plot @@ -164,12 +171,12 @@ def log_plot(self, title, plot_path): """ img = mpimg.imread(plot_path) fig = plt.figure() - ax = fig.add_axes([0, 0, 1, 1], frameon=False, aspect='auto', xticks=[], yticks=[]) # no ticks + ax = fig.add_axes([0, 0, 1, 1], frameon=False, aspect="auto", xticks=[], yticks=[]) # no ticks ax.imshow(img) self.task.get_logger().report_matplotlib_figure(title, "", figure=fig, report_interactive=False) - def log_debug_samples(self, files, title='Debug Samples'): + def log_debug_samples(self, files, title="Debug Samples"): """ Log files (images) as debug samples in the ClearML task. @@ -179,12 +186,11 @@ def log_debug_samples(self, files, title='Debug Samples'): """ for f in files: if f.exists(): - it = re.search(r'_batch(\d+)', f.name) + it = re.search(r"_batch(\d+)", f.name) iteration = int(it.groups()[0]) if it else 0 - self.task.get_logger().report_image(title=title, - series=f.name.replace(f"_batch{iteration}", ''), - local_path=str(f), - iteration=iteration) + self.task.get_logger().report_image( + title=title, series=f.name.replace(f"_batch{iteration}", ""), local_path=str(f), iteration=iteration + ) def log_image_with_boxes(self, image_path, boxes, class_names, image, conf_threshold=0.25): """ @@ -206,15 +212,14 @@ def log_image_with_boxes(self, image_path, boxes, class_names, image, conf_thres class_name = class_names[int(class_nr)] confidence_percentage = round(float(conf) * 100, 2) - label = f'{class_name}: {confidence_percentage}%' + label = f"{class_name}: {confidence_percentage}%" if conf > conf_threshold: annotator.rectangle(box.cpu().numpy(), outline=color) annotator.box_label(box.cpu().numpy(), label=label, color=color) annotated_image = annotator.result() - self.task.get_logger().report_image(title='Bounding Boxes', - series=image_path.name, - iteration=self.current_epoch, - image=annotated_image) + self.task.get_logger().report_image( + title="Bounding Boxes", series=image_path.name, iteration=self.current_epoch, image=annotated_image + ) self.current_epoch_logged_images.add(image_path) diff --git a/utils/loggers/clearml/hpo.py b/utils/loggers/clearml/hpo.py index ee518b0fbfc8..4e314ea868df 100644 --- a/utils/loggers/clearml/hpo.py +++ b/utils/loggers/clearml/hpo.py @@ -1,18 +1,21 @@ from clearml import Task + # Connecting ClearML with the current process, # from here on everything is logged automatically from clearml.automation import HyperParameterOptimizer, UniformParameterRange from clearml.automation.optuna import OptimizerOptuna -task = Task.init(project_name='Hyper-Parameter Optimization', - task_name='YOLOv5', - task_type=Task.TaskTypes.optimizer, - reuse_last_task_id=False) +task = Task.init( + 
project_name="Hyper-Parameter Optimization", + task_name="YOLOv5", + task_type=Task.TaskTypes.optimizer, + reuse_last_task_id=False, +) # Example use case: optimizer = HyperParameterOptimizer( # This is the experiment we want to optimize - base_task_id='', + base_task_id="", # here we define the hyper-parameters to optimize # Notice: The parameter name should exactly match what you see in the UI: / # For Example, here we see in the base experiment a section Named: "General" @@ -20,39 +23,40 @@ # If you have `argparse` for example, then arguments will appear under the "Args" section, # and you should instead pass "Args/batch_size" hyper_parameters=[ - UniformParameterRange('Hyperparameters/lr0', min_value=1e-5, max_value=1e-1), - UniformParameterRange('Hyperparameters/lrf', min_value=0.01, max_value=1.0), - UniformParameterRange('Hyperparameters/momentum', min_value=0.6, max_value=0.98), - UniformParameterRange('Hyperparameters/weight_decay', min_value=0.0, max_value=0.001), - UniformParameterRange('Hyperparameters/warmup_epochs', min_value=0.0, max_value=5.0), - UniformParameterRange('Hyperparameters/warmup_momentum', min_value=0.0, max_value=0.95), - UniformParameterRange('Hyperparameters/warmup_bias_lr', min_value=0.0, max_value=0.2), - UniformParameterRange('Hyperparameters/box', min_value=0.02, max_value=0.2), - UniformParameterRange('Hyperparameters/cls', min_value=0.2, max_value=4.0), - UniformParameterRange('Hyperparameters/cls_pw', min_value=0.5, max_value=2.0), - UniformParameterRange('Hyperparameters/obj', min_value=0.2, max_value=4.0), - UniformParameterRange('Hyperparameters/obj_pw', min_value=0.5, max_value=2.0), - UniformParameterRange('Hyperparameters/iou_t', min_value=0.1, max_value=0.7), - UniformParameterRange('Hyperparameters/anchor_t', min_value=2.0, max_value=8.0), - UniformParameterRange('Hyperparameters/fl_gamma', min_value=0.0, max_value=4.0), - UniformParameterRange('Hyperparameters/hsv_h', min_value=0.0, max_value=0.1), - UniformParameterRange('Hyperparameters/hsv_s', min_value=0.0, max_value=0.9), - UniformParameterRange('Hyperparameters/hsv_v', min_value=0.0, max_value=0.9), - UniformParameterRange('Hyperparameters/degrees', min_value=0.0, max_value=45.0), - UniformParameterRange('Hyperparameters/translate', min_value=0.0, max_value=0.9), - UniformParameterRange('Hyperparameters/scale', min_value=0.0, max_value=0.9), - UniformParameterRange('Hyperparameters/shear', min_value=0.0, max_value=10.0), - UniformParameterRange('Hyperparameters/perspective', min_value=0.0, max_value=0.001), - UniformParameterRange('Hyperparameters/flipud', min_value=0.0, max_value=1.0), - UniformParameterRange('Hyperparameters/fliplr', min_value=0.0, max_value=1.0), - UniformParameterRange('Hyperparameters/mosaic', min_value=0.0, max_value=1.0), - UniformParameterRange('Hyperparameters/mixup', min_value=0.0, max_value=1.0), - UniformParameterRange('Hyperparameters/copy_paste', min_value=0.0, max_value=1.0)], + UniformParameterRange("Hyperparameters/lr0", min_value=1e-5, max_value=1e-1), + UniformParameterRange("Hyperparameters/lrf", min_value=0.01, max_value=1.0), + UniformParameterRange("Hyperparameters/momentum", min_value=0.6, max_value=0.98), + UniformParameterRange("Hyperparameters/weight_decay", min_value=0.0, max_value=0.001), + UniformParameterRange("Hyperparameters/warmup_epochs", min_value=0.0, max_value=5.0), + UniformParameterRange("Hyperparameters/warmup_momentum", min_value=0.0, max_value=0.95), + UniformParameterRange("Hyperparameters/warmup_bias_lr", min_value=0.0, 
max_value=0.2),
+        UniformParameterRange("Hyperparameters/box", min_value=0.02, max_value=0.2),
+        UniformParameterRange("Hyperparameters/cls", min_value=0.2, max_value=4.0),
+        UniformParameterRange("Hyperparameters/cls_pw", min_value=0.5, max_value=2.0),
+        UniformParameterRange("Hyperparameters/obj", min_value=0.2, max_value=4.0),
+        UniformParameterRange("Hyperparameters/obj_pw", min_value=0.5, max_value=2.0),
+        UniformParameterRange("Hyperparameters/iou_t", min_value=0.1, max_value=0.7),
+        UniformParameterRange("Hyperparameters/anchor_t", min_value=2.0, max_value=8.0),
+        UniformParameterRange("Hyperparameters/fl_gamma", min_value=0.0, max_value=4.0),
+        UniformParameterRange("Hyperparameters/hsv_h", min_value=0.0, max_value=0.1),
+        UniformParameterRange("Hyperparameters/hsv_s", min_value=0.0, max_value=0.9),
+        UniformParameterRange("Hyperparameters/hsv_v", min_value=0.0, max_value=0.9),
+        UniformParameterRange("Hyperparameters/degrees", min_value=0.0, max_value=45.0),
+        UniformParameterRange("Hyperparameters/translate", min_value=0.0, max_value=0.9),
+        UniformParameterRange("Hyperparameters/scale", min_value=0.0, max_value=0.9),
+        UniformParameterRange("Hyperparameters/shear", min_value=0.0, max_value=10.0),
+        UniformParameterRange("Hyperparameters/perspective", min_value=0.0, max_value=0.001),
+        UniformParameterRange("Hyperparameters/flipud", min_value=0.0, max_value=1.0),
+        UniformParameterRange("Hyperparameters/fliplr", min_value=0.0, max_value=1.0),
+        UniformParameterRange("Hyperparameters/mosaic", min_value=0.0, max_value=1.0),
+        UniformParameterRange("Hyperparameters/mixup", min_value=0.0, max_value=1.0),
+        UniformParameterRange("Hyperparameters/copy_paste", min_value=0.0, max_value=1.0),
+    ],
     # this is the objective metric we want to maximize/minimize
-    objective_metric_title='metrics',
-    objective_metric_series='mAP_0.5',
+    objective_metric_title="metrics",
+    objective_metric_series="mAP_0.5",
     # now we decide if we want to maximize it or minimize it (accuracy we maximize)
-    objective_metric_sign='max',
+    objective_metric_sign="max",
     # let us limit the number of concurrent experiments,
     # this in turn will make sure we don't bombard the scheduler with experiments.
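[Editor's note] A minimal sketch of driving the optimizer configured above, assuming a valid base_task_id and a reachable ClearML server:

    optimizer.set_report_period(10)  # report progress every 10 minutes
    optimizer.start()
    optimizer.wait()  # block until the optimization budget is spent
    top = optimizer.get_top_experiments(top_k=3)
    print([t.id for t in top])
    optimizer.stop()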
# if we have an auto-scaler connected, this, by proxy, will limit the number of machine @@ -81,4 +85,4 @@ # make sure background optimization stopped optimizer.stop() -print('We are done, good bye') +print("We are done, good bye") diff --git a/utils/loggers/comet/__init__.py b/utils/loggers/comet/__init__.py index c14a5f885696..bdf81f63982e 100644 --- a/utils/loggers/comet/__init__.py +++ b/utils/loggers/comet/__init__.py @@ -17,7 +17,7 @@ # Project Configuration config = comet_ml.config.get_config() - COMET_PROJECT_NAME = config.get_string(os.getenv('COMET_PROJECT_NAME'), 'comet.project_name', default='yolov5') + COMET_PROJECT_NAME = config.get_string(os.getenv("COMET_PROJECT_NAME"), "comet.project_name", default="yolov5") except ImportError: comet_ml = None COMET_PROJECT_NAME = None @@ -31,42 +31,40 @@ from utils.general import check_dataset, scale_boxes, xywh2xyxy from utils.metrics import box_iou -COMET_PREFIX = 'comet://' +COMET_PREFIX = "comet://" -COMET_MODE = os.getenv('COMET_MODE', 'online') +COMET_MODE = os.getenv("COMET_MODE", "online") # Model Saving Settings -COMET_MODEL_NAME = os.getenv('COMET_MODEL_NAME', 'yolov5') +COMET_MODEL_NAME = os.getenv("COMET_MODEL_NAME", "yolov5") # Dataset Artifact Settings -COMET_UPLOAD_DATASET = os.getenv('COMET_UPLOAD_DATASET', 'false').lower() == 'true' +COMET_UPLOAD_DATASET = os.getenv("COMET_UPLOAD_DATASET", "false").lower() == "true" # Evaluation Settings -COMET_LOG_CONFUSION_MATRIX = (os.getenv('COMET_LOG_CONFUSION_MATRIX', 'true').lower() == 'true') -COMET_LOG_PREDICTIONS = os.getenv('COMET_LOG_PREDICTIONS', 'true').lower() == 'true' -COMET_MAX_IMAGE_UPLOADS = int(os.getenv('COMET_MAX_IMAGE_UPLOADS', 100)) +COMET_LOG_CONFUSION_MATRIX = os.getenv("COMET_LOG_CONFUSION_MATRIX", "true").lower() == "true" +COMET_LOG_PREDICTIONS = os.getenv("COMET_LOG_PREDICTIONS", "true").lower() == "true" +COMET_MAX_IMAGE_UPLOADS = int(os.getenv("COMET_MAX_IMAGE_UPLOADS", 100)) # Confusion Matrix Settings -CONF_THRES = float(os.getenv('CONF_THRES', 0.001)) -IOU_THRES = float(os.getenv('IOU_THRES', 0.6)) +CONF_THRES = float(os.getenv("CONF_THRES", 0.001)) +IOU_THRES = float(os.getenv("IOU_THRES", 0.6)) # Batch Logging Settings -COMET_LOG_BATCH_METRICS = (os.getenv('COMET_LOG_BATCH_METRICS', 'false').lower() == 'true') -COMET_BATCH_LOGGING_INTERVAL = os.getenv('COMET_BATCH_LOGGING_INTERVAL', 1) -COMET_PREDICTION_LOGGING_INTERVAL = os.getenv('COMET_PREDICTION_LOGGING_INTERVAL', 1) -COMET_LOG_PER_CLASS_METRICS = (os.getenv('COMET_LOG_PER_CLASS_METRICS', 'false').lower() == 'true') +COMET_LOG_BATCH_METRICS = os.getenv("COMET_LOG_BATCH_METRICS", "false").lower() == "true" +COMET_BATCH_LOGGING_INTERVAL = os.getenv("COMET_BATCH_LOGGING_INTERVAL", 1) +COMET_PREDICTION_LOGGING_INTERVAL = os.getenv("COMET_PREDICTION_LOGGING_INTERVAL", 1) +COMET_LOG_PER_CLASS_METRICS = os.getenv("COMET_LOG_PER_CLASS_METRICS", "false").lower() == "true" -RANK = int(os.getenv('RANK', -1)) +RANK = int(os.getenv("RANK", -1)) to_pil = T.ToPILImage() class CometLogger: - """Log metrics, parameters, source code, models and much more - with Comet - """ + """Log metrics, parameters, source code, models and much more with Comet.""" - def __init__(self, opt, hyp, run_id=None, job_type='Training', **experiment_kwargs) -> None: + def __init__(self, opt, hyp, run_id=None, job_type="Training", **experiment_kwargs) -> None: self.job_type = job_type self.opt = opt self.hyp = hyp @@ -87,57 +85,58 @@ def __init__(self, opt, hyp, run_id=None, job_type='Training', **experiment_kwar # Default parameters to 
pass to Experiment objects self.default_experiment_kwargs = { - 'log_code': False, - 'log_env_gpu': True, - 'log_env_cpu': True, - 'project_name': COMET_PROJECT_NAME, } + "log_code": False, + "log_env_gpu": True, + "log_env_cpu": True, + "project_name": COMET_PROJECT_NAME, + } self.default_experiment_kwargs.update(experiment_kwargs) self.experiment = self._get_experiment(self.comet_mode, run_id) self.experiment.set_name(self.opt.name) self.data_dict = self.check_dataset(self.opt.data) - self.class_names = self.data_dict['names'] - self.num_classes = self.data_dict['nc'] + self.class_names = self.data_dict["names"] + self.num_classes = self.data_dict["nc"] self.logged_images_count = 0 self.max_images = COMET_MAX_IMAGE_UPLOADS if run_id is None: - self.experiment.log_other('Created from', 'YOLOv5') + self.experiment.log_other("Created from", "YOLOv5") if not isinstance(self.experiment, comet_ml.OfflineExperiment): - workspace, project_name, experiment_id = self.experiment.url.split('/')[-3:] + workspace, project_name, experiment_id = self.experiment.url.split("/")[-3:] self.experiment.log_other( - 'Run Path', - f'{workspace}/{project_name}/{experiment_id}', + "Run Path", + f"{workspace}/{project_name}/{experiment_id}", ) self.log_parameters(vars(opt)) self.log_parameters(self.opt.hyp) self.log_asset_data( self.opt.hyp, - name='hyperparameters.json', - metadata={'type': 'hyp-config-file'}, + name="hyperparameters.json", + metadata={"type": "hyp-config-file"}, ) self.log_asset( - f'{self.opt.save_dir}/opt.yaml', - metadata={'type': 'opt-config-file'}, + f"{self.opt.save_dir}/opt.yaml", + metadata={"type": "opt-config-file"}, ) self.comet_log_confusion_matrix = COMET_LOG_CONFUSION_MATRIX - if hasattr(self.opt, 'conf_thres'): + if hasattr(self.opt, "conf_thres"): self.conf_thres = self.opt.conf_thres else: self.conf_thres = CONF_THRES - if hasattr(self.opt, 'iou_thres'): + if hasattr(self.opt, "iou_thres"): self.iou_thres = self.opt.iou_thres else: self.iou_thres = IOU_THRES - self.log_parameters({'val_iou_threshold': self.iou_thres, 'val_conf_threshold': self.conf_thres}) + self.log_parameters({"val_iou_threshold": self.iou_thres, "val_conf_threshold": self.conf_thres}) self.comet_log_predictions = COMET_LOG_PREDICTIONS if self.opt.bbox_interval == -1: - self.comet_log_prediction_interval = (1 if self.opt.epochs < 10 else self.opt.epochs // 10) + self.comet_log_prediction_interval = 1 if self.opt.epochs < 10 else self.opt.epochs // 10 else: self.comet_log_prediction_interval = self.opt.bbox_interval @@ -147,30 +146,35 @@ def __init__(self, opt, hyp, run_id=None, job_type='Training', **experiment_kwar self.comet_log_per_class_metrics = COMET_LOG_PER_CLASS_METRICS - self.experiment.log_others({ - 'comet_mode': COMET_MODE, - 'comet_max_image_uploads': COMET_MAX_IMAGE_UPLOADS, - 'comet_log_per_class_metrics': COMET_LOG_PER_CLASS_METRICS, - 'comet_log_batch_metrics': COMET_LOG_BATCH_METRICS, - 'comet_log_confusion_matrix': COMET_LOG_CONFUSION_MATRIX, - 'comet_model_name': COMET_MODEL_NAME, }) + self.experiment.log_others( + { + "comet_mode": COMET_MODE, + "comet_max_image_uploads": COMET_MAX_IMAGE_UPLOADS, + "comet_log_per_class_metrics": COMET_LOG_PER_CLASS_METRICS, + "comet_log_batch_metrics": COMET_LOG_BATCH_METRICS, + "comet_log_confusion_matrix": COMET_LOG_CONFUSION_MATRIX, + "comet_model_name": COMET_MODEL_NAME, + } + ) # Check if running the Experiment with the Comet Optimizer - if hasattr(self.opt, 'comet_optimizer_id'): - self.experiment.log_other('optimizer_id', 
self.opt.comet_optimizer_id) - self.experiment.log_other('optimizer_objective', self.opt.comet_optimizer_objective) - self.experiment.log_other('optimizer_metric', self.opt.comet_optimizer_metric) - self.experiment.log_other('optimizer_parameters', json.dumps(self.hyp)) + if hasattr(self.opt, "comet_optimizer_id"): + self.experiment.log_other("optimizer_id", self.opt.comet_optimizer_id) + self.experiment.log_other("optimizer_objective", self.opt.comet_optimizer_objective) + self.experiment.log_other("optimizer_metric", self.opt.comet_optimizer_metric) + self.experiment.log_other("optimizer_parameters", json.dumps(self.hyp)) def _get_experiment(self, mode, experiment_id=None): - if mode == 'offline': + if mode == "offline": if experiment_id is not None: return comet_ml.ExistingOfflineExperiment( previous_experiment=experiment_id, **self.default_experiment_kwargs, ) - return comet_ml.OfflineExperiment(**self.default_experiment_kwargs, ) + return comet_ml.OfflineExperiment( + **self.default_experiment_kwargs, + ) else: try: @@ -183,11 +187,13 @@ def _get_experiment(self, mode, experiment_id=None): return comet_ml.Experiment(**self.default_experiment_kwargs) except ValueError: - logger.warning('COMET WARNING: ' - 'Comet credentials have not been set. ' - 'Comet will default to offline logging. ' - 'Please set your credentials to enable online logging.') - return self._get_experiment('offline', experiment_id) + logger.warning( + "COMET WARNING: " + "Comet credentials have not been set. " + "Comet will default to offline logging. " + "Please set your credentials to enable online logging." + ) + return self._get_experiment("offline", experiment_id) return @@ -211,12 +217,13 @@ def log_model(self, path, opt, epoch, fitness_score, best_model=False): return model_metadata = { - 'fitness_score': fitness_score[-1], - 'epochs_trained': epoch + 1, - 'save_period': opt.save_period, - 'total_epochs': opt.epochs, } + "fitness_score": fitness_score[-1], + "epochs_trained": epoch + 1, + "save_period": opt.save_period, + "total_epochs": opt.epochs, + } - model_files = glob.glob(f'{path}/*.pt') + model_files = glob.glob(f"{path}/*.pt") for model_path in model_files: name = Path(model_path).name @@ -232,14 +239,14 @@ def check_dataset(self, data_file): with open(data_file) as f: data_config = yaml.safe_load(f) - path = data_config.get('path') + path = data_config.get("path") if path and path.startswith(COMET_PREFIX): - path = data_config['path'].replace(COMET_PREFIX, '') + path = data_config["path"].replace(COMET_PREFIX, "") data_dict = self.download_dataset_artifact(path) return data_dict - self.log_asset(self.opt.data, metadata={'type': 'data-config-file'}) + self.log_asset(self.opt.data, metadata={"type": "data-config-file"}) return check_dataset(data_file) @@ -255,8 +262,8 @@ def log_predictions(self, image, labelsn, path, shape, predn): filtered_detections = detections[mask] filtered_labels = labelsn[mask] - image_id = path.split('/')[-1].split('.')[0] - image_name = f'{image_id}_curr_epoch_{self.experiment.curr_epoch}' + image_id = path.split("/")[-1].split(".")[0] + image_name = f"{image_id}_curr_epoch_{self.experiment.curr_epoch}" if image_name not in self.logged_image_names: native_scale_image = PIL.Image.open(path) self.log_image(native_scale_image, name=image_name) @@ -264,23 +271,21 @@ def log_predictions(self, image, labelsn, path, shape, predn): metadata = [] for cls, *xyxy in filtered_labels.tolist(): - metadata.append({ - 'label': f'{self.class_names[int(cls)]}-gt', - 'score': 100, - 'box': { - 
'x': xyxy[0], - 'y': xyxy[1], - 'x2': xyxy[2], - 'y2': xyxy[3]}, }) + metadata.append( + { + "label": f"{self.class_names[int(cls)]}-gt", + "score": 100, + "box": {"x": xyxy[0], "y": xyxy[1], "x2": xyxy[2], "y2": xyxy[3]}, + } + ) for *xyxy, conf, cls in filtered_detections.tolist(): - metadata.append({ - 'label': f'{self.class_names[int(cls)]}', - 'score': conf * 100, - 'box': { - 'x': xyxy[0], - 'y': xyxy[1], - 'x2': xyxy[2], - 'y2': xyxy[3]}, }) + metadata.append( + { + "label": f"{self.class_names[int(cls)]}", + "score": conf * 100, + "box": {"x": xyxy[0], "y": xyxy[1], "x2": xyxy[2], "y2": xyxy[3]}, + } + ) self.metadata_dict[image_name] = metadata self.logged_images_count += 1 @@ -307,7 +312,7 @@ def preprocess_prediction(self, image, labels, shape, pred): return predn, labelsn def add_assets_to_artifact(self, artifact, path, asset_path, split): - img_paths = sorted(glob.glob(f'{asset_path}/*')) + img_paths = sorted(glob.glob(f"{asset_path}/*")) label_paths = img2label_paths(img_paths) for image_file, label_file in zip(img_paths, label_paths): @@ -317,33 +322,33 @@ def add_assets_to_artifact(self, artifact, path, asset_path, split): artifact.add( image_file, logical_path=image_logical_path, - metadata={'split': split}, + metadata={"split": split}, ) artifact.add( label_file, logical_path=label_logical_path, - metadata={'split': split}, + metadata={"split": split}, ) except ValueError as e: - logger.error('COMET ERROR: Error adding file to Artifact. Skipping file.') - logger.error(f'COMET ERROR: {e}') + logger.error("COMET ERROR: Error adding file to Artifact. Skipping file.") + logger.error(f"COMET ERROR: {e}") continue return artifact def upload_dataset_artifact(self): - dataset_name = self.data_dict.get('dataset_name', 'yolov5-dataset') - path = str((ROOT / Path(self.data_dict['path'])).resolve()) + dataset_name = self.data_dict.get("dataset_name", "yolov5-dataset") + path = str((ROOT / Path(self.data_dict["path"])).resolve()) metadata = self.data_dict.copy() - for key in ['train', 'val', 'test']: + for key in ["train", "val", "test"]: split_path = metadata.get(key) if split_path is not None: - metadata[key] = split_path.replace(path, '') + metadata[key] = split_path.replace(path, "") - artifact = comet_ml.Artifact(name=dataset_name, artifact_type='dataset', metadata=metadata) + artifact = comet_ml.Artifact(name=dataset_name, artifact_type="dataset", metadata=metadata) for key in metadata.keys(): - if key in ['train', 'val', 'test']: + if key in ["train", "val", "test"]: if isinstance(self.upload_dataset, str) and (key != self.upload_dataset): continue @@ -362,26 +367,27 @@ def download_dataset_artifact(self, artifact_path): metadata = logged_artifact.metadata data_dict = metadata.copy() - data_dict['path'] = artifact_save_dir + data_dict["path"] = artifact_save_dir - metadata_names = metadata.get('names') + metadata_names = metadata.get("names") if isinstance(metadata_names, dict): - data_dict['names'] = {int(k): v for k, v in metadata.get('names').items()} + data_dict["names"] = {int(k): v for k, v in metadata.get("names").items()} elif isinstance(metadata_names, list): - data_dict['names'] = {int(k): v for k, v in zip(range(len(metadata_names)), metadata_names)} + data_dict["names"] = {int(k): v for k, v in zip(range(len(metadata_names)), metadata_names)} else: raise "Invalid 'names' field in dataset yaml file. 
Please use a list or dictionary" return self.update_data_paths(data_dict) def update_data_paths(self, data_dict): - path = data_dict.get('path', '') + path = data_dict.get("path", "") - for split in ['train', 'val', 'test']: + for split in ["train", "val", "test"]: if data_dict.get(split): split_path = data_dict.get(split) - data_dict[split] = (f'{path}/{split_path}' if isinstance(split, str) else [ - f'{path}/{x}' for x in split_path]) + data_dict[split] = ( + f"{path}/{split_path}" if isinstance(split, str) else [f"{path}/{x}" for x in split_path] + ) return data_dict @@ -422,11 +428,11 @@ def on_train_batch_end(self, log_dict, step): def on_train_end(self, files, save_dir, last, best, epoch, results): if self.comet_log_predictions: curr_epoch = self.experiment.curr_epoch - self.experiment.log_asset_data(self.metadata_dict, 'image-metadata.json', epoch=curr_epoch) + self.experiment.log_asset_data(self.metadata_dict, "image-metadata.json", epoch=curr_epoch) for f in files: - self.log_asset(f, metadata={'epoch': epoch}) - self.log_asset(f'{save_dir}/results.csv', metadata={'epoch': epoch}) + self.log_asset(f, metadata={"epoch": epoch}) + self.log_asset(f"{save_dir}/results.csv", metadata={"epoch": epoch}) if not self.opt.evolve: model_path = str(best if best.exists() else last) @@ -440,9 +446,9 @@ def on_train_end(self, files, save_dir, last, best, epoch, results): ) # Check if running Experiment with Comet Optimizer - if hasattr(self.opt, 'comet_optimizer_id'): + if hasattr(self.opt, "comet_optimizer_id"): metric = results.get(self.opt.comet_optimizer_metric) - self.experiment.log_other('optimizer_metric_value', metric) + self.experiment.log_other("optimizer_metric_value", metric) self.finish_run() @@ -477,21 +483,22 @@ def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix) class_name = self.class_names[c] self.experiment.log_metrics( { - 'mAP@.5': ap50[i], - 'mAP@.5:.95': ap[i], - 'precision': p[i], - 'recall': r[i], - 'f1': f1[i], - 'true_positives': tp[i], - 'false_positives': fp[i], - 'support': nt[c], }, + "mAP@.5": ap50[i], + "mAP@.5:.95": ap[i], + "precision": p[i], + "recall": r[i], + "f1": f1[i], + "true_positives": tp[i], + "false_positives": fp[i], + "support": nt[c], + }, prefix=class_name, ) if self.comet_log_confusion_matrix: epoch = self.experiment.curr_epoch class_names = list(self.class_names.values()) - class_names.append('background') + class_names.append("background") num_classes = len(class_names) self.experiment.log_confusion_matrix( @@ -499,9 +506,9 @@ def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix) max_categories=num_classes, labels=class_names, epoch=epoch, - column_label='Actual Category', - row_label='Predicted Category', - file_name=f'confusion-matrix-epoch-{epoch}.json', + column_label="Actual Category", + row_label="Predicted Category", + file_name=f"confusion-matrix-epoch-{epoch}.json", ) def on_fit_epoch_end(self, result, epoch): diff --git a/utils/loggers/comet/comet_utils.py b/utils/loggers/comet/comet_utils.py index 27600761ad28..f7b56dddd5f7 100644 --- a/utils/loggers/comet/comet_utils.py +++ b/utils/loggers/comet/comet_utils.py @@ -11,28 +11,28 @@ logger = logging.getLogger(__name__) -COMET_PREFIX = 'comet://' -COMET_MODEL_NAME = os.getenv('COMET_MODEL_NAME', 'yolov5') -COMET_DEFAULT_CHECKPOINT_FILENAME = os.getenv('COMET_DEFAULT_CHECKPOINT_FILENAME', 'last.pt') +COMET_PREFIX = "comet://" +COMET_MODEL_NAME = os.getenv("COMET_MODEL_NAME", "yolov5") +COMET_DEFAULT_CHECKPOINT_FILENAME = 
os.getenv("COMET_DEFAULT_CHECKPOINT_FILENAME", "last.pt") def download_model_checkpoint(opt, experiment): - model_dir = f'{opt.project}/{experiment.name}' + model_dir = f"{opt.project}/{experiment.name}" os.makedirs(model_dir, exist_ok=True) model_name = COMET_MODEL_NAME model_asset_list = experiment.get_model_asset_list(model_name) if len(model_asset_list) == 0: - logger.error(f'COMET ERROR: No checkpoints found for model name : {model_name}') + logger.error(f"COMET ERROR: No checkpoints found for model name : {model_name}") return model_asset_list = sorted( model_asset_list, - key=lambda x: x['step'], + key=lambda x: x["step"], reverse=True, ) - logged_checkpoint_map = {asset['fileName']: asset['assetId'] for asset in model_asset_list} + logged_checkpoint_map = {asset["fileName"]: asset["assetId"] for asset in model_asset_list} resource_url = urlparse(opt.weights) checkpoint_filename = resource_url.query @@ -44,28 +44,28 @@ def download_model_checkpoint(opt, experiment): checkpoint_filename = COMET_DEFAULT_CHECKPOINT_FILENAME if asset_id is None: - logger.error(f'COMET ERROR: Checkpoint {checkpoint_filename} not found in the given Experiment') + logger.error(f"COMET ERROR: Checkpoint {checkpoint_filename} not found in the given Experiment") return try: - logger.info(f'COMET INFO: Downloading checkpoint {checkpoint_filename}') + logger.info(f"COMET INFO: Downloading checkpoint {checkpoint_filename}") asset_filename = checkpoint_filename - model_binary = experiment.get_asset(asset_id, return_type='binary', stream=False) - model_download_path = f'{model_dir}/{asset_filename}' - with open(model_download_path, 'wb') as f: + model_binary = experiment.get_asset(asset_id, return_type="binary", stream=False) + model_download_path = f"{model_dir}/{asset_filename}" + with open(model_download_path, "wb") as f: f.write(model_binary) opt.weights = model_download_path except Exception as e: - logger.warning('COMET WARNING: Unable to download checkpoint from Comet') + logger.warning("COMET WARNING: Unable to download checkpoint from Comet") logger.exception(e) def set_opt_parameters(opt, experiment): - """Update the opts Namespace with parameters - from Comet's ExistingExperiment when resuming a run + """ + Update the opts Namespace with parameters from Comet's ExistingExperiment when resuming a run. 
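[Editor's note] How a comet:// weights URI decomposes under urlparse(), which download_model_checkpoint() and check_comet_weights() rely on; the URI itself is a placeholder:

    from urllib.parse import urlparse

    resource = urlparse("comet://workspace/project/experiment_id?best.pt")
    experiment_path = f"{resource.netloc}{resource.path}"  # "workspace/project/experiment_id"
    checkpoint_filename = resource.query or "last.pt"  # COMET_DEFAULT_CHECKPOINT_FILENAME fallback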
Args: opt (argparse.Namespace): Namespace of command line options @@ -75,9 +75,9 @@ def set_opt_parameters(opt, experiment): resume_string = opt.resume for asset in asset_list: - if asset['fileName'] == 'opt.yaml': - asset_id = asset['assetId'] - asset_binary = experiment.get_asset(asset_id, return_type='binary', stream=False) + if asset["fileName"] == "opt.yaml": + asset_id = asset["assetId"] + asset_binary = experiment.get_asset(asset_id, return_type="binary", stream=False) opt_dict = yaml.safe_load(asset_binary) for key, value in opt_dict.items(): setattr(opt, key, value) @@ -85,18 +85,18 @@ def set_opt_parameters(opt, experiment): # Save hyperparameters to YAML file # Necessary to pass checks in training script - save_dir = f'{opt.project}/{experiment.name}' + save_dir = f"{opt.project}/{experiment.name}" os.makedirs(save_dir, exist_ok=True) - hyp_yaml_path = f'{save_dir}/hyp.yaml' - with open(hyp_yaml_path, 'w') as f: + hyp_yaml_path = f"{save_dir}/hyp.yaml" + with open(hyp_yaml_path, "w") as f: yaml.dump(opt.hyp, f) opt.hyp = hyp_yaml_path def check_comet_weights(opt): - """Downloads model weights from Comet and updates the - weights path to point to saved weights location + """ + Downloads model weights from Comet and updates the weights path to point to saved weights location. Args: opt (argparse.Namespace): Command Line arguments passed @@ -113,7 +113,7 @@ def check_comet_weights(opt): if opt.weights.startswith(COMET_PREFIX): api = comet_ml.API() resource = urlparse(opt.weights) - experiment_path = f'{resource.netloc}{resource.path}' + experiment_path = f"{resource.netloc}{resource.path}" experiment = api.get(experiment_path) download_model_checkpoint(opt, experiment) return True @@ -122,8 +122,8 @@ def check_comet_weights(opt): def check_comet_resume(opt): - """Restores run parameters to its original state based on the model checkpoint - and logged Experiment parameters. + """ + Restores run parameters to its original state based on the model checkpoint and logged Experiment parameters. 
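For illustration, this is how the comet:// strings handled by check_comet_weights and check_comet_resume decompose with urlparse; the workspace/project/experiment and checkpoint names below are made up:

>>> from urllib.parse import urlparse
>>> r = urlparse("comet://workspace/project/abc123?best.pt")
>>> f"{r.netloc}{r.path}", r.query
('workspace/project/abc123', 'best.pt')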
Args: opt (argparse.Namespace): Command Line arguments passed @@ -140,7 +140,7 @@ def check_comet_resume(opt): if opt.resume.startswith(COMET_PREFIX): api = comet_ml.API() resource = urlparse(opt.resume) - experiment_path = f'{resource.netloc}{resource.path}' + experiment_path = f"{resource.netloc}{resource.path}" experiment = api.get(experiment_path) set_opt_parameters(opt, experiment) download_model_checkpoint(opt, experiment) diff --git a/utils/loggers/comet/hpo.py b/utils/loggers/comet/hpo.py index fc49115c1358..a9e6fabec1cd 100644 --- a/utils/loggers/comet/hpo.py +++ b/utils/loggers/comet/hpo.py @@ -21,77 +21,79 @@ # Project Configuration config = comet_ml.config.get_config() -COMET_PROJECT_NAME = config.get_string(os.getenv('COMET_PROJECT_NAME'), 'comet.project_name', default='yolov5') +COMET_PROJECT_NAME = config.get_string(os.getenv("COMET_PROJECT_NAME"), "comet.project_name", default="yolov5") def get_args(known=False): parser = argparse.ArgumentParser() - parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='initial weights path') - parser.add_argument('--cfg', type=str, default='', help='model.yaml path') - parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') - parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path') - parser.add_argument('--epochs', type=int, default=300, help='total training epochs') - parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch') - parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)') - parser.add_argument('--rect', action='store_true', help='rectangular training') - parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training') - parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') - parser.add_argument('--noval', action='store_true', help='only validate final epoch') - parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor') - parser.add_argument('--noplots', action='store_true', help='save no plot files') - parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations') - parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') - parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"') - parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training') - parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') - parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%') - parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class') - parser.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW'], default='SGD', help='optimizer') - parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode') - parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') - parser.add_argument('--project', default=ROOT / 'runs/train', help='save to project/name') - parser.add_argument('--name', default='exp', help='save to project/name') - parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') - parser.add_argument('--quad', action='store_true', help='quad dataloader') - parser.add_argument('--cos-lr', action='store_true', help='cosine LR scheduler') - parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon') - parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)') - parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2') - parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)') - parser.add_argument('--seed', type=int, default=0, help='Global training seed') - parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify') + parser.add_argument("--weights", type=str, default=ROOT / "yolov5s.pt", help="initial weights path") + parser.add_argument("--cfg", type=str, default="", help="model.yaml path") + parser.add_argument("--data", type=str, default=ROOT / "data/coco128.yaml", help="dataset.yaml path") + parser.add_argument("--hyp", type=str, default=ROOT / "data/hyps/hyp.scratch-low.yaml", help="hyperparameters path") + parser.add_argument("--epochs", type=int, default=300, help="total training epochs") + parser.add_argument("--batch-size", type=int, default=16, help="total batch size for all GPUs, -1 for autobatch") + parser.add_argument("--imgsz", "--img", "--img-size", type=int, default=640, help="train, val image size (pixels)") + parser.add_argument("--rect", action="store_true", help="rectangular training") + parser.add_argument("--resume", nargs="?", const=True, default=False, help="resume most recent training") + parser.add_argument("--nosave", action="store_true", help="only save final checkpoint") + parser.add_argument("--noval", action="store_true", help="only validate final epoch") + parser.add_argument("--noautoanchor", action="store_true", help="disable AutoAnchor") + parser.add_argument("--noplots", action="store_true", help="save no plot files") + parser.add_argument("--evolve", type=int, nargs="?", const=300, help="evolve hyperparameters for x generations") + parser.add_argument("--bucket", type=str, default="", help="gsutil bucket") + parser.add_argument("--cache", type=str, nargs="?", const="ram", help='--cache images in "ram" (default) or "disk"') + parser.add_argument("--image-weights", action="store_true", help="use weighted image selection for training") + parser.add_argument("--device", default="", help="cuda device, i.e. 
0 or 0,1,2,3 or cpu") + parser.add_argument("--multi-scale", action="store_true", help="vary img-size +/- 50%%") + parser.add_argument("--single-cls", action="store_true", help="train multi-class data as single-class") + parser.add_argument("--optimizer", type=str, choices=["SGD", "Adam", "AdamW"], default="SGD", help="optimizer") + parser.add_argument("--sync-bn", action="store_true", help="use SyncBatchNorm, only available in DDP mode") + parser.add_argument("--workers", type=int, default=8, help="max dataloader workers (per RANK in DDP mode)") + parser.add_argument("--project", default=ROOT / "runs/train", help="save to project/name") + parser.add_argument("--name", default="exp", help="save to project/name") + parser.add_argument("--exist-ok", action="store_true", help="existing project/name ok, do not increment") + parser.add_argument("--quad", action="store_true", help="quad dataloader") + parser.add_argument("--cos-lr", action="store_true", help="cosine LR scheduler") + parser.add_argument("--label-smoothing", type=float, default=0.0, help="Label smoothing epsilon") + parser.add_argument("--patience", type=int, default=100, help="EarlyStopping patience (epochs without improvement)") + parser.add_argument("--freeze", nargs="+", type=int, default=[0], help="Freeze layers: backbone=10, first3=0 1 2") + parser.add_argument("--save-period", type=int, default=-1, help="Save checkpoint every x epochs (disabled if < 1)") + parser.add_argument("--seed", type=int, default=0, help="Global training seed") + parser.add_argument("--local_rank", type=int, default=-1, help="Automatic DDP Multi-GPU argument, do not modify") # Weights & Biases arguments - parser.add_argument('--entity', default=None, help='W&B: Entity') - parser.add_argument('--upload_dataset', nargs='?', const=True, default=False, help='W&B: Upload data, "val" option') - parser.add_argument('--bbox_interval', type=int, default=-1, help='W&B: Set bounding-box image logging interval') - parser.add_argument('--artifact_alias', type=str, default='latest', help='W&B: Version of dataset artifact to use') + parser.add_argument("--entity", default=None, help="W&B: Entity") + parser.add_argument("--upload_dataset", nargs="?", const=True, default=False, help='W&B: Upload data, "val" option') + parser.add_argument("--bbox_interval", type=int, default=-1, help="W&B: Set bounding-box image logging interval") + parser.add_argument("--artifact_alias", type=str, default="latest", help="W&B: Version of dataset artifact to use") # Comet Arguments - parser.add_argument('--comet_optimizer_config', type=str, help='Comet: Path to a Comet Optimizer Config File.') - parser.add_argument('--comet_optimizer_id', type=str, help='Comet: ID of the Comet Optimizer sweep.') - parser.add_argument('--comet_optimizer_objective', type=str, help="Comet: Set to 'minimize' or 'maximize'.") - parser.add_argument('--comet_optimizer_metric', type=str, help='Comet: Metric to Optimize.') - parser.add_argument('--comet_optimizer_workers', - type=int, - default=1, - help='Comet: Number of Parallel Workers to use with the Comet Optimizer.') + parser.add_argument("--comet_optimizer_config", type=str, help="Comet: Path to a Comet Optimizer Config File.") + parser.add_argument("--comet_optimizer_id", type=str, help="Comet: ID of the Comet Optimizer sweep.") + parser.add_argument("--comet_optimizer_objective", type=str, help="Comet: Set to 'minimize' or 'maximize'.") + parser.add_argument("--comet_optimizer_metric", type=str, help="Comet: Metric to Optimize.") + parser.add_argument( 
+ "--comet_optimizer_workers", + type=int, + default=1, + help="Comet: Number of Parallel Workers to use with the Comet Optimizer.", + ) return parser.parse_known_args()[0] if known else parser.parse_args() def run(parameters, opt): - hyp_dict = {k: v for k, v in parameters.items() if k not in ['epochs', 'batch_size']} + hyp_dict = {k: v for k, v in parameters.items() if k not in ["epochs", "batch_size"]} opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve)) - opt.batch_size = parameters.get('batch_size') - opt.epochs = parameters.get('epochs') + opt.batch_size = parameters.get("batch_size") + opt.epochs = parameters.get("epochs") device = select_device(opt.device, batch_size=opt.batch_size) train(hyp_dict, opt, device, callbacks=Callbacks()) -if __name__ == '__main__': +if __name__ == "__main__": opt = get_args(known=True) opt.weights = str(opt.weights) @@ -99,7 +101,7 @@ def run(parameters, opt): opt.data = str(opt.data) opt.project = str(opt.project) - optimizer_id = os.getenv('COMET_OPTIMIZER_ID') + optimizer_id = os.getenv("COMET_OPTIMIZER_ID") if optimizer_id is None: with open(opt.comet_optimizer_config) as f: optimizer_config = json.load(f) @@ -110,9 +112,9 @@ def run(parameters, opt): opt.comet_optimizer_id = optimizer.id status = optimizer.status() - opt.comet_optimizer_objective = status['spec']['objective'] - opt.comet_optimizer_metric = status['spec']['metric'] + opt.comet_optimizer_objective = status["spec"]["objective"] + opt.comet_optimizer_metric = status["spec"]["metric"] - logger.info('COMET INFO: Starting Hyperparameter Sweep') + logger.info("COMET INFO: Starting Hyperparameter Sweep") for parameter in optimizer.get_parameters(): - run(parameter['parameters'], opt) + run(parameter["parameters"], opt) diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index 4ea32b1d4c6e..f8d49a33d00f 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -15,34 +15,35 @@ ROOT = FILE.parents[3] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH -RANK = int(os.getenv('RANK', -1)) -DEPRECATION_WARNING = f"{colorstr('wandb')}: WARNING ⚠️ wandb is deprecated and will be removed in a future release. " \ - f'See supported integrations at https://github.com/ultralytics/yolov5#integrations.' +RANK = int(os.getenv("RANK", -1)) +DEPRECATION_WARNING = ( + f"{colorstr('wandb')}: WARNING ⚠️ wandb is deprecated and will be removed in a future release. " + f'See supported integrations at https://github.com/ultralytics/yolov5#integrations.' +) try: import wandb - assert hasattr(wandb, '__version__') # verify package import not local dir + assert hasattr(wandb, "__version__") # verify package import not local dir LOGGER.warning(DEPRECATION_WARNING) except (ImportError, AssertionError): wandb = None -class WandbLogger(): - """Log training runs, datasets, models, and predictions to Weights & Biases. +class WandbLogger: + """ + Log training runs, datasets, models, and predictions to Weights & Biases. - This logger sends information to W&B at wandb.ai. By default, this information - includes hyperparameters, system configuration and metrics, model metrics, - and basic data metrics and analyses. + This logger sends information to W&B at wandb.ai. By default, this information includes hyperparameters, system + configuration and metrics, model metrics, and basic data metrics and analyses. 
- By providing additional command line arguments to train.py, datasets, - models and predictions can also be logged. + By providing additional command line arguments to train.py, datasets, models and predictions can also be logged. For more on how this logger is used, see the Weights & Biases documentation: https://docs.wandb.com/guides/integrations/yolov5 """ - def __init__(self, opt, run_id=None, job_type='Training'): + def __init__(self, opt, run_id=None, job_type="Training"): """ - Initialize WandbLogger instance - Upload dataset if opt.upload_dataset is True @@ -53,7 +54,7 @@ def __init__(self, opt, run_id=None, job_type='Training'): run_id (str) -- Run ID of W&B run to be resumed job_type (str) -- To set the job_type for this run - """ + """ # Pre-training routine -- self.job_type = job_type self.wandb, self.wandb_run = wandb, wandb.run if wandb else None @@ -64,17 +65,23 @@ def __init__(self, opt, run_id=None, job_type='Training'): self.max_imgs_to_log = 16 self.data_dict = None if self.wandb: - self.wandb_run = wandb.init(config=opt, - resume='allow', - project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem, - entity=opt.entity, - name=opt.name if opt.name != 'exp' else None, - job_type=job_type, - id=run_id, - allow_val_change=True) if not wandb.run else wandb.run + self.wandb_run = ( + wandb.init( + config=opt, + resume="allow", + project="YOLOv5" if opt.project == "runs/train" else Path(opt.project).stem, + entity=opt.entity, + name=opt.name if opt.name != "exp" else None, + job_type=job_type, + id=run_id, + allow_val_change=True, + ) + if not wandb.run + else wandb.run + ) if self.wandb_run: - if self.job_type == 'Training': + if self.job_type == "Training": if isinstance(opt.data, dict): # This means another dataset manager has already processed the dataset info (e.g. ClearML) # and they will have stored the already processed dict in opt.data @@ -97,11 +104,17 @@ def setup_training(self, opt): if isinstance(opt.resume, str): model_dir, _ = self.download_model_artifact(opt) if model_dir: - self.weights = Path(model_dir) / 'last.pt' + self.weights = Path(model_dir) / "last.pt" config = self.wandb_run.config - opt.weights, opt.save_period, opt.batch_size, opt.bbox_interval, opt.epochs, opt.hyp, opt.imgsz = str( - self.weights), config.save_period, config.batch_size, config.bbox_interval, config.epochs, \ - config.hyp, config.imgsz + opt.weights, opt.save_period, opt.batch_size, opt.bbox_interval, opt.epochs, opt.hyp, opt.imgsz = ( + str(self.weights), + config.save_period, + config.batch_size, + config.bbox_interval, + config.epochs, + config.hyp, + config.imgsz, + ) if opt.bbox_interval == -1: self.bbox_interval = opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else 1 @@ -110,7 +123,7 @@ def setup_training(self, opt): def log_model(self, path, opt, epoch, fitness_score, best_model=False): """ - Log the model checkpoint as W&B artifact + Log the model checkpoint as W&B artifact. arguments: path (Path) -- Path of directory containing the checkpoints @@ -119,26 +132,30 @@ def log_model(self, path, opt, epoch, fitness_score, best_model=False): fitness_score (float) -- fitness score for current epoch best_model (boolean) -- Boolean representing if the current checkpoint is the best yet. 
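        Note: per the aliases list just below, the uploaded checkpoint artifact is
        tagged 'latest', 'last', 'epoch <current_epoch>', and 'best' when
        best_model is True.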
""" - model_artifact = wandb.Artifact('run_' + wandb.run.id + '_model', - type='model', - metadata={ - 'original_url': str(path), - 'epochs_trained': epoch + 1, - 'save period': opt.save_period, - 'project': opt.project, - 'total_epochs': opt.epochs, - 'fitness_score': fitness_score}) - model_artifact.add_file(str(path / 'last.pt'), name='last.pt') - wandb.log_artifact(model_artifact, - aliases=['latest', 'last', 'epoch ' + str(self.current_epoch), 'best' if best_model else '']) - LOGGER.info(f'Saving model artifact on epoch {epoch + 1}') + model_artifact = wandb.Artifact( + "run_" + wandb.run.id + "_model", + type="model", + metadata={ + "original_url": str(path), + "epochs_trained": epoch + 1, + "save period": opt.save_period, + "project": opt.project, + "total_epochs": opt.epochs, + "fitness_score": fitness_score, + }, + ) + model_artifact.add_file(str(path / "last.pt"), name="last.pt") + wandb.log_artifact( + model_artifact, aliases=["latest", "last", "epoch " + str(self.current_epoch), "best" if best_model else ""] + ) + LOGGER.info(f"Saving model artifact on epoch {epoch + 1}") def val_one_image(self, pred, predn, path, names, im): pass def log(self, log_dict): """ - save the metrics to the logging dictionary + Save the metrics to the logging dictionary. arguments: log_dict (Dict) -- metrics/media to be logged in current step @@ -149,7 +166,7 @@ def log(self, log_dict): def end_epoch(self): """ - commit the log_dict, model artifacts and Tables to W&B and flush the log_dict. + Commit the log_dict, model artifacts and Tables to W&B and flush the log_dict. arguments: best_result (boolean): Boolean representing if the result of this evaluation is best or not @@ -160,16 +177,14 @@ def end_epoch(self): wandb.log(self.log_dict) except BaseException as e: LOGGER.info( - f'An error occurred in wandb logger. The training will proceed without interruption. More info\n{e}' + f"An error occurred in wandb logger. The training will proceed without interruption. More info\n{e}" ) self.wandb_run.finish() self.wandb_run = None self.log_dict = {} def finish_run(self): - """ - Log metrics if any and finish the current W&B run - """ + """Log metrics if any and finish the current W&B run.""" if self.wandb_run: if self.log_dict: with all_logging_disabled(): @@ -180,7 +195,7 @@ def finish_run(self): @contextmanager def all_logging_disabled(highest_level=logging.CRITICAL): - """ source - https://gist.github.com/simon-weber/7853144 + """source - https://gist.github.com/simon-weber/7853144 A context manager that will prevent any logging messages triggered during the body from being processed. :param highest_level: the maximum logging level in use. This would only need to be changed if a custom level greater than CRITICAL is defined. diff --git a/utils/loss.py b/utils/loss.py index 26cca8797315..26b8c06bf333 100644 --- a/utils/loss.py +++ b/utils/loss.py @@ -1,7 +1,5 @@ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license -""" -Loss functions -""" +"""Loss functions.""" import torch import torch.nn as nn @@ -19,7 +17,7 @@ class BCEBlurWithLogitsLoss(nn.Module): # BCEwithLogitLoss() with reduced missing label effects. 
def __init__(self, alpha=0.05): super().__init__() - self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none') # must be nn.BCEWithLogitsLoss() + self.loss_fcn = nn.BCEWithLogitsLoss(reduction="none") # must be nn.BCEWithLogitsLoss() self.alpha = alpha def forward(self, pred, true): @@ -40,7 +38,7 @@ def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): self.gamma = gamma self.alpha = alpha self.reduction = loss_fcn.reduction - self.loss_fcn.reduction = 'none' # required to apply FL to each element + self.loss_fcn.reduction = "none" # required to apply FL to each element def forward(self, pred, true): loss = self.loss_fcn(pred, true) @@ -54,9 +52,9 @@ def forward(self, pred, true): modulating_factor = (1.0 - p_t) ** self.gamma loss *= alpha_factor * modulating_factor - if self.reduction == 'mean': + if self.reduction == "mean": return loss.mean() - elif self.reduction == 'sum': + elif self.reduction == "sum": return loss.sum() else: # 'none' return loss @@ -70,7 +68,7 @@ def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): self.gamma = gamma self.alpha = alpha self.reduction = loss_fcn.reduction - self.loss_fcn.reduction = 'none' # required to apply FL to each element + self.loss_fcn.reduction = "none" # required to apply FL to each element def forward(self, pred, true): loss = self.loss_fcn(pred, true) @@ -80,9 +78,9 @@ def forward(self, pred, true): modulating_factor = torch.abs(true - pred_prob) ** self.gamma loss *= alpha_factor * modulating_factor - if self.reduction == 'mean': + if self.reduction == "mean": return loss.mean() - elif self.reduction == 'sum': + elif self.reduction == "sum": return loss.sum() else: # 'none' return loss @@ -97,14 +95,14 @@ def __init__(self, model, autobalance=False): h = model.hyp # hyperparameters # Define criteria - BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device)) - BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device)) + BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h["cls_pw"]], device=device)) + BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h["obj_pw"]], device=device)) # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 - self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets + self.cp, self.cn = smooth_BCE(eps=h.get("label_smoothing", 0.0)) # positive, negative BCE targets # Focal loss - g = h['fl_gamma'] # focal loss gamma + g = h["fl_gamma"] # focal loss gamma if g > 0: BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) @@ -167,9 +165,9 @@ def __call__(self, p, targets): # predictions, targets if self.autobalance: self.balance = [x / self.balance[self.ssi] for x in self.balance] - lbox *= self.hyp['box'] - lobj *= self.hyp['obj'] - lcls *= self.hyp['cls'] + lbox *= self.hyp["box"] + lobj *= self.hyp["obj"] + lcls *= self.hyp["cls"] bs = tobj.shape[0] # batch size return (lbox + lobj + lcls) * bs, torch.cat((lbox, lobj, lcls)).detach() @@ -183,16 +181,20 @@ def build_targets(self, p, targets): targets = torch.cat((targets.repeat(na, 1, 1), ai[..., None]), 2) # append anchor indices g = 0.5 # bias - off = torch.tensor( - [ - [0, 0], - [1, 0], - [0, 1], - [-1, 0], - [0, -1], # j,k,l,m - # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm - ], - device=self.device).float() * g # offsets + off = ( + torch.tensor( + [ + [0, 0], + [1, 0], + [0, 1], + [-1, 0], + [0, -1], # j,k,l,m + # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm + ], + device=self.device, + ).float() + * g + ) # offsets for i in 
range(self.nl): anchors, shape = self.anchors[i], p[i].shape @@ -203,7 +205,7 @@ def build_targets(self, p, targets): if nt: # Matches r = t[..., 4:6] / anchors[:, None] # wh ratio - j = torch.max(r, 1 / r).max(2)[0] < self.hyp['anchor_t'] # compare + j = torch.max(r, 1 / r).max(2)[0] < self.hyp["anchor_t"] # compare # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2)) t = t[j] # filter diff --git a/utils/metrics.py b/utils/metrics.py index 5646f40e9860..5f45621dc372 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -1,7 +1,5 @@ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license -""" -Model validation metrics -""" +"""Model validation metrics.""" import math import warnings @@ -25,11 +23,13 @@ def smooth(y, f=0.05): nf = round(len(y) * f * 2) // 2 + 1 # number of filter elements (must be odd) p = np.ones(nf // 2) # ones padding yp = np.concatenate((p * y[0], y, p * y[-1]), 0) # y padded - return np.convolve(yp, np.ones(nf) / nf, mode='valid') # y-smoothed + return np.convolve(yp, np.ones(nf) / nf, mode="valid") # y-smoothed -def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=(), eps=1e-16, prefix=''): - """ Compute the average precision, given the recall and precision curves. +def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir=".", names=(), eps=1e-16, prefix=""): + """ + Compute the average precision, given the recall and precision curves. + Source: https://github.com/rafaelpadilla/Object-Detection-Metrics. # Arguments tp: True positives (nparray, nx1 or nx10). @@ -83,10 +83,10 @@ def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names names = [v for k, v in names.items() if k in unique_classes] # list: only classes that have data names = dict(enumerate(names)) # to dict if plot: - plot_pr_curve(px, py, ap, Path(save_dir) / f'{prefix}PR_curve.png', names) - plot_mc_curve(px, f1, Path(save_dir) / f'{prefix}F1_curve.png', names, ylabel='F1') - plot_mc_curve(px, p, Path(save_dir) / f'{prefix}P_curve.png', names, ylabel='Precision') - plot_mc_curve(px, r, Path(save_dir) / f'{prefix}R_curve.png', names, ylabel='Recall') + plot_pr_curve(px, py, ap, Path(save_dir) / f"{prefix}PR_curve.png", names) + plot_mc_curve(px, f1, Path(save_dir) / f"{prefix}F1_curve.png", names, ylabel="F1") + plot_mc_curve(px, p, Path(save_dir) / f"{prefix}P_curve.png", names, ylabel="Precision") + plot_mc_curve(px, r, Path(save_dir) / f"{prefix}R_curve.png", names, ylabel="Recall") i = smooth(f1.mean(0), 0.1).argmax() # max F1 index p, r, f1 = p[:, i], r[:, i], f1[:, i] @@ -96,7 +96,7 @@ def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names def compute_ap(recall, precision): - """ Compute the average precision, given the recall and precision curves + """Compute the average precision, given the recall and precision curves # Arguments recall: The recall curve (list) precision: The precision curve (list) @@ -112,8 +112,8 @@ def compute_ap(recall, precision): mpre = np.flip(np.maximum.accumulate(np.flip(mpre))) # Integrate area under curve - method = 'interp' # methods: 'continuous', 'interp' - if method == 'interp': + method = "interp" # methods: 'continuous', 'interp' + if method == "interp": x = np.linspace(0, 1, 101) # 101-point interp (COCO) ap = np.trapz(np.interp(x, mrec, mpre), x) # integrate else: # 'continuous' @@ -134,6 +134,7 @@ def __init__(self, nc, conf=0.25, iou_thres=0.45): def process_batch(self, detections, labels): """ Return intersection-over-union (Jaccard 
index) of boxes. + Both sets of boxes are expected to be in (x1, y1, x2, y2) format. Arguments: detections (Array[N, 6]), x1, y1, x2, y2, conf, class @@ -183,40 +184,41 @@ def tp_fp(self): # fn = self.matrix.sum(0) - tp # false negatives (missed detections) return tp[:-1], fp[:-1] # remove background class - @TryExcept('WARNING ⚠️ ConfusionMatrix plot failure') - def plot(self, normalize=True, save_dir='', names=()): + @TryExcept("WARNING ⚠️ ConfusionMatrix plot failure") + def plot(self, normalize=True, save_dir="", names=()): import seaborn as sn - array = self.matrix / ((self.matrix.sum(0).reshape(1, -1) + 1E-9) if normalize else 1) # normalize columns + array = self.matrix / ((self.matrix.sum(0).reshape(1, -1) + 1e-9) if normalize else 1) # normalize columns array[array < 0.005] = np.nan # don't annotate (would appear as 0.00) fig, ax = plt.subplots(1, 1, figsize=(12, 9), tight_layout=True) nc, nn = self.nc, len(names) # number of classes, names sn.set(font_scale=1.0 if nc < 50 else 0.8) # for label size labels = (0 < nn < 99) and (nn == nc) # apply names to ticklabels - ticklabels = (names + ['background']) if labels else 'auto' + ticklabels = (names + ["background"]) if labels else "auto" with warnings.catch_warnings(): - warnings.simplefilter('ignore') # suppress empty matrix RuntimeWarning: All-NaN slice encountered - sn.heatmap(array, - ax=ax, - annot=nc < 30, - annot_kws={ - 'size': 8}, - cmap='Blues', - fmt='.2f', - square=True, - vmin=0.0, - xticklabels=ticklabels, - yticklabels=ticklabels).set_facecolor((1, 1, 1)) - ax.set_xlabel('True') - ax.set_ylabel('Predicted') - ax.set_title('Confusion Matrix') - fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250) + warnings.simplefilter("ignore") # suppress empty matrix RuntimeWarning: All-NaN slice encountered + sn.heatmap( + array, + ax=ax, + annot=nc < 30, + annot_kws={"size": 8}, + cmap="Blues", + fmt=".2f", + square=True, + vmin=0.0, + xticklabels=ticklabels, + yticklabels=ticklabels, + ).set_facecolor((1, 1, 1)) + ax.set_xlabel("True") + ax.set_ylabel("Predicted") + ax.set_title("Confusion Matrix") + fig.savefig(Path(save_dir) / "confusion_matrix.png", dpi=250) plt.close(fig) def print(self): for i in range(self.nc + 1): - print(' '.join(map(str, self.matrix[i]))) + print(" ".join(map(str, self.matrix[i]))) def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7): @@ -235,8 +237,9 @@ def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7 w2, h2 = b2_x2 - b2_x1, (b2_y2 - b2_y1).clamp(eps) # Intersection area - inter = (b1_x2.minimum(b2_x2) - b1_x1.maximum(b2_x1)).clamp(0) * \ - (b1_y2.minimum(b2_y2) - b1_y1.maximum(b2_y1)).clamp(0) + inter = (b1_x2.minimum(b2_x2) - b1_x1.maximum(b2_x1)).clamp(0) * ( + b1_y2.minimum(b2_y2) - b1_y1.maximum(b2_y1) + ).clamp(0) # Union Area union = w1 * h1 + w2 * h2 - inter + eps @@ -247,10 +250,10 @@ def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7 cw = b1_x2.maximum(b2_x2) - b1_x1.minimum(b2_x1) # convex (smallest enclosing box) width ch = b1_y2.maximum(b2_y2) - b1_y1.minimum(b2_y1) # convex height if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1 - c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared + c2 = cw**2 + ch**2 + eps # convex diagonal squared rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center dist ** 2 if CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 - v = (4 / math.pi ** 2) * 
(torch.atan(w2 / h2) - torch.atan(w1 / h1)).pow(2) + v = (4 / math.pi**2) * (torch.atan(w2 / h2) - torch.atan(w1 / h1)).pow(2) with torch.no_grad(): alpha = v / (v - iou + (1 + eps)) return iou - (rho2 / c2 + v * alpha) # CIoU @@ -264,6 +267,7 @@ def box_iou(box1, box2, eps=1e-7): # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py """ Return intersection-over-union (Jaccard index) of boxes. + Both sets of boxes are expected to be in (x1, y1, x2, y2) format. Arguments: box1 (Tensor[N, 4]) @@ -282,7 +286,10 @@ def box_iou(box1, box2, eps=1e-7): def bbox_ioa(box1, box2, eps=1e-7): - """ Returns the intersection over box2 area given box1, box2. Boxes are x1y1x2y2 + """ + Returns the intersection over box2 area given box1, box2. + + Boxes are x1y1x2y2 box1: np.array of shape(4) box2: np.array of shape(nx4) returns: np.array of shape(n) @@ -293,8 +300,9 @@ def bbox_ioa(box1, box2, eps=1e-7): b2_x1, b2_y1, b2_x2, b2_y2 = box2.T # Intersection area - inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \ - (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0) + inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * ( + np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1) + ).clip(0) # box2 area box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + eps @@ -315,46 +323,46 @@ def wh_iou(wh1, wh2, eps=1e-7): @threaded -def plot_pr_curve(px, py, ap, save_dir=Path('pr_curve.png'), names=()): +def plot_pr_curve(px, py, ap, save_dir=Path("pr_curve.png"), names=()): # Precision-recall curve fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) py = np.stack(py, axis=1) if 0 < len(names) < 21: # display per-class legend if < 21 classes for i, y in enumerate(py.T): - ax.plot(px, y, linewidth=1, label=f'{names[i]} {ap[i, 0]:.3f}') # plot(recall, precision) + ax.plot(px, y, linewidth=1, label=f"{names[i]} {ap[i, 0]:.3f}") # plot(recall, precision) else: - ax.plot(px, py, linewidth=1, color='grey') # plot(recall, precision) + ax.plot(px, py, linewidth=1, color="grey") # plot(recall, precision) - ax.plot(px, py.mean(1), linewidth=3, color='blue', label='all classes %.3f mAP@0.5' % ap[:, 0].mean()) - ax.set_xlabel('Recall') - ax.set_ylabel('Precision') + ax.plot(px, py.mean(1), linewidth=3, color="blue", label="all classes %.3f mAP@0.5" % ap[:, 0].mean()) + ax.set_xlabel("Recall") + ax.set_ylabel("Precision") ax.set_xlim(0, 1) ax.set_ylim(0, 1) - ax.legend(bbox_to_anchor=(1.04, 1), loc='upper left') - ax.set_title('Precision-Recall Curve') + ax.legend(bbox_to_anchor=(1.04, 1), loc="upper left") + ax.set_title("Precision-Recall Curve") fig.savefig(save_dir, dpi=250) plt.close(fig) @threaded -def plot_mc_curve(px, py, save_dir=Path('mc_curve.png'), names=(), xlabel='Confidence', ylabel='Metric'): +def plot_mc_curve(px, py, save_dir=Path("mc_curve.png"), names=(), xlabel="Confidence", ylabel="Metric"): # Metric-confidence curve fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) if 0 < len(names) < 21: # display per-class legend if < 21 classes for i, y in enumerate(py): - ax.plot(px, y, linewidth=1, label=f'{names[i]}') # plot(confidence, metric) + ax.plot(px, y, linewidth=1, label=f"{names[i]}") # plot(confidence, metric) else: - ax.plot(px, py.T, linewidth=1, color='grey') # plot(confidence, metric) + ax.plot(px, py.T, linewidth=1, color="grey") # plot(confidence, metric) y = smooth(py.mean(0), 0.05) - ax.plot(px, y, linewidth=3, color='blue', label=f'all classes {y.max():.2f} at {px[y.argmax()]:.3f}') + ax.plot(px, y, 
linewidth=3, color="blue", label=f"all classes {y.max():.2f} at {px[y.argmax()]:.3f}") ax.set_xlabel(xlabel) ax.set_ylabel(ylabel) ax.set_xlim(0, 1) ax.set_ylim(0, 1) - ax.legend(bbox_to_anchor=(1.04, 1), loc='upper left') - ax.set_title(f'{ylabel}-Confidence Curve') + ax.legend(bbox_to_anchor=(1.04, 1), loc="upper left") + ax.set_title(f"{ylabel}-Confidence Curve") fig.savefig(save_dir, dpi=250) plt.close(fig) diff --git a/utils/plots.py b/utils/plots.py index 5901ca2dbfaa..11c96a6372c3 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -1,7 +1,5 @@ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license -""" -Plotting utils -""" +"""Plotting utils.""" import contextlib import math @@ -25,18 +23,38 @@ from utils.metrics import fitness # Settings -RANK = int(os.getenv('RANK', -1)) -matplotlib.rc('font', **{'size': 11}) -matplotlib.use('Agg') # for writing to files only +RANK = int(os.getenv("RANK", -1)) +matplotlib.rc("font", **{"size": 11}) +matplotlib.use("Agg") # for writing to files only class Colors: # Ultralytics color palette https://ultralytics.com/ def __init__(self): # hex = matplotlib.colors.TABLEAU_COLORS.values() - hexs = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB', - '2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7') - self.palette = [self.hex2rgb(f'#{c}') for c in hexs] + hexs = ( + "FF3838", + "FF9D97", + "FF701F", + "FFB21D", + "CFD231", + "48F90A", + "92CC17", + "3DDB86", + "1A9334", + "00D4BB", + "2C99A8", + "00C2FF", + "344593", + "6473FF", + "0018EC", + "8438FF", + "520085", + "CB38FF", + "FF95C8", + "FF37C7", + ) + self.palette = [self.hex2rgb(f"#{c}") for c in hexs] self.n = len(self.palette) def __call__(self, i, bgr=False): @@ -45,13 +63,13 @@ def __call__(self, i, bgr=False): @staticmethod def hex2rgb(h): # rgb order (PIL) - return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4)) + return tuple(int(h[1 + i : 1 + i + 2], 16) for i in (0, 2, 4)) colors = Colors() # create instance for 'from utils.plots import colors' -def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')): +def feature_visualization(x, module_type, stage, n=32, save_dir=Path("runs/detect/exp")): """ x: Features to be visualized module_type: Module type @@ -59,9 +77,9 @@ def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detec n: Maximum number of feature maps to plot save_dir: Directory to save results """ - if ('Detect' - not in module_type) and ('Segment' - not in module_type): # 'Detect' for Object Detect task,'Segment' for Segment task + if ("Detect" not in module_type) and ( + "Segment" not in module_type + ): # 'Detect' for Object Detect task,'Segment' for Segment task batch, channels, height, width = x.shape # batch, channels, height, width if height > 1 and width > 1: f = save_dir / f"stage{stage}_{module_type.split('.')[-1]}_features.png" # filename @@ -73,12 +91,12 @@ def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detec plt.subplots_adjust(wspace=0.05, hspace=0.05) for i in range(n): ax[i].imshow(blocks[i].squeeze()) # cmap='gray' - ax[i].axis('off') + ax[i].axis("off") - LOGGER.info(f'Saving {f}... ({n}/{channels})') - plt.savefig(f, dpi=300, bbox_inches='tight') + LOGGER.info(f"Saving {f}... 
({n}/{channels})") + plt.savefig(f, dpi=300, bbox_inches="tight") plt.close() - np.save(str(f.with_suffix('.npy')), x[0].cpu().numpy()) # npy save + np.save(str(f.with_suffix(".npy")), x[0].cpu().numpy()) # npy save def hist2d(x, y, n=100): @@ -97,7 +115,7 @@ def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5): def butter_lowpass(cutoff, fs, order): nyq = 0.5 * fs normal_cutoff = cutoff / nyq - return butter(order, normal_cutoff, btype='low', analog=False) + return butter(order, normal_cutoff, btype="low", analog=False) b, a = butter_lowpass(cutoff, fs, order=order) return filtfilt(b, a, data) # forward-backward filter @@ -114,7 +132,7 @@ def output_to_target(output, max_det=300): @threaded -def plot_images(images, targets, paths=None, fname='images.jpg', names=None): +def plot_images(images, targets, paths=None, fname="images.jpg", names=None): # Plot image grid with labels if isinstance(images, torch.Tensor): images = images.cpu().float().numpy() @@ -125,7 +143,7 @@ def plot_images(images, targets, paths=None, fname='images.jpg', names=None): max_subplots = 16 # max image subplots, i.e. 4x4 bs, _, h, w = images.shape # batch size, _, height, width bs = min(bs, max_subplots) # limit plot images - ns = np.ceil(bs ** 0.5) # number of subplots (square) + ns = np.ceil(bs**0.5) # number of subplots (square) if np.max(images[0]) <= 1: images *= 255 # de-normalise (optional) @@ -136,7 +154,7 @@ def plot_images(images, targets, paths=None, fname='images.jpg', names=None): break x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin im = im.transpose(1, 2, 0) - mosaic[y:y + h, x:x + w, :] = im + mosaic[y : y + h, x : x + w, :] = im # Resize (optional) scale = max_size / ns / max(h, w) @@ -156,7 +174,7 @@ def plot_images(images, targets, paths=None, fname='images.jpg', names=None): if len(targets) > 0: ti = targets[targets[:, 0] == i] # image targets boxes = xywh2xyxy(ti[:, 2:6]).T - classes = ti[:, 1].astype('int') + classes = ti[:, 1].astype("int") labels = ti.shape[1] == 6 # labels if no conf column conf = None if labels else ti[:, 6] # check for confidence presence (label vs pred) @@ -173,59 +191,59 @@ def plot_images(images, targets, paths=None, fname='images.jpg', names=None): color = colors(cls) cls = names[cls] if names else cls if labels or conf[j] > 0.25: # 0.25 conf thresh - label = f'{cls}' if labels else f'{cls} {conf[j]:.1f}' + label = f"{cls}" if labels else f"{cls} {conf[j]:.1f}" annotator.box_label(box, label, color=color) annotator.im.save(fname) # save -def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''): +def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=""): # Plot LR simulating training for full epochs optimizer, scheduler = copy(optimizer), copy(scheduler) # do not modify originals y = [] for _ in range(epochs): scheduler.step() - y.append(optimizer.param_groups[0]['lr']) - plt.plot(y, '.-', label='LR') - plt.xlabel('epoch') - plt.ylabel('LR') + y.append(optimizer.param_groups[0]["lr"]) + plt.plot(y, ".-", label="LR") + plt.xlabel("epoch") + plt.ylabel("LR") plt.grid() plt.xlim(0, epochs) plt.ylim(0) - plt.savefig(Path(save_dir) / 'LR.png', dpi=200) + plt.savefig(Path(save_dir) / "LR.png", dpi=200) plt.close() def plot_val_txt(): # from utils.plots import *; plot_val() # Plot val.txt histograms - x = np.loadtxt('val.txt', dtype=np.float32) + x = np.loadtxt("val.txt", dtype=np.float32) box = xyxy2xywh(x[:, :4]) cx, cy = box[:, 0], box[:, 1] fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True) ax.hist2d(cx, cy, 
bins=600, cmax=10, cmin=0) - ax.set_aspect('equal') - plt.savefig('hist2d.png', dpi=300) + ax.set_aspect("equal") + plt.savefig("hist2d.png", dpi=300) fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True) ax[0].hist(cx, bins=600) ax[1].hist(cy, bins=600) - plt.savefig('hist1d.png', dpi=200) + plt.savefig("hist1d.png", dpi=200) def plot_targets_txt(): # from utils.plots import *; plot_targets_txt() # Plot targets.txt histograms - x = np.loadtxt('targets.txt', dtype=np.float32).T - s = ['x targets', 'y targets', 'width targets', 'height targets'] + x = np.loadtxt("targets.txt", dtype=np.float32).T + s = ["x targets", "y targets", "width targets", "height targets"] fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True) ax = ax.ravel() for i in range(4): - ax[i].hist(x[i], bins=100, label=f'{x[i].mean():.3g} +/- {x[i].std():.3g}') + ax[i].hist(x[i], bins=100, label=f"{x[i].mean():.3g} +/- {x[i].std():.3g}") ax[i].legend() ax[i].set_title(s[i]) - plt.savefig('targets.jpg', dpi=200) + plt.savefig("targets.jpg", dpi=200) -def plot_val_study(file='', dir='', x=None): # from utils.plots import *; plot_val_study() +def plot_val_study(file="", dir="", x=None): # from utils.plots import *; plot_val_study() # Plot file=study.txt generated by val.py (or plot all study*.txt in dir) save_dir = Path(file).parent if file else Path(dir) plot2 = False # plot additional results @@ -234,69 +252,74 @@ def plot_val_study(file='', dir='', x=None): # from utils.plots import *; plot_ fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True) # for f in [save_dir / f'study_coco_{x}.txt' for x in ['yolov5n6', 'yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]: - for f in sorted(save_dir.glob('study*.txt')): + for f in sorted(save_dir.glob("study*.txt")): y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T x = np.arange(y.shape[1]) if x is None else np.array(x) if plot2: - s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_preprocess (ms/img)', 't_inference (ms/img)', 't_NMS (ms/img)'] + s = ["P", "R", "mAP@.5", "mAP@.5:.95", "t_preprocess (ms/img)", "t_inference (ms/img)", "t_NMS (ms/img)"] for i in range(7): - ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8) + ax[i].plot(x, y[i], ".-", linewidth=2, markersize=8) ax[i].set_title(s[i]) j = y[3].argmax() + 1 - ax2.plot(y[5, 1:j], - y[3, 1:j] * 1E2, - '.-', - linewidth=2, - markersize=8, - label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO')) - - ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5], - 'k.-', - linewidth=2, - markersize=8, - alpha=.25, - label='EfficientDet') + ax2.plot( + y[5, 1:j], + y[3, 1:j] * 1e2, + ".-", + linewidth=2, + markersize=8, + label=f.stem.replace("study_coco_", "").replace("yolo", "YOLO"), + ) + + ax2.plot( + 1e3 / np.array([209, 140, 97, 58, 35, 18]), + [34.6, 40.5, 43.0, 47.5, 49.7, 51.5], + "k.-", + linewidth=2, + markersize=8, + alpha=0.25, + label="EfficientDet", + ) ax2.grid(alpha=0.2) ax2.set_yticks(np.arange(20, 60, 5)) ax2.set_xlim(0, 57) ax2.set_ylim(25, 55) - ax2.set_xlabel('GPU Speed (ms/img)') - ax2.set_ylabel('COCO AP val') - ax2.legend(loc='lower right') - f = save_dir / 'study.png' - print(f'Saving {f}...') + ax2.set_xlabel("GPU Speed (ms/img)") + ax2.set_ylabel("COCO AP val") + ax2.legend(loc="lower right") + f = save_dir / "study.png" + print(f"Saving {f}...") plt.savefig(f, dpi=300) @TryExcept() # known issue https://github.com/ultralytics/yolov5/issues/5395 -def plot_labels(labels, names=(), save_dir=Path('')): +def 
plot_labels(labels, names=(), save_dir=Path("")): # plot dataset labels LOGGER.info(f"Plotting labels to {save_dir / 'labels.jpg'}... ") c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes nc = int(c.max() + 1) # number of classes - x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height']) + x = pd.DataFrame(b.transpose(), columns=["x", "y", "width", "height"]) # seaborn correlogram - sn.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9)) - plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200) + sn.pairplot(x, corner=True, diag_kind="auto", kind="hist", diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9)) + plt.savefig(save_dir / "labels_correlogram.jpg", dpi=200) plt.close() # matplotlib labels - matplotlib.use('svg') # faster + matplotlib.use("svg") # faster ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel() y = ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8) with contextlib.suppress(Exception): # color histogram bars by class [y[2].patches[i].set_color([x / 255 for x in colors(i)]) for i in range(nc)] # known issue #3195 - ax[0].set_ylabel('instances') + ax[0].set_ylabel("instances") if 0 < len(names) < 30: ax[0].set_xticks(range(len(names))) ax[0].set_xticklabels(list(names.values()), rotation=90, fontsize=10) else: - ax[0].set_xlabel('classes') - sn.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9) - sn.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9) + ax[0].set_xlabel("classes") + sn.histplot(x, x="x", y="y", ax=ax[2], bins=50, pmax=0.9) + sn.histplot(x, x="width", y="height", ax=ax[3], bins=50, pmax=0.9) # rectangles labels[:, 1:3] = 0.5 # center @@ -305,47 +328,48 @@ def plot_labels(labels, names=(), save_dir=Path('')): for cls, *box in labels[:1000]: ImageDraw.Draw(img).rectangle(box, width=1, outline=colors(cls)) # plot ax[1].imshow(img) - ax[1].axis('off') + ax[1].axis("off") for a in [0, 1, 2, 3]: - for s in ['top', 'right', 'left', 'bottom']: + for s in ["top", "right", "left", "bottom"]: ax[a].spines[s].set_visible(False) - plt.savefig(save_dir / 'labels.jpg', dpi=200) - matplotlib.use('Agg') + plt.savefig(save_dir / "labels.jpg", dpi=200) + matplotlib.use("Agg") plt.close() -def imshow_cls(im, labels=None, pred=None, names=None, nmax=25, verbose=False, f=Path('images.jpg')): +def imshow_cls(im, labels=None, pred=None, names=None, nmax=25, verbose=False, f=Path("images.jpg")): # Show classification image grid with labels (optional) and predictions (optional) from utils.augmentations import denormalize - names = names or [f'class{i}' for i in range(1000)] - blocks = torch.chunk(denormalize(im.clone()).cpu().float(), len(im), - dim=0) # select batch index 0, block by channels + names = names or [f"class{i}" for i in range(1000)] + blocks = torch.chunk( + denormalize(im.clone()).cpu().float(), len(im), dim=0 + ) # select batch index 0, block by channels n = min(len(blocks), nmax) # number of plots - m = min(8, round(n ** 0.5)) # 8 x 8 default + m = min(8, round(n**0.5)) # 8 x 8 default fig, ax = plt.subplots(math.ceil(n / m), m) # 8 rows x n/8 cols ax = ax.ravel() if m > 1 else [ax] # plt.subplots_adjust(wspace=0.05, hspace=0.05) for i in range(n): ax[i].imshow(blocks[i].squeeze().permute((1, 2, 0)).numpy().clip(0.0, 1.0)) - ax[i].axis('off') + ax[i].axis("off") if labels is not None: - s = names[labels[i]] + (f'—{names[pred[i]]}' if pred is not None else '') - ax[i].set_title(s, fontsize=8, verticalalignment='top') - plt.savefig(f, dpi=300, 
bbox_inches='tight') + s = names[labels[i]] + (f"—{names[pred[i]]}" if pred is not None else "") + ax[i].set_title(s, fontsize=8, verticalalignment="top") + plt.savefig(f, dpi=300, bbox_inches="tight") plt.close() if verbose: - LOGGER.info(f'Saving {f}') + LOGGER.info(f"Saving {f}") if labels is not None: - LOGGER.info('True: ' + ' '.join(f'{names[i]:3s}' for i in labels[:nmax])) + LOGGER.info("True: " + " ".join(f"{names[i]:3s}" for i in labels[:nmax])) if pred is not None: - LOGGER.info('Predicted:' + ' '.join(f'{names[i]:3s}' for i in pred[:nmax])) + LOGGER.info("Predicted:" + " ".join(f"{names[i]:3s}" for i in pred[:nmax])) return f -def plot_evolve(evolve_csv='path/to/evolve.csv'): # from utils.plots import *; plot_evolve() +def plot_evolve(evolve_csv="path/to/evolve.csv"): # from utils.plots import *; plot_evolve() # Plot evolve.csv hyp evolution results evolve_csv = Path(evolve_csv) data = pd.read_csv(evolve_csv) @@ -354,83 +378,83 @@ def plot_evolve(evolve_csv='path/to/evolve.csv'): # from utils.plots import *; f = fitness(x) j = np.argmax(f) # max fitness index plt.figure(figsize=(10, 12), tight_layout=True) - matplotlib.rc('font', **{'size': 8}) - print(f'Best results from row {j} of {evolve_csv}:') + matplotlib.rc("font", **{"size": 8}) + print(f"Best results from row {j} of {evolve_csv}:") for i, k in enumerate(keys[7:]): v = x[:, 7 + i] mu = v[j] # best single result plt.subplot(6, 5, i + 1) - plt.scatter(v, f, c=hist2d(v, f, 20), cmap='viridis', alpha=.8, edgecolors='none') - plt.plot(mu, f.max(), 'k+', markersize=15) - plt.title(f'{k} = {mu:.3g}', fontdict={'size': 9}) # limit to 40 characters + plt.scatter(v, f, c=hist2d(v, f, 20), cmap="viridis", alpha=0.8, edgecolors="none") + plt.plot(mu, f.max(), "k+", markersize=15) + plt.title(f"{k} = {mu:.3g}", fontdict={"size": 9}) # limit to 40 characters if i % 5 != 0: plt.yticks([]) - print(f'{k:>15}: {mu:.3g}') - f = evolve_csv.with_suffix('.png') # filename + print(f"{k:>15}: {mu:.3g}") + f = evolve_csv.with_suffix(".png") # filename plt.savefig(f, dpi=200) plt.close() - print(f'Saved {f}') + print(f"Saved {f}") -def plot_results(file='path/to/results.csv', dir=''): +def plot_results(file="path/to/results.csv", dir=""): # Plot training results.csv. Usage: from utils.plots import *; plot_results('path/to/results.csv') save_dir = Path(file).parent if file else Path(dir) fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True) ax = ax.ravel() - files = list(save_dir.glob('results*.csv')) - assert len(files), f'No results.csv files found in {save_dir.resolve()}, nothing to plot.' + files = list(save_dir.glob("results*.csv")) + assert len(files), f"No results.csv files found in {save_dir.resolve()}, nothing to plot." 
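    # Assuming the standard results.csv layout (epoch, three train losses, P, R,
    # mAP@.5, mAP@.5:.95, three val losses, learning rates...), the index order
    # [1, 2, 3, 4, 5, 8, 9, 10, 6, 7] puts the train losses plus P/R on the top
    # row and the val losses plus both mAPs on the bottom row of the 2x5 grid.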
for f in files: try: data = pd.read_csv(f) s = [x.strip() for x in data.columns] x = data.values[:, 0] for i, j in enumerate([1, 2, 3, 4, 5, 8, 9, 10, 6, 7]): - y = data.values[:, j].astype('float') + y = data.values[:, j].astype("float") # y[y == 0] = np.nan # don't show zero values - ax[i].plot(x, y, marker='.', label=f.stem, linewidth=2, markersize=8) # actual results - ax[i].plot(x, gaussian_filter1d(y, sigma=3), ':', label='smooth', linewidth=2) # smoothing line + ax[i].plot(x, y, marker=".", label=f.stem, linewidth=2, markersize=8) # actual results + ax[i].plot(x, gaussian_filter1d(y, sigma=3), ":", label="smooth", linewidth=2) # smoothing line ax[i].set_title(s[j], fontsize=12) # if j in [8, 9, 10]: # share train and val loss y axes # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5]) except Exception as e: - LOGGER.info(f'Warning: Plotting error for {f}: {e}') + LOGGER.info(f"Warning: Plotting error for {f}: {e}") ax[1].legend() - fig.savefig(save_dir / 'results.png', dpi=200) + fig.savefig(save_dir / "results.png", dpi=200) plt.close() -def profile_idetection(start=0, stop=0, labels=(), save_dir=''): +def profile_idetection(start=0, stop=0, labels=(), save_dir=""): # Plot iDetection '*.txt' per-image logs. from utils.plots import *; profile_idetection() ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel() - s = ['Images', 'Free Storage (GB)', 'RAM Usage (GB)', 'Battery', 'dt_raw (ms)', 'dt_smooth (ms)', 'real-world FPS'] - files = list(Path(save_dir).glob('frames*.txt')) + s = ["Images", "Free Storage (GB)", "RAM Usage (GB)", "Battery", "dt_raw (ms)", "dt_smooth (ms)", "real-world FPS"] + files = list(Path(save_dir).glob("frames*.txt")) for fi, f in enumerate(files): try: results = np.loadtxt(f, ndmin=2).T[:, 90:-30] # clip first and last rows n = results.shape[1] # number of rows x = np.arange(start, min(stop, n) if stop else n) results = results[:, x] - t = (results[0] - results[0].min()) # set t0=0s + t = results[0] - results[0].min() # set t0=0s results[0] = x for i, a in enumerate(ax): if i < len(results): - label = labels[fi] if len(labels) else f.stem.replace('frames_', '') - a.plot(t, results[i], marker='.', label=label, linewidth=1, markersize=5) + label = labels[fi] if len(labels) else f.stem.replace("frames_", "") + a.plot(t, results[i], marker=".", label=label, linewidth=1, markersize=5) a.set_title(s[i]) - a.set_xlabel('time (s)') + a.set_xlabel("time (s)") # if fi == len(files) - 1: # a.set_ylim(bottom=0) - for side in ['top', 'right']: + for side in ["top", "right"]: a.spines[side].set_visible(False) else: a.remove() except Exception as e: - print(f'Warning: Plotting error for {f}; {e}') + print(f"Warning: Plotting error for {f}; {e}") ax[1].legend() - plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200) + plt.savefig(Path(save_dir) / "idetection_profile.png", dpi=200) -def save_one_box(xyxy, im, file=Path('im.jpg'), gain=1.02, pad=10, square=False, BGR=False, save=True): +def save_one_box(xyxy, im, file=Path("im.jpg"), gain=1.02, pad=10, square=False, BGR=False, save=True): # Save image crop as {file} with crop size multiple {gain} and {pad} pixels. 
Save and/or return crop xyxy = torch.tensor(xyxy).view(-1, 4) b = xyxy2xywh(xyxy) # boxes @@ -439,10 +463,10 @@ def save_one_box(xyxy, im, file=Path('im.jpg'), gain=1.02, pad=10, square=False, b[:, 2:] = b[:, 2:] * gain + pad # box wh * gain + pad xyxy = xywh2xyxy(b).long() clip_boxes(xyxy, im.shape) - crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2]), ::(1 if BGR else -1)] + crop = im[int(xyxy[0, 1]) : int(xyxy[0, 3]), int(xyxy[0, 0]) : int(xyxy[0, 2]), :: (1 if BGR else -1)] if save: file.parent.mkdir(parents=True, exist_ok=True) # make directory - f = str(increment_path(file).with_suffix('.jpg')) + f = str(increment_path(file).with_suffix(".jpg")) # cv2.imwrite(f, crop) # save BGR, https://github.com/ultralytics/yolov5/issues/7007 chroma subsampling issue Image.fromarray(crop[..., ::-1]).save(f, quality=95, subsampling=0) # save RGB return crop diff --git a/utils/segment/augmentations.py b/utils/segment/augmentations.py index f8154b834869..56636b65d93a 100644 --- a/utils/segment/augmentations.py +++ b/utils/segment/augmentations.py @@ -1,7 +1,5 @@ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license -""" -Image augmentation functions -""" +"""Image augmentation functions.""" import math import random @@ -22,15 +20,9 @@ def mixup(im, labels, segments, im2, labels2, segments2): return im, labels, segments -def random_perspective(im, - targets=(), - segments=(), - degrees=10, - translate=.1, - scale=.1, - shear=10, - perspective=0.0, - border=(0, 0)): +def random_perspective( + im, targets=(), segments=(), degrees=10, translate=0.1, scale=0.1, shear=10, perspective=0.0, border=(0, 0) +): # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10)) # targets = [cls, xyxy] @@ -62,8 +54,8 @@ def random_perspective(im, # Translation T = np.eye(3) - T[0, 2] = (random.uniform(0.5 - translate, 0.5 + translate) * width) # x translation (pixels) - T[1, 2] = (random.uniform(0.5 - translate, 0.5 + translate) * height) # y translation (pixels) + T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels) + T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels) # Combined rotation matrix M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT @@ -89,7 +81,7 @@ def random_perspective(im, xy = np.ones((len(segment), 3)) xy[:, :2] = segment xy = xy @ M.T # transform - xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]) # perspective rescale or affine + xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or affine # clip new[i] = segment2box(xy, width, height) diff --git a/utils/segment/dataloaders.py b/utils/segment/dataloaders.py index 5398617eef68..b0b3a7424216 100644 --- a/utils/segment/dataloaders.py +++ b/utils/segment/dataloaders.py @@ -1,7 +1,5 @@ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license -""" -Dataloaders -""" +"""Dataloaders.""" import os import random @@ -17,30 +15,32 @@ from ..torch_utils import torch_distributed_zero_first from .augmentations import mixup, random_perspective -RANK = int(os.getenv('RANK', -1)) - - -def create_dataloader(path, - imgsz, - batch_size, - stride, - single_cls=False, - hyp=None, - augment=False, - cache=False, - pad=0.0, - rect=False, - rank=-1, - workers=8, - image_weights=False, - quad=False, - prefix='', - shuffle=False, - mask_downsample_ratio=1, - overlap_mask=False, - seed=0): +RANK = int(os.getenv("RANK", -1)) + + +def create_dataloader( + path, + imgsz, + batch_size, + 
stride, + single_cls=False, + hyp=None, + augment=False, + cache=False, + pad=0.0, + rect=False, + rank=-1, + workers=8, + image_weights=False, + quad=False, + prefix="", + shuffle=False, + mask_downsample_ratio=1, + overlap_mask=False, + seed=0, +): if rect and shuffle: - LOGGER.warning('WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False') + LOGGER.warning("WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False") shuffle = False with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP dataset = LoadImagesAndLabelsAndMasks( @@ -58,7 +58,8 @@ def create_dataloader(path, prefix=prefix, downsample_ratio=mask_downsample_ratio, overlap=overlap_mask, - rank=rank) + rank=rank, + ) batch_size = min(batch_size, len(dataset)) nd = torch.cuda.device_count() # number of CUDA devices @@ -81,7 +82,6 @@ def create_dataloader(path, class LoadImagesAndLabelsAndMasks(LoadImagesAndLabels): # for training/testing - def __init__( self, path, @@ -96,14 +96,29 @@ def __init__( stride=32, pad=0, min_items=0, - prefix='', + prefix="", downsample_ratio=1, overlap=False, rank=-1, seed=0, ): - super().__init__(path, img_size, batch_size, augment, hyp, rect, image_weights, cache_images, single_cls, - stride, pad, min_items, prefix, rank, seed) + super().__init__( + path, + img_size, + batch_size, + augment, + hyp, + rect, + image_weights, + cache_images, + single_cls, + stride, + pad, + min_items, + prefix, + rank, + seed, + ) self.downsample_ratio = downsample_ratio self.overlap = overlap @@ -111,7 +126,7 @@ def __getitem__(self, index): index = self.indices[index] # linear, shuffled, or image_weights hyp = self.hyp - mosaic = self.mosaic and random.random() < hyp['mosaic'] + mosaic = self.mosaic and random.random() < hyp["mosaic"] masks = [] if mosaic: # Load mosaic @@ -119,7 +134,7 @@ def __getitem__(self, index): shapes = None # MixUp augmentation - if random.random() < hyp['mixup']: + if random.random() < hyp["mixup"]: img, labels, segments = mixup(img, labels, segments, *self.load_mosaic(random.randint(0, self.n - 1))) else: @@ -147,30 +162,36 @@ def __getitem__(self, index): labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1]) if self.augment: - img, labels, segments = random_perspective(img, - labels, - segments=segments, - degrees=hyp['degrees'], - translate=hyp['translate'], - scale=hyp['scale'], - shear=hyp['shear'], - perspective=hyp['perspective']) + img, labels, segments = random_perspective( + img, + labels, + segments=segments, + degrees=hyp["degrees"], + translate=hyp["translate"], + scale=hyp["scale"], + shear=hyp["shear"], + perspective=hyp["perspective"], + ) nl = len(labels) # number of labels if nl: labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1e-3) if self.overlap: - masks, sorted_idx = polygons2masks_overlap(img.shape[:2], - segments, - downsample_ratio=self.downsample_ratio) + masks, sorted_idx = polygons2masks_overlap( + img.shape[:2], segments, downsample_ratio=self.downsample_ratio + ) masks = masks[None] # (640, 640) -> (1, 640, 640) labels = labels[sorted_idx] else: masks = polygons2masks(img.shape[:2], segments, color=1, downsample_ratio=self.downsample_ratio) - masks = (torch.from_numpy(masks) if len(masks) else torch.zeros(1 if self.overlap else nl, img.shape[0] // - self.downsample_ratio, img.shape[1] // - self.downsample_ratio)) + masks = ( + torch.from_numpy(masks) + if len(masks) + else torch.zeros( + 1 if 
self.overlap else nl, img.shape[0] // self.downsample_ratio, img.shape[1] // self.downsample_ratio + ) + ) # TODO: albumentations support if self.augment: # Albumentations @@ -180,17 +201,17 @@ def __getitem__(self, index): nl = len(labels) # update after albumentations # HSV color-space - augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v']) + augment_hsv(img, hgain=hyp["hsv_h"], sgain=hyp["hsv_s"], vgain=hyp["hsv_v"]) # Flip up-down - if random.random() < hyp['flipud']: + if random.random() < hyp["flipud"]: img = np.flipud(img) if nl: labels[:, 2] = 1 - labels[:, 2] masks = torch.flip(masks, dims=[1]) # Flip left-right - if random.random() < hyp['fliplr']: + if random.random() < hyp["fliplr"]: img = np.fliplr(img) if nl: labels[:, 1] = 1 - labels[:, 1] @@ -254,16 +275,18 @@ def load_mosaic(self, index): # img4, labels4 = replicate(img4, labels4) # replicate # Augment - img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp['copy_paste']) - img4, labels4, segments4 = random_perspective(img4, - labels4, - segments4, - degrees=self.hyp['degrees'], - translate=self.hyp['translate'], - scale=self.hyp['scale'], - shear=self.hyp['shear'], - perspective=self.hyp['perspective'], - border=self.mosaic_border) # border to remove + img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp["copy_paste"]) + img4, labels4, segments4 = random_perspective( + img4, + labels4, + segments4, + degrees=self.hyp["degrees"], + translate=self.hyp["translate"], + scale=self.hyp["scale"], + shear=self.hyp["shear"], + perspective=self.hyp["perspective"], + border=self.mosaic_border, + ) # border to remove return img4, labels4, segments4 @staticmethod @@ -312,8 +335,10 @@ def polygons2masks(img_size, polygons, color, downsample_ratio=1): def polygons2masks_overlap(img_size, segments, downsample_ratio=1): """Return a (640, 640) overlap mask.""" - masks = np.zeros((img_size[0] // downsample_ratio, img_size[1] // downsample_ratio), - dtype=np.int32 if len(segments) > 255 else np.uint8) + masks = np.zeros( + (img_size[0] // downsample_ratio, img_size[1] // downsample_ratio), + dtype=np.int32 if len(segments) > 255 else np.uint8, + ) areas = [] ms = [] for si in range(len(segments)): diff --git a/utils/segment/general.py b/utils/segment/general.py index f1b2f1dd120f..8cbc745b4a90 100644 --- a/utils/segment/general.py +++ b/utils/segment/general.py @@ -6,8 +6,7 @@ def crop_mask(masks, boxes): """ - "Crop" predicted masks by zeroing out everything not in the predicted bbox. - Vectorized by Chong (thanks Chong). + "Crop" predicted masks by zeroing out everything not in the predicted bbox. Vectorized by Chong (thanks Chong). 
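For intuition, the vectorized crop never loops over masks: it builds per-pixel row and column index grids, compares them against each box's edges, and multiplies the masks by the resulting 0/1 region. A minimal illustrative sketch of the idea (hypothetical name, assuming torch, masks of shape (n, h, w) and boxes given as xyxy in mask-pixel coordinates):

import torch

def crop_mask_sketch(masks, boxes):
    # masks: (n, h, w) float tensor; boxes: (n, 4) xyxy boxes in mask pixels
    _, h, w = masks.shape
    x1, y1, x2, y2 = torch.chunk(boxes[:, :, None], 4, 1)  # each (n, 1, 1)
    r = torch.arange(w, device=masks.device, dtype=x1.dtype)[None, None, :]  # column indices, (1, 1, w)
    c = torch.arange(h, device=masks.device, dtype=x1.dtype)[None, :, None]  # row indices, (1, h, 1)
    return masks * ((r >= x1) * (r < x2) * (c >= y1) * (c < y2))  # zero out pixels outside each box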
Args: - masks should be a size [n, h, w] tensor of masks @@ -35,7 +34,7 @@ def process_mask_upsample(protos, masks_in, bboxes, shape): c, mh, mw = protos.shape # CHW masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) - masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW + masks = F.interpolate(masks[None], shape, mode="bilinear", align_corners=False)[0] # CHW masks = crop_mask(masks, bboxes) # CHW return masks.gt_(0.5) @@ -63,7 +62,7 @@ def process_mask(protos, masks_in, bboxes, shape, upsample=False): masks = crop_mask(masks, downsampled_bboxes) # CHW if upsample: - masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW + masks = F.interpolate(masks[None], shape, mode="bilinear", align_corners=False)[0] # CHW return masks.gt_(0.5) @@ -85,7 +84,7 @@ def process_mask_native(protos, masks_in, bboxes, shape): bottom, right = int(mh - pad[1]), int(mw - pad[0]) masks = masks[:, top:bottom, left:right] - masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW + masks = F.interpolate(masks[None], shape, mode="bilinear", align_corners=False)[0] # CHW masks = crop_mask(masks, bboxes) # CHW return masks.gt_(0.5) @@ -144,17 +143,17 @@ def masks_iou(mask1, mask2, eps=1e-7): return intersection / (union + eps) -def masks2segments(masks, strategy='largest'): +def masks2segments(masks, strategy="largest"): # Convert masks(n,160,160) into segments(n,xy) segments = [] - for x in masks.int().cpu().numpy().astype('uint8'): + for x in masks.int().cpu().numpy().astype("uint8"): c = cv2.findContours(x, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0] if c: - if strategy == 'concat': # concatenate all segments + if strategy == "concat": # concatenate all segments c = np.concatenate([x.reshape(-1, 2) for x in c]) - elif strategy == 'largest': # select largest segment + elif strategy == "largest": # select largest segment c = np.array(c[np.array([len(x) for x in c]).argmax()]).reshape(-1, 2) else: c = np.zeros((0, 2)) # no segments found - segments.append(c.astype('float32')) + segments.append(c.astype("float32")) return segments diff --git a/utils/segment/loss.py b/utils/segment/loss.py index caeff3cad586..1e007271fa9c 100644 --- a/utils/segment/loss.py +++ b/utils/segment/loss.py @@ -18,14 +18,14 @@ def __init__(self, model, autobalance=False, overlap=False): h = model.hyp # hyperparameters # Define criteria - BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device)) - BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device)) + BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h["cls_pw"]], device=device)) + BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h["obj_pw"]], device=device)) # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 - self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets + self.cp, self.cn = smooth_BCE(eps=h.get("label_smoothing", 0.0)) # positive, negative BCE targets # Focal loss - g = h['fl_gamma'] # focal loss gamma + g = h["fl_gamma"] # focal loss gamma if g > 0: BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) @@ -82,7 +82,7 @@ def __call__(self, preds, targets, masks): # predictions, targets, model # Mask regression if tuple(masks.shape[-2:]) != (mask_h, mask_w): # downsample - masks = F.interpolate(masks[None], (mask_h, mask_w), mode='nearest')[0] + masks = F.interpolate(masks[None], (mask_h, mask_w), mode="nearest")[0] marea = 
xywhn[i][:, 2:].prod(1) # mask width, height normalized mxyxy = xywh2xyxy(xywhn[i] * torch.tensor([mask_w, mask_h, mask_w, mask_h], device=self.device)) for bi in b.unique(): @@ -100,10 +100,10 @@ def __call__(self, preds, targets, masks): # predictions, targets, model if self.autobalance: self.balance = [x / self.balance[self.ssi] for x in self.balance] - lbox *= self.hyp['box'] - lobj *= self.hyp['obj'] - lcls *= self.hyp['cls'] - lseg *= self.hyp['box'] / bs + lbox *= self.hyp["box"] + lobj *= self.hyp["obj"] + lcls *= self.hyp["cls"] + lseg *= self.hyp["box"] / bs loss = lbox + lobj + lcls + lseg return loss * bs, torch.cat((lbox, lseg, lobj, lcls)).detach() @@ -111,7 +111,7 @@ def __call__(self, preds, targets, masks): # predictions, targets, model def single_mask_loss(self, gt_mask, pred, proto, xyxy, area): # Mask loss for one image pred_mask = (pred @ proto.view(self.nm, -1)).view(-1, *proto.shape[1:]) # (n,32) @ (32,80,80) -> (n,80,80) - loss = F.binary_cross_entropy_with_logits(pred_mask, gt_mask, reduction='none') + loss = F.binary_cross_entropy_with_logits(pred_mask, gt_mask, reduction="none") return (crop_mask(loss, xyxy).mean(dim=(1, 2)) / area).mean() def build_targets(self, p, targets): @@ -132,16 +132,20 @@ def build_targets(self, p, targets): targets = torch.cat((targets.repeat(na, 1, 1), ai[..., None], ti[..., None]), 2) # append anchor indices g = 0.5 # bias - off = torch.tensor( - [ - [0, 0], - [1, 0], - [0, 1], - [-1, 0], - [0, -1], # j,k,l,m - # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm - ], - device=self.device).float() * g # offsets + off = ( + torch.tensor( + [ + [0, 0], + [1, 0], + [0, 1], + [-1, 0], + [0, -1], # j,k,l,m + # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm + ], + device=self.device, + ).float() + * g + ) # offsets for i in range(self.nl): anchors, shape = self.anchors[i], p[i].shape @@ -152,7 +156,7 @@ def build_targets(self, p, targets): if nt: # Matches r = t[..., 4:6] / anchors[:, None] # wh ratio - j = torch.max(r, 1 / r).max(2)[0] < self.hyp['anchor_t'] # compare + j = torch.max(r, 1 / r).max(2)[0] < self.hyp["anchor_t"] # compare # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2)) t = t[j] # filter diff --git a/utils/segment/metrics.py b/utils/segment/metrics.py index 787961bee1bf..222a749b5986 100644 --- a/utils/segment/metrics.py +++ b/utils/segment/metrics.py @@ -1,7 +1,5 @@ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license -""" -Model validation metrics -""" +"""Model validation metrics.""" import numpy as np @@ -15,14 +13,14 @@ def fitness(x): def ap_per_class_box_and_mask( - tp_m, - tp_b, - conf, - pred_cls, - target_cls, - plot=False, - save_dir='.', - names=(), + tp_m, + tp_b, + conf, + pred_cls, + target_cls, + plot=False, + save_dir=".", + names=(), ): """ Args: @@ -30,41 +28,33 @@ def ap_per_class_box_and_mask( tp_m: tp of masks. other arguments see `func: ap_per_class`. 
""" - results_boxes = ap_per_class(tp_b, - conf, - pred_cls, - target_cls, - plot=plot, - save_dir=save_dir, - names=names, - prefix='Box')[2:] - results_masks = ap_per_class(tp_m, - conf, - pred_cls, - target_cls, - plot=plot, - save_dir=save_dir, - names=names, - prefix='Mask')[2:] + results_boxes = ap_per_class( + tp_b, conf, pred_cls, target_cls, plot=plot, save_dir=save_dir, names=names, prefix="Box" + )[2:] + results_masks = ap_per_class( + tp_m, conf, pred_cls, target_cls, plot=plot, save_dir=save_dir, names=names, prefix="Mask" + )[2:] results = { - 'boxes': { - 'p': results_boxes[0], - 'r': results_boxes[1], - 'ap': results_boxes[3], - 'f1': results_boxes[2], - 'ap_class': results_boxes[4]}, - 'masks': { - 'p': results_masks[0], - 'r': results_masks[1], - 'ap': results_masks[3], - 'f1': results_masks[2], - 'ap_class': results_masks[4]}} + "boxes": { + "p": results_boxes[0], + "r": results_boxes[1], + "ap": results_boxes[3], + "f1": results_boxes[2], + "ap_class": results_boxes[4], + }, + "masks": { + "p": results_masks[0], + "r": results_masks[1], + "ap": results_masks[3], + "f1": results_masks[2], + "ap_class": results_masks[4], + }, + } return results class Metric: - def __init__(self) -> None: self.p = [] # (nc, ) self.r = [] # (nc, ) @@ -74,7 +64,9 @@ def __init__(self) -> None: @property def ap50(self): - """AP@0.5 of all classes. + """ + AP@0.5 of all classes. + Return: (nc, ) or []. """ @@ -90,7 +82,9 @@ def ap(self): @property def mp(self): - """mean precision of all classes. + """ + Mean precision of all classes. + Return: float. """ @@ -98,7 +92,9 @@ def mp(self): @property def mr(self): - """mean recall of all classes. + """ + Mean recall of all classes. + Return: float. """ @@ -106,7 +102,9 @@ def mr(self): @property def map50(self): - """Mean AP@0.5 of all classes. + """ + Mean AP@0.5 of all classes. + Return: float. """ @@ -114,18 +112,20 @@ def map50(self): @property def map(self): - """Mean AP@0.5:0.95 of all classes. + """ + Mean AP@0.5:0.95 of all classes. + Return: float. 
""" return self.all_ap.mean() if len(self.all_ap) else 0.0 def mean_results(self): - """Mean of results, return mp, mr, map50, map""" + """Mean of results, return mp, mr, map50, map.""" return (self.mp, self.mr, self.map50, self.map) def class_result(self, i): - """class-aware result, return p[i], r[i], ap50[i], ap[i]""" + """Class-aware result, return p[i], r[i], ap50[i], ap[i]""" return (self.p[i], self.r[i], self.ap50[i], self.ap[i]) def get_maps(self, nc): @@ -159,8 +159,8 @@ def update(self, results): Args: results: Dict{'boxes': Dict{}, 'masks': Dict{}} """ - self.metric_box.update(list(results['boxes'].values())) - self.metric_mask.update(list(results['masks'].values())) + self.metric_box.update(list(results["boxes"].values())) + self.metric_mask.update(list(results["masks"].values())) def mean_results(self): return self.metric_box.mean_results() + self.metric_mask.mean_results() @@ -178,33 +178,35 @@ def ap_class_index(self): KEYS = [ - 'train/box_loss', - 'train/seg_loss', # train loss - 'train/obj_loss', - 'train/cls_loss', - 'metrics/precision(B)', - 'metrics/recall(B)', - 'metrics/mAP_0.5(B)', - 'metrics/mAP_0.5:0.95(B)', # metrics - 'metrics/precision(M)', - 'metrics/recall(M)', - 'metrics/mAP_0.5(M)', - 'metrics/mAP_0.5:0.95(M)', # metrics - 'val/box_loss', - 'val/seg_loss', # val loss - 'val/obj_loss', - 'val/cls_loss', - 'x/lr0', - 'x/lr1', - 'x/lr2', ] + "train/box_loss", + "train/seg_loss", # train loss + "train/obj_loss", + "train/cls_loss", + "metrics/precision(B)", + "metrics/recall(B)", + "metrics/mAP_0.5(B)", + "metrics/mAP_0.5:0.95(B)", # metrics + "metrics/precision(M)", + "metrics/recall(M)", + "metrics/mAP_0.5(M)", + "metrics/mAP_0.5:0.95(M)", # metrics + "val/box_loss", + "val/seg_loss", # val loss + "val/obj_loss", + "val/cls_loss", + "x/lr0", + "x/lr1", + "x/lr2", +] BEST_KEYS = [ - 'best/epoch', - 'best/precision(B)', - 'best/recall(B)', - 'best/mAP_0.5(B)', - 'best/mAP_0.5:0.95(B)', - 'best/precision(M)', - 'best/recall(M)', - 'best/mAP_0.5(M)', - 'best/mAP_0.5:0.95(M)', ] + "best/epoch", + "best/precision(B)", + "best/recall(B)", + "best/mAP_0.5(B)", + "best/mAP_0.5:0.95(B)", + "best/precision(M)", + "best/recall(M)", + "best/mAP_0.5(M)", + "best/mAP_0.5:0.95(M)", +] diff --git a/utils/segment/plots.py b/utils/segment/plots.py index f9938cd1b06a..0e30c61be66f 100644 --- a/utils/segment/plots.py +++ b/utils/segment/plots.py @@ -14,7 +14,7 @@ @threaded -def plot_images_and_masks(images, targets, masks, paths=None, fname='images.jpg', names=None): +def plot_images_and_masks(images, targets, masks, paths=None, fname="images.jpg", names=None): # Plot image grid with labels if isinstance(images, torch.Tensor): images = images.cpu().float().numpy() @@ -27,7 +27,7 @@ def plot_images_and_masks(images, targets, masks, paths=None, fname='images.jpg' max_subplots = 16 # max image subplots, i.e. 
4x4 bs, _, h, w = images.shape # batch size, _, height, width bs = min(bs, max_subplots) # limit plot images - ns = np.ceil(bs ** 0.5) # number of subplots (square) + ns = np.ceil(bs**0.5) # number of subplots (square) if np.max(images[0]) <= 1: images *= 255 # de-normalise (optional) @@ -38,7 +38,7 @@ def plot_images_and_masks(images, targets, masks, paths=None, fname='images.jpg' break x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin im = im.transpose(1, 2, 0) - mosaic[y:y + h, x:x + w, :] = im + mosaic[y : y + h, x : x + w, :] = im # Resize (optional) scale = max_size / ns / max(h, w) @@ -60,7 +60,7 @@ def plot_images_and_masks(images, targets, masks, paths=None, fname='images.jpg' ti = targets[idx] # image targets boxes = xywh2xyxy(ti[:, 2:6]).T - classes = ti[:, 1].astype('int') + classes = ti[:, 1].astype("int") labels = ti.shape[1] == 6 # labels if no conf column conf = None if labels else ti[:, 6] # check for confidence presence (label vs pred) @@ -77,7 +77,7 @@ def plot_images_and_masks(images, targets, masks, paths=None, fname='images.jpg' color = colors(cls) cls = names[cls] if names else cls if labels or conf[j] > 0.25: # 0.25 conf thresh - label = f'{cls}' if labels else f'{cls} {conf[j]:.1f}' + label = f"{cls}" if labels else f"{cls} {conf[j]:.1f}" annotator.box_label(box, label, color=color) # Plot masks @@ -103,41 +103,44 @@ def plot_images_and_masks(images, targets, masks, paths=None, fname='images.jpg' else: mask = image_masks[j].astype(bool) with contextlib.suppress(Exception): - im[y:y + h, x:x + w, :][mask] = im[y:y + h, x:x + w, :][mask] * 0.4 + np.array(color) * 0.6 + im[y : y + h, x : x + w, :][mask] = ( + im[y : y + h, x : x + w, :][mask] * 0.4 + np.array(color) * 0.6 + ) annotator.fromarray(im) annotator.im.save(fname) # save -def plot_results_with_masks(file='path/to/results.csv', dir='', best=True): +def plot_results_with_masks(file="path/to/results.csv", dir="", best=True): # Plot training results.csv. Usage: from utils.plots import *; plot_results('path/to/results.csv') save_dir = Path(file).parent if file else Path(dir) fig, ax = plt.subplots(2, 8, figsize=(18, 6), tight_layout=True) ax = ax.ravel() - files = list(save_dir.glob('results*.csv')) - assert len(files), f'No results.csv files found in {save_dir.resolve()}, nothing to plot.' + files = list(save_dir.glob("results*.csv")) + assert len(files), f"No results.csv files found in {save_dir.resolve()}, nothing to plot." 
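The red "best" marker plotted below is chosen by a weighted fitness over the CSV columns: 0.9 * mAP@0.5:0.95 + 0.1 * mAP@0.5, summed for boxes and masks. A standalone sketch of that selection (hypothetical helper; assumes a results.csv laid out per the KEYS list above, i.e. column 0 is the epoch, columns 7/8 hold the box mAPs and columns 11/12 the mask mAPs):

import numpy as np
import pandas as pd

def best_epoch(csv_file="results.csv"):
    # Fitness weights mAP@0.5:0.95 (0.9) over mAP@0.5 (0.1) for both boxes and masks
    v = pd.read_csv(csv_file).values
    fitness = 0.9 * v[:, 8] + 0.1 * v[:, 7] + 0.9 * v[:, 12] + 0.1 * v[:, 11]
    return int(np.argmax(fitness))  # row index of the best epoch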
for f in files: try: data = pd.read_csv(f) - index = np.argmax(0.9 * data.values[:, 8] + 0.1 * data.values[:, 7] + 0.9 * data.values[:, 12] + - 0.1 * data.values[:, 11]) + index = np.argmax( + 0.9 * data.values[:, 8] + 0.1 * data.values[:, 7] + 0.9 * data.values[:, 12] + 0.1 * data.values[:, 11] + ) s = [x.strip() for x in data.columns] x = data.values[:, 0] for i, j in enumerate([1, 2, 3, 4, 5, 6, 9, 10, 13, 14, 15, 16, 7, 8, 11, 12]): y = data.values[:, j] # y[y == 0] = np.nan # don't show zero values - ax[i].plot(x, y, marker='.', label=f.stem, linewidth=2, markersize=2) + ax[i].plot(x, y, marker=".", label=f.stem, linewidth=2, markersize=2) if best: # best - ax[i].scatter(index, y[index], color='r', label=f'best:{index}', marker='*', linewidth=3) - ax[i].set_title(s[j] + f'\n{round(y[index], 5)}') + ax[i].scatter(index, y[index], color="r", label=f"best:{index}", marker="*", linewidth=3) + ax[i].set_title(s[j] + f"\n{round(y[index], 5)}") else: # last - ax[i].scatter(x[-1], y[-1], color='r', label='last', marker='*', linewidth=3) - ax[i].set_title(s[j] + f'\n{round(y[-1], 5)}') + ax[i].scatter(x[-1], y[-1], color="r", label="last", marker="*", linewidth=3) + ax[i].set_title(s[j] + f"\n{round(y[-1], 5)}") # if j in [8, 9, 10]: # share train and val loss y axes # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5]) except Exception as e: - print(f'Warning: Plotting error for {f}: {e}') + print(f"Warning: Plotting error for {f}: {e}") ax[1].legend() - fig.savefig(save_dir / 'results.png', dpi=200) + fig.savefig(save_dir / "results.png", dpi=200) plt.close() diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 13a356f3238c..6bc4b4c7fd04 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -1,7 +1,5 @@ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license -""" -PyTorch utils -""" +"""PyTorch utils.""" import math import os @@ -21,9 +19,9 @@ from utils.general import LOGGER, check_version, colorstr, file_date, git_describe -LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html -RANK = int(os.getenv('RANK', -1)) -WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) +LOCAL_RANK = int(os.getenv("LOCAL_RANK", -1)) # https://pytorch.org/docs/stable/elastic/run.html +RANK = int(os.getenv("RANK", -1)) +WORLD_SIZE = int(os.getenv("WORLD_SIZE", 1)) try: import thop # for FLOPs computation @@ -31,11 +29,11 @@ thop = None # Suppress PyTorch warnings -warnings.filterwarnings('ignore', message='User provided device_type of \'cuda\', but CUDA is not available. Disabling') -warnings.filterwarnings('ignore', category=UserWarning) +warnings.filterwarnings("ignore", message="User provided device_type of 'cuda', but CUDA is not available. 
Disabling") +warnings.filterwarnings("ignore", category=UserWarning) -def smart_inference_mode(torch_1_9=check_version(torch.__version__, '1.9.0')): +def smart_inference_mode(torch_1_9=check_version(torch.__version__, "1.9.0")): # Applies torch.inference_mode() decorator if torch>=1.9.0 else torch.no_grad() decorator def decorate(fn): return (torch.inference_mode if torch_1_9 else torch.no_grad)()(fn) @@ -45,19 +43,20 @@ def decorate(fn): def smartCrossEntropyLoss(label_smoothing=0.0): # Returns nn.CrossEntropyLoss with label smoothing enabled for torch>=1.10.0 - if check_version(torch.__version__, '1.10.0'): + if check_version(torch.__version__, "1.10.0"): return nn.CrossEntropyLoss(label_smoothing=label_smoothing) if label_smoothing > 0: - LOGGER.warning(f'WARNING ⚠️ label smoothing {label_smoothing} requires torch>=1.10.0') + LOGGER.warning(f"WARNING ⚠️ label smoothing {label_smoothing} requires torch>=1.10.0") return nn.CrossEntropyLoss() def smart_DDP(model): # Model DDP creation with checks - assert not check_version(torch.__version__, '1.12.0', pinned=True), \ - 'torch==1.12.0 torchvision==0.13.0 DDP training is not supported due to a known issue. ' \ - 'Please upgrade or downgrade torch to use DDP. See https://github.com/ultralytics/yolov5/issues/8395' - if check_version(torch.__version__, '1.11.0'): + assert not check_version(torch.__version__, "1.12.0", pinned=True), ( + "torch==1.12.0 torchvision==0.13.0 DDP training is not supported due to a known issue. " + "Please upgrade or downgrade torch to use DDP. See https://github.com/ultralytics/yolov5/issues/8395" + ) + if check_version(torch.__version__, "1.11.0"): return DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK, static_graph=True) else: return DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK) @@ -66,7 +65,8 @@ def smart_DDP(model): def reshape_classifier_output(model, n=1000): # Update a TorchVision classification model to class count 'n' if required from models.common import Classify - name, m = list((model.model if hasattr(model, 'model') else model).named_children())[-1] # last module + + name, m = list((model.model if hasattr(model, "model") else model).named_children())[-1] # last module if isinstance(m, Classify): # YOLOv5 Classify() head if m.linear.out_features != n: m.linear = nn.Linear(m.linear.in_features, n) @@ -97,43 +97,44 @@ def torch_distributed_zero_first(local_rank: int): def device_count(): # Returns number of CUDA devices available. Safe version of torch.cuda.device_count(). 
Supports Linux and Windows - assert platform.system() in ('Linux', 'Windows'), 'device_count() only supported on Linux or Windows' + assert platform.system() in ("Linux", "Windows"), "device_count() only supported on Linux or Windows" try: - cmd = 'nvidia-smi -L | wc -l' if platform.system() == 'Linux' else 'nvidia-smi -L | find /c /v ""' # Windows + cmd = "nvidia-smi -L | wc -l" if platform.system() == "Linux" else 'nvidia-smi -L | find /c /v ""' # Windows return int(subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1]) except Exception: return 0 -def select_device(device='', batch_size=0, newline=True): +def select_device(device="", batch_size=0, newline=True): # device = None or 'cpu' or 0 or '0' or '0,1,2,3' - s = f'YOLOv5 🚀 {git_describe() or file_date()} Python-{platform.python_version()} torch-{torch.__version__} ' - device = str(device).strip().lower().replace('cuda:', '').replace('none', '') # to string, 'cuda:0' to '0' - cpu = device == 'cpu' - mps = device == 'mps' # Apple Metal Performance Shaders (MPS) + s = f"YOLOv5 🚀 {git_describe() or file_date()} Python-{platform.python_version()} torch-{torch.__version__} " + device = str(device).strip().lower().replace("cuda:", "").replace("none", "") # to string, 'cuda:0' to '0' + cpu = device == "cpu" + mps = device == "mps" # Apple Metal Performance Shaders (MPS) if cpu or mps: - os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False + os.environ["CUDA_VISIBLE_DEVICES"] = "-1" # force torch.cuda.is_available() = False elif device: # non-cpu device requested - os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable - must be before assert is_available() - assert torch.cuda.is_available() and torch.cuda.device_count() >= len(device.replace(',', '')), \ - f"Invalid CUDA '--device {device}' requested, use '--device cpu' or pass valid CUDA device(s)" + os.environ["CUDA_VISIBLE_DEVICES"] = device # set environment variable - must be before assert is_available() + assert torch.cuda.is_available() and torch.cuda.device_count() >= len( + device.replace(",", "") + ), f"Invalid CUDA '--device {device}' requested, use '--device cpu' or pass valid CUDA device(s)" if not cpu and not mps and torch.cuda.is_available(): # prefer GPU if available - devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e. 0,1,6,7 + devices = device.split(",") if device else "0" # range(torch.cuda.device_count()) # i.e. 
0,1,6,7 n = len(devices) # device count if n > 1 and batch_size > 0: # check batch_size is divisible by device_count - assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}' - space = ' ' * (len(s) + 1) + assert batch_size % n == 0, f"batch-size {batch_size} not multiple of GPU count {n}" + space = " " * (len(s) + 1) for i, d in enumerate(devices): p = torch.cuda.get_device_properties(i) s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / (1 << 20):.0f}MiB)\n" # bytes to MB - arg = 'cuda:0' - elif mps and getattr(torch, 'has_mps', False) and torch.backends.mps.is_available(): # prefer MPS if available - s += 'MPS\n' - arg = 'mps' + arg = "cuda:0" + elif mps and getattr(torch, "has_mps", False) and torch.backends.mps.is_available(): # prefer MPS if available + s += "MPS\n" + arg = "mps" else: # revert to CPU - s += 'CPU\n' - arg = 'cpu' + s += "CPU\n" + arg = "cpu" if not newline: s = s.rstrip() @@ -149,7 +150,7 @@ def time_sync(): def profile(input, ops, n=10, device=None): - """ YOLOv5 speed/memory/FLOPs profiler + """YOLOv5 speed/memory/FLOPs profiler Usage: input = torch.randn(16, 3, 640, 640) m1 = lambda x: x * torch.sigmoid(x) @@ -159,18 +160,20 @@ def profile(input, ops, n=10, device=None): results = [] if not isinstance(device, torch.device): device = select_device(device) - print(f"{'Params':>12s}{'GFLOPs':>12s}{'GPU_mem (GB)':>14s}{'forward (ms)':>14s}{'backward (ms)':>14s}" - f"{'input':>24s}{'output':>24s}") + print( + f"{'Params':>12s}{'GFLOPs':>12s}{'GPU_mem (GB)':>14s}{'forward (ms)':>14s}{'backward (ms)':>14s}" + f"{'input':>24s}{'output':>24s}" + ) for x in input if isinstance(input, list) else [input]: x = x.to(device) x.requires_grad = True for m in ops if isinstance(ops, list) else [ops]: - m = m.to(device) if hasattr(m, 'to') else m # device - m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m + m = m.to(device) if hasattr(m, "to") else m # device + m = m.half() if hasattr(m, "half") and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m tf, tb, t = 0, 0, [0, 0, 0] # dt forward, backward try: - flops = thop.profile(m, inputs=(x, ), verbose=False)[0] / 1E9 * 2 # GFLOPs + flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1e9 * 2 # GFLOPs except Exception: flops = 0 @@ -184,13 +187,13 @@ def profile(input, ops, n=10, device=None): t[2] = time_sync() except Exception: # no backward method # print(e) # for debug - t[2] = float('nan') + t[2] = float("nan") tf += (t[1] - t[0]) * 1000 / n # ms per op forward tb += (t[2] - t[1]) * 1000 / n # ms per op backward - mem = torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0 # (GB) - s_in, s_out = (tuple(x.shape) if isinstance(x, torch.Tensor) else 'list' for x in (x, y)) # shapes + mem = torch.cuda.memory_reserved() / 1e9 if torch.cuda.is_available() else 0 # (GB) + s_in, s_out = (tuple(x.shape) if isinstance(x, torch.Tensor) else "list" for x in (x, y)) # shapes p = sum(x.numel() for x in m.parameters()) if isinstance(m, nn.Module) else 0 # parameters - print(f'{p:12}{flops:12.4g}{mem:>14.3f}{tf:14.4g}{tb:14.4g}{str(s_in):>24s}{str(s_out):>24s}') + print(f"{p:12}{flops:12.4g}{mem:>14.3f}{tf:14.4g}{tb:14.4g}{str(s_in):>24s}{str(s_out):>24s}") results.append([p, flops, mem, tf, tb, s_in, s_out]) except Exception as e: print(e) @@ -238,23 +241,30 @@ def sparsity(model): def prune(model, amount=0.3): # Prune model to requested global sparsity import torch.nn.utils.prune as prune + for name, m in 
model.named_modules(): if isinstance(m, nn.Conv2d): - prune.l1_unstructured(m, name='weight', amount=amount) # prune - prune.remove(m, 'weight') # make permanent - LOGGER.info(f'Model pruned to {sparsity(model):.3g} global sparsity') + prune.l1_unstructured(m, name="weight", amount=amount) # prune + prune.remove(m, "weight") # make permanent + LOGGER.info(f"Model pruned to {sparsity(model):.3g} global sparsity") def fuse_conv_and_bn(conv, bn): # Fuse Conv2d() and BatchNorm2d() layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/ - fusedconv = nn.Conv2d(conv.in_channels, - conv.out_channels, - kernel_size=conv.kernel_size, - stride=conv.stride, - padding=conv.padding, - dilation=conv.dilation, - groups=conv.groups, - bias=True).requires_grad_(False).to(conv.weight.device) + fusedconv = ( + nn.Conv2d( + conv.in_channels, + conv.out_channels, + kernel_size=conv.kernel_size, + stride=conv.stride, + padding=conv.padding, + dilation=conv.dilation, + groups=conv.groups, + bias=True, + ) + .requires_grad_(False) + .to(conv.weight.device) + ) # Prepare filters w_conv = conv.weight.clone().view(conv.out_channels, -1) @@ -276,22 +286,24 @@ def model_info(model, verbose=False, imgsz=640): if verbose: print(f"{'layer':>5} {'name':>40} {'gradient':>9} {'parameters':>12} {'shape':>20} {'mu':>10} {'sigma':>10}") for i, (name, p) in enumerate(model.named_parameters()): - name = name.replace('module_list.', '') - print('%5g %40s %9s %12g %20s %10.3g %10.3g' % - (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std())) + name = name.replace("module_list.", "") + print( + "%5g %40s %9s %12g %20s %10.3g %10.3g" + % (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()) + ) try: # FLOPs p = next(model.parameters()) - stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32 # max stride + stride = max(int(model.stride.max()), 32) if hasattr(model, "stride") else 32 # max stride im = torch.empty((1, p.shape[1], stride, stride), device=p.device) # input image in BCHW format - flops = thop.profile(deepcopy(model), inputs=(im, ), verbose=False)[0] / 1E9 * 2 # stride GFLOPs + flops = thop.profile(deepcopy(model), inputs=(im,), verbose=False)[0] / 1e9 * 2 # stride GFLOPs imgsz = imgsz if isinstance(imgsz, list) else [imgsz, imgsz] # expand if int/float - fs = f', {flops * imgsz[0] / stride * imgsz[1] / stride:.1f} GFLOPs' # 640x640 GFLOPs + fs = f", {flops * imgsz[0] / stride * imgsz[1] / stride:.1f} GFLOPs" # 640x640 GFLOPs except Exception: - fs = '' + fs = "" - name = Path(model.yaml_file).stem.replace('yolov5', 'YOLOv5') if hasattr(model, 'yaml_file') else 'Model' - LOGGER.info(f'{name} summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}') + name = Path(model.yaml_file).stem.replace("yolov5", "YOLOv5") if hasattr(model, "yaml_file") else "Model" + LOGGER.info(f"{name} summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}") def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416) @@ -300,7 +312,7 @@ def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416) return img h, w = img.shape[2:] s = (int(h * ratio), int(w * ratio)) # new size - img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize + img = F.interpolate(img, size=s, mode="bilinear", align_corners=False) # resize if not same_shape: # pad/crop img h, w = (math.ceil(x * ratio / gs) * gs for x in (h, w)) return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = 
imagenet mean @@ -309,72 +321,76 @@ def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416) def copy_attr(a, b, include=(), exclude=()): # Copy attributes from b to a, options to only include [...] and to exclude [...] for k, v in b.__dict__.items(): - if (len(include) and k not in include) or k.startswith('_') or k in exclude: + if (len(include) and k not in include) or k.startswith("_") or k in exclude: continue else: setattr(a, k, v) -def smart_optimizer(model, name='Adam', lr=0.001, momentum=0.9, decay=1e-5): +def smart_optimizer(model, name="Adam", lr=0.001, momentum=0.9, decay=1e-5): # YOLOv5 3-param group optimizer: 0) weights with decay, 1) weights no decay, 2) biases no decay g = [], [], [] # optimizer parameter groups - bn = tuple(v for k, v in nn.__dict__.items() if 'Norm' in k) # normalization layers, i.e. BatchNorm2d() + bn = tuple(v for k, v in nn.__dict__.items() if "Norm" in k) # normalization layers, i.e. BatchNorm2d() for v in model.modules(): for p_name, p in v.named_parameters(recurse=0): - if p_name == 'bias': # bias (no decay) + if p_name == "bias": # bias (no decay) g[2].append(p) - elif p_name == 'weight' and isinstance(v, bn): # weight (no decay) + elif p_name == "weight" and isinstance(v, bn): # weight (no decay) g[1].append(p) else: g[0].append(p) # weight (with decay) - if name == 'Adam': + if name == "Adam": optimizer = torch.optim.Adam(g[2], lr=lr, betas=(momentum, 0.999)) # adjust beta1 to momentum - elif name == 'AdamW': + elif name == "AdamW": optimizer = torch.optim.AdamW(g[2], lr=lr, betas=(momentum, 0.999), weight_decay=0.0) - elif name == 'RMSProp': + elif name == "RMSProp": optimizer = torch.optim.RMSprop(g[2], lr=lr, momentum=momentum) - elif name == 'SGD': + elif name == "SGD": optimizer = torch.optim.SGD(g[2], lr=lr, momentum=momentum, nesterov=True) else: - raise NotImplementedError(f'Optimizer {name} not implemented.') - - optimizer.add_param_group({'params': g[0], 'weight_decay': decay}) # add g0 with weight_decay - optimizer.add_param_group({'params': g[1], 'weight_decay': 0.0}) # add g1 (BatchNorm2d weights) - LOGGER.info(f"{colorstr('optimizer:')} {type(optimizer).__name__}(lr={lr}) with parameter groups " - f'{len(g[1])} weight(decay=0.0), {len(g[0])} weight(decay={decay}), {len(g[2])} bias') + raise NotImplementedError(f"Optimizer {name} not implemented.") + + optimizer.add_param_group({"params": g[0], "weight_decay": decay}) # add g0 with weight_decay + optimizer.add_param_group({"params": g[1], "weight_decay": 0.0}) # add g1 (BatchNorm2d weights) + LOGGER.info( + f"{colorstr('optimizer:')} {type(optimizer).__name__}(lr={lr}) with parameter groups " + f'{len(g[1])} weight(decay=0.0), {len(g[0])} weight(decay={decay}), {len(g[2])} bias' + ) return optimizer -def smart_hub_load(repo='ultralytics/yolov5', model='yolov5s', **kwargs): +def smart_hub_load(repo="ultralytics/yolov5", model="yolov5s", **kwargs): # YOLOv5 torch.hub.load() wrapper with smart error/issue handling - if check_version(torch.__version__, '1.9.1'): - kwargs['skip_validation'] = True # validation causes GitHub API rate limit errors - if check_version(torch.__version__, '1.12.0'): - kwargs['trust_repo'] = True # argument required starting in torch 0.12 + if check_version(torch.__version__, "1.9.1"): + kwargs["skip_validation"] = True # validation causes GitHub API rate limit errors + if check_version(torch.__version__, "1.12.0"): + kwargs["trust_repo"] = True # argument required starting in torch 0.12 try: return torch.hub.load(repo, model, **kwargs) 
except Exception: return torch.hub.load(repo, model, force_reload=True, **kwargs) -def smart_resume(ckpt, optimizer, ema=None, weights='yolov5s.pt', epochs=300, resume=True): +def smart_resume(ckpt, optimizer, ema=None, weights="yolov5s.pt", epochs=300, resume=True): # Resume training from a partially trained checkpoint best_fitness = 0.0 - start_epoch = ckpt['epoch'] + 1 - if ckpt['optimizer'] is not None: - optimizer.load_state_dict(ckpt['optimizer']) # optimizer - best_fitness = ckpt['best_fitness'] - if ema and ckpt.get('ema'): - ema.ema.load_state_dict(ckpt['ema'].float().state_dict()) # EMA - ema.updates = ckpt['updates'] + start_epoch = ckpt["epoch"] + 1 + if ckpt["optimizer"] is not None: + optimizer.load_state_dict(ckpt["optimizer"]) # optimizer + best_fitness = ckpt["best_fitness"] + if ema and ckpt.get("ema"): + ema.ema.load_state_dict(ckpt["ema"].float().state_dict()) # EMA + ema.updates = ckpt["updates"] if resume: - assert start_epoch > 0, f'{weights} training to {epochs} epochs is finished, nothing to resume.\n' \ - f"Start a new training without --resume, i.e. 'python train.py --weights {weights}'" - LOGGER.info(f'Resuming training from {weights} from epoch {start_epoch} to {epochs} total epochs') + assert start_epoch > 0, ( + f"{weights} training to {epochs} epochs is finished, nothing to resume.\n" + f"Start a new training without --resume, i.e. 'python train.py --weights {weights}'" + ) + LOGGER.info(f"Resuming training from {weights} from epoch {start_epoch} to {epochs} total epochs") if epochs < start_epoch: LOGGER.info(f"{weights} has been trained for {ckpt['epoch']} epochs. Fine-tuning for {epochs} more epochs.") - epochs += ckpt['epoch'] # finetune additional epochs + epochs += ckpt["epoch"] # finetune additional epochs return best_fitness, start_epoch, epochs @@ -383,7 +399,7 @@ class EarlyStopping: def __init__(self, patience=30): self.best_fitness = 0.0 # i.e. mAP self.best_epoch = 0 - self.patience = patience or float('inf') # epochs to wait after fitness stops improving to stop + self.patience = patience or float("inf") # epochs to wait after fitness stops improving to stop self.possible_stop = False # possible stop may occur next epoch def __call__(self, epoch, fitness): @@ -394,15 +410,17 @@ def __call__(self, epoch, fitness): self.possible_stop = delta >= (self.patience - 1) # possible stop may occur next epoch stop = delta >= self.patience # stop training if patience exceeded if stop: - LOGGER.info(f'Stopping training early as no improvement observed in last {self.patience} epochs. ' - f'Best results observed at epoch {self.best_epoch}, best model saved as best.pt.\n' - f'To update EarlyStopping(patience={self.patience}) pass a new patience value, ' - f'i.e. `python train.py --patience 300` or use `--patience 0` to disable EarlyStopping.') + LOGGER.info( + f"Stopping training early as no improvement observed in last {self.patience} epochs. " + f"Best results observed at epoch {self.best_epoch}, best model saved as best.pt.\n" + f"To update EarlyStopping(patience={self.patience}) pass a new patience value, " + f"i.e. `python train.py --patience 300` or use `--patience 0` to disable EarlyStopping." 
+ ) return stop class ModelEMA: - """ Updated Exponential Moving Average (EMA) from https://github.com/rwightman/pytorch-image-models + """Updated Exponential Moving Average (EMA) from https://github.com/rwightman/pytorch-image-models Keeps a moving average of everything in the model state_dict (parameters and buffers) For EMA details see https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage """ @@ -427,6 +445,6 @@ def update(self, model): v += (1 - d) * msd[k].detach() # assert v.dtype == msd[k].dtype == torch.float32, f'{k}: EMA {v.dtype} and model {msd[k].dtype} must be FP32' - def update_attr(self, model, include=(), exclude=('process_group', 'reducer')): + def update_attr(self, model, include=(), exclude=("process_group", "reducer")): # Update EMA attributes copy_attr(self.ema, model, include, exclude) diff --git a/utils/triton.py b/utils/triton.py index b5153dad940d..9584d07fbcf0 100644 --- a/utils/triton.py +++ b/utils/triton.py @@ -1,6 +1,5 @@ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license -""" Utils to interact with the Triton Inference Server -""" +"""Utils to interact with the Triton Inference Server.""" import typing from urllib.parse import urlparse @@ -9,9 +8,11 @@ class TritonRemoteModel: - """ A wrapper over a model served by the Triton Inference Server. It can - be configured to communicate over GRPC or HTTP. It accepts Torch Tensors - as input and returns them as outputs. + """ + A wrapper over a model served by the Triton Inference Server. + + It can be configured to communicate over GRPC or HTTP. It accepts Torch Tensors as input and returns them as + outputs. """ def __init__(self, url: str): @@ -21,7 +22,7 @@ def __init__(self, url: str): """ parsed_url = urlparse(url) - if parsed_url.scheme == 'grpc': + if parsed_url.scheme == "grpc": from tritonclient.grpc import InferenceServerClient, InferInput self.client = InferenceServerClient(parsed_url.netloc) # Triton GRPC client @@ -31,51 +32,55 @@ def __init__(self, url: str): def create_input_placeholders() -> typing.List[InferInput]: return [ - InferInput(i['name'], [int(s) for s in i['shape']], i['datatype']) for i in self.metadata['inputs']] + InferInput(i["name"], [int(s) for s in i["shape"]], i["datatype"]) for i in self.metadata["inputs"] + ] else: from tritonclient.http import InferenceServerClient, InferInput self.client = InferenceServerClient(parsed_url.netloc) # Triton HTTP client model_repository = self.client.get_model_repository_index() - self.model_name = model_repository[0]['name'] + self.model_name = model_repository[0]["name"] self.metadata = self.client.get_model_metadata(self.model_name) def create_input_placeholders() -> typing.List[InferInput]: return [ - InferInput(i['name'], [int(s) for s in i['shape']], i['datatype']) for i in self.metadata['inputs']] + InferInput(i["name"], [int(s) for s in i["shape"]], i["datatype"]) for i in self.metadata["inputs"] + ] self._create_input_placeholders_fn = create_input_placeholders @property def runtime(self): - """Returns the model runtime""" - return self.metadata.get('backend', self.metadata.get('platform')) + """Returns the model runtime.""" + return self.metadata.get("backend", self.metadata.get("platform")) def __call__(self, *args, **kwargs) -> typing.Union[torch.Tensor, typing.Tuple[torch.Tensor, ...]]: - """ Invokes the model. Parameters can be provided via args or kwargs. - args, if provided, are assumed to match the order of inputs of the model. - kwargs are matched with the model input names. + """ + Invokes the model. 
+ + Parameters can be provided via args or kwargs. args, if provided, are assumed to match the order of inputs of + the model. kwargs are matched with the model input names. """ inputs = self._create_inputs(*args, **kwargs) response = self.client.infer(model_name=self.model_name, inputs=inputs) result = [] - for output in self.metadata['outputs']: - tensor = torch.as_tensor(response.as_numpy(output['name'])) + for output in self.metadata["outputs"]: + tensor = torch.as_tensor(response.as_numpy(output["name"])) result.append(tensor) return result[0] if len(result) == 1 else result def _create_inputs(self, *args, **kwargs): args_len, kwargs_len = len(args), len(kwargs) if not args_len and not kwargs_len: - raise RuntimeError('No inputs provided.') + raise RuntimeError("No inputs provided.") if args_len and kwargs_len: - raise RuntimeError('Cannot specify args and kwargs at the same time') + raise RuntimeError("Cannot specify args and kwargs at the same time") placeholders = self._create_input_placeholders_fn() if args_len: if args_len != len(placeholders): - raise RuntimeError(f'Expected {len(placeholders)} inputs, got {args_len}.') + raise RuntimeError(f"Expected {len(placeholders)} inputs, got {args_len}.") for input, value in zip(placeholders, args): input.set_data_from_numpy(value.cpu().numpy()) else: diff --git a/val.py b/val.py index 1a4219c38962..6cc1d37a0a26 100644 --- a/val.py +++ b/val.py @@ -1,6 +1,6 @@ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ -Validate a trained YOLOv5 detection model on a detection dataset +Validate a trained YOLOv5 detection model on a detection dataset. Usage: $ python val.py --weights yolov5s.pt --data coco128.yaml --img 640 @@ -39,9 +39,23 @@ from models.common import DetectMultiBackend from utils.callbacks import Callbacks from utils.dataloaders import create_dataloader -from utils.general import (LOGGER, TQDM_BAR_FORMAT, Profile, check_dataset, check_img_size, check_requirements, - check_yaml, coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, - print_args, scale_boxes, xywh2xyxy, xyxy2xywh) +from utils.general import ( + LOGGER, + TQDM_BAR_FORMAT, + Profile, + check_dataset, + check_img_size, + check_requirements, + check_yaml, + coco80_to_coco91_class, + colorstr, + increment_path, + non_max_suppression, + print_args, + scale_boxes, + xywh2xyxy, + xyxy2xywh, +) from utils.metrics import ConfusionMatrix, ap_per_class, box_iou from utils.plots import output_to_target, plot_images, plot_val_study from utils.torch_utils import select_device, smart_inference_mode @@ -53,8 +67,8 @@ def save_one_txt(predn, save_conf, shape, file): for *xyxy, conf, cls in predn.tolist(): xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format - with open(file, 'a') as f: - f.write(('%g ' * len(line)).rstrip() % line + '\n') + with open(file, "a") as f: + f.write(("%g " * len(line)).rstrip() % line + "\n") def save_one_json(predn, jdict, path, class_map): @@ -63,11 +77,14 @@ def save_one_json(predn, jdict, path, class_map): box = xyxy2xywh(predn[:, :4]) # xywh box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner for p, b in zip(predn.tolist(), box.tolist()): - jdict.append({ - 'image_id': image_id, - 'category_id': class_map[int(p[5])], - 'bbox': [round(x, 3) for x in b], - 'score': round(p[4], 5)}) + jdict.append( + { + "image_id": image_id, + "category_id": class_map[int(p[5])], + "bbox": [round(x, 3) for x in b], + "score": round(p[4], 
5), + } + ) def process_batch(detections, labels, iouv): @@ -98,47 +115,47 @@ def process_batch(detections, labels, iouv): @smart_inference_mode() def run( - data, - weights=None, # model.pt path(s) - batch_size=32, # batch size - imgsz=640, # inference size (pixels) - conf_thres=0.001, # confidence threshold - iou_thres=0.6, # NMS IoU threshold - max_det=300, # maximum detections per image - task='val', # train, val, test, speed or study - device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu - workers=8, # max dataloader workers (per RANK in DDP mode) - single_cls=False, # treat as single-class dataset - augment=False, # augmented inference - verbose=False, # verbose output - save_txt=False, # save results to *.txt - save_hybrid=False, # save label+prediction hybrid results to *.txt - save_conf=False, # save confidences in --save-txt labels - save_json=False, # save a COCO-JSON results file - project=ROOT / 'runs/val', # save to project/name - name='exp', # save to project/name - exist_ok=False, # existing project/name ok, do not increment - half=True, # use FP16 half-precision inference - dnn=False, # use OpenCV DNN for ONNX inference - model=None, - dataloader=None, - save_dir=Path(''), - plots=True, - callbacks=Callbacks(), - compute_loss=None, + data, + weights=None, # model.pt path(s) + batch_size=32, # batch size + imgsz=640, # inference size (pixels) + conf_thres=0.001, # confidence threshold + iou_thres=0.6, # NMS IoU threshold + max_det=300, # maximum detections per image + task="val", # train, val, test, speed or study + device="", # cuda device, i.e. 0 or 0,1,2,3 or cpu + workers=8, # max dataloader workers (per RANK in DDP mode) + single_cls=False, # treat as single-class dataset + augment=False, # augmented inference + verbose=False, # verbose output + save_txt=False, # save results to *.txt + save_hybrid=False, # save label+prediction hybrid results to *.txt + save_conf=False, # save confidences in --save-txt labels + save_json=False, # save a COCO-JSON results file + project=ROOT / "runs/val", # save to project/name + name="exp", # save to project/name + exist_ok=False, # existing project/name ok, do not increment + half=True, # use FP16 half-precision inference + dnn=False, # use OpenCV DNN for ONNX inference + model=None, + dataloader=None, + save_dir=Path(""), + plots=True, + callbacks=Callbacks(), + compute_loss=None, ): # Initialize/load model and set device training = model is not None if training: # called by train.py device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model - half &= device.type != 'cpu' # half precision only supported on CUDA + half &= device.type != "cpu" # half precision only supported on CUDA model.half() if half else model.float() else: # called directly device = select_device(device, batch_size=batch_size) # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run - (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir + (save_dir / "labels" if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) @@ -151,16 +168,16 @@ def run( device = model.device if not (pt or jit): batch_size = 1 # export.py models default to batch-size 1 - LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models') + LOGGER.info(f"Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for 
non-PyTorch models") # Data data = check_dataset(data) # check # Configure model.eval() - cuda = device.type != 'cpu' - is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'coco{os.sep}val2017.txt') # COCO dataset - nc = 1 if single_cls else int(data['nc']) # number of classes + cuda = device.type != "cpu" + is_coco = isinstance(data.get("val"), str) and data["val"].endswith(f"coco{os.sep}val2017.txt") # COCO dataset + nc = 1 if single_cls else int(data["nc"]) # number of classes iouv = torch.linspace(0.5, 0.95, 10, device=device) # iou vector for mAP@0.5:0.95 niou = iouv.numel() @@ -168,36 +185,40 @@ def run( if not training: if pt and not single_cls: # check --weights are trained on --data ncm = model.model.nc - assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \ - f'classes). Pass correct combination of --weights and --data that are trained together.' + assert ncm == nc, ( + f"{weights} ({ncm} classes) trained on different --data than what you passed ({nc} " + f"classes). Pass correct combination of --weights and --data that are trained together." + ) model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz)) # warmup - pad, rect = (0.0, False) if task == 'speed' else (0.5, pt) # square inference for benchmarks - task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images - dataloader = create_dataloader(data[task], - imgsz, - batch_size, - stride, - single_cls, - pad=pad, - rect=rect, - workers=workers, - prefix=colorstr(f'{task}: '))[0] + pad, rect = (0.0, False) if task == "speed" else (0.5, pt) # square inference for benchmarks + task = task if task in ("train", "val", "test") else "val" # path to train/val/test images + dataloader = create_dataloader( + data[task], + imgsz, + batch_size, + stride, + single_cls, + pad=pad, + rect=rect, + workers=workers, + prefix=colorstr(f"{task}: "), + )[0] seen = 0 confusion_matrix = ConfusionMatrix(nc=nc) - names = model.names if hasattr(model, 'names') else model.module.names # get class names + names = model.names if hasattr(model, "names") else model.module.names # get class names if isinstance(names, (list, tuple)): # old format names = dict(enumerate(names)) class_map = coco80_to_coco91_class() if is_coco else list(range(1000)) - s = ('%22s' + '%11s' * 6) % ('Class', 'Images', 'Instances', 'P', 'R', 'mAP50', 'mAP50-95') + s = ("%22s" + "%11s" * 6) % ("Class", "Images", "Instances", "P", "R", "mAP50", "mAP50-95") tp, fp, p, r, f1, mp, mr, map50, ap50, map = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 dt = Profile(device=device), Profile(device=device), Profile(device=device) # profiling times loss = torch.zeros(3, device=device) jdict, stats, ap, ap_class = [], [], [], [] - callbacks.run('on_val_start') + callbacks.run("on_val_start") pbar = tqdm(dataloader, desc=s, bar_format=TQDM_BAR_FORMAT) # progress bar for batch_i, (im, targets, paths, shapes) in enumerate(pbar): - callbacks.run('on_val_batch_start') + callbacks.run("on_val_batch_start") with dt[0]: if cuda: im = im.to(device, non_blocking=True) @@ -218,13 +239,9 @@ def run( targets[:, 2:] *= torch.tensor((width, height, width, height), device=device) # to pixels lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling with dt[2]: - preds = non_max_suppression(preds, - conf_thres, - iou_thres, - labels=lb, - multi_label=True, - agnostic=single_cls, - max_det=max_det) + preds = non_max_suppression( + preds, conf_thres, iou_thres, 
labels=lb, multi_label=True, agnostic=single_cls, max_det=max_det + ) # Metrics for si, pred in enumerate(preds): @@ -259,18 +276,18 @@ def run( # Save/log if save_txt: - (save_dir / 'labels').mkdir(parents=True, exist_ok=True) - save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt') + (save_dir / "labels").mkdir(parents=True, exist_ok=True) + save_one_txt(predn, save_conf, shape, file=save_dir / "labels" / f"{path.stem}.txt") if save_json: save_one_json(predn, jdict, path, class_map) # append to COCO-JSON dictionary - callbacks.run('on_val_image_end', pred, predn, path, names, im[si]) + callbacks.run("on_val_image_end", pred, predn, path, names, im[si]) # Plot images if plots and batch_i < 3: - plot_images(im, targets, paths, save_dir / f'val_batch{batch_i}_labels.jpg', names) # labels - plot_images(im, output_to_target(preds), paths, save_dir / f'val_batch{batch_i}_pred.jpg', names) # pred + plot_images(im, targets, paths, save_dir / f"val_batch{batch_i}_labels.jpg", names) # labels + plot_images(im, output_to_target(preds), paths, save_dir / f"val_batch{batch_i}_pred.jpg", names) # pred - callbacks.run('on_val_batch_end', batch_i, im, targets, paths, shapes, preds) + callbacks.run("on_val_batch_end", batch_i, im, targets, paths, shapes, preds) # Compute metrics stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)] # to numpy @@ -281,10 +298,10 @@ def run( nt = np.bincount(stats[3].astype(int), minlength=nc) # number of targets per class # Print results - pf = '%22s' + '%11i' * 2 + '%11.3g' * 4 # print format - LOGGER.info(pf % ('all', seen, nt.sum(), mp, mr, map50, map)) + pf = "%22s" + "%11i" * 2 + "%11.3g" * 4 # print format + LOGGER.info(pf % ("all", seen, nt.sum(), mp, mr, map50, map)) if nt.sum() == 0: - LOGGER.warning(f'WARNING ⚠️ no labels found in {task} set, can not compute metrics without labels') + LOGGER.warning(f"WARNING ⚠️ no labels found in {task} set, can not compute metrics without labels") # Print results per class if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats): @@ -292,35 +309,35 @@ def run( LOGGER.info(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i])) # Print speeds - t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image + t = tuple(x.t / seen * 1e3 for x in dt) # speeds per image if not training: shape = (batch_size, 3, imgsz, imgsz) - LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t) + LOGGER.info(f"Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}" % t) # Plots if plots: confusion_matrix.plot(save_dir=save_dir, names=list(names.values())) - callbacks.run('on_val_end', nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix) + callbacks.run("on_val_end", nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix) # Save JSON if save_json and len(jdict): - w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights - anno_json = str(Path('../datasets/coco/annotations/instances_val2017.json')) # annotations + w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else "" # weights + anno_json = str(Path("../datasets/coco/annotations/instances_val2017.json")) # annotations if not os.path.exists(anno_json): - anno_json = os.path.join(data['path'], 'annotations', 'instances_val2017.json') - pred_json = str(save_dir / f'{w}_predictions.json') # predictions - LOGGER.info(f'\nEvaluating pycocotools mAP... 
saving {pred_json}...') - with open(pred_json, 'w') as f: + anno_json = os.path.join(data["path"], "annotations", "instances_val2017.json") + pred_json = str(save_dir / f"{w}_predictions.json") # predictions + LOGGER.info(f"\nEvaluating pycocotools mAP... saving {pred_json}...") + with open(pred_json, "w") as f: json.dump(jdict, f) try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb - check_requirements('pycocotools>=2.0.6') + check_requirements("pycocotools>=2.0.6") from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval anno = COCO(anno_json) # init annotations api pred = anno.loadRes(pred_json) # init predictions api - eval = COCOeval(anno, pred, 'bbox') + eval = COCOeval(anno, pred, "bbox") if is_coco: eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.im_files] # image IDs to evaluate eval.evaluate() @@ -328,12 +345,12 @@ def run( eval.summarize() map, map50 = eval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5) except Exception as e: - LOGGER.info(f'pycocotools unable to run: {e}') + LOGGER.info(f"pycocotools unable to run: {e}") # Return results model.float() # for training if not training: - s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' + s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else "" LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") maps = np.zeros(nc) + map for i, c in enumerate(ap_class): @@ -343,71 +360,71 @@ def run( def parse_opt(): parser = argparse.ArgumentParser() - parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') - parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path(s)') - parser.add_argument('--batch-size', type=int, default=32, help='batch size') - parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)') - parser.add_argument('--conf-thres', type=float, default=0.001, help='confidence threshold') - parser.add_argument('--iou-thres', type=float, default=0.6, help='NMS IoU threshold') - parser.add_argument('--max-det', type=int, default=300, help='maximum detections per image') - parser.add_argument('--task', default='val', help='train, val, test, speed or study') - parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') - parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') - parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset') - parser.add_argument('--augment', action='store_true', help='augmented inference') - parser.add_argument('--verbose', action='store_true', help='report mAP by class') - parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') - parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt') - parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') - parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file') - parser.add_argument('--project', default=ROOT / 'runs/val', help='save to project/name') - parser.add_argument('--name', default='exp', help='save to project/name') - parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') - parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') - parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') + parser.add_argument("--data", type=str, default=ROOT / "data/coco128.yaml", help="dataset.yaml path") + parser.add_argument("--weights", nargs="+", type=str, default=ROOT / "yolov5s.pt", help="model path(s)") + parser.add_argument("--batch-size", type=int, default=32, help="batch size") + parser.add_argument("--imgsz", "--img", "--img-size", type=int, default=640, help="inference size (pixels)") + parser.add_argument("--conf-thres", type=float, default=0.001, help="confidence threshold") + parser.add_argument("--iou-thres", type=float, default=0.6, help="NMS IoU threshold") + parser.add_argument("--max-det", type=int, default=300, help="maximum detections per image") + parser.add_argument("--task", default="val", help="train, val, test, speed or study") + parser.add_argument("--device", default="", help="cuda device, i.e. 
0 or 0,1,2,3 or cpu") + parser.add_argument("--workers", type=int, default=8, help="max dataloader workers (per RANK in DDP mode)") + parser.add_argument("--single-cls", action="store_true", help="treat as single-class dataset") + parser.add_argument("--augment", action="store_true", help="augmented inference") + parser.add_argument("--verbose", action="store_true", help="report mAP by class") + parser.add_argument("--save-txt", action="store_true", help="save results to *.txt") + parser.add_argument("--save-hybrid", action="store_true", help="save label+prediction hybrid results to *.txt") + parser.add_argument("--save-conf", action="store_true", help="save confidences in --save-txt labels") + parser.add_argument("--save-json", action="store_true", help="save a COCO-JSON results file") + parser.add_argument("--project", default=ROOT / "runs/val", help="save to project/name") + parser.add_argument("--name", default="exp", help="save to project/name") + parser.add_argument("--exist-ok", action="store_true", help="existing project/name ok, do not increment") + parser.add_argument("--half", action="store_true", help="use FP16 half-precision inference") + parser.add_argument("--dnn", action="store_true", help="use OpenCV DNN for ONNX inference") opt = parser.parse_args() opt.data = check_yaml(opt.data) # check YAML - opt.save_json |= opt.data.endswith('coco.yaml') + opt.save_json |= opt.data.endswith("coco.yaml") opt.save_txt |= opt.save_hybrid print_args(vars(opt)) return opt def main(opt): - check_requirements(ROOT / 'requirements.txt', exclude=('tensorboard', 'thop')) + check_requirements(ROOT / "requirements.txt", exclude=("tensorboard", "thop")) - if opt.task in ('train', 'val', 'test'): # run normally + if opt.task in ("train", "val", "test"): # run normally if opt.conf_thres > 0.001: # https://github.com/ultralytics/yolov5/issues/1466 - LOGGER.info(f'WARNING ⚠️ confidence threshold {opt.conf_thres} > 0.001 produces invalid results') + LOGGER.info(f"WARNING ⚠️ confidence threshold {opt.conf_thres} > 0.001 produces invalid results") if opt.save_hybrid: - LOGGER.info('WARNING ⚠️ --save-hybrid will return high mAP from hybrid labels, not from predictions alone') + LOGGER.info("WARNING ⚠️ --save-hybrid will return high mAP from hybrid labels, not from predictions alone") run(**vars(opt)) else: weights = opt.weights if isinstance(opt.weights, list) else [opt.weights] - opt.half = torch.cuda.is_available() and opt.device != 'cpu' # FP16 for fastest results - if opt.task == 'speed': # speed benchmarks + opt.half = torch.cuda.is_available() and opt.device != "cpu" # FP16 for fastest results + if opt.task == "speed": # speed benchmarks # python val.py --task speed --data coco.yaml --batch 1 --weights yolov5n.pt yolov5s.pt... opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False for opt.weights in weights: run(**vars(opt), plots=False) - elif opt.task == 'study': # speed vs mAP benchmarks + elif opt.task == "study": # speed vs mAP benchmarks # python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n.pt yolov5s.pt... 
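# Note: the `study` branch here is a plain parameter sweep — run validation
# once per image size, collect accuracy and latency, then save and plot. A
# minimal self-contained sketch of that pattern, assuming a hypothetical
# validate(imgsz) helper returning a (mAP, latency_ms) tuple (not the actual
# val.run() signature):
import numpy as np

def speed_vs_map_study(validate, sizes=range(256, 1536 + 128, 128)):
    """Collect one (imgsz, mAP, latency_ms) row per validation pass."""
    rows = [(s, *validate(s)) for s in sizes]  # one full validation run per image size
    np.savetxt("study.txt", np.array(rows), fmt="%10.4g")  # same fmt as val.py uses
    return rows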
for opt.weights in weights: - f = f'study_{Path(opt.data).stem}_{Path(opt.weights).stem}.txt' # filename to save to + f = f"study_{Path(opt.data).stem}_{Path(opt.weights).stem}.txt" # filename to save to x, y = list(range(256, 1536 + 128, 128)), [] # x axis (image sizes), y axis for opt.imgsz in x: # img-size - LOGGER.info(f'\nRunning {f} --imgsz {opt.imgsz}...') + LOGGER.info(f"\nRunning {f} --imgsz {opt.imgsz}...") r, _, t = run(**vars(opt), plots=False) y.append(r + t) # results and times - np.savetxt(f, y, fmt='%10.4g') # save - subprocess.run(['zip', '-r', 'study.zip', 'study_*.txt']) + np.savetxt(f, y, fmt="%10.4g") # save + subprocess.run(["zip", "-r", "study.zip", "study_*.txt"]) plot_val_study(x=x) # plot else: raise NotImplementedError(f'--task {opt.task} not in ("train", "val", "test", "speed", "study")') -if __name__ == '__main__': +if __name__ == "__main__": opt = parse_opt() main(opt) From 4733b4df90d9a359601c4e481b0312a5ca521fc5 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 8 Jan 2024 18:55:43 +0100 Subject: [PATCH 1848/1976] Update links.yml (#12601) Signed-off-by: Glenn Jocher --- .github/workflows/links.yml | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/.github/workflows/links.yml b/.github/workflows/links.yml index 1c7b28257d80..cb0ad264cc13 100644 --- a/.github/workflows/links.yml +++ b/.github/workflows/links.yml @@ -34,13 +34,13 @@ jobs: retry_wait_seconds: 60 max_attempts: 3 command: | - lychee - --accept 403,429,500,502,999 - --exclude-loopback - --exclude 'https?://(www\.)?(linkedin\.com|twitter\.com|instagram\.com|kaggle\.com|fonts\.gstatic\.com|url\.com)' - --exclude-path '**/ci.yaml' - --github-token ${{ secrets.GITHUB_TOKEN }} - './**/*.md' + lychee \ + --accept 403,429,500,502,999 \ + --exclude-loopback \ + --exclude 'https?://(www\.)?(linkedin\.com|twitter\.com|instagram\.com|kaggle\.com|fonts\.gstatic\.com|url\.com)' \ + --exclude-path '**/ci.yaml' \ + --github-token ${{ secrets.GITHUB_TOKEN }} \ + './**/*.md' \ './**/*.html' - name: Test Markdown, HTML, YAML, Python and Notebook links with retry @@ -51,15 +51,15 @@ jobs: retry_wait_seconds: 60 max_attempts: 3 command: | - lychee - --accept 429,999 - --exclude-loopback - --exclude 'https?://(www\.)?(linkedin\.com|twitter\.com|instagram\.com|kaggle\.com|fonts\.gstatic\.com|url\.com)' - --exclude-path '**/ci.yaml' - --github-token ${{ secrets.GITHUB_TOKEN }} - './**/*.md' - './**/*.html' - './**/*.yml' - './**/*.yaml' - './**/*.py' + lychee \ + --accept 429,999 \ + --exclude-loopback \ + --exclude 'https?://(www\.)?(linkedin\.com|twitter\.com|instagram\.com|kaggle\.com|fonts\.gstatic\.com|url\.com)' \ + --exclude-path '**/ci.yaml' \ + --github-token ${{ secrets.GITHUB_TOKEN }} \ + './**/*.md' \ + './**/*.html' \ + './**/*.yml' \ + './**/*.yaml' \ + './**/*.py' \ './**/*.ipynb' From 9abbef522f6e36b36c7d1ce91afa3f58ac4e7fde Mon Sep 17 00:00:00 2001 From: Muhammad Rizwan Munawar Date: Wed, 10 Jan 2024 20:16:40 +0500 Subject: [PATCH 1849/1976] Update banners for YOLOv8 release v8.1.0 (#12605) * Auto-format by Ultralytics actions * updated git banner * Update README.md Signed-off-by: Glenn Jocher * Update README.zh-CN.md Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher Co-authored-by: UltralyticsAssistant Co-authored-by: Glenn Jocher --- README.md | 4 ++-- README.zh-CN.md | 4 ++-- utils/flask_rest_api/README.md | 7 ++----- utils/loggers/clearml/README.md | 23 ++++++++++------------- utils/loggers/comet/README.md | 26 
+++++++++----------------- 5 files changed, 25 insertions(+), 39 deletions(-) diff --git a/README.md b/README.md index c778a17258e9..6d94bcaea761 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@

- - + + runs/exp{sep}2, runs/exp{sep}3, ... etc. + """ + Generates an incremented file or directory path if it exists, with optional mkdir; args: path, exist_ok=False, + sep="", mkdir=False. + + Example: runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc + """ path = Path(path) # os-agnostic if path.exists() and not exist_ok: path, suffix = (path.with_suffix(""), path.suffix) if path.is_file() else (path, "") @@ -1202,10 +1250,14 @@ def increment_path(path, exist_ok=False, sep="", mkdir=False): def imread(filename, flags=cv2.IMREAD_COLOR): + """Reads an image from a file and returns it as a numpy array, using OpenCV's imdecode to support multilanguage + paths. + """ return cv2.imdecode(np.fromfile(filename, np.uint8), flags) def imwrite(filename, img): + """Writes an image to a file, returns True on success and False on failure, supports multilanguage paths.""" try: cv2.imencode(Path(filename).suffix, img)[1].tofile(filename) return True @@ -1214,6 +1266,7 @@ def imwrite(filename, img): def imshow(path, im): + """Displays an image using Unicode path, requires encoded path and image matrix as input.""" imshow_(path.encode("unicode_escape").decode(), im) diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 36792979913a..c3fbded50a3c 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -73,6 +73,7 @@ def _json_default(value): class Loggers: # YOLOv5 Loggers class def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, include=LOGGERS): + """Initializes loggers for YOLOv5 training and validation metrics, paths, and options.""" self.save_dir = save_dir self.weights = weights self.opt = opt @@ -150,7 +151,7 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, @property def remote_dataset(self): - # Get data_dict if custom dataset artifact link is provided + """Fetches dataset dictionary from remote logging services like ClearML, Weights & Biases, or Comet ML.""" data_dict = None if self.clearml: data_dict = self.clearml.data_dict @@ -162,15 +163,17 @@ def remote_dataset(self): return data_dict def on_train_start(self): + """Initializes the training process for Comet ML logger if it's configured.""" if self.comet_logger: self.comet_logger.on_train_start() def on_pretrain_routine_start(self): + """Invokes pre-training routine start hook for Comet ML logger if available.""" if self.comet_logger: self.comet_logger.on_pretrain_routine_start() def on_pretrain_routine_end(self, labels, names): - # Callback runs on pre-train routine end + """Callback that runs at the end of pre-training routine, logging label plots if enabled.""" if self.plots: plot_labels(labels, names, self.save_dir) paths = self.save_dir.glob("*labels*.jpg") # training labels @@ -183,6 +186,7 @@ def on_pretrain_routine_end(self, labels, names): self.clearml.log_plot(title=path.stem, plot_path=path) def on_train_batch_end(self, model, ni, imgs, targets, paths, vals): + """Logs training batch end events, plots images, and updates external loggers with batch-end data.""" log_dict = dict(zip(self.keys[:3], vals)) # Callback runs on train batch end # ni: number integrated batches (since train start) @@ -203,7 +207,7 @@ def on_train_batch_end(self, model, ni, imgs, targets, paths, vals): self.comet_logger.on_train_batch_end(log_dict, step=ni) def on_train_epoch_end(self, epoch): - # Callback runs on train epoch end + """Callback that updates the current epoch in Weights & Biases at the end of a training epoch.""" if self.wandb: 
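# Note: every hook in this Loggers class follows the same guard-and-forward
# shape — check that a backend was configured, then delegate the event to it.
# A minimal sketch of that fan-out pattern, assuming a hypothetical list of
# backend objects (illustrative only, not the actual YOLOv5 Loggers API):
class FanOutLogger:
    """Forward each training callback to every configured backend."""

    def __init__(self, backends):
        self.backends = [b for b in backends if b is not None]  # skip unconfigured backends

    def on_train_epoch_end(self, epoch):
        for b in self.backends:
            hook = getattr(b, "on_train_epoch_end", None)  # hooks are optional per backend
            if callable(hook):
                hook(epoch)  # forward the event unchanged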
self.wandb.current_epoch = epoch + 1 @@ -211,22 +215,24 @@ def on_train_epoch_end(self, epoch): self.comet_logger.on_train_epoch_end(epoch) def on_val_start(self): + """Callback that signals the start of a validation phase to the Comet logger.""" if self.comet_logger: self.comet_logger.on_val_start() def on_val_image_end(self, pred, predn, path, names, im): - # Callback runs on val image end + """Callback that logs a validation image and its predictions to WandB or ClearML.""" if self.wandb: self.wandb.val_one_image(pred, predn, path, names, im) if self.clearml: self.clearml.log_image_with_boxes(path, pred, names, im) def on_val_batch_end(self, batch_i, im, targets, paths, shapes, out): + """Logs validation batch results to Comet ML during training at the end of each validation batch.""" if self.comet_logger: self.comet_logger.on_val_batch_end(batch_i, im, targets, paths, shapes, out) def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix): - # Callback runs on val end + """Logs validation results to WandB or ClearML at the end of the validation process.""" if self.wandb or self.clearml: files = sorted(self.save_dir.glob("val*.jpg")) if self.wandb: @@ -238,7 +244,7 @@ def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix) self.comet_logger.on_val_end(nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix) def on_fit_epoch_end(self, vals, epoch, best_fitness, fi): - # Callback runs at the end of each fit (train+val) epoch + """Callback that logs metrics and saves them to CSV or NDJSON at the end of each fit (train+val) epoch.""" x = dict(zip(self.keys, vals)) if self.csv: file = self.save_dir / "results.csv" @@ -277,7 +283,7 @@ def on_fit_epoch_end(self, vals, epoch, best_fitness, fi): self.comet_logger.on_fit_epoch_end(x, epoch=epoch) def on_model_save(self, last, epoch, final_epoch, best_fitness, fi): - # Callback runs on model save event + """Callback that handles model saving events, logging to Weights & Biases or ClearML if enabled.""" if (epoch + 1) % self.opt.save_period == 0 and not final_epoch and self.opt.save_period != -1: if self.wandb: self.wandb.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi) @@ -290,7 +296,7 @@ def on_model_save(self, last, epoch, final_epoch, best_fitness, fi): self.comet_logger.on_model_save(last, epoch, final_epoch, best_fitness, fi) def on_train_end(self, last, best, epoch, results): - # Callback runs on training end, i.e. 
saving best model + """Callback that runs at the end of training to save plots and log results.""" if self.plots: plot_results(file=self.save_dir / "results.csv") # save results.png files = ["results.png", "confusion_matrix.png", *(f"{x}_curve.png" for x in ("F1", "PR", "P", "R"))] @@ -326,7 +332,7 @@ def on_train_end(self, last, best, epoch, results): self.comet_logger.on_train_end(files, self.save_dir, last, best, epoch, final_results) def on_params_update(self, params: dict): - # Update hyperparams or configs of the experiment + """Updates experiment hyperparameters or configurations in WandB, Comet, or ClearML.""" if self.wandb: self.wandb.wandb_run.config.update(params, allow_val_change=True) if self.comet_logger: @@ -346,7 +352,7 @@ class GenericLogger: """ def __init__(self, opt, console_logger, include=("tb", "wandb", "clearml")): - # init default loggers + """Initializes a generic logger with optional TensorBoard, W&B, and ClearML support.""" self.save_dir = Path(opt.save_dir) self.include = include self.console_logger = console_logger @@ -381,7 +387,7 @@ def __init__(self, opt, console_logger, include=("tb", "wandb", "clearml")): self.clearml = None def log_metrics(self, metrics, epoch): - # Log metrics dictionary to all loggers + """Logs metrics to CSV, TensorBoard, W&B, and ClearML; `metrics` is a dict, `epoch` is an int.""" if self.csv: keys, vals = list(metrics.keys()), list(metrics.values()) n = len(metrics) + 1 # number of cols @@ -400,7 +406,7 @@ def log_metrics(self, metrics, epoch): self.clearml.log_scalars(metrics, epoch) def log_images(self, files, name="Images", epoch=0): - # Log images to all loggers + """Logs images to all loggers with optional naming and epoch specification.""" files = [Path(f) for f in (files if isinstance(files, (tuple, list)) else [files])] # to Path files = [f for f in files if f.exists()] # filter by exists @@ -418,11 +424,12 @@ def log_images(self, files, name="Images", epoch=0): self.clearml.log_debug_samples(files, title=name) def log_graph(self, model, imgsz=(640, 640)): - # Log model graph to all loggers + """Logs model graph to all configured loggers with specified input image size.""" if self.tb: log_tensorboard_graph(self.tb, model, imgsz) def log_model(self, model_path, epoch=0, metadata=None): + """Logs the model to all configured loggers with optional epoch and metadata.""" if metadata is None: metadata = {} # Log model to all loggers @@ -434,7 +441,7 @@ def log_model(self, model_path, epoch=0, metadata=None): self.clearml.log_model(model_path=model_path, model_name=model_path.stem) def update_params(self, params): - # Update the parameters logged + """Updates logged parameters in WandB and/or ClearML if enabled.""" if self.wandb: wandb.run.config.update(params, allow_val_change=True) if self.clearml: @@ -442,7 +449,7 @@ def update_params(self, params): def log_tensorboard_graph(tb, model, imgsz=(640, 640)): - # Log model graph to TensorBoard + """Logs the model graph to TensorBoard with specified image size and model.""" try: p = next(model.parameters()) # for device, type imgsz = (imgsz, imgsz) if isinstance(imgsz, int) else imgsz # expand @@ -455,7 +462,7 @@ def log_tensorboard_graph(tb, model, imgsz=(640, 640)): def web_project_name(project): - # Convert local project name to web project name + """Converts a local project name to a standardized web project name with optional suffixes.""" if not project.startswith("runs/train"): return project suffix = "-Classify" if project.endswith("-cls") else "-Segment" if 
project.endswith("-seg") else "" diff --git a/utils/loggers/comet/__init__.py b/utils/loggers/comet/__init__.py index cec46f5af1fb..076eb3ccecab 100644 --- a/utils/loggers/comet/__init__.py +++ b/utils/loggers/comet/__init__.py @@ -165,6 +165,7 @@ def __init__(self, opt, hyp, run_id=None, job_type="Training", **experiment_kwar self.experiment.log_other("optimizer_parameters", json.dumps(self.hyp)) def _get_experiment(self, mode, experiment_id=None): + """Returns a new or existing Comet.ml experiment based on mode and optional experiment_id.""" if mode == "offline": return ( comet_ml.ExistingOfflineExperiment( @@ -197,21 +198,27 @@ def _get_experiment(self, mode, experiment_id=None): return def log_metrics(self, log_dict, **kwargs): + """Logs metrics to the current experiment, accepting a dictionary of metric names and values.""" self.experiment.log_metrics(log_dict, **kwargs) def log_parameters(self, log_dict, **kwargs): + """Logs parameters to the current experiment, accepting a dictionary of parameter names and values.""" self.experiment.log_parameters(log_dict, **kwargs) def log_asset(self, asset_path, **kwargs): + """Logs a file or directory as an asset to the current experiment.""" self.experiment.log_asset(asset_path, **kwargs) def log_asset_data(self, asset, **kwargs): + """Logs in-memory data as an asset to the current experiment, with optional kwargs.""" self.experiment.log_asset_data(asset, **kwargs) def log_image(self, img, **kwargs): + """Logs an image to the current experiment with optional kwargs.""" self.experiment.log_image(img, **kwargs) def log_model(self, path, opt, epoch, fitness_score, best_model=False): + """Logs model checkpoint to experiment with path, options, epoch, fitness, and best model flag.""" if not self.save_model: return @@ -235,6 +242,7 @@ def log_model(self, path, opt, epoch, fitness_score, best_model=False): ) def check_dataset(self, data_file): + """Validates the dataset configuration by loading the YAML file specified in `data_file`.""" with open(data_file) as f: data_config = yaml.safe_load(f) @@ -247,6 +255,7 @@ def check_dataset(self, data_file): return check_dataset(data_file) def log_predictions(self, image, labelsn, path, shape, predn): + """Logs predictions with IOU filtering, given image, labels, path, shape, and predictions.""" if self.logged_images_count >= self.max_images: return detections = predn[predn[:, 4] > self.conf_thres] @@ -287,6 +296,7 @@ def log_predictions(self, image, labelsn, path, shape, predn): return def preprocess_prediction(self, image, labels, shape, pred): + """Processes prediction data, resizing labels and adding dataset metadata.""" nl, _ = labels.shape[0], pred.shape[0] # Predictions @@ -306,6 +316,7 @@ def preprocess_prediction(self, image, labels, shape, pred): return predn, labelsn def add_assets_to_artifact(self, artifact, path, asset_path, split): + """Adds image and label assets to a wandb artifact given dataset split and paths.""" img_paths = sorted(glob.glob(f"{asset_path}/*")) label_paths = img2label_paths(img_paths) @@ -331,6 +342,7 @@ def add_assets_to_artifact(self, artifact, path, asset_path, split): return artifact def upload_dataset_artifact(self): + """Uploads a YOLOv5 dataset as an artifact to the Comet.ml platform.""" dataset_name = self.data_dict.get("dataset_name", "yolov5-dataset") path = str((ROOT / Path(self.data_dict["path"])).resolve()) @@ -355,6 +367,7 @@ def upload_dataset_artifact(self): return def download_dataset_artifact(self, artifact_path): + """Downloads a dataset artifact to a 
specified directory using the experiment's logged artifact.""" logged_artifact = self.experiment.get_artifact(artifact_path) artifact_save_dir = str(Path(self.opt.save_dir) / logged_artifact.name) logged_artifact.download(artifact_save_dir) @@ -374,6 +387,7 @@ def download_dataset_artifact(self, artifact_path): return self.update_data_paths(data_dict) def update_data_paths(self, data_dict): + """Updates data paths in the dataset dictionary, defaulting 'path' to an empty string if not present.""" path = data_dict.get("path", "") for split in ["train", "val", "test"]: @@ -386,6 +400,7 @@ def update_data_paths(self, data_dict): return data_dict def on_pretrain_routine_end(self, paths): + """Called at the end of pretraining routine to handle paths if training is not being resumed.""" if self.opt.resume: return @@ -398,20 +413,25 @@ def on_pretrain_routine_end(self, paths): return def on_train_start(self): + """Logs hyperparameters at the start of training.""" self.log_parameters(self.hyp) def on_train_epoch_start(self): + """Called at the start of each training epoch.""" return def on_train_epoch_end(self, epoch): + """Updates the current epoch in the experiment tracking at the end of each epoch.""" self.experiment.curr_epoch = epoch return def on_train_batch_start(self): + """Called at the start of each training batch.""" return def on_train_batch_end(self, log_dict, step): + """Callback function that updates and logs metrics at the end of each training batch if conditions are met.""" self.experiment.curr_step = step if self.log_batch_metrics and (step % self.comet_log_batch_interval == 0): self.log_metrics(log_dict, step=step) @@ -419,6 +439,7 @@ def on_train_batch_end(self, log_dict, step): return def on_train_end(self, files, save_dir, last, best, epoch, results): + """Logs metadata and optionally saves model files at the end of training.""" if self.comet_log_predictions: curr_epoch = self.experiment.curr_epoch self.experiment.log_asset_data(self.metadata_dict, "image-metadata.json", epoch=curr_epoch) @@ -446,12 +467,15 @@ def on_train_end(self, files, save_dir, last, best, epoch, results): self.finish_run() def on_val_start(self): + """Called at the start of validation, currently a placeholder with no functionality.""" return def on_val_batch_start(self): + """Placeholder called at the start of a validation batch with no current functionality.""" return def on_val_batch_end(self, batch_i, images, targets, paths, shapes, outputs): + """Callback executed at the end of a validation batch, conditionally logs predictions to Comet ML.""" if not (self.comet_log_predictions and ((batch_i + 1) % self.comet_log_prediction_interval == 0)): return @@ -470,6 +494,7 @@ def on_val_batch_end(self, batch_i, images, targets, paths, shapes, outputs): return def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix): + """Logs per-class metrics to Comet.ml after validation if enabled and more than one class exists.""" if self.comet_log_per_class_metrics and self.num_classes > 1: for i, c in enumerate(ap_class): class_name = self.class_names[c] @@ -504,14 +529,18 @@ def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix) ) def on_fit_epoch_end(self, result, epoch): + """Logs metrics at the end of each training epoch.""" self.log_metrics(result, epoch=epoch) def on_model_save(self, last, epoch, final_epoch, best_fitness, fi): + """Callback to save model checkpoints periodically if conditions are met.""" if ((epoch + 1) % self.opt.save_period == 0 and not 
final_epoch) and self.opt.save_period != -1: self.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi) def on_params_update(self, params): + """Logs updated parameters during training.""" self.log_parameters(params) def finish_run(self): + """Ends the current experiment and logs its completion.""" self.experiment.end() diff --git a/utils/loggers/comet/comet_utils.py b/utils/loggers/comet/comet_utils.py index 6e8fad68c6cc..7eca1f504d69 100644 --- a/utils/loggers/comet/comet_utils.py +++ b/utils/loggers/comet/comet_utils.py @@ -17,6 +17,7 @@ def download_model_checkpoint(opt, experiment): + """Downloads YOLOv5 model checkpoint from Comet ML experiment, updating `opt.weights` with download path.""" model_dir = f"{opt.project}/{experiment.name}" os.makedirs(model_dir, exist_ok=True) diff --git a/utils/loggers/comet/hpo.py b/utils/loggers/comet/hpo.py index a9e6fabec1cd..8ca08ddc858a 100644 --- a/utils/loggers/comet/hpo.py +++ b/utils/loggers/comet/hpo.py @@ -25,6 +25,9 @@ def get_args(known=False): + """Parses command-line arguments for YOLOv5 training, supporting configuration of weights, data paths, + hyperparameters, and more. + """ parser = argparse.ArgumentParser() parser.add_argument("--weights", type=str, default=ROOT / "yolov5s.pt", help="initial weights path") parser.add_argument("--cfg", type=str, default="", help="model.yaml path") @@ -83,6 +86,7 @@ def get_args(known=False): def run(parameters, opt): + """Executes YOLOv5 training with given hyperparameters and options, setting up device and training directories.""" hyp_dict = {k: v for k, v in parameters.items() if k not in ["epochs", "batch_size"]} opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve)) diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index 0af8bda12d85..4083312e6a59 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -152,6 +152,7 @@ def log_model(self, path, opt, epoch, fitness_score, best_model=False): LOGGER.info(f"Saving model artifact on epoch {epoch + 1}") def val_one_image(self, pred, predn, path, names, im): + """Evaluates model prediction for a single image, returning metrics and visualizations.""" pass def log(self, log_dict): diff --git a/utils/loss.py b/utils/loss.py index 26b8c06bf333..8a910e12ad6f 100644 --- a/utils/loss.py +++ b/utils/loss.py @@ -16,11 +16,17 @@ def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#iss class BCEBlurWithLogitsLoss(nn.Module): # BCEwithLogitLoss() with reduced missing label effects. def __init__(self, alpha=0.05): + """Initializes a modified BCEWithLogitsLoss with reduced missing label effects, taking optional alpha smoothing + parameter. + """ super().__init__() self.loss_fcn = nn.BCEWithLogitsLoss(reduction="none") # must be nn.BCEWithLogitsLoss() self.alpha = alpha def forward(self, pred, true): + """Computes modified BCE loss for YOLOv5 with reduced missing label effects, taking pred and true tensors, + returns mean loss. + """ loss = self.loss_fcn(pred, true) pred = torch.sigmoid(pred) # prob from logits dx = pred - true # reduce only missing label effects @@ -33,6 +39,9 @@ def forward(self, pred, true): class FocalLoss(nn.Module): # Wraps focal loss around existing loss_fcn(), i.e. 
criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): + """Initializes FocalLoss with specified loss function, gamma, and alpha values; modifies loss reduction to + 'none'. + """ super().__init__() self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() self.gamma = gamma @@ -41,6 +50,7 @@ def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): self.loss_fcn.reduction = "none" # required to apply FL to each element def forward(self, pred, true): + """Calculates the focal loss between predicted and true labels using a modified BCEWithLogitsLoss.""" loss = self.loss_fcn(pred, true) # p_t = torch.exp(-loss) # loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability @@ -63,6 +73,7 @@ def forward(self, pred, true): class QFocalLoss(nn.Module): # Wraps Quality focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): + """Initializes Quality Focal Loss with given loss function, gamma, alpha; modifies reduction to 'none'.""" super().__init__() self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() self.gamma = gamma @@ -71,6 +82,9 @@ def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): self.loss_fcn.reduction = "none" # required to apply FL to each element def forward(self, pred, true): + """Computes the focal loss between `pred` and `true` using BCEWithLogitsLoss, adjusting for imbalance with + `gamma` and `alpha`. + """ loss = self.loss_fcn(pred, true) pred_prob = torch.sigmoid(pred) # prob from logits @@ -91,6 +105,7 @@ class ComputeLoss: # Compute losses def __init__(self, model, autobalance=False): + """Initializes ComputeLoss with model and autobalance option, autobalances losses if True.""" device = next(model.parameters()).device # get model device h = model.hyp # hyperparameters @@ -173,7 +188,9 @@ def __call__(self, p, targets): # predictions, targets return (lbox + lobj + lcls) * bs, torch.cat((lbox, lobj, lcls)).detach() def build_targets(self, p, targets): - # Build targets for compute_loss(), input targets(image,class,x,y,w,h) + """Prepares model targets from input targets (image,class,x,y,w,h) for loss computation, returning class, box, + indices, and anchors. 
+ """ na, nt = self.na, targets.shape[0] # number of anchors, targets tcls, tbox, indices, anch = [], [], [], [] gain = torch.ones(7, device=self.device) # normalized to gridspace gain diff --git a/utils/metrics.py b/utils/metrics.py index 5f45621dc372..e572355fec1e 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -13,13 +13,13 @@ def fitness(x): - # Model fitness as a weighted combination of metrics + """Calculates fitness of a model using weighted sum of metrics P, R, mAP@0.5, mAP@0.5:0.95.""" w = [0.0, 0.0, 0.1, 0.9] # weights for [P, R, mAP@0.5, mAP@0.5:0.95] return (x[:, :4] * w).sum(1) def smooth(y, f=0.05): - # Box filter of fraction f + """Applies box filter smoothing to array `y` with fraction `f`, yielding a smoothed array.""" nf = round(len(y) * f * 2) // 2 + 1 # number of filter elements (must be odd) p = np.ones(nf // 2) # ones padding yp = np.concatenate((p * y[0], y, p * y[-1]), 0) # y padded @@ -126,6 +126,7 @@ def compute_ap(recall, precision): class ConfusionMatrix: # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix def __init__(self, nc, conf=0.25, iou_thres=0.45): + """Initializes ConfusionMatrix with given number of classes, confidence, and IoU threshold.""" self.matrix = np.zeros((nc + 1, nc + 1)) self.nc = nc # number of classes self.conf = conf @@ -179,6 +180,9 @@ def process_batch(self, detections, labels): self.matrix[dc, self.nc] += 1 # predicted background def tp_fp(self): + """Calculates true positives (tp) and false positives (fp) excluding the background class from the confusion + matrix. + """ tp = self.matrix.diagonal() # true positives fp = self.matrix.sum(1) - tp # false positives # fn = self.matrix.sum(0) - tp # false negatives (missed detections) @@ -186,6 +190,7 @@ def tp_fp(self): @TryExcept("WARNING ⚠️ ConfusionMatrix plot failure") def plot(self, normalize=True, save_dir="", names=()): + """Plots confusion matrix using seaborn, optional normalization; can save plot to specified directory.""" import seaborn as sn array = self.matrix / ((self.matrix.sum(0).reshape(1, -1) + 1e-9) if normalize else 1) # normalize columns @@ -217,12 +222,17 @@ def plot(self, normalize=True, save_dir="", names=()): plt.close(fig) def print(self): + """Prints the confusion matrix row-wise, with each class and its predictions separated by spaces.""" for i in range(self.nc + 1): print(" ".join(map(str, self.matrix[i]))) def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7): - # Returns Intersection over Union (IoU) of box1(1,4) to box2(n,4) + """ + Calculates IoU, GIoU, DIoU, or CIoU between two boxes, supporting xywh/xyxy formats. + + Input shapes are box1(1,4) to box2(n,4). + """ # Get the coordinates of bounding boxes if xywh: # transform from xywh to xyxy @@ -312,7 +322,9 @@ def bbox_ioa(box1, box2, eps=1e-7): def wh_iou(wh1, wh2, eps=1e-7): - # Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2 + """Calculates the Intersection over Union (IoU) for two sets of widths and heights; `wh1` and `wh2` should be nx2 + and mx2 tensors. + """ wh1 = wh1[:, None] # [N,1,2] wh2 = wh2[None] # [1,M,2] inter = torch.min(wh1, wh2).prod(2) # [N,M] @@ -324,7 +336,9 @@ def wh_iou(wh1, wh2, eps=1e-7): @threaded def plot_pr_curve(px, py, ap, save_dir=Path("pr_curve.png"), names=()): - # Precision-recall curve + """Plots precision-recall curve, optionally per class, saving to `save_dir`; `px`, `py` are lists, `ap` is Nx2 + array, `names` optional. 
+ """ fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) py = np.stack(py, axis=1) @@ -347,7 +361,7 @@ def plot_pr_curve(px, py, ap, save_dir=Path("pr_curve.png"), names=()): @threaded def plot_mc_curve(px, py, save_dir=Path("mc_curve.png"), names=(), xlabel="Confidence", ylabel="Metric"): - # Metric-confidence curve + """Plots a metric-confidence curve for model predictions, supporting per-class visualization and smoothing.""" fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) if 0 < len(names) < 21: # display per-class legend if < 21 classes diff --git a/utils/plots.py b/utils/plots.py index 11c96a6372c3..e1b073dfb1ad 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -31,7 +31,11 @@ class Colors: # Ultralytics color palette https://ultralytics.com/ def __init__(self): - # hex = matplotlib.colors.TABLEAU_COLORS.values() + """ + Initializes the Colors class with a palette derived from Ultralytics color scheme, converting hex codes to RGB. + + Colors derived from `hex = matplotlib.colors.TABLEAU_COLORS.values()`. + """ hexs = ( "FF3838", "FF9D97", @@ -58,6 +62,7 @@ def __init__(self): self.n = len(self.palette) def __call__(self, i, bgr=False): + """Returns color from palette by index `i`, in BGR format if `bgr=True`, else RGB; `i` is an integer index.""" c = self.palette[int(i) % self.n] return (c[2], c[1], c[0]) if bgr else c @@ -100,7 +105,11 @@ def feature_visualization(x, module_type, stage, n=32, save_dir=Path("runs/detec def hist2d(x, y, n=100): - # 2d histogram used in labels.png and evolve.png + """ + Generates a logarithmic 2D histogram, useful for visualizing label or evolution distributions. + + Used in used in labels.png and evolve.png. + """ xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n) hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges)) xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1) @@ -109,6 +118,7 @@ def hist2d(x, y, n=100): def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5): + """Applies a low-pass Butterworth filter to `data` with specified `cutoff`, `fs`, and `order`.""" from scipy.signal import butter, filtfilt # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy @@ -122,7 +132,9 @@ def butter_lowpass(cutoff, fs, order): def output_to_target(output, max_det=300): - # Convert model output to target format [batch_id, class_id, x, y, w, h, conf] for plotting + """Converts YOLOv5 model output to [batch_id, class_id, x, y, w, h, conf] format for plotting, limiting detections + to `max_det`. 
+ """ targets = [] for i, o in enumerate(output): box, conf, cls = o[:max_det, :6].cpu().split((4, 1, 1), 1) @@ -133,7 +145,7 @@ def output_to_target(output, max_det=300): @threaded def plot_images(images, targets, paths=None, fname="images.jpg", names=None): - # Plot image grid with labels + """Plots an image grid with labels from YOLOv5 predictions or targets, saving to `fname`.""" if isinstance(images, torch.Tensor): images = images.cpu().float().numpy() if isinstance(targets, torch.Tensor): @@ -197,7 +209,7 @@ def plot_images(images, targets, paths=None, fname="images.jpg", names=None): def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=""): - # Plot LR simulating training for full epochs + """Plots learning rate schedule for given optimizer and scheduler, saving plot to `save_dir`.""" optimizer, scheduler = copy(optimizer), copy(scheduler) # do not modify originals y = [] for _ in range(epochs): @@ -295,7 +307,7 @@ def plot_val_study(file="", dir="", x=None): # from utils.plots import *; plot_ @TryExcept() # known issue https://github.com/ultralytics/yolov5/issues/5395 def plot_labels(labels, names=(), save_dir=Path("")): - # plot dataset labels + """Plots dataset labels, saving correlogram and label images, handles classes, and visualizes bounding boxes.""" LOGGER.info(f"Plotting labels to {save_dir / 'labels.jpg'}... ") c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes nc = int(c.max() + 1) # number of classes @@ -340,7 +352,7 @@ def plot_labels(labels, names=(), save_dir=Path("")): def imshow_cls(im, labels=None, pred=None, names=None, nmax=25, verbose=False, f=Path("images.jpg")): - # Show classification image grid with labels (optional) and predictions (optional) + """Displays a grid of images with optional labels and predictions, saving to a file.""" from utils.augmentations import denormalize names = names or [f"class{i}" for i in range(1000)] @@ -397,7 +409,11 @@ def plot_evolve(evolve_csv="path/to/evolve.csv"): # from utils.plots import *; def plot_results(file="path/to/results.csv", dir=""): - # Plot training results.csv. Usage: from utils.plots import *; plot_results('path/to/results.csv') + """ + Plots training results from a 'results.csv' file; accepts file path and directory as arguments. + + Example: from utils.plots import *; plot_results('path/to/results.csv') + """ save_dir = Path(file).parent if file else Path(dir) fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True) ax = ax.ravel() @@ -424,7 +440,11 @@ def plot_results(file="path/to/results.csv", dir=""): def profile_idetection(start=0, stop=0, labels=(), save_dir=""): - # Plot iDetection '*.txt' per-image logs. from utils.plots import *; profile_idetection() + """ + Plots per-image iDetection logs, comparing metrics like storage and performance over time. + + Example: from utils.plots import *; profile_idetection() + """ ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel() s = ["Images", "Free Storage (GB)", "RAM Usage (GB)", "Battery", "dt_raw (ms)", "dt_smooth (ms)", "real-world FPS"] files = list(Path(save_dir).glob("frames*.txt")) @@ -455,7 +475,9 @@ def profile_idetection(start=0, stop=0, labels=(), save_dir=""): def save_one_box(xyxy, im, file=Path("im.jpg"), gain=1.02, pad=10, square=False, BGR=False, save=True): - # Save image crop as {file} with crop size multiple {gain} and {pad} pixels. Save and/or return crop + """Crops and saves an image from bounding box `xyxy`, applied with `gain` and `pad`, optionally squares and adjusts + for BGR. 
+ """ xyxy = torch.tensor(xyxy).view(-1, 4) b = xyxy2xywh(xyxy) # boxes if square: diff --git a/utils/segment/augmentations.py b/utils/segment/augmentations.py index 56636b65d93a..e13a53d34821 100644 --- a/utils/segment/augmentations.py +++ b/utils/segment/augmentations.py @@ -12,7 +12,11 @@ def mixup(im, labels, segments, im2, labels2, segments2): - # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf + """ + Applies MixUp augmentation blending two images, labels, and segments with a random ratio. + + See https://arxiv.org/pdf/1710.09412.pdf + """ r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0 im = (im * r + im2 * (1 - r)).astype(np.uint8) labels = np.concatenate((labels, labels2), 0) diff --git a/utils/segment/dataloaders.py b/utils/segment/dataloaders.py index b0b3a7424216..9d2e9bef0b09 100644 --- a/utils/segment/dataloaders.py +++ b/utils/segment/dataloaders.py @@ -123,6 +123,7 @@ def __init__( self.overlap = overlap def __getitem__(self, index): + """Returns a transformed item from the dataset at the specified index, handling indexing and image weighting.""" index = self.indices[index] # linear, shuffled, or image_weights hyp = self.hyp @@ -230,7 +231,7 @@ def __getitem__(self, index): return (torch.from_numpy(img), labels_out, self.im_files[index], shapes, masks) def load_mosaic(self, index): - # YOLOv5 4-mosaic loader. Loads 1 image + 3 random images into a 4-image mosaic + """Loads 1 image + 3 random images into a 4-image YOLOv5 mosaic, adjusting labels and segments accordingly.""" labels4, segments4 = [], [] s = self.img_size yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border) # mosaic center x, y @@ -291,6 +292,7 @@ def load_mosaic(self, index): @staticmethod def collate_fn(batch): + """Custom collation function for DataLoader, batches images, labels, paths, shapes, and segmentation masks.""" img, label, path, shapes, masks = zip(*batch) # transposed batched_masks = torch.cat(masks, 0) for i, l in enumerate(label): diff --git a/utils/segment/general.py b/utils/segment/general.py index 8cbc745b4a90..f292496c0da9 100644 --- a/utils/segment/general.py +++ b/utils/segment/general.py @@ -144,7 +144,9 @@ def masks_iou(mask1, mask2, eps=1e-7): def masks2segments(masks, strategy="largest"): - # Convert masks(n,160,160) into segments(n,xy) + """Converts binary (n,160,160) masks to polygon segments with options for concatenation or selecting the largest + segment. + """ segments = [] for x in masks.int().cpu().numpy().astype("uint8"): c = cv2.findContours(x, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0] diff --git a/utils/segment/loss.py b/utils/segment/loss.py index 1e007271fa9c..29f1bcbb7e77 100644 --- a/utils/segment/loss.py +++ b/utils/segment/loss.py @@ -12,6 +12,9 @@ class ComputeLoss: # Compute losses def __init__(self, model, autobalance=False, overlap=False): + """Initializes the compute loss function for YOLOv5 models with options for autobalancing and overlap + handling. 
+ """ self.sort_obj_iou = False self.overlap = overlap device = next(model.parameters()).device # get model device @@ -109,13 +112,15 @@ def __call__(self, preds, targets, masks): # predictions, targets, model return loss * bs, torch.cat((lbox, lseg, lobj, lcls)).detach() def single_mask_loss(self, gt_mask, pred, proto, xyxy, area): - # Mask loss for one image + """Calculates and normalizes single mask loss for YOLOv5 between predicted and ground truth masks.""" pred_mask = (pred @ proto.view(self.nm, -1)).view(-1, *proto.shape[1:]) # (n,32) @ (32,80,80) -> (n,80,80) loss = F.binary_cross_entropy_with_logits(pred_mask, gt_mask, reduction="none") return (crop_mask(loss, xyxy).mean(dim=(1, 2)) / area).mean() def build_targets(self, p, targets): - # Build targets for compute_loss(), input targets(image,class,x,y,w,h) + """Prepares YOLOv5 targets for loss computation; inputs targets (image, class, x, y, w, h), output target + classes/boxes. + """ na, nt = self.na, targets.shape[0] # number of anchors, targets tcls, tbox, indices, anch, tidxs, xywhn = [], [], [], [], [], [] gain = torch.ones(8, device=self.device) # normalized to gridspace gain diff --git a/utils/segment/metrics.py b/utils/segment/metrics.py index 7811e7eb364a..973b398eb6b9 100644 --- a/utils/segment/metrics.py +++ b/utils/segment/metrics.py @@ -7,7 +7,7 @@ def fitness(x): - # Model fitness as a weighted combination of metrics + """Evaluates model fitness by a weighted sum of 8 metrics, `x`: [N,8] array, weights: [0.1, 0.9] for mAP and F1.""" w = [0.0, 0.0, 0.1, 0.9, 0.0, 0.0, 0.1, 0.9] return (x[:, :8] * w).sum(1) @@ -128,6 +128,7 @@ def class_result(self, i): return (self.p[i], self.r[i], self.ap50[i], self.ap[i]) def get_maps(self, nc): + """Calculates and returns mean Average Precision (mAP) for each class given number of classes `nc`.""" maps = np.zeros(nc) + self.map for i, c in enumerate(self.ap_class_index): maps[c] = self.ap[i] @@ -162,17 +163,22 @@ def update(self, results): self.metric_mask.update(list(results["masks"].values())) def mean_results(self): + """Computes and returns the mean results for both box and mask metrics by summing their individual means.""" return self.metric_box.mean_results() + self.metric_mask.mean_results() def class_result(self, i): + """Returns the sum of box and mask metric results for a specified class index `i`.""" return self.metric_box.class_result(i) + self.metric_mask.class_result(i) def get_maps(self, nc): + """Calculates and returns the sum of mean average precisions (mAPs) for both box and mask metrics for `nc` + classes. 
+ """ return self.metric_box.get_maps(nc) + self.metric_mask.get_maps(nc) @property def ap_class_index(self): - # boxes and masks have the same ap_class_index + """Returns the class index for average precision, shared by both box and mask metrics.""" return self.metric_box.ap_class_index diff --git a/utils/segment/plots.py b/utils/segment/plots.py index 0e30c61be66f..ce01988be937 100644 --- a/utils/segment/plots.py +++ b/utils/segment/plots.py @@ -15,7 +15,7 @@ @threaded def plot_images_and_masks(images, targets, masks, paths=None, fname="images.jpg", names=None): - # Plot image grid with labels + """Plots a grid of images, their labels, and masks with optional resizing and annotations, saving to fname.""" if isinstance(images, torch.Tensor): images = images.cpu().float().numpy() if isinstance(targets, torch.Tensor): @@ -111,7 +111,11 @@ def plot_images_and_masks(images, targets, masks, paths=None, fname="images.jpg" def plot_results_with_masks(file="path/to/results.csv", dir="", best=True): - # Plot training results.csv. Usage: from utils.plots import *; plot_results('path/to/results.csv') + """ + Plots training results from CSV files, plotting best or last result highlights based on `best` parameter. + + Example: from utils.plots import *; plot_results('path/to/results.csv') + """ save_dir = Path(file).parent if file else Path(dir) fig, ax = plt.subplots(2, 8, figsize=(18, 6), tight_layout=True) ax = ax.ravel() diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 6bc4b4c7fd04..c2c760efa404 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -34,7 +34,8 @@ def smart_inference_mode(torch_1_9=check_version(torch.__version__, "1.9.0")): - # Applies torch.inference_mode() decorator if torch>=1.9.0 else torch.no_grad() decorator + """Applies torch.inference_mode() if torch>=1.9.0, else torch.no_grad() as a decorator for functions.""" + def decorate(fn): return (torch.inference_mode if torch_1_9 else torch.no_grad)()(fn) @@ -42,7 +43,9 @@ def decorate(fn): def smartCrossEntropyLoss(label_smoothing=0.0): - # Returns nn.CrossEntropyLoss with label smoothing enabled for torch>=1.10.0 + """Returns a CrossEntropyLoss with optional label smoothing for torch>=1.10.0; warns if smoothing on lower + versions. + """ if check_version(torch.__version__, "1.10.0"): return nn.CrossEntropyLoss(label_smoothing=label_smoothing) if label_smoothing > 0: @@ -51,7 +54,7 @@ def smartCrossEntropyLoss(label_smoothing=0.0): def smart_DDP(model): - # Model DDP creation with checks + """Initializes DistributedDataParallel (DDP) for model training, respecting torch version constraints.""" assert not check_version(torch.__version__, "1.12.0", pinned=True), ( "torch==1.12.0 torchvision==0.13.0 DDP training is not supported due to a known issue. " "Please upgrade or downgrade torch to use DDP. 
See https://github.com/ultralytics/yolov5/issues/8395" @@ -63,7 +66,7 @@ def smart_DDP(model): def reshape_classifier_output(model, n=1000): - # Update a TorchVision classification model to class count 'n' if required + """Reshapes last layer of model to match class count 'n', supporting Classify, Linear, Sequential types.""" from models.common import Classify name, m = list((model.model if hasattr(model, "model") else model).named_children())[-1] # last module @@ -87,7 +90,9 @@ def reshape_classifier_output(model, n=1000): @contextmanager def torch_distributed_zero_first(local_rank: int): - # Decorator to make all processes in distributed training wait for each local_master to do something + """Context manager ensuring ordered operations in distributed training by making all processes wait for the leading + process. + """ if local_rank not in [-1, 0]: dist.barrier(device_ids=[local_rank]) yield @@ -96,7 +101,7 @@ def torch_distributed_zero_first(local_rank: int): def device_count(): - # Returns number of CUDA devices available. Safe version of torch.cuda.device_count(). Supports Linux and Windows + """Returns the number of available CUDA devices; works on Linux and Windows by invoking `nvidia-smi`.""" assert platform.system() in ("Linux", "Windows"), "device_count() only supported on Linux or Windows" try: cmd = "nvidia-smi -L | wc -l" if platform.system() == "Linux" else 'nvidia-smi -L | find /c /v ""' # Windows @@ -106,7 +111,7 @@ def device_count(): def select_device(device="", batch_size=0, newline=True): - # device = None or 'cpu' or 0 or '0' or '0,1,2,3' + """Selects computing device (CPU, CUDA GPU, MPS) for YOLOv5 model deployment, logging device info.""" s = f"YOLOv5 🚀 {git_describe() or file_date()} Python-{platform.python_version()} torch-{torch.__version__} " device = str(device).strip().lower().replace("cuda:", "").replace("none", "") # to string, 'cuda:0' to '0' cpu = device == "cpu" @@ -143,7 +148,7 @@ def select_device(device="", batch_size=0, newline=True): def time_sync(): - # PyTorch-accurate time + """Synchronizes PyTorch for accurate timing, leveraging CUDA if available, and returns the current time.""" if torch.cuda.is_available(): torch.cuda.synchronize() return time.time() @@ -203,16 +208,19 @@ def profile(input, ops, n=10, device=None): def is_parallel(model): - # Returns True if model is of type DP or DDP + """Checks if the model is using Data Parallelism (DP) or Distributed Data Parallelism (DDP).""" return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel) def de_parallel(model): - # De-parallelize a model: returns single-GPU model if model is of type DP or DDP + """Returns a single-GPU model by removing Data Parallelism (DP) or Distributed Data Parallelism (DDP) if applied.""" return model.module if is_parallel(model) else model def initialize_weights(model): + """Initializes weights of Conv2d, BatchNorm2d, and activations (Hardswish, LeakyReLU, ReLU, ReLU6, SiLU) in the + model. 
+ """ for m in model.modules(): t = type(m) if t is nn.Conv2d: @@ -225,12 +233,14 @@ def initialize_weights(model): def find_modules(model, mclass=nn.Conv2d): - # Finds layer indices matching module class 'mclass' + """Finds and returns list of layer indices in `model.module_list` matching the specified `mclass`.""" return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)] def sparsity(model): - # Return global model sparsity + """Calculates and returns the global sparsity of a model as the ratio of zero-valued parameters to total + parameters. + """ a, b = 0, 0 for p in model.parameters(): a += p.numel() @@ -239,7 +249,7 @@ def sparsity(model): def prune(model, amount=0.3): - # Prune model to requested global sparsity + """Prunes Conv2d layers in a model to a specified sparsity using L1 unstructured pruning.""" import torch.nn.utils.prune as prune for name, m in model.named_modules(): @@ -250,7 +260,11 @@ def prune(model, amount=0.3): def fuse_conv_and_bn(conv, bn): - # Fuse Conv2d() and BatchNorm2d() layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/ + """ + Fuses Conv2d and BatchNorm2d layers into a single Conv2d layer. + + See https://tehnokv.com/posts/fusing-batchnorm-and-conv/. + """ fusedconv = ( nn.Conv2d( conv.in_channels, @@ -280,7 +294,11 @@ def fuse_conv_and_bn(conv, bn): def model_info(model, verbose=False, imgsz=640): - # Model information. img_size may be int or list, i.e. img_size=640 or img_size=[640, 320] + """ + Prints model summary including layers, parameters, gradients, and FLOPs; imgsz may be int or list. + + Example: img_size=640 or img_size=[640, 320] + """ n_p = sum(x.numel() for x in model.parameters()) # number parameters n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients if verbose: @@ -319,7 +337,7 @@ def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416) def copy_attr(a, b, include=(), exclude=()): - # Copy attributes from b to a, options to only include [...] and to exclude [...] + """Copies attributes from object b to a, optionally filtering with include and exclude lists.""" for k, v in b.__dict__.items(): if (len(include) and k not in include) or k.startswith("_") or k in exclude: continue @@ -328,7 +346,11 @@ def copy_attr(a, b, include=(), exclude=()): def smart_optimizer(model, name="Adam", lr=0.001, momentum=0.9, decay=1e-5): - # YOLOv5 3-param group optimizer: 0) weights with decay, 1) weights no decay, 2) biases no decay + """ + Initializes YOLOv5 smart optimizer with 3 parameter groups for different decay configurations. + + Groups are 0) weights with decay, 1) weights no decay, 2) biases no decay. + """ g = [], [], [] # optimizer parameter groups bn = tuple(v for k, v in nn.__dict__.items() if "Norm" in k) # normalization layers, i.e. 
BatchNorm2d() for v in model.modules(): @@ -361,7 +383,7 @@ def smart_optimizer(model, name="Adam", lr=0.001, momentum=0.9, decay=1e-5): def smart_hub_load(repo="ultralytics/yolov5", model="yolov5s", **kwargs): - # YOLOv5 torch.hub.load() wrapper with smart error/issue handling + """YOLOv5 torch.hub.load() wrapper with smart error handling, adjusting torch arguments for compatibility.""" if check_version(torch.__version__, "1.9.1"): kwargs["skip_validation"] = True # validation causes GitHub API rate limit errors if check_version(torch.__version__, "1.12.0"): @@ -373,7 +395,7 @@ def smart_hub_load(repo="ultralytics/yolov5", model="yolov5s", **kwargs): def smart_resume(ckpt, optimizer, ema=None, weights="yolov5s.pt", epochs=300, resume=True): - # Resume training from a partially trained checkpoint + """Resumes training from a checkpoint, updating optimizer, ema, and epochs, with optional resume verification.""" best_fitness = 0.0 start_epoch = ckpt["epoch"] + 1 if ckpt["optimizer"] is not None: @@ -397,12 +419,14 @@ def smart_resume(ckpt, optimizer, ema=None, weights="yolov5s.pt", epochs=300, re class EarlyStopping: # YOLOv5 simple early stopper def __init__(self, patience=30): + """Initializes simple early stopping mechanism for YOLOv5, with adjustable patience for non-improving epochs.""" self.best_fitness = 0.0 # i.e. mAP self.best_epoch = 0 self.patience = patience or float("inf") # epochs to wait after fitness stops improving to stop self.possible_stop = False # possible stop may occur next epoch def __call__(self, epoch, fitness): + """Evaluates if training should stop based on fitness improvement and patience, returning a boolean.""" if fitness >= self.best_fitness: # >= 0 to allow for early zero-fitness stage of training self.best_epoch = epoch self.best_fitness = fitness @@ -426,7 +450,9 @@ class ModelEMA: """ def __init__(self, model, decay=0.9999, tau=2000, updates=0): - # Create EMA + """Initializes EMA with model parameters, decay rate, tau for decay adjustment, and update count; sets model to + evaluation mode. + """ self.ema = deepcopy(de_parallel(model)).eval() # FP32 EMA self.updates = updates # number of EMA updates self.decay = lambda x: decay * (1 - math.exp(-x / tau)) # decay exponential ramp (to help early epochs) @@ -434,7 +460,7 @@ def __init__(self, model, decay=0.9999, tau=2000, updates=0): p.requires_grad_(False) def update(self, model): - # Update EMA parameters + """Updates the Exponential Moving Average (EMA) parameters based on the current model's parameters.""" self.updates += 1 d = self.decay(self.updates) @@ -446,5 +472,7 @@ def update(self, model): # assert v.dtype == msd[k].dtype == torch.float32, f'{k}: EMA {v.dtype} and model {msd[k].dtype} must be FP32' def update_attr(self, model, include=(), exclude=("process_group", "reducer")): - # Update EMA attributes + """Updates EMA attributes by copying specified attributes from model to EMA, excluding certain attributes by + default. 
+ """ copy_attr(self.ema, model, include, exclude) diff --git a/utils/triton.py b/utils/triton.py index 9584d07fbcf0..87524c9c7801 100644 --- a/utils/triton.py +++ b/utils/triton.py @@ -71,6 +71,7 @@ def __call__(self, *args, **kwargs) -> typing.Union[torch.Tensor, typing.Tuple[t return result[0] if len(result) == 1 else result def _create_inputs(self, *args, **kwargs): + """Creates input tensors from args or kwargs, not both; raises error if none or both are provided.""" args_len, kwargs_len = len(args), len(kwargs) if not args_len and not kwargs_len: raise RuntimeError("No inputs provided.") diff --git a/val.py b/val.py index 6cc1d37a0a26..1c8c65ba89aa 100644 --- a/val.py +++ b/val.py @@ -62,7 +62,7 @@ def save_one_txt(predn, save_conf, shape, file): - # Save one txt result + """Saves one detection result to a txt file in normalized xywh format, optionally including confidence.""" gn = torch.tensor(shape)[[1, 0, 1, 0]] # normalization gain whwh for *xyxy, conf, cls in predn.tolist(): xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh @@ -72,7 +72,11 @@ def save_one_txt(predn, save_conf, shape, file): def save_one_json(predn, jdict, path, class_map): - # Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236} + """ + Saves one JSON detection result with image ID, category ID, bounding box, and score. + + Example: {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236} + """ image_id = int(path.stem) if path.stem.isnumeric() else path.stem box = xyxy2xywh(predn[:, :4]) # xywh box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner @@ -359,6 +363,7 @@ def run( def parse_opt(): + """Parses command-line options for YOLOv5 model inference configuration.""" parser = argparse.ArgumentParser() parser.add_argument("--data", type=str, default=ROOT / "data/coco128.yaml", help="dataset.yaml path") parser.add_argument("--weights", nargs="+", type=str, default=ROOT / "yolov5s.pt", help="model path(s)") @@ -391,6 +396,9 @@ def parse_opt(): def main(opt): + """Executes YOLOv5 tasks like training, validation, testing, speed, and study benchmarks based on provided + options. 
+ """ check_requirements(ROOT / "requirements.txt", exclude=("tensorboard", "thop")) if opt.task in ("train", "val", "test"): # run normally From 574331f984c0aa9c26c4ea78dac90133cfe6b2d0 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 26 Feb 2024 23:16:32 +0100 Subject: [PATCH 1865/1976] Replace inline comments with docstrings (#12764) * Add docstrings * Add docstrings * Add docstrings * Add docstrings * Add docstrings * Add docstrings * Add docstrings * Add docstrings * Add docstrings * Add docstrings * Auto-format by https://ultralytics.com/actions * Add docstrings * Add docstrings * Add docstrings * Add docstrings * Auto-format by https://ultralytics.com/actions * Update plots.py Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher Co-authored-by: UltralyticsAssistant --- models/common.py | 68 ++++++++++++++++++++++++++++++++---------- models/experimental.py | 16 +++++++--- models/tf.py | 42 +++++++++++++++++++++----- models/yolo.py | 29 ++++++++++++------ segment/train.py | 7 ++++- train.py | 8 ++++- utils/activations.py | 7 +++-- utils/augmentations.py | 31 +++++++++++++++---- utils/dataloaders.py | 11 +++++-- utils/general.py | 25 ++++++++++++---- utils/loss.py | 5 ++-- utils/plots.py | 37 +++++++++++++++++------ utils/segment/loss.py | 1 + utils/torch_utils.py | 4 ++- 14 files changed, 225 insertions(+), 66 deletions(-) diff --git a/models/common.py b/models/common.py index fd8c998149f5..8925897099c1 100644 --- a/models/common.py +++ b/models/common.py @@ -57,8 +57,12 @@ from utils.torch_utils import copy_attr, smart_inference_mode -def autopad(k, p=None, d=1): # kernel, padding, dilation - # Pad to 'same' shape outputs +def autopad(k, p=None, d=1): + """ + Pads kernel to 'same' output shape, adjusting for optional dilation; returns padding size. + + `k`: kernel, `p`: padding, `d`: dilation. + """ if d > 1: k = d * (k - 1) + 1 if isinstance(k, int) else [d * (x - 1) + 1 for x in k] # actual kernel-size if p is None: @@ -88,13 +92,19 @@ def forward_fuse(self, x): class DWConv(Conv): # Depth-wise convolution - def __init__(self, c1, c2, k=1, s=1, d=1, act=True): # ch_in, ch_out, kernel, stride, dilation, activation + def __init__(self, c1, c2, k=1, s=1, d=1, act=True): + """Initializes a depth-wise convolution layer with optional activation; args: input channels (c1), output + channels (c2), kernel size (k), stride (s), dilation (d), and activation flag (act). + """ super().__init__(c1, c2, k, s, g=math.gcd(c1, c2), d=d, act=act) class DWConvTranspose2d(nn.ConvTranspose2d): # Depth-wise transpose convolution - def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0): # ch_in, ch_out, kernel, stride, padding, padding_out + def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0): + """Initializes a depth-wise transpose convolutional layer for YOLOv5; args: input channels (c1), output channels + (c2), kernel size (k), stride (s), input padding (p1), output padding (p2). + """ super().__init__(c1, c2, k, s, p1, p2, groups=math.gcd(c1, c2)) @@ -148,7 +158,10 @@ def forward(self, x): class Bottleneck(nn.Module): # Standard bottleneck - def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion + def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): + """Initializes a standard bottleneck layer with optional shortcut and group convolution, supporting channel + expansion. 
+ """ super().__init__() c_ = int(c2 * e) # hidden channels self.cv1 = Conv(c1, c_, 1, 1) @@ -164,7 +177,10 @@ def forward(self, x): class BottleneckCSP(nn.Module): # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): + """Initializes CSP bottleneck with optional shortcuts; args: ch_in, ch_out, number of repeats, shortcut bool, + groups, expansion. + """ super().__init__() c_ = int(c2 * e) # hidden channels self.cv1 = Conv(c1, c_, 1, 1) @@ -206,7 +222,10 @@ def forward(self, x): class C3(nn.Module): # CSP Bottleneck with 3 convolutions - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): + """Initializes C3 module with options for channel count, bottleneck repetition, shortcut usage, group + convolutions, and expansion. + """ super().__init__() c_ = int(c2 * e) # hidden channels self.cv1 = Conv(c1, c_, 1, 1) @@ -283,7 +302,13 @@ def forward(self, x): class SPPF(nn.Module): # Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher - def __init__(self, c1, c2, k=5): # equivalent to SPP(k=(5, 9, 13)) + def __init__(self, c1, c2, k=5): + """ + Initializes YOLOv5 SPPF layer with given channels and kernel size for YOLOv5 model, combining convolution and + max pooling. + + Equivalent to SPP(k=(5, 9, 13)). + """ super().__init__() c_ = c1 // 2 # hidden channels self.cv1 = Conv(c1, c_, 1, 1) @@ -302,19 +327,26 @@ def forward(self, x): class Focus(nn.Module): # Focus wh information into c-space - def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups + def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): + """Initializes Focus module to concentrate width-height info into channel space with configurable convolution + parameters. + """ super().__init__() self.conv = Conv(c1 * 4, c2, k, s, p, g, act=act) # self.contract = Contract(gain=2) - def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2) + def forward(self, x): + """Processes input through Focus mechanism, reshaping (b,c,w,h) to (b,4c,w/2,h/2) then applies convolution.""" return self.conv(torch.cat((x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]), 1)) # return self.conv(self.contract(x)) class GhostConv(nn.Module): # Ghost Convolution https://github.com/huawei-noah/ghostnet - def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups + def __init__(self, c1, c2, k=1, s=1, g=1, act=True): + """Initializes GhostConv with in/out channels, kernel size, stride, groups, and activation; halves out channels + for efficiency. 
+ """ super().__init__() c_ = c2 // 2 # hidden channels self.cv1 = Conv(c1, c_, k, s, None, g, act=act) @@ -328,7 +360,8 @@ def forward(self, x): class GhostBottleneck(nn.Module): # Ghost Bottleneck https://github.com/huawei-noah/ghostnet - def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride + def __init__(self, c1, c2, k=3, s=1): + """Initializes GhostBottleneck with ch_in `c1`, ch_out `c2`, kernel size `k`, stride `s`; see https://github.com/huawei-noah/ghostnet.""" super().__init__() c_ = c2 // 2 self.conv = nn.Sequential( @@ -982,10 +1015,14 @@ def print(self): """Logs the string representation of the current object's state via the LOGGER.""" LOGGER.info(self.__str__()) - def __len__(self): # override len(results) + def __len__(self): + """Returns the number of results stored, overrides the default len(results).""" return self.n - def __str__(self): # override print(results) + def __str__(self): + """Returns a string representation of the model's results, suitable for printing, overrides default + print(results). + """ return self._run(pprint=True) # print results def __repr__(self): @@ -995,7 +1032,8 @@ def __repr__(self): class Proto(nn.Module): # YOLOv5 mask Proto module for segmentation models - def __init__(self, c1, c_=256, c2=32): # ch_in, number of protos, number of masks + def __init__(self, c1, c_=256, c2=32): + """Initializes YOLOv5 Proto module for segmentation with input, proto, and mask channels configuration.""" super().__init__() self.cv1 = Conv(c1, c_, k=3) self.upsample = nn.Upsample(scale_factor=2, mode="nearest") diff --git a/models/experimental.py b/models/experimental.py index ab229d50e30f..834c7201fbd4 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -10,8 +10,12 @@ class Sum(nn.Module): - # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070 - def __init__(self, n, weight=False): # n: number of inputs + """Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070.""" + + def __init__(self, n, weight=False): + """Initializes a module to sum outputs of layers with number of inputs `n` and optional weighting, supporting 2+ + inputs. + """ super().__init__() self.weight = weight # apply weights boolean self.iter = range(n - 1) # iter object @@ -32,8 +36,12 @@ def forward(self, x): class MixConv2d(nn.Module): - # Mixed Depth-wise Conv https://arxiv.org/abs/1907.09595 - def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True): # ch_in, ch_out, kernel, stride, ch_strategy + """Mixed Depth-wise Conv https://arxiv.org/abs/1907.09595.""" + + def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True): + """Initializes MixConv2d with mixed depth-wise convolutional layers, taking input and output channels (c1, c2), + kernel sizes (k), stride (s), and channel distribution strategy (equal_ch). + """ super().__init__() n = len(k) # number of convolutions if equal_ch: # equal c_ per group diff --git a/models/tf.py b/models/tf.py index 006a66d2b0f6..2a5cd566c406 100644 --- a/models/tf.py +++ b/models/tf.py @@ -190,15 +190,25 @@ def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None): super().__init__() self.conv = TFConv(c1 * 4, c2, k, s, p, g, act, w.conv) - def call(self, inputs): # x(b,w,h,c) -> y(b,w/2,h/2,4c) - # inputs = inputs / 255 # normalize 0-255 to 0-1 + def call(self, inputs): + """ + Performs pixel shuffling and convolution on input tensor, downsampling by 2 and expanding channels by 4. + + Example x(b,w,h,c) -> y(b,w/2,h/2,4c). 
+ """ inputs = [inputs[:, ::2, ::2, :], inputs[:, 1::2, ::2, :], inputs[:, ::2, 1::2, :], inputs[:, 1::2, 1::2, :]] return self.conv(tf.concat(inputs, 3)) class TFBottleneck(keras.layers.Layer): # Standard bottleneck - def __init__(self, c1, c2, shortcut=True, g=1, e=0.5, w=None): # ch_in, ch_out, shortcut, groups, expansion + def __init__(self, c1, c2, shortcut=True, g=1, e=0.5, w=None): + """ + Initializes a standard bottleneck layer for TensorFlow models, expanding and contracting channels with optional + shortcut. + + Arguments are ch_in, ch_out, shortcut, groups, expansion. + """ super().__init__() c_ = int(c2 * e) # hidden channels self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) @@ -364,7 +374,10 @@ def call(self, inputs): class TFDetect(keras.layers.Layer): # TF YOLOv5 Detect layer - def __init__(self, nc=80, anchors=(), ch=(), imgsz=(640, 640), w=None): # detection layer + def __init__(self, nc=80, anchors=(), ch=(), imgsz=(640, 640), w=None): + """Initializes YOLOv5 detection layer for TensorFlow with configurable classes, anchors, channels, and image + size. + """ super().__init__() self.stride = tf.convert_to_tensor(w.stride.numpy(), dtype=tf.float32) self.nc = nc # number of classes @@ -454,7 +467,13 @@ def call(self, inputs): class TFUpsample(keras.layers.Layer): # TF version of torch.nn.Upsample() - def __init__(self, size, scale_factor, mode, w=None): # warning: all arguments needed including 'w' + def __init__(self, size, scale_factor, mode, w=None): + """ + Initializes a TensorFlow upsampling layer with specified size, scale_factor, and mode, ensuring scale_factor is + even. + + Warning: all arguments needed including 'w' + """ super().__init__() assert scale_factor % 2 == 0, "scale_factor must be multiple of 2" self.upsample = lambda x: tf.image.resize(x, (x.shape[1] * scale_factor, x.shape[2] * scale_factor), mode) @@ -481,7 +500,8 @@ def call(self, inputs): return tf.concat(inputs, self.d) -def parse_model(d, ch, model, imgsz): # model_dict, input_channels(3) +def parse_model(d, ch, model, imgsz): + """Parses a model definition dict `d` to create YOLOv5 model layers, including dynamic channel adjustments.""" LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}") anchors, nc, gd, gw, ch_mul = ( d["anchors"], @@ -562,7 +582,10 @@ def parse_model(d, ch, model, imgsz): # model_dict, input_channels(3) class TFModel: # TF YOLOv5 model - def __init__(self, cfg="yolov5s.yaml", ch=3, nc=None, model=None, imgsz=(640, 640)): # model, channels, classes + def __init__(self, cfg="yolov5s.yaml", ch=3, nc=None, model=None, imgsz=(640, 640)): + """Initializes TF YOLOv5 model with specified configuration, channels, classes, model instance, and input + size. + """ super().__init__() if isinstance(cfg, dict): self.yaml = cfg # model dict @@ -640,7 +663,10 @@ def call(self, input, topk_all, iou_thres, conf_thres): ) @staticmethod - def _nms(x, topk_all=100, iou_thres=0.45, conf_thres=0.25): # agnostic NMS + def _nms(x, topk_all=100, iou_thres=0.45, conf_thres=0.25): + """Performs agnostic non-maximum suppression (NMS) on detected objects, filtering based on IoU and confidence + thresholds. 
+ """ boxes, classes, scores = x class_inds = tf.cast(tf.argmax(classes, axis=-1), tf.float32) scores_inp = tf.reduce_max(scores, -1) diff --git a/models/yolo.py b/models/yolo.py index ef6c1015f41e..ca62f934fc3f 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -75,7 +75,8 @@ class Detect(nn.Module): dynamic = False # force grid reconstruction export = False # export mode - def __init__(self, nc=80, anchors=(), ch=(), inplace=True): # detection layer + def __init__(self, nc=80, anchors=(), ch=(), inplace=True): + """Initializes YOLOv5 detection layer with specified classes, anchors, channels, and inplace operations.""" super().__init__() self.nc = nc # number of classes self.no = nc + 5 # number of outputs per anchor @@ -183,7 +184,8 @@ def _profile_one_layer(self, m, x, dt): if c: LOGGER.info(f"{sum(dt):10.2f} {'-':>10s} {'-':>10s} Total") - def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers + def fuse(self): + """Fuses Conv2d() and BatchNorm2d() layers in the model to improve inference speed.""" LOGGER.info("Fusing layers... ") for m in self.model.modules(): if isinstance(m, (Conv, DWConv)) and hasattr(m, "bn"): @@ -193,7 +195,8 @@ def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers self.info() return self - def info(self, verbose=False, img_size=640): # print model information + def info(self, verbose=False, img_size=640): + """Prints model information given verbosity and image size, e.g., `info(verbose=True, img_size=640)`.""" model_info(self, verbose, img_size) def _apply(self, fn): @@ -212,7 +215,8 @@ def _apply(self, fn): class DetectionModel(BaseModel): # YOLOv5 detection model - def __init__(self, cfg="yolov5s.yaml", ch=3, nc=None, anchors=None): # model, input channels, number of classes + def __init__(self, cfg="yolov5s.yaml", ch=3, nc=None, anchors=None): + """Initializes YOLOv5 model with configuration file, input channels, number of classes, and custom anchors.""" super().__init__() if isinstance(cfg, dict): self.yaml = cfg # model dict @@ -303,8 +307,12 @@ def _clip_augmented(self, y): y[-1] = y[-1][:, i:] # small return y - def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency - # https://arxiv.org/abs/1708.02002 section 3.3 + def _initialize_biases(self, cf=None): + """ + Initializes biases for YOLOv5's Detect() module, optionally using class frequencies (cf). + + For details see https://arxiv.org/abs/1708.02002 section 3.3. + """ # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1. m = self.model[-1] # Detect() module for mi, s in zip(m.m, m.stride): # from @@ -328,7 +336,10 @@ def __init__(self, cfg="yolov5s-seg.yaml", ch=3, nc=None, anchors=None): class ClassificationModel(BaseModel): # YOLOv5 classification model - def __init__(self, cfg=None, model=None, nc=1000, cutoff=10): # yaml, model, number of classes, cutoff index + def __init__(self, cfg=None, model=None, nc=1000, cutoff=10): + """Initializes YOLOv5 model with config file `cfg`, input channels `ch`, number of classes `nc`, and `cutoff` index
+ """ super().__init__() self._from_detection_model(model, nc, cutoff) if model is not None else self._from_yaml(cfg) @@ -354,8 +365,8 @@ def _from_yaml(self, cfg): self.model = None -def parse_model(d, ch): # model_dict, input_channels(3) - # Parse a YOLOv5 model.yaml dictionary +def parse_model(d, ch): + """Parses a YOLOv5 model from a dict `d`, configuring layers based on input channels `ch` and model architecture.""" LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}") anchors, nc, gd, gw, act, ch_mul = ( d["anchors"], diff --git a/segment/train.py b/segment/train.py index ce59df9c635b..5a6e9afb8ec0 100644 --- a/segment/train.py +++ b/segment/train.py @@ -95,7 +95,12 @@ GIT_INFO = check_git_info() -def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictionary +def train(hyp, opt, device, callbacks): + """ + Trains the YOLOv5 model on a dataset, managing hyperparameters, model optimization, logging, and validation. + + `hyp` is path/to/hyp.yaml or hyp dictionary. + """ ( save_dir, epochs, diff --git a/train.py b/train.py index 3f2f64385c90..df0972a67c70 100644 --- a/train.py +++ b/train.py @@ -100,7 +100,13 @@ GIT_INFO = check_git_info() -def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictionary +def train(hyp, opt, device, callbacks): + """ + Trains YOLOv5 model with given hyperparameters, options, and device, managing datasets, model architecture, loss + computation, and optimizer steps. + + `hyp` argument is path/to/hyp.yaml or hyp dictionary. + """ save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze = ( Path(opt.save_dir), opt.epochs, diff --git a/utils/activations.py b/utils/activations.py index 6218eb58440a..928ae55a0b60 100644 --- a/utils/activations.py +++ b/utils/activations.py @@ -59,8 +59,10 @@ def forward(self, x): class FReLU(nn.Module): - # FReLU activation https://arxiv.org/abs/2007.11824 + """FReLU activation https://arxiv.org/abs/2007.11824.""" + def __init__(self, c1, k=3): # ch_in, kernel + """Initializes FReLU activation with channel `c1` and kernel size `k`.""" super().__init__() self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1, bias=False) self.bn = nn.BatchNorm2d(c1) @@ -103,7 +105,8 @@ class MetaAconC(nn.Module): See "Activate or Not: Learning Customized Activation" https://arxiv.org/pdf/2009.04759.pdf. """ - def __init__(self, c1, k=1, s=1, r=16): # ch_in, kernel, stride, r + def __init__(self, c1, k=1, s=1, r=16): + """Initializes MetaAconC with params: channel_in (c1), kernel size (k=1), stride (s=1), reduction (r=16).""" super().__init__() c2 = max(r, c1 // r) self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1)) diff --git a/utils/augmentations.py b/utils/augmentations.py index 500e13248a06..3025ebdb7092 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -310,8 +310,13 @@ def mixup(im, labels, im2, labels2): return im, labels -def box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n) - # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio +def box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16): + """ + Filters bounding box candidates by minimum width-height threshold `wh_thr` (pixels), aspect ratio threshold + `ar_thr`, and area ratio threshold `area_thr`. + + box1(4,n) is before augmentation, box2(4,n) is after augmentation. 
+ """ w1, h1 = box1[2] - box1[0], box1[3] - box1[1] w2, h2 = box2[2] - box2[0], box2[3] - box2[1] ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio @@ -380,7 +385,12 @@ def __init__(self, size=(640, 640), auto=False, stride=32): self.auto = auto # pass max size integer, automatically solve for short side using stride self.stride = stride # used with auto - def __call__(self, im): # im = np.array HWC + def __call__(self, im): + """ + Resizes and pads input image `im` (HWC format) to specified dimensions, maintaining aspect ratio. + + im = np.array HWC + """ imh, imw = im.shape[:2] r = min(self.h / imh, self.w / imw) # ratio of new/old h, w = round(imh * r), round(imw * r) # resized image @@ -398,7 +408,12 @@ def __init__(self, size=640): super().__init__() self.h, self.w = (size, size) if isinstance(size, int) else size - def __call__(self, im): # im = np.array HWC + def __call__(self, im): + """ + Applies center crop to the input image and resizes it to a specified size, maintaining aspect ratio. + + im = np.array HWC + """ imh, imw = im.shape[:2] m = min(imh, imw) # min dimension top, left = (imh - m) // 2, (imw - m) // 2 @@ -412,7 +427,13 @@ def __init__(self, half=False): super().__init__() self.half = half - def __call__(self, im): # im = np.array HWC in BGR order + def __call__(self, im): + """ + Converts BGR np.array image from HWC to RGB CHW format, and normalizes to [0, 1], with support for FP16 if + `half=True`. + + im = np.array HWC in BGR order + """ im = np.ascontiguousarray(im.transpose((2, 0, 1))[::-1]) # HWC to CHW -> BGR to RGB -> contiguous im = torch.from_numpy(im) # to torch im = im.half() if self.half else im.float() # uint8 to fp16/32 diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 3e636717fb84..55f8a0ce3513 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -1060,8 +1060,13 @@ def flatten_recursive(path=DATASETS_DIR / "coco128"): shutil.copyfile(file, new_path / Path(file).name) -def extract_boxes(path=DATASETS_DIR / "coco128"): # from utils.dataloaders import *; extract_boxes() - # Convert detection dataset into classification dataset, with one directory per class +def extract_boxes(path=DATASETS_DIR / "coco128"): + """ + Converts a detection dataset to a classification dataset, creating a directory for each class and extracting + bounding boxes. + + Example: from utils.dataloaders import *; extract_boxes() + """ path = Path(path) # images dir shutil.rmtree(path / "classification") if (path / "classification").is_dir() else None # remove existing files = list(path.rglob("*.*")) @@ -1253,7 +1258,7 @@ def get_json(self, save=False, verbose=False): """Generates dataset JSON for Ultralytics HUB, optionally saves or prints it; save=bool, verbose=bool.""" def _round(labels): - # Update labels to integer class and 6 decimal place floats + """Rounds class labels to integers and coordinates to 4 decimal places for improved label accuracy.""" return [[int(c), *(round(x, 4) for x in points)] for c, *points in labels] for split in "train", "val", "test": diff --git a/utils/general.py b/utils/general.py index 661475354adc..5a9325eec757 100644 --- a/utils/general.py +++ b/utils/general.py @@ -351,8 +351,12 @@ def run_once(): return run_once() or run_once() # check twice to increase robustness to intermittent connectivity issues -def git_describe(path=ROOT): # path must be a directory - # Return human-readable git description, i.e. 
v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe +def git_describe(path=ROOT): + """ + Returns a human-readable git description of the repository at `path`, or an empty string on failure. + + Example output is 'v5.0-5-g3e25f1e'. See https://git-scm.com/docs/git-describe. + """ try: assert (Path(path) / ".git").is_dir() return check_output(f"git -C {path} describe --tags --long --always", shell=True).decode()[:-1] @@ -767,8 +771,12 @@ def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)): return (class_weights.reshape(1, nc) * class_counts).sum(1) -def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper) - # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/ +def coco80_to_coco91_class(): + """ + Converts COCO 80-class index to COCO 91-class index used in the paper. + + Reference: https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/ + """ # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n') # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n') # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco @@ -1108,8 +1116,13 @@ def non_max_suppression( return output -def strip_optimizer(f="best.pt", s=""): # from utils.general import *; strip_optimizer() - # Strip optimizer from 'f' to finalize training, optionally save as 's' +def strip_optimizer(f="best.pt", s=""): + """ + Strips optimizer and optionally saves checkpoint to finalize training; arguments are file path 'f' and save path + 's'. + + Example: from utils.general import *; strip_optimizer() + """ x = torch.load(f, map_location=torch.device("cpu")) if x.get("ema"): x["model"] = x["ema"] # replace model with ema diff --git a/utils/loss.py b/utils/loss.py index 8a910e12ad6f..e8563e186f40 100644 --- a/utils/loss.py +++ b/utils/loss.py @@ -8,8 +8,8 @@ from utils.torch_utils import de_parallel -def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441 - # return positive, negative label smoothing BCE targets +def smooth_BCE(eps=0.1): + """Returns label smoothing BCE targets for reducing overfitting; pos: `1.0 - 0.5*eps`, neg: `0.5*eps`. For details see ttps://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441""" return 1.0 - 0.5 * eps, 0.5 * eps @@ -132,6 +132,7 @@ def __init__(self, model, autobalance=False): self.device = device def __call__(self, p, targets): # predictions, targets + """Performs forward pass, calculating class, box, and object loss for given predictions and targets.""" lcls = torch.zeros(1, device=self.device) # class loss lbox = torch.zeros(1, device=self.device) # box loss lobj = torch.zeros(1, device=self.device) # object loss diff --git a/utils/plots.py b/utils/plots.py index e1b073dfb1ad..cb5edabc6c41 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -67,7 +67,8 @@ def __call__(self, i, bgr=False): return (c[2], c[1], c[0]) if bgr else c @staticmethod - def hex2rgb(h): # rgb order (PIL) + def hex2rgb(h): + """Converts hexadecimal color `h` to an RGB tuple (PIL-compatible) with order (R, G, B).""" return tuple(int(h[1 + i : 1 + i + 2], 16) for i in (0, 2, 4)) @@ -225,8 +226,13 @@ def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=""): plt.close() -def plot_val_txt(): # from utils.plots import *; plot_val() - # Plot val.txt histograms +def plot_val_txt(): + """ + Plots 2D and 1D histograms of bounding box centers from 'val.txt' using matplotlib, saving as 'hist2d.png' and + 'hist1d.png'.
+ + Example: from utils.plots import *; plot_val() + """ x = np.loadtxt("val.txt", dtype=np.float32) box = xyxy2xywh(x[:, :4]) cx, cy = box[:, 0], box[:, 1] @@ -242,8 +248,12 @@ def plot_val_txt(): # from utils.plots import *; plot_val() plt.savefig("hist1d.png", dpi=200) -def plot_targets_txt(): # from utils.plots import *; plot_targets_txt() - # Plot targets.txt histograms +def plot_targets_txt(): + """ + Plots histograms of object detection targets from 'targets.txt', saving the figure as 'targets.jpg'. + + Example: from utils.plots import *; plot_targets_txt() + """ x = np.loadtxt("targets.txt", dtype=np.float32).T s = ["x targets", "y targets", "width targets", "height targets"] fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True) @@ -255,8 +265,13 @@ def plot_targets_txt(): # from utils.plots import *; plot_targets_txt() plt.savefig("targets.jpg", dpi=200) -def plot_val_study(file="", dir="", x=None): # from utils.plots import *; plot_val_study() - # Plot file=study.txt generated by val.py (or plot all study*.txt in dir) +def plot_val_study(file="", dir="", x=None): + """ + Plots validation study results from 'study*.txt' files in a directory or a specific file, comparing model + performance and speed. + + Example: from utils.plots import *; plot_val_study() + """ save_dir = Path(file).parent if file else Path(dir) plot2 = False # plot additional results if plot2: @@ -381,8 +396,12 @@ def imshow_cls(im, labels=None, pred=None, names=None, nmax=25, verbose=False, f return f -def plot_evolve(evolve_csv="path/to/evolve.csv"): # from utils.plots import *; plot_evolve() - # Plot evolve.csv hyp evolution results +def plot_evolve(evolve_csv="path/to/evolve.csv"): + """ + Plots hyperparameter evolution results from a given CSV, saving the plot and displaying best results. + + Example: from utils.plots import *; plot_evolve() + """ evolve_csv = Path(evolve_csv) data = pd.read_csv(evolve_csv) keys = [x.strip() for x in data.columns] diff --git a/utils/segment/loss.py b/utils/segment/loss.py index 29f1bcbb7e77..fa0c10939b70 100644 --- a/utils/segment/loss.py +++ b/utils/segment/loss.py @@ -44,6 +44,7 @@ def __init__(self, model, autobalance=False, overlap=False): self.device = device def __call__(self, preds, targets, masks): # predictions, targets, model + """Evaluates YOLOv5 model's loss for given predictions, targets, and masks; returns total loss components.""" p, proto = preds bs, nm, mask_h, mask_w = proto.shape # batch size, number of masks, mask height, mask width lcls = torch.zeros(1, device=self.device) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index c2c760efa404..4929d21cdf83 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -325,7 +325,9 @@ def model_info(model, verbose=False, imgsz=640): def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416) - # Scales img(bs,3,y,x) by ratio constrained to gs-multiple + """Scales an image tensor `img` of shape (bs,3,y,x) by `ratio`, optionally maintaining the original shape, padded to + multiples of `gs`. 
+ """ if ratio == 1.0: return img h, w = img.shape[2:] From b939236170e48c9ea11c01d0b0c587fe1867d701 Mon Sep 17 00:00:00 2001 From: Hongbo <12580159+ya0guang@users.noreply.github.com> Date: Fri, 1 Mar 2024 06:32:29 -0500 Subject: [PATCH 1866/1976] Correct the output dir in dataloaders.py (#12771) Correct the output dir in dataloaders.py Signed-off-by: Hongbo <12580159+ya0guang@users.noreply.github.com> --- utils/dataloaders.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 55f8a0ce3513..04420c77a673 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -1085,7 +1085,7 @@ def extract_boxes(path=DATASETS_DIR / "coco128"): for j, x in enumerate(lb): c = int(x[0]) # class - f = (path / "classifier") / f"{c}" / f"{path.stem}_{im_file.stem}_{j}.jpg" # new filename + f = (path / "classification") / f"{c}" / f"{path.stem}_{im_file.stem}_{j}.jpg" # new filename if not f.parent.is_dir(): f.parent.mkdir(parents=True) From b24177bf99e6094003d309fcaae4478ab962bc5a Mon Sep 17 00:00:00 2001 From: Kumar Selvakumaran <62794224+kumar-selvakumaran@users.noreply.github.com> Date: Mon, 4 Mar 2024 15:11:17 -0500 Subject: [PATCH 1867/1976] A minor correction in a comment (#12782) * A minor correction in a comment I added the 'h' in 'https' in the link to the label smoothing issue. Signed-off-by: Kumar Selvakumaran <62794224+kumar-selvakumaran@users.noreply.github.com> * Auto-format by https://ultralytics.com/actions --------- Signed-off-by: Kumar Selvakumaran <62794224+kumar-selvakumaran@users.noreply.github.com> Co-authored-by: UltralyticsAssistant --- export.py | 6 +++--- models/experimental.py | 1 + utils/loggers/__init__.py | 1 + utils/loggers/clearml/clearml_utils.py | 1 + utils/loss.py | 2 +- 5 files changed, 7 insertions(+), 4 deletions(-) diff --git a/export.py b/export.py index 9ea2b936d740..8fe7ce8fdce1 100644 --- a/export.py +++ b/export.py @@ -742,9 +742,9 @@ def pipeline_coreml(model, im, file, names, y, prefix=colorstr("CoreML Pipeline: model = ct.models.MLModel(pipeline.spec) model.input_description["image"] = "Input image" model.input_description["iouThreshold"] = f"(optional) IOU Threshold override (default: {nms.iouThreshold})" - model.input_description[ - "confidenceThreshold" - ] = f"(optional) Confidence Threshold override (default: {nms.confidenceThreshold})" + model.input_description["confidenceThreshold"] = ( + f"(optional) Confidence Threshold override (default: {nms.confidenceThreshold})" + ) model.output_description["confidence"] = 'Boxes × Class confidence (see user-defined metadata "classes")' model.output_description["coordinates"] = "Boxes × [x, y, width, height] (relative to image size)" model.save(f) # pipelined diff --git a/models/experimental.py b/models/experimental.py index 834c7201fbd4..6152cef1b389 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -1,5 +1,6 @@ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """Experimental modules.""" + import math import numpy as np diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index c3fbded50a3c..2a2c5d734c2e 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -1,5 +1,6 @@ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """Logging utils.""" + import json import os import warnings diff --git a/utils/loggers/clearml/clearml_utils.py b/utils/loggers/clearml/clearml_utils.py index 1bbea61effc2..6a6ed7636c88 100644 --- a/utils/loggers/clearml/clearml_utils.py +++ b/utils/loggers/clearml/clearml_utils.py @@ -1,4 
+1,5 @@ """Main Logger class for ClearML experiment tracking.""" + import glob import re from pathlib import Path diff --git a/utils/loss.py b/utils/loss.py index e8563e186f40..9d09f9df0261 100644 --- a/utils/loss.py +++ b/utils/loss.py @@ -9,7 +9,7 @@ def smooth_BCE(eps=0.1): - """Returns label smoothing BCE targets for reducing overfitting; pos: `1.0 - 0.5*eps`, neg: `0.5*eps`. For details see ttps://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441""" + """Returns label smoothing BCE targets for reducing overfitting; pos: `1.0 - 0.5*eps`, neg: `0.5*eps`. For details see https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441""" return 1.0 - 0.5 * eps, 0.5 * eps From b2ffe05569161b7af4e1e3bae617ae25f59d588f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 4 Mar 2024 21:12:53 +0100 Subject: [PATCH 1868/1976] Add Discord badge (#12783) * Add Discord badge Signed-off-by: Glenn Jocher * Auto-format by https://ultralytics.com/actions --------- Signed-off-by: Glenn Jocher Co-authored-by: UltralyticsAssistant --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 496a232a4d17..3a4dbb9d67e8 100644 --- a/README.md +++ b/README.md @@ -14,6 +14,7 @@ YOLOv5 CI YOLOv5 Citation Docker Pulls + Discord
Run on Gradient Open In Colab From a083914a19d1d65749d2e3940b48c48ee2ad2310 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 10 Mar 2024 13:17:51 +0100 Subject: [PATCH 1869/1976] Limit TF export to `tensorflow<2.15.1` (#12800) * Update ci-testing.yml Signed-off-by: Glenn Jocher * bump tests to 3.12 Signed-off-by: Glenn Jocher * Update ci-testing.yml Signed-off-by: Glenn Jocher * Update ci-testing.yml Signed-off-by: Glenn Jocher * Update ci-testing.yml Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher --- .github/workflows/ci-testing.yml | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 1e2dd3bad914..4a300314f65c 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -18,7 +18,7 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest] - python-version: ["3.11"] # requires python<=3.10 + python-version: ["3.11"] # requires python<=3.11 model: [yolov5n] steps: - uses: actions/checkout@v4 @@ -29,7 +29,7 @@ jobs: - name: Install requirements run: | python -m pip install --upgrade pip wheel - pip install -r requirements.txt coremltools openvino-dev tensorflow-cpu --extra-index-url https://download.pytorch.org/whl/cpu + pip install -r requirements.txt coremltools openvino-dev "tensorflow-cpu<2.15.1" --extra-index-url https://download.pytorch.org/whl/cpu yolo checks pip list - name: Benchmark DetectionModel @@ -51,16 +51,10 @@ jobs: strategy: fail-fast: false matrix: - os: [ubuntu-latest, windows-latest] # macos-latest bug https://github.com/ultralytics/yolov5/pull/9049 + os: [ubuntu-latest, windows-latest, macos-14] # macos-latest bug https://github.com/ultralytics/yolov5/pull/9049 python-version: ["3.11"] model: [yolov5n] include: - - os: ubuntu-latest - python-version: "3.8" # '3.6.8' min - model: yolov5n - - os: ubuntu-latest - python-version: "3.9" - model: yolov5n - os: ubuntu-latest python-version: "3.8" # torch 1.8.0 requires python >=3.6, <=3.8 model: yolov5n From d6900cde52fbcd418c893760985ee1b7d0ff6f9d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 10 Mar 2024 13:27:53 +0100 Subject: [PATCH 1870/1976] [Snyk] Security upgrade wheel from 0.32.2 to 0.38.0 (#12791) fix: requirements.txt to reduce vulnerabilities The following vulnerabilities are fixed by pinning transitive dependencies: - https://snyk.io/vuln/SNYK-PYTHON-WHEEL-3180413 Co-authored-by: snyk-bot --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index 545d3c1c2921..7e25a9a4914c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -47,3 +47,4 @@ setuptools>=65.5.1 # Snyk vulnerability fix # mss # screenshots # albumentations>=1.0.3 # pycocotools>=2.0.6 # COCO mAP +wheel>=0.38.0 # not directly required, pinned by Snyk to avoid a vulnerability From a428ff76892cd442fae4b66165a724058baeb352 Mon Sep 17 00:00:00 2001 From: Abhishek Sharma <140809134+AbhishekSharma-17@users.noreply.github.com> Date: Sun, 10 Mar 2024 17:58:53 +0530 Subject: [PATCH 1871/1976] Update pyproject.toml (#12740) * Update pyproject.toml sio Signed-off-by: Abhishek Sharma <140809134+AbhishekSharma-17@users.noreply.github.com> * Update pyproject.toml Signed-off-by: Glenn Jocher --------- Signed-off-by: Abhishek Sharma <140809134+AbhishekSharma-17@users.noreply.github.com> Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index 
26765dfadada..fc3bf6766c44 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -24,6 +24,7 @@ build-backend = "setuptools.build_meta" # Project settings ----------------------------------------------------------------------------------------------------- [project] +version = "7.0.0" name = "YOLOv5" description = "Ultralytics YOLOv5 for SOTA object detection, instance segmentation and image classification." readme = "README.md" From db125a20175384d75560dc9af7fb1100d67213fe Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 10 Mar 2024 13:49:37 +0100 Subject: [PATCH 1872/1976] Update export.py to `tensorflow<=2.15.1` (#12803) * Update export.py Signed-off-by: Glenn Jocher * Update export.py Signed-off-by: Glenn Jocher * Auto-format by https://ultralytics.com/actions --------- Signed-off-by: Glenn Jocher Co-authored-by: UltralyticsAssistant --- export.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/export.py b/export.py index 8fe7ce8fdce1..c660ce660aa5 100644 --- a/export.py +++ b/export.py @@ -405,7 +405,8 @@ def export_saved_model( try: import tensorflow as tf except Exception: - check_requirements(f"tensorflow{'' if torch.cuda.is_available() else '-macos' if MACOS else '-cpu'}") + check_requirements(f"tensorflow{'' if torch.cuda.is_available() else '-macos' if MACOS else '-cpu'}<=2.15.1") + import tensorflow as tf from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 From ac6c4383bc0c7a2a4f7ca18f8733821b49e916bd Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 24 Mar 2024 18:31:58 +0100 Subject: [PATCH 1873/1976] Update Discord and Contributing Guide URLs (#12847) * Update Discord and Contributing Guide URLs * Update __init__.py Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher --- utils/loggers/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 2a2c5d734c2e..011ec7c8915b 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -382,7 +382,7 @@ def __init__(self, opt, console_logger, include=("tb", "wandb", "clearml")): prefix = colorstr("ClearML: ") LOGGER.warning( f"{prefix}WARNING ⚠️ ClearML is installed but not configured, skipping ClearML logging."
- f" See https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml#readme" + f" See https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration" ) else: self.clearml = None From ae4ef3b267ffbe7b4b59b95c3149d12eab0e9fd1 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 7 Apr 2024 00:46:25 +0200 Subject: [PATCH 1874/1976] [Snyk] Security upgrade pillow from 9.5.0 to 10.3.0 (#12868) * fix: requirements.txt to reduce vulnerabilities The following vulnerabilities are fixed by pinning transitive dependencies: - https://snyk.io/vuln/SNYK-PYTHON-PILLOW-6514866 * Update requirements.txt Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher Co-authored-by: snyk-bot --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 7e25a9a4914c..3892abe07308 100644 --- a/requirements.txt +++ b/requirements.txt @@ -6,7 +6,7 @@ gitpython>=3.1.30 matplotlib>=3.3 numpy>=1.23.5 opencv-python>=4.1.1 -Pillow>=9.4.0 +pillow>=10.3.0 psutil # system resources PyYAML>=5.3.1 requests>=2.23.0 From d07d0cf633b8b8bca90509d4c6183e62d3ebaca6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 9 Apr 2024 14:52:45 +0200 Subject: [PATCH 1875/1976] Create cla.yml (#12899) Signed-off-by: Glenn Jocher --- .github/workflows/cla.yml | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) create mode 100644 .github/workflows/cla.yml diff --git a/.github/workflows/cla.yml b/.github/workflows/cla.yml new file mode 100644 index 000000000000..4f87c77701dd --- /dev/null +++ b/.github/workflows/cla.yml @@ -0,0 +1,39 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# Ultralytics Contributor License Agreement (CLA) action https://docs.ultralytics.com/help/CLA +# This workflow automatically requests Pull Requests (PR) authors to sign the Ultralytics CLA before PRs can be merged + +name: CLA Assistant +on: + issue_comment: + types: + - created + pull_request_target: + types: + - reopened + - opened + - synchronize + +jobs: + CLA: + if: github.repository == 'ultralytics/yolov5' + runs-on: ubuntu-latest + steps: + - name: CLA Assistant + if: (github.event.comment.body == 'recheck' || github.event.comment.body == 'I have read the CLA Document and I sign the CLA') || github.event_name == 'pull_request_target' + uses: contributor-assistant/github-action@v2.3.2 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + # must be repository secret token + PERSONAL_ACCESS_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }} + with: + path-to-signatures: "signatures/version1/cla.json" + path-to-document: "https://docs.ultralytics.com/help/CLA" # CLA document + # branch should not be protected + branch: "main" + allowlist: dependabot[bot],github-actions,[pre-commit*,pre-commit*,bot* + + remote-organization-name: ultralytics + remote-repository-name: cla + custom-pr-sign-comment: "I have read the CLA Document and I sign the CLA" + custom-allsigned-prcomment: All Contributors have signed the CLA. 
✅ + #custom-notsigned-prcomment: 'pull request comment with Introductory message to ask new contributors to sign' From 21f8f94d1169bd28c5d59fe6ccc23b13ddb997e2 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 13 Apr 2024 17:21:03 +0200 Subject: [PATCH 1876/1976] Sort imports with Ruff and iSort (#12915) * Sort imports with Ruff and iSort Signed-off-by: Glenn Jocher * Auto-format by https://ultralytics.com/actions --------- Signed-off-by: Glenn Jocher Co-authored-by: UltralyticsAssistant From 77aa0e61afebadf574f22b4473b0e283c546d7d9 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 14 Apr 2024 06:14:49 -0700 Subject: [PATCH 1877/1976] Create merge-main-into-prs.yml (#12918) * Create merge-main-into-prs.yml Signed-off-by: Glenn Jocher * Update merge-main-into-prs.yml Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher --- .github/workflows/merge-main-into-prs.yml | 58 +++++++++++++++++++++++ 1 file changed, 58 insertions(+) create mode 100644 .github/workflows/merge-main-into-prs.yml diff --git a/.github/workflows/merge-main-into-prs.yml b/.github/workflows/merge-main-into-prs.yml new file mode 100644 index 000000000000..7ac983be10ae --- /dev/null +++ b/.github/workflows/merge-main-into-prs.yml @@ -0,0 +1,58 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +# Automatically merges repository 'main' branch into all open PRs to keep them up-to-date +# Action runs on updates to main branch so when one PR merges to main all others update + +name: Merge main into PRs + +on: + workflow_dispatch: + push: + branches: + - main + +jobs: + Merge: + if: github.repository == 'ultralytics/yolov5' + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + ref: main + - uses: actions/setup-python@v5 + with: + python-version: "3.11" + cache: "pip" # caching pip dependencies + - name: Install requirements + run: | + pip install pygithub + - name: Merge main into PRs + shell: python + run: | + from github import Github + import os + + # Authenticate with the GitHub Token + g = Github(os.getenv('GITHUB_TOKEN')) + + # Get the repository dynamically + repo = g.get_repo(os.getenv('GITHUB_REPOSITORY')) + + # List all open pull requests + open_pulls = repo.get_pulls(state='open', sort='created') + + for pr in open_pulls: + # Compare PR head with main to see if it's behind + comparison = repo.compare(pr.base.ref, pr.head.ref) # Ensure correct order of base and head + if comparison.behind_by > 0: + try: + # Merge main into the PR branch + success = pr.update_branch() + assert success, "Branch update failed" + print(f"Merged 'main' into PR #{pr.number} ({pr.head.ref}) successfully.") + except Exception as e: + print(f"Could not merge 'main' into PR #{pr.number} ({pr.head.ref}): {e}") + env: + GITHUB_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }} + GITHUB_REPOSITORY: ${{ github.repository }} From a3ddc1795e159699418c9f63403f4ad6a92f75aa Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 14 Apr 2024 06:19:07 -0700 Subject: [PATCH 1878/1976] Update to `ultralytics>=8.1.47` (#12919) Signed-off-by: Glenn Jocher --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index fc3bf6766c44..1472a9820ac7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -78,7 +78,7 @@ dependencies = [ "thop>=0.1.1", # FLOPs computation "pandas>=1.1.4", "seaborn>=0.11.0", # plotting - "ultralytics>=8.0.232" + "ultralytics>=8.1.47" ] # Optional dependencies 
------------------------------------------------------------------------------------------------ From 6c8f6933521ee32600b506970af1402ce02b443a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 14 Apr 2024 06:23:17 -0700 Subject: [PATCH 1879/1976] Update merge-main-into-prs.yml (#12920) Signed-off-by: Glenn Jocher --- .github/workflows/merge-main-into-prs.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/merge-main-into-prs.yml b/.github/workflows/merge-main-into-prs.yml index 7ac983be10ae..445dd2ac7fd8 100644 --- a/.github/workflows/merge-main-into-prs.yml +++ b/.github/workflows/merge-main-into-prs.yml @@ -9,6 +9,7 @@ on: push: branches: - main + - master jobs: Merge: @@ -19,7 +20,6 @@ jobs: uses: actions/checkout@v4 with: fetch-depth: 0 - ref: main - uses: actions/setup-python@v5 with: python-version: "3.11" @@ -50,9 +50,9 @@ jobs: # Merge main into the PR branch success = pr.update_branch() assert success, "Branch update failed" - print(f"Merged 'main' into PR #{pr.number} ({pr.head.ref}) successfully.") + print(f"Merged 'master' into PR #{pr.number} ({pr.head.ref}) successfully.") except Exception as e: - print(f"Could not merge 'main' into PR #{pr.number} ({pr.head.ref}): {e}") + print(f"Could not merge 'master' into PR #{pr.number} ({pr.head.ref}): {e}") env: GITHUB_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }} GITHUB_REPOSITORY: ${{ github.repository }} From 51dc1af6e82fbcc0bb2eccddc2148bbc0268695c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 14 Apr 2024 06:30:37 -0700 Subject: [PATCH 1880/1976] Update merge-main-into-prs.yml (#12921) Signed-off-by: Glenn Jocher --- .github/workflows/merge-main-into-prs.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/merge-main-into-prs.yml b/.github/workflows/merge-main-into-prs.yml index 445dd2ac7fd8..06757d3f1942 100644 --- a/.github/workflows/merge-main-into-prs.yml +++ b/.github/workflows/merge-main-into-prs.yml @@ -44,15 +44,15 @@ jobs: for pr in open_pulls: # Compare PR head with main to see if it's behind - comparison = repo.compare(pr.base.ref, pr.head.ref) # Ensure correct order of base and head - if comparison.behind_by > 0: - try: + try: + comparison = repo.compare(pr.base.ref, pr.head.ref) # Ensure correct order of base and head + if comparison.behind_by > 0: # Merge main into the PR branch success = pr.update_branch() assert success, "Branch update failed" print(f"Merged 'master' into PR #{pr.number} ({pr.head.ref}) successfully.") - except Exception as e: - print(f"Could not merge 'master' into PR #{pr.number} ({pr.head.ref}): {e}") + except Exception as e: + print(f"Could not merge 'master' into PR #{pr.number} ({pr.head.ref}): {e}") env: GITHUB_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }} GITHUB_REPOSITORY: ${{ github.repository }} From cf8b67b7e9b49bed087ef27b4847edd84c687c34 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 14 Apr 2024 07:36:44 -0700 Subject: [PATCH 1881/1976] Update merge-main-into-prs.yml (#12922) Signed-off-by: Glenn Jocher --- .github/workflows/merge-main-into-prs.yml | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/.github/workflows/merge-main-into-prs.yml b/.github/workflows/merge-main-into-prs.yml index 06757d3f1942..9ed945c78978 100644 --- a/.github/workflows/merge-main-into-prs.yml +++ b/.github/workflows/merge-main-into-prs.yml @@ -45,12 +45,10 @@ jobs: for pr in open_pulls: # Compare PR head with main to see if it's behind try: - comparison = 
repo.compare(pr.base.ref, pr.head.ref) # Ensure correct order of base and head - if comparison.behind_by > 0: - # Merge main into the PR branch - success = pr.update_branch() - assert success, "Branch update failed" - print(f"Merged 'master' into PR #{pr.number} ({pr.head.ref}) successfully.") + # Merge main into the PR branch + success = pr.update_branch() + assert success, "Branch update failed" + print(f"Merged 'master' into PR #{pr.number} ({pr.head.ref}) successfully.") except Exception as e: print(f"Could not merge 'master' into PR #{pr.number} ({pr.head.ref}): {e}") env: From 22361691457f6c6f222829fd390772325da678c5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 17 Apr 2024 20:41:59 -0700 Subject: [PATCH 1882/1976] Bump gunicorn from 19.10.0 to 22.0.0 in /utils/google_app_engine (#12929) * Bump gunicorn from 19.10.0 to 22.0.0 in /utils/google_app_engine Bumps [gunicorn](https://github.com/benoitc/gunicorn) from 19.10.0 to 22.0.0. - [Release notes](https://github.com/benoitc/gunicorn/releases) - [Commits](https://github.com/benoitc/gunicorn/compare/19.10.0...22.0.0) --- updated-dependencies: - dependency-name: gunicorn dependency-type: direct:production ... Signed-off-by: dependabot[bot] * [Snyk] Security upgrade gunicorn from 19.10.0 to 22.0.0 (#12938) fix: utils/google_app_engine/additional_requirements.txt to reduce vulnerabilities The following vulnerabilities are fixed by pinning transitive dependencies: - https://snyk.io/vuln/SNYK-PYTHON-GUNICORN-6615672 Co-authored-by: snyk-bot --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher Co-authored-by: snyk-bot --- utils/google_app_engine/additional_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/google_app_engine/additional_requirements.txt b/utils/google_app_engine/additional_requirements.txt index c1a2af2c1145..821c3caf3cbf 100644 --- a/utils/google_app_engine/additional_requirements.txt +++ b/utils/google_app_engine/additional_requirements.txt @@ -1,5 +1,5 @@ # add these requirements in your app on top of the existing ones pip==23.3 Flask==2.3.2 -gunicorn==19.10.0 +gunicorn==22.0.0 werkzeug>=3.0.1 # not directly required, pinned by Snyk to avoid a vulnerability From 4456c95352cbc47f1ebb132956b8412d5b9f4c37 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 27 Apr 2024 13:22:02 +0200 Subject: [PATCH 1883/1976] Bump slackapi/slack-github-action from 1.25.0 to 1.26.0 in /.github/workflows (#12948) * Bump slackapi/slack-github-action in /.github/workflows Bumps [slackapi/slack-github-action](https://github.com/slackapi/slack-github-action) from 1.25.0 to 1.26.0. - [Release notes](https://github.com/slackapi/slack-github-action/releases) - [Commits](https://github.com/slackapi/slack-github-action/compare/v1.25.0...v1.26.0) --- updated-dependencies: - dependency-name: slackapi/slack-github-action dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Update ci-testing.yml Signed-off-by: Glenn Jocher --------- Signed-off-by: dependabot[bot] Signed-off-by: Glenn Jocher Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher --- .github/workflows/ci-testing.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 4a300314f65c..b70e0eb89245 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -25,7 +25,7 @@ jobs: - uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - cache: "pip" # caching pip dependencies + cache: "pip" # cache pip dependencies - name: Install requirements run: | python -m pip install --upgrade pip wheel @@ -141,7 +141,7 @@ jobs: steps: - name: Check for failure and notify if: (needs.Benchmarks.result == 'failure' || needs.Tests.result == 'failure' || needs.Benchmarks.result == 'cancelled' || needs.Tests.result == 'cancelled') && github.repository == 'ultralytics/yolov5' && (github.event_name == 'schedule' || github.event_name == 'push') - uses: slackapi/slack-github-action@v1.25.0 + uses: slackapi/slack-github-action@v1.26.0 with: payload: | {"text": " GitHub Actions error for ${{ github.workflow }} ❌\n\n\n*Repository:* https://github.com/${{ github.repository }}\n*Action:* https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\n*Author:* ${{ github.actor }}\n*Event:* ${{ github.event_name }}\n"} From b599ae42d9adb8bcb96a1de6ad093436aac9fe6b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 28 Apr 2024 17:18:39 +0200 Subject: [PATCH 1884/1976] Add Turkish and Vietnamese Docs (#12972) * Add Turkish and Vietnamese Docs Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update pyproject.toml Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher --- README.md | 2 +- README.zh-CN.md | 2 +- pyproject.toml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 3a4dbb9d67e8..55f813747fe5 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,7 @@ -->

-[中文](https://docs.ultralytics.com/zh/) | [한국어](https://docs.ultralytics.com/ko/) | [日本語](https://docs.ultralytics.com/ja/) | [Русский](https://docs.ultralytics.com/ru/) | [Deutsch](https://docs.ultralytics.com/de/) | [Français](https://docs.ultralytics.com/fr/) | [Español](https://docs.ultralytics.com/es/) | [Português](https://docs.ultralytics.com/pt/) | [हिन्दी](https://docs.ultralytics.com/hi/) | [العربية](https://docs.ultralytics.com/ar/) +[中文](https://docs.ultralytics.com/zh/) | [한국어](https://docs.ultralytics.com/ko/) | [日本語](https://docs.ultralytics.com/ja/) | [Русский](https://docs.ultralytics.com/ru/) | [Deutsch](https://docs.ultralytics.com/de/) | [Français](https://docs.ultralytics.com/fr/) | [Español](https://docs.ultralytics.com/es/) | [Português](https://docs.ultralytics.com/pt/) | [Türkçe](https://docs.ultralytics.com/tr/) | [Tiếng Việt](https://docs.ultralytics.com/vi/) | [हिन्दी](https://docs.ultralytics.com/hi/) | [العربية](https://docs.ultralytics.com/ar/)
YOLOv5 CI diff --git a/README.zh-CN.md b/README.zh-CN.md index c81feb86c202..69ce9b72d332 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -8,7 +8,7 @@ -->

-[中文](https://docs.ultralytics.com/zh/) | [한국어](https://docs.ultralytics.com/ko/) | [日本語](https://docs.ultralytics.com/ja/) | [Русский](https://docs.ultralytics.com/ru/) | [Deutsch](https://docs.ultralytics.com/de/) | [Français](https://docs.ultralytics.com/fr/) | [Español](https://docs.ultralytics.com/es/) | [Português](https://docs.ultralytics.com/pt/) | [हिन्दी](https://docs.ultralytics.com/hi/) | [العربية](https://docs.ultralytics.com/ar/) +[中文](https://docs.ultralytics.com/zh/) | [한국어](https://docs.ultralytics.com/ko/) | [日本語](https://docs.ultralytics.com/ja/) | [Русский](https://docs.ultralytics.com/ru/) | [Deutsch](https://docs.ultralytics.com/de/) | [Français](https://docs.ultralytics.com/fr/) | [Español](https://docs.ultralytics.com/es/) | [Português](https://docs.ultralytics.com/pt/) | [Türkçe](https://docs.ultralytics.com/tr/) | [Tiếng Việt](https://docs.ultralytics.com/vi/) | [हिन्दी](https://docs.ultralytics.com/hi/) | [العربية](https://docs.ultralytics.com/ar/)
YOLOv5 CI diff --git a/pyproject.toml b/pyproject.toml index 1472a9820ac7..5748b907cf30 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -24,7 +24,7 @@ build-backend = "setuptools.build_meta" # Project settings ----------------------------------------------------------------------------------------------------- [project] -version = 7.0.0 +version = "7.0.0" name = "YOLOv5" description = "Ultralytics YOLOv5 for SOTA object detection, instance segmentation and image classification." readme = "README.md" From 920c721ea77315291f987ea5b911be7fb619d134 Mon Sep 17 00:00:00 2001 From: Nick Martin <284356+n1mmy@users.noreply.github.com> Date: Sun, 5 May 2024 02:55:12 -0700 Subject: [PATCH 1885/1976] Backport compatibility with TensorRT version 10 from yolov8 (#12984) Add compatibility with TensorRT version 10. Based on the is_trt10 code in yolov8. --- export.py | 14 +++++++++----- models/common.py | 40 ++++++++++++++++++++++++++++------------ 2 files changed, 37 insertions(+), 17 deletions(-) diff --git a/export.py b/export.py index c660ce660aa5..214d903c2998 100644 --- a/export.py +++ b/export.py @@ -346,6 +346,7 @@ def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose onnx = file.with_suffix(".onnx") LOGGER.info(f"\n{prefix} starting export with TensorRT {trt.__version__}...") + is_trt10 = int(trt.__version__.split(".")[0]) >= 10 # is TensorRT >= 10 assert onnx.exists(), f"failed to export ONNX file: {onnx}" f = file.with_suffix(".engine") # TensorRT engine file logger = trt.Logger(trt.Logger.INFO) @@ -354,9 +355,10 @@ def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose builder = trt.Builder(logger) config = builder.create_builder_config() - config.max_workspace_size = workspace * 1 << 30 - # config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace << 30) # fix TRT 8.4 deprecation notice - + if is_trt10: + config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace << 30) + else: # TensorRT versions 7, 8 + config.max_workspace_size = workspace * 1 << 30 flag = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH) network = builder.create_network(flag) parser = trt.OnnxParser(network, logger) @@ -381,8 +383,10 @@ def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose LOGGER.info(f"{prefix} building FP{16 if builder.platform_has_fast_fp16 and half else 32} engine as {f}") if builder.platform_has_fast_fp16 and half: config.set_flag(trt.BuilderFlag.FP16) - with builder.build_engine(network, config) as engine, open(f, "wb") as t: - t.write(engine.serialize()) + + build = builder.build_serialized_network if is_trt10 else builder.build_engine + with build(network, config) as engine, open(f, "wb") as t: + t.write(engine if is_trt10 else engine.serialize()) return f, None diff --git a/models/common.py b/models/common.py index 8925897099c1..12244fd4b3cf 100644 --- a/models/common.py +++ b/models/common.py @@ -527,18 +527,34 @@ def __init__(self, weights="yolov5s.pt", device=torch.device("cpu"), dnn=False, output_names = [] fp16 = False # default updated below dynamic = False - for i in range(model.num_bindings): - name = model.get_binding_name(i) - dtype = trt.nptype(model.get_binding_dtype(i)) - if model.binding_is_input(i): - if -1 in tuple(model.get_binding_shape(i)): # dynamic - dynamic = True - context.set_binding_shape(i, tuple(model.get_profile_shape(0, i)[2])) - if dtype == np.float16: - fp16 = True - else: # output - output_names.append(name) - shape = tuple(context.get_binding_shape(i)) + 
is_trt10 = not hasattr(model, "num_bindings") + num = range(model.num_io_tensors) if is_trt10 else range(model.num_bindings) + for i in num: + if is_trt10: + name = model.get_tensor_name(i) + dtype = trt.nptype(model.get_tensor_dtype(name)) + is_input = model.get_tensor_mode(name) == trt.TensorIOMode.INPUT + if is_input: + if -1 in tuple(model.get_tensor_shape(name)): # dynamic + dynamic = True + context.set_input_shape(name, tuple(model.get_profile_shape(name, 0)[2])) + if dtype == np.float16: + fp16 = True + else: # output + output_names.append(name) + shape = tuple(context.get_tensor_shape(name)) + else: + name = model.get_binding_name(i) + dtype = trt.nptype(model.get_binding_dtype(i)) + if model.binding_is_input(i): + if -1 in tuple(model.get_binding_shape(i)): # dynamic + dynamic = True + context.set_binding_shape(i, tuple(model.get_profile_shape(0, i)[2])) + if dtype == np.float16: + fp16 = True + else: # output + output_names.append(name) + shape = tuple(context.get_binding_shape(i)) im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device) bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr())) binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items()) From 2f6b91ee8c038a59ac51fe1151e3defd6cd0f907 Mon Sep 17 00:00:00 2001 From: inisis <46103969+inisis@users.noreply.github.com> Date: Sun, 12 May 2024 23:10:18 +0800 Subject: [PATCH 1886/1976] Set `TORCH_CPP_LOG_LEVEL=ERROR` for reduced verbosity (#12989) * modify torch cpp log level to Error to avoid annoying print * Auto-format by https://ultralytics.com/actions --------- Co-authored-by: UltralyticsAssistant --- export.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/export.py b/export.py index 214d903c2998..cd2202c9c072 100644 --- a/export.py +++ b/export.py @@ -48,6 +48,8 @@ import contextlib import json import os + +os.environ["TORCH_CPP_LOG_LEVEL"] = "ERROR" import platform import re import subprocess From 331c39e3c3b829519727d592358d0bb42433d3b5 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 12 May 2024 18:25:53 +0200 Subject: [PATCH 1887/1976] Add `pip install --retries 3` to CI to resolve transients (#13001) * Add `pip install --retries 3` to CI to resolve transients Signed-off-by: Glenn Jocher * Update ci-testing.yml Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher --- .github/workflows/ci-testing.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index b70e0eb89245..aa5f9e316ee6 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -29,7 +29,7 @@ jobs: - name: Install requirements run: | python -m pip install --upgrade pip wheel - pip install -r requirements.txt coremltools openvino-dev "tensorflow-cpu<2.15.1" --extra-index-url https://download.pytorch.org/whl/cpu + pip install --retries 3 -r requirements.txt coremltools openvino-dev "tensorflow-cpu<2.15.1" --extra-index-url https://download.pytorch.org/whl/cpu yolo checks pip list - name: Benchmark DetectionModel @@ -68,11 +68,11 @@ jobs: - name: Install requirements run: | python -m pip install --upgrade pip wheel + torch="" if [ "${{ matrix.torch }}" == "1.8.0" ]; then - pip install -r requirements.txt torch==1.8.0 torchvision==0.9.0 --extra-index-url https://download.pytorch.org/whl/cpu - else - pip install -r requirements.txt --extra-index-url https://download.pytorch.org/whl/cpu + torch="torch==1.8.0 torchvision==0.9.0" fi + pip install --retries 3 -r requirements.txt $torch 
--extra-index-url https://download.pytorch.org/whl/cpu shell: bash # for Windows compatibility - name: Check environment run: | From d1ea6b1efa3fe64f666205c4419b36af20780a86 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 12 May 2024 18:48:42 +0200 Subject: [PATCH 1888/1976] Revert CI `pip install` retries to default (#13002) Signed-off-by: Glenn Jocher --- .github/workflows/ci-testing.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index aa5f9e316ee6..c20a07e6c0b4 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -29,7 +29,7 @@ jobs: - name: Install requirements run: | python -m pip install --upgrade pip wheel - pip install --retries 3 -r requirements.txt coremltools openvino-dev "tensorflow-cpu<2.15.1" --extra-index-url https://download.pytorch.org/whl/cpu + pip install -r requirements.txt coremltools openvino-dev "tensorflow-cpu<2.15.1" --extra-index-url https://download.pytorch.org/whl/cpu yolo checks pip list - name: Benchmark DetectionModel @@ -72,7 +72,7 @@ jobs: if [ "${{ matrix.torch }}" == "1.8.0" ]; then torch="torch==1.8.0 torchvision==0.9.0" fi - pip install --retries 3 -r requirements.txt $torch --extra-index-url https://download.pytorch.org/whl/cpu + pip install -r requirements.txt $torch --extra-index-url https://download.pytorch.org/whl/cpu shell: bash # for Windows compatibility - name: Check environment run: | From 28e06aa3911ecf8bcfb9f13aa3be378e4ab62593 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 12 May 2024 22:16:54 +0200 Subject: [PATCH 1889/1976] Centralize ENV variable definition in utils/general.py (#13004) * Centralize ENV variable definition in utils/general.py Signed-off-by: Glenn Jocher * Update export.py Signed-off-by: Glenn Jocher * Auto-format by https://ultralytics.com/actions --------- Signed-off-by: Glenn Jocher Co-authored-by: UltralyticsAssistant --- export.py | 2 -- utils/general.py | 2 ++ 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/export.py b/export.py index cd2202c9c072..214d903c2998 100644 --- a/export.py +++ b/export.py @@ -48,8 +48,6 @@ import contextlib import json import os - -os.environ["TORCH_CPP_LOG_LEVEL"] = "ERROR" import platform import re import subprocess diff --git a/utils/general.py b/utils/general.py index 5a9325eec757..64ae50626e14 100644 --- a/utils/general.py +++ b/utils/general.py @@ -68,6 +68,8 @@ os.environ["NUMEXPR_MAX_THREADS"] = str(NUM_THREADS) # NumExpr max threads os.environ["OMP_NUM_THREADS"] = "1" if platform.system() == "darwin" else str(NUM_THREADS) # OpenMP (PyTorch and SciPy) os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2" # suppress verbose TF compiler warnings in Colab +os.environ["TORCH_CPP_LOG_LEVEL"] = "ERROR" # suppress "NNPACK.cpp could not initialize NNPACK" warnings +os.environ["KINETO_LOG_LEVEL"] = "5" # suppress verbose PyTorch profiler output when computing FLOPs def is_ascii(s=""): From 1bcd17ee6941f5e31e7a4939f6f1ab43b3815c7e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 May 2024 11:46:16 +0200 Subject: [PATCH 1890/1976] Bump contributor-assistant/github-action from 2.3.2 to 2.4.0 in /.github/workflows (#13006) Bump contributor-assistant/github-action in /.github/workflows Bumps [contributor-assistant/github-action](https://github.com/contributor-assistant/github-action) from 2.3.2 to 2.4.0. 
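Stepping back to the TensorRT 10 backport above (#12984): the whole compatibility layer reduces to one version probe and two call-site switches in the exporter, plus a feature probe at load time. A condensed sketch of the export-side pattern, with `builder`, `network` and `config` assumed to be created by the caller exactly as in `export_engine()`:

```python
# Condensed form of the TensorRT 7/8 vs 10 switches from the patch above.
import tensorrt as trt

IS_TRT10 = int(trt.__version__.split(".")[0]) >= 10  # major-version probe


def set_workspace(config, workspace_gb=4):
    """Apply the workspace limit through whichever API this TRT build exposes."""
    if IS_TRT10:
        config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace_gb << 30)
    else:  # TensorRT 7 and 8
        config.max_workspace_size = workspace_gb << 30


def serialized_engine(builder, network, config):
    """Return engine bytes: TRT 10 serializes during the build, older TRT after it."""
    if IS_TRT10:
        return builder.build_serialized_network(network, config)  # IHostMemory buffer
    with builder.build_engine(network, config) as engine:
        return engine.serialize()
```

At inference time the same idea appears as a capability check rather than a version parse: `not hasattr(model, "num_bindings")` distinguishes the name-based tensor API of TensorRT 10 from the index-based binding API it replaced, which is exactly what the `models/common.py` hunk above branches on.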
- [Release notes](https://github.com/contributor-assistant/github-action/releases) - [Commits](https://github.com/contributor-assistant/github-action/compare/v2.3.2...v2.4.0) --- updated-dependencies: - dependency-name: contributor-assistant/github-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/cla.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/cla.yml b/.github/workflows/cla.yml index 4f87c77701dd..df8d4fd51303 100644 --- a/.github/workflows/cla.yml +++ b/.github/workflows/cla.yml @@ -20,7 +20,7 @@ jobs: steps: - name: CLA Assistant if: (github.event.comment.body == 'recheck' || github.event.comment.body == 'I have read the CLA Document and I sign the CLA') || github.event_name == 'pull_request_target' - uses: contributor-assistant/github-action@v2.3.2 + uses: contributor-assistant/github-action@v2.4.0 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # must be repository secret token From 712de55a20cd584a12a4aebe96226c92f816dcf3 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 18 May 2024 15:52:21 +0200 Subject: [PATCH 1891/1976] Reformat Markdown code blocks (#13023) --- README.md | 4 +--- README.zh-CN.md | 4 +--- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 55f813747fe5..9eb712587c82 100644 --- a/README.md +++ b/README.md @@ -395,9 +395,7 @@ python classify/predict.py --weights yolov5s-cls.pt --source data/images/bus.jpg ``` ```python -model = torch.hub.load( - "ultralytics/yolov5", "custom", "yolov5s-cls.pt" -) # load from PyTorch Hub +model = torch.hub.load("ultralytics/yolov5", "custom", "yolov5s-cls.pt") # load from PyTorch Hub ``` ### Export diff --git a/README.zh-CN.md b/README.zh-CN.md index 69ce9b72d332..36b9e6a0a773 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -394,9 +394,7 @@ python classify/predict.py --weights yolov5s-cls.pt --source data/images/bus.jpg ``` ```python -model = torch.hub.load( - "ultralytics/yolov5", "custom", "yolov5s-cls.pt" -) # load from PyTorch Hub +model = torch.hub.load("ultralytics/yolov5", "custom", "yolov5s-cls.pt") # load from PyTorch Hub ``` ### 模型导出 From 60dde7f18b9b102ea4462ec1ec5ab42bc8287811 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 24 May 2024 07:50:10 +0200 Subject: [PATCH 1892/1976] [Snyk] Security upgrade requests from 2.31.0 to 2.32.0 (#13039) fix: requirements.txt to reduce vulnerabilities The following vulnerabilities are fixed by pinning transitive dependencies: - https://snyk.io/vuln/SNYK-PYTHON-REQUESTS-6928867 Co-authored-by: snyk-bot --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 3892abe07308..7248b72086b7 100644 --- a/requirements.txt +++ b/requirements.txt @@ -9,7 +9,7 @@ opencv-python>=4.1.1 pillow>=10.3.0 psutil # system resources PyYAML>=5.3.1 -requests>=2.23.0 +requests>=2.32.0 scipy>=1.4.1 thop>=0.1.1 # FLOPs computation torch>=1.8.0 # see https://pytorch.org/get-started/locally (recommended) From 892e8a82633cf5348da9bab3e36315c4dcc8960e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 28 May 2024 16:56:27 +0200 Subject: [PATCH 1893/1976] Update Ultralytics YouTube URL (#13046) * Update Ultralytics YouTube URL * Update README.md Signed-off-by: Glenn Jocher * Auto-format by https://ultralytics.com/actions --------- Signed-off-by: Glenn Jocher 
Co-authored-by: UltralyticsAssistant --- README.md | 4 ++-- README.zh-CN.md | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 9eb712587c82..fe9120be4832 100644 --- a/README.md +++ b/README.md @@ -35,7 +35,7 @@ To request an Enterprise License please complete the form at [Ultralytics Licens Ultralytics Twitter - Ultralytics YouTube + Ultralytics YouTube Ultralytics TikTok @@ -460,7 +460,7 @@ For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https:/ Ultralytics Twitter - Ultralytics YouTube + Ultralytics YouTube Ultralytics TikTok diff --git a/README.zh-CN.md b/README.zh-CN.md index 36b9e6a0a773..2fb0bf9b3164 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -34,7 +34,7 @@ YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表 Ultralytics Twitter - Ultralytics YouTube + Ultralytics YouTube Ultralytics TikTok @@ -459,7 +459,7 @@ Ultralytics 提供两种许可证选项以适应各种使用场景: Ultralytics Twitter - Ultralytics YouTube + Ultralytics YouTube Ultralytics TikTok From 2b7bcf664b08eaf92fff2b1a9d3882e9c3bcb208 Mon Sep 17 00:00:00 2001 From: RainRat Date: Wed, 29 May 2024 13:09:30 -0700 Subject: [PATCH 1894/1976] Fix typos (#13049) * fix typos no functional change * Update hpo.py better fix for typo no functional change --- segment/train.py | 2 +- utils/augmentations.py | 2 +- utils/dataloaders.py | 2 +- utils/loggers/clearml/hpo.py | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/segment/train.py b/segment/train.py index 5a6e9afb8ec0..a0d0c5119639 100644 --- a/segment/train.py +++ b/segment/train.py @@ -746,7 +746,7 @@ def run(**kwargs): """ Executes YOLOv5 training with given parameters, altering options programmatically; returns updated options. - Example: mport train; train.run(data='coco128.yaml', imgsz=320, weights='yolov5m.pt') + Example: import train; train.run(data='coco128.yaml', imgsz=320, weights='yolov5m.pt') """ opt = parse_opt(True) for k, v in kwargs.items(): diff --git a/utils/augmentations.py b/utils/augmentations.py index 3025ebdb7092..3556bffeda18 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -353,7 +353,7 @@ def classify_albumentations( if vflip > 0: T += [A.VerticalFlip(p=vflip)] if jitter > 0: - color_jitter = (float(jitter),) * 3 # repeat value for brightness, contrast, satuaration, 0 hue + color_jitter = (float(jitter),) * 3 # repeat value for brightness, contrast, saturation, 0 hue T += [A.ColorJitter(*color_jitter, 0)] else: # Use fixed crop for eval set (reproducibility) T = [A.SmallestMaxSize(max_size=size), A.CenterCrop(height=size, width=size)] diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 04420c77a673..7ca451aa0929 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -136,7 +136,7 @@ def __iter__(self): g = torch.Generator() g.manual_seed(self.seed + self.epoch) - # determine the the eventual size (n) of self.indices (DDP indices) + # determine the eventual size (n) of self.indices (DDP indices) n = int((len(self.dataset) - self.rank - 1) / self.num_replicas) + 1 # num_replicas == WORLD_SIZE idx = torch.randperm(n, generator=g) if not self.shuffle: diff --git a/utils/loggers/clearml/hpo.py b/utils/loggers/clearml/hpo.py index 4e314ea868df..aa1b7ab9494d 100644 --- a/utils/loggers/clearml/hpo.py +++ b/utils/loggers/clearml/hpo.py @@ -58,7 +58,7 @@ # now we decide if we want to maximize it or minimize it (accuracy we maximize) objective_metric_sign="max", # let us limit the number of concurrent experiments, - # this in turn will make sure we do dont bombard the scheduler 
with experiments. + # this in turn will make sure we don't bombard the scheduler with experiments. # if we have an auto-scaler connected, this, by proxy, will limit the number of machine max_number_of_concurrent_tasks=1, # this is the optimizer class (actually doing the optimization) From 004037947f7b302550fd409851590a76e90a07ab Mon Sep 17 00:00:00 2001 From: goksmisama <80083119+goksmisama@users.noreply.github.com> Date: Thu, 30 May 2024 04:12:17 +0800 Subject: [PATCH 1895/1976] Fix reshape_classifier_output function to correctly reshape the final output layer (#13052) * Fix reshape_classifier_output function to correctly reshape the final output layer * Update torch_utils.py Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher Co-authored-by: Ultralytics Assistant <135830346+UltralyticsAssistant@users.noreply.github.com> Co-authored-by: Glenn Jocher --- utils/torch_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 4929d21cdf83..28893c71deef 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -79,11 +79,11 @@ def reshape_classifier_output(model, n=1000): elif isinstance(m, nn.Sequential): types = [type(x) for x in m] if nn.Linear in types: - i = types.index(nn.Linear) # nn.Linear index + i = len(types) - 1 - types[::-1].index(nn.Linear) # last nn.Linear index if m[i].out_features != n: m[i] = nn.Linear(m[i].in_features, n) elif nn.Conv2d in types: - i = types.index(nn.Conv2d) # nn.Conv2d index + i = len(types) - 1 - types[::-1].index(nn.Conv2d) # last nn.Conv2d index if m[i].out_channels != n: m[i] = nn.Conv2d(m[i].in_channels, n, m[i].kernel_size, m[i].stride, bias=m[i].bias is not None) From c0380fd85549c8b315bb1fb2f423f9297e9744ec Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 30 May 2024 12:37:30 +0200 Subject: [PATCH 1896/1976] Add Prettier for YAML formatting to Ultralytics Actions (#13053) * Add Prettier for YAML formatting to Ultralytics Actions Signed-off-by: Glenn Jocher * Update format.yml Signed-off-by: Glenn Jocher * Update format.yml Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher --- .github/workflows/format.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml index 4723d0f7dc70..da66ffe5e503 100644 --- a/.github/workflows/format.yml +++ b/.github/workflows/format.yml @@ -19,7 +19,8 @@ jobs: with: token: ${{ secrets.GITHUB_TOKEN }} # automatically generated, do not modify python: true # format Python code and docstrings - markdown: true # format Markdown and YAML + markdown: true # format Markdown + prettier: true # format YAML spelling: true # check spelling links: false # check broken links summary: true # print PR summary with GPT4 (requires 'openai_api_key' or 'openai_azure_api_key' and 'openai_azure_endpoint') From be2b49fc895da7b121855993e0a65cad7146fabc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 8 Jun 2024 22:19:39 +0200 Subject: [PATCH 1897/1976] Update tensorflow requirement from <=2.13.1 to <=2.16.1 (#12974) Updates the requirements on [tensorflow](https://github.com/tensorflow/tensorflow) to permit the latest version. 
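The classifier-head fix above (#13052) hinges on a small idiom worth spelling out: Python lists have no `rindex()`, so the *last* occurrence of a layer type is found by searching the reversed list and mapping the hit back with `len(types) - 1 - ...`. A tiny self-contained illustration (the demo module and layer sizes here are invented for clarity):

```python
import torch.nn as nn

m = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 4))  # demo head
types = [type(x) for x in m]

first = types.index(nn.Linear)  # 0 -> the old, buggy pick
last = len(types) - 1 - types[::-1].index(nn.Linear)  # 2 -> the true output layer

# Reshape only the final Linear layer to n classes, as the patch does
n = 10
if m[last].out_features != n:
    m[last] = nn.Linear(m[last].in_features, n)
print(first, last, m)
```

With the old `types.index(...)`, a head containing hidden `Linear` layers would have had its first, not its final, layer resized, silently breaking the intermediate feature dimensions.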
- [Release notes](https://github.com/tensorflow/tensorflow/releases) - [Changelog](https://github.com/tensorflow/tensorflow/blob/v2.16.1/RELEASE.md) - [Commits](https://github.com/tensorflow/tensorflow/compare/tflite-v0.1.7...v2.16.1) --- updated-dependencies: - dependency-name: tensorflow dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Ultralytics Assistant <135830346+UltralyticsAssistant@users.noreply.github.com> --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 5748b907cf30..11d151f4293f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -99,7 +99,7 @@ export = [ "onnx>=1.12.0", # ONNX export "coremltools>=7.0; platform_system != 'Windows'", # CoreML only supported on macOS and Linux "openvino-dev>=2023.0", # OpenVINO export - "tensorflow<=2.13.1", # TF bug https://github.com/ultralytics/ultralytics/issues/5161 + "tensorflow<=2.16.1", # TF bug https://github.com/ultralytics/ultralytics/issues/5161 "tensorflowjs>=3.9.0", # TF.js export, automatically installs tensorflow ] # tensorflow>=2.4.1,<=2.13.1 # TF exports (-cpu, -aarch64, -macos) From 3ec95f9e3dfc7d563d037440237a4a69d9984fd4 Mon Sep 17 00:00:00 2001 From: Ultralytics Assistant <135830346+UltralyticsAssistant@users.noreply.github.com> Date: Sat, 8 Jun 2024 22:29:29 +0200 Subject: [PATCH 1898/1976] Update header line in Python files (#13072) * Add license line to .github/ISSUE_TEMPLATE/bug-report.yml * Add license line to .github/ISSUE_TEMPLATE/config.yml * Add license line to .github/ISSUE_TEMPLATE/feature-request.yml * Add license line to .github/ISSUE_TEMPLATE/question.yml * Add license line to .github/dependabot.yml * Add license line to .github/workflows/ci-testing.yml * Add license line to .github/workflows/cla.yml * Add license line to .github/workflows/codeql-analysis.yml * Add license line to .github/workflows/docker.yml * Add license line to .github/workflows/format.yml * Add license line to .github/workflows/greetings.yml * Add license line to .github/workflows/links.yml * Add license line to .github/workflows/merge-main-into-prs.yml * Add license line to .github/workflows/stale.yml * Add license line to benchmarks.py * Add license line to classify/predict.py * Add license line to classify/train.py * Add license line to classify/val.py * Add license line to data/Argoverse.yaml * Add license line to data/GlobalWheat2020.yaml * Add license line to data/ImageNet.yaml * Add license line to data/ImageNet10.yaml * Add license line to data/ImageNet100.yaml * Add license line to data/ImageNet1000.yaml * Add license line to data/Objects365.yaml * Add license line to data/SKU-110K.yaml * Add license line to data/VOC.yaml * Add license line to data/VisDrone.yaml * Add license line to data/coco.yaml * Add license line to data/coco128-seg.yaml * Add license line to data/coco128.yaml * Add license line to data/hyps/hyp.Objects365.yaml * Add license line to data/hyps/hyp.VOC.yaml * Add license line to data/hyps/hyp.no-augmentation.yaml * Add license line to data/hyps/hyp.scratch-high.yaml * Add license line to data/hyps/hyp.scratch-low.yaml * Add license line to data/hyps/hyp.scratch-med.yaml * Add license line to data/xView.yaml * Add license line to detect.py * Add license line to export.py * Add license line to hubconf.py * Add license line to models/common.py * Add license line to models/experimental.py * Add license line to 
models/hub/anchors.yaml * Add license line to models/hub/yolov3-spp.yaml * Add license line to models/hub/yolov3-tiny.yaml * Add license line to models/hub/yolov3.yaml * Add license line to models/hub/yolov5-bifpn.yaml * Add license line to models/hub/yolov5-fpn.yaml * Add license line to models/hub/yolov5-p2.yaml * Add license line to models/hub/yolov5-p34.yaml * Add license line to models/hub/yolov5-p6.yaml * Add license line to models/hub/yolov5-p7.yaml * Add license line to models/hub/yolov5-panet.yaml * Add license line to models/hub/yolov5l6.yaml * Add license line to models/hub/yolov5m6.yaml * Add license line to models/hub/yolov5n6.yaml * Add license line to models/hub/yolov5s-LeakyReLU.yaml * Add license line to models/hub/yolov5s-ghost.yaml * Add license line to models/hub/yolov5s-transformer.yaml * Add license line to models/hub/yolov5s6.yaml * Add license line to models/hub/yolov5x6.yaml * Add license line to models/segment/yolov5l-seg.yaml * Add license line to models/segment/yolov5m-seg.yaml * Add license line to models/segment/yolov5n-seg.yaml * Add license line to models/segment/yolov5s-seg.yaml * Add license line to models/segment/yolov5x-seg.yaml * Add license line to models/tf.py * Add license line to models/yolo.py * Add license line to models/yolov5l.yaml * Add license line to models/yolov5m.yaml * Add license line to models/yolov5n.yaml * Add license line to models/yolov5s.yaml * Add license line to models/yolov5x.yaml * Add license line to pyproject.toml * Add license line to segment/predict.py * Add license line to segment/train.py * Add license line to segment/val.py * Add license line to train.py * Add license line to utils/__init__.py * Add license line to utils/activations.py * Add license line to utils/augmentations.py * Add license line to utils/autoanchor.py * Add license line to utils/autobatch.py * Add license line to utils/aws/resume.py * Add license line to utils/callbacks.py * Add license line to utils/dataloaders.py * Add license line to utils/downloads.py * Add license line to utils/flask_rest_api/example_request.py * Add license line to utils/flask_rest_api/restapi.py * Add license line to utils/general.py * Add license line to utils/google_app_engine/app.yaml * Add license line to utils/loggers/__init__.py * Add license line to utils/loggers/clearml/clearml_utils.py * Add license line to utils/loggers/clearml/hpo.py * Add license line to utils/loggers/comet/__init__.py * Add license line to utils/loggers/comet/comet_utils.py * Add license line to utils/loggers/comet/hpo.py * Add license line to utils/loggers/wandb/wandb_utils.py * Add license line to utils/loss.py * Add license line to utils/metrics.py * Add license line to utils/plots.py * Add license line to utils/segment/augmentations.py * Add license line to utils/segment/dataloaders.py * Add license line to utils/segment/general.py * Add license line to utils/segment/loss.py * Add license line to utils/segment/metrics.py * Add license line to utils/segment/plots.py * Add license line to utils/torch_utils.py * Add license line to utils/triton.py * Add license line to val.py * Auto-format by https://ultralytics.com/actions * Update ImageNet1000.yaml Signed-off-by: Glenn Jocher * Auto-format by https://ultralytics.com/actions --------- Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- .github/ISSUE_TEMPLATE/bug-report.yml | 2 ++ .github/ISSUE_TEMPLATE/config.yml | 2 ++ .github/ISSUE_TEMPLATE/feature-request.yml | 2 ++ .github/ISSUE_TEMPLATE/question.yml | 2 ++ .github/dependabot.yml | 2 
+- .github/workflows/ci-testing.yml | 2 +- .github/workflows/cla.yml | 2 +- .github/workflows/codeql-analysis.yml | 1 + .github/workflows/docker.yml | 2 +- .github/workflows/format.yml | 2 +- .github/workflows/greetings.yml | 2 +- .github/workflows/links.yml | 2 +- .github/workflows/merge-main-into-prs.yml | 2 +- .github/workflows/stale.yml | 2 +- benchmarks.py | 2 +- classify/predict.py | 2 +- classify/train.py | 2 +- classify/val.py | 2 +- data/Argoverse.yaml | 2 +- data/GlobalWheat2020.yaml | 2 +- data/ImageNet.yaml | 2 +- data/ImageNet10.yaml | 2 +- data/ImageNet100.yaml | 2 +- data/ImageNet1000.yaml | 2 +- data/Objects365.yaml | 2 +- data/SKU-110K.yaml | 2 +- data/VOC.yaml | 2 +- data/VisDrone.yaml | 2 +- data/coco.yaml | 2 +- data/coco128-seg.yaml | 2 +- data/coco128.yaml | 2 +- data/hyps/hyp.Objects365.yaml | 2 +- data/hyps/hyp.VOC.yaml | 2 +- data/hyps/hyp.no-augmentation.yaml | 2 +- data/hyps/hyp.scratch-high.yaml | 2 +- data/hyps/hyp.scratch-low.yaml | 2 +- data/hyps/hyp.scratch-med.yaml | 2 +- data/xView.yaml | 2 +- detect.py | 2 +- export.py | 2 +- hubconf.py | 2 +- models/common.py | 2 +- models/experimental.py | 2 +- models/hub/anchors.yaml | 2 +- models/hub/yolov3-spp.yaml | 2 +- models/hub/yolov3-tiny.yaml | 2 +- models/hub/yolov3.yaml | 2 +- models/hub/yolov5-bifpn.yaml | 2 +- models/hub/yolov5-fpn.yaml | 2 +- models/hub/yolov5-p2.yaml | 2 +- models/hub/yolov5-p34.yaml | 2 +- models/hub/yolov5-p6.yaml | 2 +- models/hub/yolov5-p7.yaml | 2 +- models/hub/yolov5-panet.yaml | 2 +- models/hub/yolov5l6.yaml | 2 +- models/hub/yolov5m6.yaml | 2 +- models/hub/yolov5n6.yaml | 2 +- models/hub/yolov5s-LeakyReLU.yaml | 2 +- models/hub/yolov5s-ghost.yaml | 2 +- models/hub/yolov5s-transformer.yaml | 2 +- models/hub/yolov5s6.yaml | 2 +- models/hub/yolov5x6.yaml | 2 +- models/segment/yolov5l-seg.yaml | 2 +- models/segment/yolov5m-seg.yaml | 2 +- models/segment/yolov5n-seg.yaml | 2 +- models/segment/yolov5s-seg.yaml | 2 +- models/segment/yolov5x-seg.yaml | 2 +- models/tf.py | 2 +- models/yolo.py | 2 +- models/yolov5l.yaml | 2 +- models/yolov5m.yaml | 2 +- models/yolov5n.yaml | 2 +- models/yolov5s.yaml | 2 +- models/yolov5x.yaml | 2 +- pyproject.toml | 2 +- segment/predict.py | 2 +- segment/train.py | 2 +- segment/val.py | 2 +- train.py | 2 +- utils/__init__.py | 2 +- utils/activations.py | 2 +- utils/augmentations.py | 2 +- utils/autoanchor.py | 2 +- utils/autobatch.py | 2 +- utils/aws/resume.py | 1 + utils/callbacks.py | 2 +- utils/dataloaders.py | 2 +- utils/downloads.py | 2 +- utils/flask_rest_api/example_request.py | 2 +- utils/flask_rest_api/restapi.py | 2 +- utils/general.py | 2 +- utils/google_app_engine/app.yaml | 2 ++ utils/loggers/__init__.py | 2 +- utils/loggers/clearml/clearml_utils.py | 1 + utils/loggers/clearml/hpo.py | 2 ++ utils/loggers/comet/__init__.py | 2 ++ utils/loggers/comet/comet_utils.py | 2 ++ utils/loggers/comet/hpo.py | 2 ++ utils/loggers/wandb/wandb_utils.py | 2 +- utils/loss.py | 2 +- utils/metrics.py | 2 +- utils/plots.py | 2 +- utils/segment/augmentations.py | 2 +- utils/segment/dataloaders.py | 2 +- utils/segment/general.py | 2 ++ utils/segment/loss.py | 2 ++ utils/segment/metrics.py | 2 +- utils/segment/plots.py | 2 ++ utils/torch_utils.py | 2 +- utils/triton.py | 2 +- val.py | 2 +- 111 files changed, 123 insertions(+), 96 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml index 04f9c76fde1f..76b426a195e4 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.yml +++ b/.github/ISSUE_TEMPLATE/bug-report.yml @@ -1,3 
+1,5 @@ +# Ultralytics YOLOv5 🚀, AGPL-3.0 license + name: 🐛 Bug Report # title: " " description: Problems with YOLOv5 diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index 37080927c0b9..e23dfcf9c56b 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -1,3 +1,5 @@ +# Ultralytics YOLOv5 🚀, AGPL-3.0 license + blank_issues_enabled: true contact_links: - name: 📄 Docs diff --git a/.github/ISSUE_TEMPLATE/feature-request.yml b/.github/ISSUE_TEMPLATE/feature-request.yml index 1d3d53df217e..9282e62d2a57 100644 --- a/.github/ISSUE_TEMPLATE/feature-request.yml +++ b/.github/ISSUE_TEMPLATE/feature-request.yml @@ -1,3 +1,5 @@ +# Ultralytics YOLOv5 🚀, AGPL-3.0 license + name: 🚀 Feature Request description: Suggest a YOLOv5 idea # title: " " diff --git a/.github/ISSUE_TEMPLATE/question.yml b/.github/ISSUE_TEMPLATE/question.yml index 8e0993c68bab..5b62af675e2a 100644 --- a/.github/ISSUE_TEMPLATE/question.yml +++ b/.github/ISSUE_TEMPLATE/question.yml @@ -1,3 +1,5 @@ +# Ultralytics YOLOv5 🚀, AGPL-3.0 license + name: ❓ Question description: Ask a YOLOv5 question # title: " " diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 2d4ae31873b8..061d61f1bc24 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -1,4 +1,4 @@ -# Ultralytics YOLO 🚀, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # Dependabot for package version updates # https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index c20a07e6c0b4..63eab050fc28 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # YOLOv5 Continuous Integration (CI) GitHub Actions tests name: YOLOv5 CI diff --git a/.github/workflows/cla.yml b/.github/workflows/cla.yml index df8d4fd51303..b012ae4abf89 100644 --- a/.github/workflows/cla.yml +++ b/.github/workflows/cla.yml @@ -1,4 +1,4 @@ -# Ultralytics YOLO 🚀, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # Ultralytics Contributor License Agreement (CLA) action https://docs.ultralytics.com/help/CLA # This workflow automatically requests Pull Requests (PR) authors to sign the Ultralytics CLA before PRs can be merged diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 2897fd0b454d..77054fd0d7c5 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -1,3 +1,4 @@ +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # This action runs GitHub's industry-leading static analysis engine, CodeQL, against a repository's source code to find security vulnerabilities. 
# https://github.com/github/codeql-action diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 8b035e26c902..0decb4ce65b5 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # Builds ultralytics/yolov5:latest images on DockerHub https://hub.docker.com/r/ultralytics/yolov5 name: Publish Docker Images diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml index da66ffe5e503..ee176abee996 100644 --- a/.github/workflows/format.yml +++ b/.github/workflows/format.yml @@ -1,4 +1,4 @@ -# Ultralytics 🚀 - AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # Ultralytics Actions https://github.com/ultralytics/actions # This workflow automatically formats code and documentation in PRs to official Ultralytics standards diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index 3058d78b0a66..9a938b3fb008 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license name: Greetings diff --git a/.github/workflows/links.yml b/.github/workflows/links.yml index 3acae3ec2d4d..80e2a48be71f 100644 --- a/.github/workflows/links.yml +++ b/.github/workflows/links.yml @@ -1,4 +1,4 @@ -# Ultralytics YOLO 🚀, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # Continuous Integration (CI) GitHub Actions tests broken link checker using https://github.com/lycheeverse/lychee # Ignores the following status codes to reduce false positives: # - 403(OpenVINO, 'forbidden') diff --git a/.github/workflows/merge-main-into-prs.yml b/.github/workflows/merge-main-into-prs.yml index 9ed945c78978..e2c3dc379edd 100644 --- a/.github/workflows/merge-main-into-prs.yml +++ b/.github/workflows/merge-main-into-prs.yml @@ -1,4 +1,4 @@ -# Ultralytics YOLO 🚀, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # Automatically merges repository 'main' branch into all open PRs to keep them up-to-date # Action runs on updates to main branch so when one PR merges to main all others update diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 720ba10b08ce..374bc01ab0b7 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license name: Close stale issues on: diff --git a/benchmarks.py b/benchmarks.py index 100cabacdc97..4ca7122551e0 100644 --- a/benchmarks.py +++ b/benchmarks.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license """ Run YOLOv5 benchmarks on all supported export formats. diff --git a/classify/predict.py b/classify/predict.py index 3139d82e7b7d..4dc3735f3a71 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license """ Run YOLOv5 classification inference on images, videos, directories, globs, YouTube, webcam, streams, etc. diff --git a/classify/train.py b/classify/train.py index 5556e03edff5..79045e9fb87a 100644 --- a/classify/train.py +++ b/classify/train.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license """ Train a YOLOv5 classifier model on a classification dataset. 
diff --git a/classify/val.py b/classify/val.py index 427618791d65..23dbe7bfa420 100644 --- a/classify/val.py +++ b/classify/val.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license """ Validate a trained YOLOv5 classification model on a classification dataset. diff --git a/data/Argoverse.yaml b/data/Argoverse.yaml index 17b286cb7320..366552ea4f31 100644 --- a/data/Argoverse.yaml +++ b/data/Argoverse.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ by Argo AI # Example usage: python train.py --data Argoverse.yaml # parent diff --git a/data/GlobalWheat2020.yaml b/data/GlobalWheat2020.yaml index 36d79302fc87..2039ccd3cce8 100644 --- a/data/GlobalWheat2020.yaml +++ b/data/GlobalWheat2020.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # Global Wheat 2020 dataset http://www.global-wheat.com/ by University of Saskatchewan # Example usage: python train.py --data GlobalWheat2020.yaml # parent diff --git a/data/ImageNet.yaml b/data/ImageNet.yaml index fee72c796a1d..979a0e4de337 100644 --- a/data/ImageNet.yaml +++ b/data/ImageNet.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # ImageNet-1k dataset https://www.image-net.org/index.php by Stanford University # Simplified class names from https://github.com/anishathalye/imagenet-simple-labels # Example usage: python classify/train.py --data imagenet diff --git a/data/ImageNet10.yaml b/data/ImageNet10.yaml index a3f19342d6e8..2189def7d457 100644 --- a/data/ImageNet10.yaml +++ b/data/ImageNet10.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # ImageNet-1k dataset https://www.image-net.org/index.php by Stanford University # Simplified class names from https://github.com/anishathalye/imagenet-simple-labels # Example usage: python classify/train.py --data imagenet diff --git a/data/ImageNet100.yaml b/data/ImageNet100.yaml index 68c86ed76853..560cdecdbae9 100644 --- a/data/ImageNet100.yaml +++ b/data/ImageNet100.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # ImageNet-1k dataset https://www.image-net.org/index.php by Stanford University # Simplified class names from https://github.com/anishathalye/imagenet-simple-labels # Example usage: python classify/train.py --data imagenet diff --git a/data/ImageNet1000.yaml b/data/ImageNet1000.yaml index c5224786a9e7..aa17e9e059cf 100644 --- a/data/ImageNet1000.yaml +++ b/data/ImageNet1000.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # ImageNet-1k dataset https://www.image-net.org/index.php by Stanford University # Simplified class names from https://github.com/anishathalye/imagenet-simple-labels # Example usage: python classify/train.py --data imagenet diff --git a/data/Objects365.yaml b/data/Objects365.yaml index 2a4fef135f38..f1f0a1ae4891 100644 --- a/data/Objects365.yaml +++ b/data/Objects365.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # Objects365 dataset https://www.objects365.org/ by Megvii # Example usage: python train.py --data Objects365.yaml # parent diff --git a/data/SKU-110K.yaml b/data/SKU-110K.yaml index b072f671e482..b012bec3128b 100644 
--- a/data/SKU-110K.yaml +++ b/data/SKU-110K.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # SKU-110K retail items dataset https://github.com/eg4000/SKU110K_CVPR19 by Trax Retail # Example usage: python train.py --data SKU-110K.yaml # parent diff --git a/data/VOC.yaml b/data/VOC.yaml index 27fa80bccaed..6929f015dcbf 100644 --- a/data/VOC.yaml +++ b/data/VOC.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC by University of Oxford # Example usage: python train.py --data VOC.yaml # parent diff --git a/data/VisDrone.yaml b/data/VisDrone.yaml index 6b6e797226df..1dc4e3e0d054 100644 --- a/data/VisDrone.yaml +++ b/data/VisDrone.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # VisDrone2019-DET dataset https://github.com/VisDrone/VisDrone-Dataset by Tianjin University # Example usage: python train.py --data VisDrone.yaml # parent diff --git a/data/coco.yaml b/data/coco.yaml index 102b3c2d1b83..4bfd52a9d536 100644 --- a/data/coco.yaml +++ b/data/coco.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # COCO 2017 dataset http://cocodataset.org by Microsoft # Example usage: python train.py --data coco.yaml # parent diff --git a/data/coco128-seg.yaml b/data/coco128-seg.yaml index 9a40c28a4d67..a96ee8ff6696 100644 --- a/data/coco128-seg.yaml +++ b/data/coco128-seg.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # COCO128-seg dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics # Example usage: python train.py --data coco128.yaml # parent diff --git a/data/coco128.yaml b/data/coco128.yaml index 7e3e6c03feec..074903dd0ddf 100644 --- a/data/coco128.yaml +++ b/data/coco128.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics # Example usage: python train.py --data coco128.yaml # parent diff --git a/data/hyps/hyp.Objects365.yaml b/data/hyps/hyp.Objects365.yaml index c4b6e8051d7b..7a6c507c73bf 100644 --- a/data/hyps/hyp.Objects365.yaml +++ b/data/hyps/hyp.Objects365.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # Hyperparameters for Objects365 training # python train.py --weights yolov5m.pt --data Objects365.yaml --evolve # See Hyperparameter Evolution tutorial for details https://github.com/ultralytics/yolov5#tutorials diff --git a/data/hyps/hyp.VOC.yaml b/data/hyps/hyp.VOC.yaml index ce20dbbddbdb..c04c63e21ea0 100644 --- a/data/hyps/hyp.VOC.yaml +++ b/data/hyps/hyp.VOC.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # Hyperparameters for VOC training # python train.py --batch 128 --weights yolov5m6.pt --data VOC.yaml --epochs 50 --img 512 --hyp hyp.scratch-med.yaml --evolve # See Hyperparameter Evolution tutorial for details https://github.com/ultralytics/yolov5#tutorials diff --git a/data/hyps/hyp.no-augmentation.yaml b/data/hyps/hyp.no-augmentation.yaml index 74e10145e34a..adc360bb83f8 100644 --- a/data/hyps/hyp.no-augmentation.yaml +++ b/data/hyps/hyp.no-augmentation.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, 
AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # Hyperparameters when using Albumentations frameworks # python train.py --hyp hyp.no-augmentation.yaml # See https://github.com/ultralytics/yolov5/pull/3882 for YOLOv5 + Albumentations Usage examples diff --git a/data/hyps/hyp.scratch-high.yaml b/data/hyps/hyp.scratch-high.yaml index d9110f93f67b..3e913e36df16 100644 --- a/data/hyps/hyp.scratch-high.yaml +++ b/data/hyps/hyp.scratch-high.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # Hyperparameters for high-augmentation COCO training from scratch # python train.py --batch 32 --cfg yolov5m6.yaml --weights '' --data coco.yaml --img 1280 --epochs 300 # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials diff --git a/data/hyps/hyp.scratch-low.yaml b/data/hyps/hyp.scratch-low.yaml index cb29929bb0da..ff0d1e7ff1ac 100644 --- a/data/hyps/hyp.scratch-low.yaml +++ b/data/hyps/hyp.scratch-low.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # Hyperparameters for low-augmentation COCO training from scratch # python train.py --batch 64 --cfg yolov5n6.yaml --weights '' --data coco.yaml --img 640 --epochs 300 --linear # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials diff --git a/data/hyps/hyp.scratch-med.yaml b/data/hyps/hyp.scratch-med.yaml index 183e47bd0c03..c2fba1fc2b80 100644 --- a/data/hyps/hyp.scratch-med.yaml +++ b/data/hyps/hyp.scratch-med.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # Hyperparameters for medium-augmentation COCO training from scratch # python train.py --batch 32 --cfg yolov5m6.yaml --weights '' --data coco.yaml --img 1280 --epochs 300 # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials diff --git a/data/xView.yaml b/data/xView.yaml index e215868efb6e..407159831e7c 100644 --- a/data/xView.yaml +++ b/data/xView.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # DIUx xView 2018 Challenge https://challenge.xviewdataset.org by U.S. National Geospatial-Intelligence Agency (NGA) # -------- DOWNLOAD DATA MANUALLY and jar xf val_images.zip to 'datasets/xView' before running train command! -------- # Example usage: python train.py --data xView.yaml diff --git a/detect.py b/detect.py index c58aa80a68fc..8d1cc56d998a 100644 --- a/detect.py +++ b/detect.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license """ Run YOLOv5 detection inference on images, videos, directories, globs, YouTube, webcam, streams, etc. diff --git a/export.py b/export.py index 214d903c2998..32f6d303acce 100644 --- a/export.py +++ b/export.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license """ Export a YOLOv5 PyTorch model to other formats. 
TensorFlow exports authored by https://github.com/zldrobit diff --git a/hubconf.py b/hubconf.py index 53afdff62aea..4b0c36b8daed 100644 --- a/hubconf.py +++ b/hubconf.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license """ PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5 diff --git a/models/common.py b/models/common.py index 12244fd4b3cf..781f999445db 100644 --- a/models/common.py +++ b/models/common.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license """Common modules.""" import ast diff --git a/models/experimental.py b/models/experimental.py index 6152cef1b389..ab9b0ed23dc3 100644 --- a/models/experimental.py +++ b/models/experimental.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license """Experimental modules.""" import math diff --git a/models/hub/anchors.yaml b/models/hub/anchors.yaml index eb77a25b91b9..c8089311b267 100644 --- a/models/hub/anchors.yaml +++ b/models/hub/anchors.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # Default anchors for COCO data # P5 ------------------------------------------------------------------------------------------------------------------- diff --git a/models/hub/yolov3-spp.yaml b/models/hub/yolov3-spp.yaml index 31d3df8d087c..0e073667bf70 100644 --- a/models/hub/yolov3-spp.yaml +++ b/models/hub/yolov3-spp.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov3-tiny.yaml b/models/hub/yolov3-tiny.yaml index ba06f22f8788..0a74fff715f8 100644 --- a/models/hub/yolov3-tiny.yaml +++ b/models/hub/yolov3-tiny.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov3.yaml b/models/hub/yolov3.yaml index cc00d7a5098b..ce4a980c8200 100644 --- a/models/hub/yolov3.yaml +++ b/models/hub/yolov3.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5-bifpn.yaml b/models/hub/yolov5-bifpn.yaml index 948f4a746b83..bf05e434c6a2 100644 --- a/models/hub/yolov5-bifpn.yaml +++ b/models/hub/yolov5-bifpn.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5-fpn.yaml b/models/hub/yolov5-fpn.yaml index 9882a1bfae90..dcfdd14a7d2c 100644 --- a/models/hub/yolov5-fpn.yaml +++ b/models/hub/yolov5-fpn.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5-p2.yaml b/models/hub/yolov5-p2.yaml index 6f84c44ca448..2626e734835e 100644 --- a/models/hub/yolov5-p2.yaml +++ b/models/hub/yolov5-p2.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5-p34.yaml b/models/hub/yolov5-p34.yaml index 745ac9c97599..fba35ec10a1e 100644 --- a/models/hub/yolov5-p34.yaml +++ b/models/hub/yolov5-p34.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # Parameters nc: 80 # number of classes diff 
--git a/models/hub/yolov5-p6.yaml b/models/hub/yolov5-p6.yaml index a6d2107819da..c997df2db505 100644 --- a/models/hub/yolov5-p6.yaml +++ b/models/hub/yolov5-p6.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5-p7.yaml b/models/hub/yolov5-p7.yaml index d7a0bd33c278..14e6ce05d791 100644 --- a/models/hub/yolov5-p7.yaml +++ b/models/hub/yolov5-p7.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5-panet.yaml b/models/hub/yolov5-panet.yaml index 85ad66d6b18b..f0857f92d53a 100644 --- a/models/hub/yolov5-panet.yaml +++ b/models/hub/yolov5-panet.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5l6.yaml b/models/hub/yolov5l6.yaml index 5bc5b8c43812..05501a9d134b 100644 --- a/models/hub/yolov5l6.yaml +++ b/models/hub/yolov5l6.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5m6.yaml b/models/hub/yolov5m6.yaml index a8e4580b095d..1512e2b6e08d 100644 --- a/models/hub/yolov5m6.yaml +++ b/models/hub/yolov5m6.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5n6.yaml b/models/hub/yolov5n6.yaml index b54031053835..11350413ecf8 100644 --- a/models/hub/yolov5n6.yaml +++ b/models/hub/yolov5n6.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5s-LeakyReLU.yaml b/models/hub/yolov5s-LeakyReLU.yaml index 7219c24380b4..6e9d4a8820e2 100644 --- a/models/hub/yolov5s-LeakyReLU.yaml +++ b/models/hub/yolov5s-LeakyReLU.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5s-ghost.yaml b/models/hub/yolov5s-ghost.yaml index 866d87e4126e..cc4336948760 100644 --- a/models/hub/yolov5s-ghost.yaml +++ b/models/hub/yolov5s-ghost.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5s-transformer.yaml b/models/hub/yolov5s-transformer.yaml index 6bf0fe057c44..1b2d62c5a3fe 100644 --- a/models/hub/yolov5s-transformer.yaml +++ b/models/hub/yolov5s-transformer.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5s6.yaml b/models/hub/yolov5s6.yaml index 0772d9827b48..2a4c1162575b 100644 --- a/models/hub/yolov5s6.yaml +++ b/models/hub/yolov5s6.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5x6.yaml b/models/hub/yolov5x6.yaml index c5a850ed3159..0c8f29e600c3 100644 --- a/models/hub/yolov5x6.yaml +++ b/models/hub/yolov5x6.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git 
a/models/segment/yolov5l-seg.yaml b/models/segment/yolov5l-seg.yaml index 77fa4977d1d8..de430f4fbdf3 100644 --- a/models/segment/yolov5l-seg.yaml +++ b/models/segment/yolov5l-seg.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/segment/yolov5m-seg.yaml b/models/segment/yolov5m-seg.yaml index bb2b266a0412..288577778252 100644 --- a/models/segment/yolov5m-seg.yaml +++ b/models/segment/yolov5m-seg.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/segment/yolov5n-seg.yaml b/models/segment/yolov5n-seg.yaml index fac31af2ff43..faf5228fd3ef 100644 --- a/models/segment/yolov5n-seg.yaml +++ b/models/segment/yolov5n-seg.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/segment/yolov5s-seg.yaml b/models/segment/yolov5s-seg.yaml index 10d876aaeffe..a199f1d82388 100644 --- a/models/segment/yolov5s-seg.yaml +++ b/models/segment/yolov5s-seg.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/segment/yolov5x-seg.yaml b/models/segment/yolov5x-seg.yaml index 9f04e4492789..75f426386e3a 100644 --- a/models/segment/yolov5x-seg.yaml +++ b/models/segment/yolov5x-seg.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/tf.py b/models/tf.py index 2a5cd566c406..c65938c4b31b 100644 --- a/models/tf.py +++ b/models/tf.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license """ TensorFlow, Keras and TFLite versions of YOLOv5 Authored by https://github.com/zldrobit in PR https://github.com/ultralytics/yolov5/pull/1127 diff --git a/models/yolo.py b/models/yolo.py index ca62f934fc3f..5390db6a5ec0 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license """ YOLO-specific modules. 
diff --git a/models/yolov5l.yaml b/models/yolov5l.yaml index f22eab9575a9..7cac7ead20aa 100644 --- a/models/yolov5l.yaml +++ b/models/yolov5l.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/yolov5m.yaml b/models/yolov5m.yaml index 0a3f484c77eb..820e6070499c 100644 --- a/models/yolov5m.yaml +++ b/models/yolov5m.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/yolov5n.yaml b/models/yolov5n.yaml index 6f08a0b3e921..d3b84ace2b76 100644 --- a/models/yolov5n.yaml +++ b/models/yolov5n.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/yolov5s.yaml b/models/yolov5s.yaml index 5cd3c6c22870..090cb67c2bec 100644 --- a/models/yolov5s.yaml +++ b/models/yolov5s.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/models/yolov5x.yaml b/models/yolov5x.yaml index 26a5ba7253dc..8c1a6be1b7a8 100644 --- a/models/yolov5x.yaml +++ b/models/yolov5x.yaml @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # Parameters nc: 80 # number of classes diff --git a/pyproject.toml b/pyproject.toml index 11d151f4293f..8905a28fc702 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,4 +1,4 @@ -# Ultralyticsv5 YOLO 🚀, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # Overview: # This pyproject.toml file manages the build, packaging, and distribution of the Ultralytics library. diff --git a/segment/predict.py b/segment/predict.py index bea9bfe2f21c..109a68415b0d 100644 --- a/segment/predict.py +++ b/segment/predict.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license """ Run YOLOv5 segmentation inference on images, videos, directories, streams, etc. diff --git a/segment/train.py b/segment/train.py index a0d0c5119639..2b1dca115d9a 100644 --- a/segment/train.py +++ b/segment/train.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license """ Train a YOLOv5 segment model on a segment dataset Models and datasets download automatically from the latest YOLOv5 release. diff --git a/segment/val.py b/segment/val.py index bafdb5dcec07..b0a941faa151 100644 --- a/segment/val.py +++ b/segment/val.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license """ Validate a trained YOLOv5 segment model on a segment dataset. diff --git a/train.py b/train.py index df0972a67c70..a870262a9eed 100644 --- a/train.py +++ b/train.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license """ Train a YOLOv5 model on a custom dataset. Models and datasets download automatically from the latest YOLOv5 release. 
diff --git a/utils/__init__.py b/utils/__init__.py index 0b7e1fdfc31a..91fc7694676f 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license """utils/initialization.""" import contextlib diff --git a/utils/activations.py b/utils/activations.py index 928ae55a0b60..47f0a998024e 100644 --- a/utils/activations.py +++ b/utils/activations.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license """Activation functions.""" import torch diff --git a/utils/augmentations.py b/utils/augmentations.py index 3556bffeda18..1840d47d46c9 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license """Image augmentation functions.""" import math diff --git a/utils/autoanchor.py b/utils/autoanchor.py index 62c39811657b..79b79db0fc12 100644 --- a/utils/autoanchor.py +++ b/utils/autoanchor.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license """AutoAnchor utils.""" import random diff --git a/utils/autobatch.py b/utils/autobatch.py index 52a71f62c47c..08a0de841a98 100644 --- a/utils/autobatch.py +++ b/utils/autobatch.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license """Auto-batch utils.""" from copy import deepcopy diff --git a/utils/aws/resume.py b/utils/aws/resume.py index 4525ba96749a..ea432a1615ca 100644 --- a/utils/aws/resume.py +++ b/utils/aws/resume.py @@ -1,3 +1,4 @@ +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # Resume all interrupted trainings in yolov5/ dir including DDP trainings # Usage: $ python utils/aws/resume.py diff --git a/utils/callbacks.py b/utils/callbacks.py index 3275789fa12e..0a0bcbdb2b96 100644 --- a/utils/callbacks.py +++ b/utils/callbacks.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license """Callback utils.""" import threading diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 7ca451aa0929..dacb0e0b33d7 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license """Dataloaders and dataset utils.""" import contextlib diff --git a/utils/downloads.py b/utils/downloads.py index 071e1b077bf6..a7b599efad20 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license """Download utils.""" import logging diff --git a/utils/flask_rest_api/example_request.py b/utils/flask_rest_api/example_request.py index 7b850051cca0..104249002aa3 100644 --- a/utils/flask_rest_api/example_request.py +++ b/utils/flask_rest_api/example_request.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license """Perform test request.""" import pprint diff --git a/utils/flask_rest_api/restapi.py b/utils/flask_rest_api/restapi.py index b9bd16f1a63e..7e03d3a6679a 100644 --- a/utils/flask_rest_api/restapi.py +++ b/utils/flask_rest_api/restapi.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license """Run a Flask REST API exposing one or more YOLOv5s models.""" import argparse diff --git a/utils/general.py b/utils/general.py index 64ae50626e14..bce9d6d7455b 100644 --- a/utils/general.py +++ 
b/utils/general.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license """General utils.""" import contextlib diff --git a/utils/google_app_engine/app.yaml b/utils/google_app_engine/app.yaml index 5056b7c1186d..4c1751f55429 100644 --- a/utils/google_app_engine/app.yaml +++ b/utils/google_app_engine/app.yaml @@ -1,3 +1,5 @@ +# Ultralytics YOLOv5 🚀, AGPL-3.0 license + runtime: custom env: flex diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 011ec7c8915b..a7575a049bc1 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license """Logging utils.""" import json diff --git a/utils/loggers/clearml/clearml_utils.py b/utils/loggers/clearml/clearml_utils.py index 6a6ed7636c88..f1b56650461e 100644 --- a/utils/loggers/clearml/clearml_utils.py +++ b/utils/loggers/clearml/clearml_utils.py @@ -1,3 +1,4 @@ +# Ultralytics YOLOv5 🚀, AGPL-3.0 license """Main Logger class for ClearML experiment tracking.""" import glob diff --git a/utils/loggers/clearml/hpo.py b/utils/loggers/clearml/hpo.py index aa1b7ab9494d..5a9be757aac0 100644 --- a/utils/loggers/clearml/hpo.py +++ b/utils/loggers/clearml/hpo.py @@ -1,3 +1,5 @@ +# Ultralytics YOLOv5 🚀, AGPL-3.0 license + from clearml import Task # Connecting ClearML with the current process, diff --git a/utils/loggers/comet/__init__.py b/utils/loggers/comet/__init__.py index 076eb3ccecab..3a91c49258a8 100644 --- a/utils/loggers/comet/__init__.py +++ b/utils/loggers/comet/__init__.py @@ -1,3 +1,5 @@ +# Ultralytics YOLOv5 🚀, AGPL-3.0 license + import glob import json import logging diff --git a/utils/loggers/comet/comet_utils.py b/utils/loggers/comet/comet_utils.py index 7eca1f504d69..cf936ab48453 100644 --- a/utils/loggers/comet/comet_utils.py +++ b/utils/loggers/comet/comet_utils.py @@ -1,3 +1,5 @@ +# Ultralytics YOLOv5 🚀, AGPL-3.0 license + import logging import os from urllib.parse import urlparse diff --git a/utils/loggers/comet/hpo.py b/utils/loggers/comet/hpo.py index 8ca08ddc858a..c225ebbd0484 100644 --- a/utils/loggers/comet/hpo.py +++ b/utils/loggers/comet/hpo.py @@ -1,3 +1,5 @@ +# Ultralytics YOLOv5 🚀, AGPL-3.0 license + import argparse import json import logging diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index 4083312e6a59..930f2c7543af 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license # WARNING ⚠️ wandb is deprecated and will be removed in future release. 
# See supported integrations at https://github.com/ultralytics/yolov5#integrations diff --git a/utils/loss.py b/utils/loss.py index 9d09f9df0261..e8f148e77c74 100644 --- a/utils/loss.py +++ b/utils/loss.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license """Loss functions.""" import torch diff --git a/utils/metrics.py b/utils/metrics.py index e572355fec1e..385fdc471748 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license """Model validation metrics.""" import math diff --git a/utils/plots.py b/utils/plots.py index cb5edabc6c41..062658cda979 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license """Plotting utils.""" import contextlib diff --git a/utils/segment/augmentations.py b/utils/segment/augmentations.py index e13a53d34821..5773b56f4d7f 100644 --- a/utils/segment/augmentations.py +++ b/utils/segment/augmentations.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license """Image augmentation functions.""" import math diff --git a/utils/segment/dataloaders.py b/utils/segment/dataloaders.py index 9d2e9bef0b09..d25d98148602 100644 --- a/utils/segment/dataloaders.py +++ b/utils/segment/dataloaders.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license """Dataloaders.""" import os diff --git a/utils/segment/general.py b/utils/segment/general.py index f292496c0da9..2f65d60238dd 100644 --- a/utils/segment/general.py +++ b/utils/segment/general.py @@ -1,3 +1,5 @@ +# Ultralytics YOLOv5 🚀, AGPL-3.0 license + import cv2 import numpy as np import torch diff --git a/utils/segment/loss.py b/utils/segment/loss.py index fa0c10939b70..d4bc9d3aed54 100644 --- a/utils/segment/loss.py +++ b/utils/segment/loss.py @@ -1,3 +1,5 @@ +# Ultralytics YOLOv5 🚀, AGPL-3.0 license + import torch import torch.nn as nn import torch.nn.functional as F diff --git a/utils/segment/metrics.py b/utils/segment/metrics.py index 973b398eb6b9..7bdf3258abb7 100644 --- a/utils/segment/metrics.py +++ b/utils/segment/metrics.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license """Model validation metrics.""" import numpy as np diff --git a/utils/segment/plots.py b/utils/segment/plots.py index ce01988be937..f5b81711cc93 100644 --- a/utils/segment/plots.py +++ b/utils/segment/plots.py @@ -1,3 +1,5 @@ +# Ultralytics YOLOv5 🚀, AGPL-3.0 license + import contextlib import math from pathlib import Path diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 28893c71deef..0b006d80562b 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license """PyTorch utils.""" import math diff --git a/utils/triton.py b/utils/triton.py index 87524c9c7801..3d529ec88a07 100644 --- a/utils/triton.py +++ b/utils/triton.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license """Utils to interact with the Triton Inference Server.""" import typing diff --git a/val.py b/val.py index 1c8c65ba89aa..221226b4a45a 100644 --- a/val.py +++ b/val.py @@ -1,4 +1,4 @@ -# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license +# Ultralytics YOLOv5 🚀, AGPL-3.0 license """ Validate a trained YOLOv5 detection model on a detection 
dataset. From 3742ab493dbf4573c930a46aefc537b9127b4370 Mon Sep 17 00:00:00 2001 From: Paula Derrenger <107626595+pderrenger@users.noreply.github.com> Date: Sun, 9 Jun 2024 04:21:38 +0200 Subject: [PATCH 1899/1976] Code Refactor for Speed and Readability (#13074) Refactor code for speed and clarity Co-authored-by: Glenn Jocher --- utils/general.py | 4 +++- utils/loggers/clearml/clearml_utils.py | 9 +++++---- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/utils/general.py b/utils/general.py index bce9d6d7455b..ed38cc0d60cf 100644 --- a/utils/general.py +++ b/utils/general.py @@ -616,10 +616,12 @@ def yaml_load(file="data.yaml"): return yaml.safe_load(f) -def yaml_save(file="data.yaml", data={}): +def yaml_save(file="data.yaml", data=None): """Safely saves `data` to a YAML file specified by `file`, converting `Path` objects to strings; `data` is a dictionary. """ + if data is None: + data = {} with open(file, "w") as f: yaml.safe_dump({k: str(v) if isinstance(v, Path) else v for k, v in data.items()}, f, sort_keys=False) diff --git a/utils/loggers/clearml/clearml_utils.py b/utils/loggers/clearml/clearml_utils.py index f1b56650461e..2b5351ef8533 100644 --- a/utils/loggers/clearml/clearml_utils.py +++ b/utils/loggers/clearml/clearml_utils.py @@ -47,10 +47,11 @@ def construct_dataset(clearml_info_string): {"train", "test", "val", "nc", "names"} ), "The right keys were not found in the yaml file, make sure it at least has the following keys: ('train', 'test', 'val', 'nc', 'names')" - data_dict = {} - data_dict["train"] = ( - str((dataset_root_path / dataset_definition["train"]).resolve()) if dataset_definition["train"] else None - ) + data_dict = { + "train": ( + str((dataset_root_path / dataset_definition["train"]).resolve()) if dataset_definition["train"] else None + ) + } data_dict["test"] = ( str((dataset_root_path / dataset_definition["test"]).resolve()) if dataset_definition["test"] else None ) From c5807f7776d9058609fa74fd404b5b2a5523bebd Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 16 Jun 2024 22:08:42 +0200 Subject: [PATCH 1900/1976] Update ci-testing.yml (#13094) * Update ci-testing.yml Signed-off-by: Glenn Jocher * Auto-format by https://ultralytics.com/actions * Update requirements.txt Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher Co-authored-by: UltralyticsAssistant --- .github/workflows/ci-testing.yml | 1 + requirements.txt | 2 +- utils/segment/dataloaders.py | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 63eab050fc28..639d0449c281 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -10,6 +10,7 @@ on: branches: [master] schedule: - cron: "0 0 * * *" # runs at 00:00 UTC every day + workflow_dispatch: jobs: Benchmarks: diff --git a/requirements.txt b/requirements.txt index 7248b72086b7..7b7504d99c2c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -15,7 +15,7 @@ thop>=0.1.1 # FLOPs computation torch>=1.8.0 # see https://pytorch.org/get-started/locally (recommended) torchvision>=0.9.0 tqdm>=4.64.0 -ultralytics>=8.0.232 +ultralytics>=8.2.32 # https://ultralytics.com # protobuf<=3.20.1 # https://github.com/ultralytics/yolov5/issues/8012 # Logging --------------------------------------------------------------------- diff --git a/utils/segment/dataloaders.py b/utils/segment/dataloaders.py index d25d98148602..0804818deca7 100644 --- a/utils/segment/dataloaders.py +++ b/utils/segment/dataloaders.py @@ -7,7 +7,7 
@@ import cv2 import numpy as np import torch -from torch.utils.data import DataLoader, distributed +from torch.utils.data import DataLoader from ..augmentations import augment_hsv, copy_paste, letterbox from ..dataloaders import InfiniteDataLoader, LoadImagesAndLabels, SmartDistributedSampler, seed_worker From dfdfbcf3d7d977a4e71075f65868d9f694434692 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 16 Jun 2024 22:39:28 +0200 Subject: [PATCH 1901/1976] Code Refactor `ruff check --fix --extend-select I` (#13093) * Refactor code for speed and clarity * Auto-format by https://ultralytics.com/actions * Update train.py Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher Co-authored-by: UltralyticsAssistant Co-authored-by: Ultralytics Assistant <135830346+UltralyticsAssistant@users.noreply.github.com> --- .github/workflows/merge-main-into-prs.yml | 78 +++++++++++------------ classify/train.py | 4 +- models/yolo.py | 5 +- segment/train.py | 5 +- train.py | 5 +- utils/loggers/__init__.py | 5 +- utils/loggers/clearml/README.md | 12 ---- 7 files changed, 58 insertions(+), 56 deletions(-) diff --git a/.github/workflows/merge-main-into-prs.yml b/.github/workflows/merge-main-into-prs.yml index e2c3dc379edd..2cd4b028c8b5 100644 --- a/.github/workflows/merge-main-into-prs.yml +++ b/.github/workflows/merge-main-into-prs.yml @@ -10,47 +10,47 @@ on: branches: - main - master - + jobs: Merge: if: github.repository == 'ultralytics/yolov5' runs-on: ubuntu-latest steps: - - name: Checkout repository - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - uses: actions/setup-python@v5 - with: - python-version: "3.11" - cache: "pip" # caching pip dependencies - - name: Install requirements - run: | - pip install pygithub - - name: Merge main into PRs - shell: python - run: | - from github import Github - import os - - # Authenticate with the GitHub Token - g = Github(os.getenv('GITHUB_TOKEN')) - - # Get the repository dynamically - repo = g.get_repo(os.getenv('GITHUB_REPOSITORY')) - - # List all open pull requests - open_pulls = repo.get_pulls(state='open', sort='created') - - for pr in open_pulls: - # Compare PR head with main to see if it's behind - try: - # Merge main into the PR branch - success = pr.update_branch() - assert success, "Branch update failed" - print(f"Merged 'master' into PR #{pr.number} ({pr.head.ref}) successfully.") - except Exception as e: - print(f"Could not merge 'master' into PR #{pr.number} ({pr.head.ref}): {e}") - env: - GITHUB_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }} - GITHUB_REPOSITORY: ${{ github.repository }} + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + - uses: actions/setup-python@v5 + with: + python-version: "3.11" + cache: "pip" # caching pip dependencies + - name: Install requirements + run: | + pip install pygithub + - name: Merge main into PRs + shell: python + run: | + from github import Github + import os + + # Authenticate with the GitHub Token + g = Github(os.getenv('GITHUB_TOKEN')) + + # Get the repository dynamically + repo = g.get_repo(os.getenv('GITHUB_REPOSITORY')) + + # List all open pull requests + open_pulls = repo.get_pulls(state='open', sort='created') + + for pr in open_pulls: + # Compare PR head with main to see if it's behind + try: + # Merge main into the PR branch + success = pr.update_branch() + assert success, "Branch update failed" + print(f"Merged 'master' into PR #{pr.number} ({pr.head.ref}) successfully.") + except Exception as e: + 
print(f"Could not merge 'master' into PR #{pr.number} ({pr.head.ref}): {e}") + env: + GITHUB_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }} + GITHUB_REPOSITORY: ${{ github.repository }} diff --git a/classify/train.py b/classify/train.py index 79045e9fb87a..6f9eda40be62 100644 --- a/classify/train.py +++ b/classify/train.py @@ -178,7 +178,9 @@ def train(opt, device): # Scheduler lrf = 0.01 # final lr (fraction of lr0) # lf = lambda x: ((1 + math.cos(x * math.pi / epochs)) / 2) * (1 - lrf) + lrf # cosine - lf = lambda x: (1 - x / epochs) * (1 - lrf) + lrf # linear + def lf(x): + return (1 - x / epochs) * (1 - lrf) + lrf # linear + scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) # scheduler = lr_scheduler.OneCycleLR(optimizer, max_lr=lr0, total_steps=epochs, pct_start=0.1, # final_div_factor=1 / 25 / lrf) diff --git a/models/yolo.py b/models/yolo.py index 5390db6a5ec0..ebd0e830296f 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -244,7 +244,10 @@ def __init__(self, cfg="yolov5s.yaml", ch=3, nc=None, anchors=None): if isinstance(m, (Detect, Segment)): s = 256 # 2x min stride m.inplace = self.inplace - forward = lambda x: self.forward(x)[0] if isinstance(m, Segment) else self.forward(x) + + def forward(x): + return self.forward(x)[0] if isinstance(m, Segment) else self.forward(x) + m.stride = torch.tensor([s / x.shape[-2] for x in forward(torch.zeros(1, ch, s, s))]) # forward check_anchor_order(m) m.anchors /= m.stride.view(-1, 1, 1) diff --git a/segment/train.py b/segment/train.py index 2b1dca115d9a..ffd1746ade0f 100644 --- a/segment/train.py +++ b/segment/train.py @@ -214,7 +214,10 @@ def train(hyp, opt, device, callbacks): if opt.cos_lr: lf = one_cycle(1, hyp["lrf"], epochs) # cosine 1->hyp['lrf'] else: - lf = lambda x: (1 - x / epochs) * (1.0 - hyp["lrf"]) + hyp["lrf"] # linear + + def lf(x): + return (1 - x / epochs) * (1.0 - hyp["lrf"]) + hyp["lrf"] # linear + scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) # plot_lr_scheduler(optimizer, scheduler, epochs) # EMA diff --git a/train.py b/train.py index a870262a9eed..44cbd1ac0622 100644 --- a/train.py +++ b/train.py @@ -224,7 +224,10 @@ def train(hyp, opt, device, callbacks): if opt.cos_lr: lf = one_cycle(1, hyp["lrf"], epochs) # cosine 1->hyp['lrf'] else: - lf = lambda x: (1 - x / epochs) * (1.0 - hyp["lrf"]) + hyp["lrf"] # linear + + def lf(x): + return (1 - x / epochs) * (1.0 - hyp["lrf"]) + hyp["lrf"] # linear + scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) # plot_lr_scheduler(optimizer, scheduler, epochs) # EMA diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index a7575a049bc1..1c2f4ccfb99c 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -21,7 +21,10 @@ try: from torch.utils.tensorboard import SummaryWriter except ImportError: - SummaryWriter = lambda *args: None # None = SummaryWriter(str) + + def SummaryWriter(*args): + return None # None = SummaryWriter(str) + try: import wandb diff --git a/utils/loggers/clearml/README.md b/utils/loggers/clearml/README.md index bc40919ab0ea..2810c92a6a88 100644 --- a/utils/loggers/clearml/README.md +++ b/utils/loggers/clearml/README.md @@ -16,16 +16,10 @@ 🔭 Turn your newly trained YOLOv5 model into an API with just a few commands using ClearML Serving -
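For illustration, the scheduler change in the classify/train.py, segment/train.py and train.py hunks above replaces an inline lambda with a named `lf` function. A minimal standalone sketch of how that linear schedule drives `torch.optim.lr_scheduler.LambdaLR`; the model, optimizer and values below are illustrative placeholders, not taken from this patch:

```python
import torch
from torch.optim.lr_scheduler import LambdaLR

epochs, lrf = 300, 0.01  # total epochs and final LR fraction, illustrative values
model = torch.nn.Linear(10, 1)  # placeholder model
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)


def lf(x):
    """Decays the LR multiplier linearly from 1.0 at epoch 0 to lrf at the last epoch."""
    return (1 - x / epochs) * (1.0 - lrf) + lrf


scheduler = LambdaLR(optimizer, lr_lambda=lf)
for epoch in range(3):  # training-loop placeholder
    optimizer.step()
    scheduler.step()  # multiplier runs from lf(0)=1.0 down to lf(epochs)=lrf
```

A named function behaves identically to the lambda here, but it can carry a docstring and, unlike a lambda, pickles cleanly when a scheduler is checkpointed.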
And so much more. It's up to you how many of these tools you want to use, you can stick to the experiment manager, or chain them all together into an impressive pipeline! -
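Related, the utils/loggers/__init__.py hunk earlier in this patch swaps a fallback lambda for a stub function when TensorBoard is absent. A hedged, standalone sketch of that optional-dependency pattern:

```python
try:
    from torch.utils.tensorboard import SummaryWriter
except ImportError:

    def SummaryWriter(*args):
        """Stands in for SummaryWriter so call sites keep working without TensorBoard installed."""
        return None


writer = SummaryWriter("runs/exp")  # a real writer, or None when TensorBoard is missing
```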
-
![ClearML scalars dashboard](https://github.com/thepycoder/clearml_screenshots/raw/main/experiment_manager_with_compare.gif) -
-
- ## 🦾 Setting Things Up To keep track of your experiments and/or data, ClearML needs to communicate to a server. You have 2 options to get one: @@ -46,8 +40,6 @@ Either sign up for free to the [ClearML Hosted Service](https://cutt.ly/yolov5-t That's it! You're done 😎 -
- ## 🚀 Training YOLOv5 With ClearML To enable ClearML experiment tracking, simply install the ClearML pip package. @@ -89,8 +81,6 @@ That's a lot right? 🤯 Now, we can visualize all of this information in the Cl There's even more we can do with all of this information, like hyperparameter optimization and remote execution, so keep reading if you want to see how that works! -
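For readers wondering what "enable ClearML experiment tracking" amounts to in code, a minimal sketch assuming the clearml SDK's `Task` API; the project and task names below are hypothetical placeholders:

```python
from clearml import Task

task = Task.init(project_name="YOLOv5", task_name="demo-run")  # hypothetical names
task.connect({"epochs": 3, "img_size": 640})  # logged hyperparameters appear in the ClearML UI
```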
- ## 🔗 Dataset Version Management Versioning your data separately from your code is generally a good idea and makes it easy to acquire the latest version too. This repository supports supplying a dataset version ID, and it will make sure to get the data if it's not there yet. Next to that, this workflow also saves the used dataset ID as part of the task parameters, so you will always know for sure which data was used in which experiment! @@ -157,8 +147,6 @@ Now that you have a ClearML dataset, you can very simply use it to train custom python train.py --img 640 --batch 16 --epochs 3 --data clearml:// --weights yolov5s.pt --cache ``` -
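The dataset ID after `clearml://` in the command above is elided in this patch and should stay that way; for illustration only, a hedged sketch of resolving the same dataset from Python with the clearml SDK's `Dataset` API (the ID below is a hypothetical placeholder):

```python
from clearml import Dataset

dataset = Dataset.get(dataset_id="YOUR_DATASET_ID")  # hypothetical placeholder ID
local_path = dataset.get_local_copy()  # downloads, or reuses, a cached local copy
print(local_path)
```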
- ## 👀 Hyperparameter Optimization Now that we have our experiments and data versioned, it's time to take a look at what we can build on top! From bb27bf7868468058af4c2dccb62787d1fac8c470 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 17 Jun 2024 02:05:36 +0200 Subject: [PATCH 1902/1976] Update requirements.txt to `ultralytics>=8.2.34` (#13095) Update requirements.txt Signed-off-by: Glenn Jocher --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 7b7504d99c2c..e496ad8206b6 100644 --- a/requirements.txt +++ b/requirements.txt @@ -15,7 +15,7 @@ thop>=0.1.1 # FLOPs computation torch>=1.8.0 # see https://pytorch.org/get-started/locally (recommended) torchvision>=0.9.0 tqdm>=4.64.0 -ultralytics>=8.2.32 # https://ultralytics.com +ultralytics>=8.2.34 # https://ultralytics.com # protobuf<=3.20.1 # https://github.com/ultralytics/yolov5/issues/8012 # Logging --------------------------------------------------------------------- From cf8933cacab6c74aa083526c1a27bdbff8e1ba33 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 19 Jun 2024 02:26:10 +0200 Subject: [PATCH 1903/1976] [Snyk] Security upgrade urllib3 from 2.0.7 to 2.2.2 (#13104) * fix: requirements.txt to reduce vulnerabilities The following vulnerabilities are fixed by pinning transitive dependencies: - https://snyk.io/vuln/SNYK-PYTHON-URLLIB3-7267250 * Update requirements.txt Signed-off-by: Glenn Jocher * Auto-format by https://ultralytics.com/actions --------- Signed-off-by: Glenn Jocher Co-authored-by: snyk-bot Co-authored-by: UltralyticsAssistant --- classify/train.py | 1 + requirements.txt | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/classify/train.py b/classify/train.py index 6f9eda40be62..5ae6980716a4 100644 --- a/classify/train.py +++ b/classify/train.py @@ -177,6 +177,7 @@ def train(opt, device): # Scheduler lrf = 0.01 # final lr (fraction of lr0) + # lf = lambda x: ((1 + math.cos(x * math.pi / epochs)) / 2) * (1 - lrf) + lrf # cosine def lf(x): return (1 - x / epochs) * (1 - lrf) + lrf # linear diff --git a/requirements.txt b/requirements.txt index e496ad8206b6..3b2674596258 100644 --- a/requirements.txt +++ b/requirements.txt @@ -47,4 +47,3 @@ setuptools>=65.5.1 # Snyk vulnerability fix # mss # screenshots # albumentations>=1.0.3 # pycocotools>=2.0.6 # COCO mAP -wheel>=0.38.0 # not directly required, pinned by Snyk to avoid a vulnerability From ec331cbda7434f6c04c1c1c310dcb3948dcc5d3e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 19 Jun 2024 02:29:08 +0200 Subject: [PATCH 1904/1976] Add missing Python function docstrings (#13105) * Add missing Python function docstrings * Auto-format by https://ultralytics.com/actions --------- Co-authored-by: UltralyticsAssistant Co-authored-by: Ultralytics Assistant <135830346+UltralyticsAssistant@users.noreply.github.com> --- models/yolo.py | 9 +++++---- utils/loggers/__init__.py | 1 + 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/models/yolo.py b/models/yolo.py index ebd0e830296f..d89c5da018de 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -242,13 +242,14 @@ def __init__(self, cfg="yolov5s.yaml", ch=3, nc=None, anchors=None): # Build strides, anchors m = self.model[-1] # Detect() if isinstance(m, (Detect, Segment)): - s = 256 # 2x min stride - m.inplace = self.inplace - def forward(x): + def _forward(x): + """Passes the input 'x' through the model and returns the processed output.""" return self.forward(x)[0] if isinstance(m, Segment) else self.forward(x) - 
m.stride = torch.tensor([s / x.shape[-2] for x in forward(torch.zeros(1, ch, s, s))]) # forward + s = 256 # 2x min stride + m.inplace = self.inplace + m.stride = torch.tensor([s / x.shape[-2] for x in _forward(torch.zeros(1, ch, s, s))]) # forward check_anchor_order(m) m.anchors /= m.stride.view(-1, 1, 1) self.stride = m.stride diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 1c2f4ccfb99c..2bd8583d2ade 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -23,6 +23,7 @@ except ImportError: def SummaryWriter(*args): + """Fall back to SummaryWriter returning None if TensorBoard is not installed.""" return None # None = SummaryWriter(str) From 098ce03f62d3488b46bcfad1682d6b6c0e82f6a8 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 20 Jun 2024 14:45:02 +0200 Subject: [PATCH 1905/1976] Update CLA Action (#13112) Signed-off-by: Glenn Jocher --- .github/workflows/cla.yml | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/.github/workflows/cla.yml b/.github/workflows/cla.yml index b012ae4abf89..004acaf6461a 100644 --- a/.github/workflows/cla.yml +++ b/.github/workflows/cla.yml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics YOLO 🚀, AGPL-3.0 license # Ultralytics Contributor License Agreement (CLA) action https://docs.ultralytics.com/help/CLA # This workflow automatically requests Pull Requests (PR) authors to sign the Ultralytics CLA before PRs can be merged @@ -13,6 +13,12 @@ on: - opened - synchronize +permissions: + actions: write + contents: write + pull-requests: write + statuses: write + jobs: CLA: if: github.repository == 'ultralytics/yolov5' @@ -23,17 +29,16 @@ jobs: uses: contributor-assistant/github-action@v2.4.0 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - # must be repository secret token + # Must be repository secret PAT PERSONAL_ACCESS_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }} with: path-to-signatures: "signatures/version1/cla.json" path-to-document: "https://docs.ultralytics.com/help/CLA" # CLA document - # branch should not be protected - branch: "main" + # Branch must not be protected + branch: "cla-signatures" allowlist: dependabot[bot],github-actions,[pre-commit*,pre-commit*,bot* remote-organization-name: ultralytics remote-repository-name: cla custom-pr-sign-comment: "I have read the CLA Document and I sign the CLA" custom-allsigned-prcomment: All Contributors have signed the CLA. ✅ - #custom-notsigned-prcomment: 'pull request comment with Introductory message to ask new contributors to sign' From a5c1cb71381e750cb698e68df55871952424007d Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 20 Jun 2024 18:51:13 +0200 Subject: [PATCH 1906/1976] Update README.md (#13114) * Update README.md Signed-off-by: Glenn Jocher * Auto-format by https://ultralytics.com/actions * Update README.zh-CN.md Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher Co-authored-by: UltralyticsAssistant --- README.md | 4 +- README.zh-CN.md | 6 +-- utils/flask_rest_api/README.md | 72 +++++++++++++++++----------------- 3 files changed, 41 insertions(+), 41 deletions(-) diff --git a/README.md b/README.md index fe9120be4832..115ea455375f 100644 --- a/README.md +++ b/README.md @@ -185,7 +185,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml -
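The models/yolo.py hunk above computes detection strides by pushing a dummy 256-pixel image through the model and dividing the input size by each output's feature-map size. A standalone sketch with illustrative tensor shapes, not actual YOLOv5 code:

```python
import torch

s = 256  # 2x the minimum stride, as in models/yolo.py
# Dummy P3/P4/P5 head outputs with 32x32, 16x16 and 8x8 feature maps
outputs = [torch.zeros(1, 3, n, n, 85) for n in (32, 16, 8)]
strides = torch.tensor([s / x.shape[-2] for x in outputs])
print(strides)  # tensor([ 8., 16., 32.])
```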
-| Roboflow | ClearML ⭐ NEW | Comet ⭐ NEW | Neural Magic ⭐ NEW | +| Roboflow | ClearML ⭐ NEW | Comet ⭐ NEW | Neural Magic ⭐ NEW | | :--------------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------: | | Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) | Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!) | Free forever, [Comet](https://bit.ly/yolov5-readme-comet2) lets you save YOLOv5 models, resume training, and interactively visualise and debug predictions | Run YOLOv5 inference up to 6x faster with [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic) | @@ -230,7 +230,7 @@ YOLOv5 has been designed to be super easy to get started and simple to learn. We | [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | | [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | | [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | -| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x6.pt)
+ [TTA] | 1280<br>1536 | 55.0<br>**55.8** | 72.7<br>**72.7** | 3136<br>- | 26.2<br>- | 19.4<br>- | 140.7<br>- | 209.8<br>- |
+| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x6.pt)<br>+ [TTA] | 1280<br>1536 | 55.0<br>**55.8** | 72.7<br>**72.7** | 3136<br>- | 26.2<br>- | 19.4<br>- | 140.7<br>- | 209.8<br>
- |
Table Notes diff --git a/README.zh-CN.md b/README.zh-CN.md index 2fb0bf9b3164..2de4c87d22b0 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -228,7 +228,7 @@ YOLOv5 超级容易上手,简单易学。我们优先考虑现实世界的结 | [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | | [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | | [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | -| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x6.pt)
+[TTA] | 1280<br>1536 | 55.0<br>**55.8** | 72.7<br>**72.7** | 3136<br>- | 26.2<br>- | 19.4<br>- | 140.7<br>- | 209.8<br>- |
+| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x6.pt)<br>+[TTA] | 1280<br>1536 | 55.0<br>**55.8** | 72.7<br>**72.7** | 3136<br>- | 26.2<br>- | 19.4<br>- | 140.7<br>- | 209.8<br>
- |
笔记 @@ -358,8 +358,8 @@ YOLOv5 [release v6.2](https://github.com/ultralytics/yolov5/releases) 带来对 - **准确性**都在单模型单尺度上计算,数据集使用 [ImageNet-1k](https://www.image-net.org/index.php) 。
复现命令 `python classify/val.py --data ../datasets/imagenet --img 224` - **推理速度**是使用 100 个推理图像进行平均得到,测试环境使用谷歌 [Colab Pro](https://colab.research.google.com/signup) V100 高 RAM 实例。
复现命令 `python classify/val.py --data ../datasets/imagenet --img 224 --batch 1` - **模型导出**到 FP32 的 ONNX 和 FP16 的 TensorRT 使用 `export.py` 。
复现命令 `python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224` -
-
+
+
分类训练示例  Open In Colab diff --git a/utils/flask_rest_api/README.md b/utils/flask_rest_api/README.md index b18a3011cf32..47ad8fa79523 100644 --- a/utils/flask_rest_api/README.md +++ b/utils/flask_rest_api/README.md @@ -28,42 +28,42 @@ The model inference results are returned as a JSON response: ```json [ - { - "class": 0, - "confidence": 0.8900438547, - "height": 0.9318675399, - "name": "person", - "width": 0.3264600933, - "xcenter": 0.7438579798, - "ycenter": 0.5207948685 - }, - { - "class": 0, - "confidence": 0.8440024257, - "height": 0.7155083418, - "name": "person", - "width": 0.6546785235, - "xcenter": 0.427829951, - "ycenter": 0.6334488392 - }, - { - "class": 27, - "confidence": 0.3771208823, - "height": 0.3902671337, - "name": "tie", - "width": 0.0696444362, - "xcenter": 0.3675483763, - "ycenter": 0.7991207838 - }, - { - "class": 27, - "confidence": 0.3527112305, - "height": 0.1540903747, - "name": "tie", - "width": 0.0336618312, - "xcenter": 0.7814827561, - "ycenter": 0.5065554976 - } + { + "class": 0, + "confidence": 0.8900438547, + "height": 0.9318675399, + "name": "person", + "width": 0.3264600933, + "xcenter": 0.7438579798, + "ycenter": 0.5207948685 + }, + { + "class": 0, + "confidence": 0.8440024257, + "height": 0.7155083418, + "name": "person", + "width": 0.6546785235, + "xcenter": 0.427829951, + "ycenter": 0.6334488392 + }, + { + "class": 27, + "confidence": 0.3771208823, + "height": 0.3902671337, + "name": "tie", + "width": 0.0696444362, + "xcenter": 0.3675483763, + "ycenter": 0.7991207838 + }, + { + "class": 27, + "confidence": 0.3527112305, + "height": 0.1540903747, + "name": "tie", + "width": 0.0336618312, + "xcenter": 0.7814827561, + "ycenter": 0.5065554976 + } ] ``` From 8f9a4248e4536d450bf50767dd099443c7775e56 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 20 Jun 2024 19:04:21 +0200 Subject: [PATCH 1907/1976] Update README.md (#13115) * Update README.md Signed-off-by: Glenn Jocher * Auto-format by https://ultralytics.com/actions --------- Signed-off-by: Glenn Jocher Co-authored-by: UltralyticsAssistant --- README.md | 2 +- README.zh-CN.md | 2 +- utils/flask_rest_api/README.md | 72 ++++++++++++++++----------------- utils/loggers/clearml/README.md | 12 +++--- 4 files changed, 44 insertions(+), 44 deletions(-) diff --git a/README.md b/README.md index 115ea455375f..b3b58995550b 100644 --- a/README.md +++ b/README.md @@ -230,7 +230,7 @@ YOLOv5 has been designed to be super easy to get started and simple to learn. We | [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | | [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | | [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | -| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x6.pt)
+ [TTA] | 1280<br>1536 | 55.0<br>**55.8** | 72.7<br>**72.7** | 3136<br>- | 26.2<br>- | 19.4<br>- | 140.7<br>- | 209.8<br>- |
+| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x6.pt)<br>+ [TTA] | 1280<br>1536 | 55.0<br>**55.8** | 72.7<br>**72.7** | 3136<br>- | 26.2<br>- | 19.4<br>- | 140.7<br>- | 209.8<br>
- |
Table Notes diff --git a/README.zh-CN.md b/README.zh-CN.md index 2de4c87d22b0..f0cacaeb0356 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -228,7 +228,7 @@ YOLOv5 超级容易上手,简单易学。我们优先考虑现实世界的结 | [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | | [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | | [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | -| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x6.pt)
+[TTA] | 1280<br>1536 | 55.0<br>**55.8** | 72.7<br>**72.7** | 3136<br>- | 26.2<br>- | 19.4<br>- | 140.7<br>- | 209.8<br>- |
+| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x6.pt)<br>+[TTA] | 1280<br>1536 | 55.0<br>**55.8** | 72.7<br>**72.7** | 3136<br>- | 26.2<br>- | 19.4<br>- | 140.7<br>- | 209.8<br>
- |
笔记 diff --git a/utils/flask_rest_api/README.md b/utils/flask_rest_api/README.md index 47ad8fa79523..b18a3011cf32 100644 --- a/utils/flask_rest_api/README.md +++ b/utils/flask_rest_api/README.md @@ -28,42 +28,42 @@ The model inference results are returned as a JSON response: ```json [ - { - "class": 0, - "confidence": 0.8900438547, - "height": 0.9318675399, - "name": "person", - "width": 0.3264600933, - "xcenter": 0.7438579798, - "ycenter": 0.5207948685 - }, - { - "class": 0, - "confidence": 0.8440024257, - "height": 0.7155083418, - "name": "person", - "width": 0.6546785235, - "xcenter": 0.427829951, - "ycenter": 0.6334488392 - }, - { - "class": 27, - "confidence": 0.3771208823, - "height": 0.3902671337, - "name": "tie", - "width": 0.0696444362, - "xcenter": 0.3675483763, - "ycenter": 0.7991207838 - }, - { - "class": 27, - "confidence": 0.3527112305, - "height": 0.1540903747, - "name": "tie", - "width": 0.0336618312, - "xcenter": 0.7814827561, - "ycenter": 0.5065554976 - } + { + "class": 0, + "confidence": 0.8900438547, + "height": 0.9318675399, + "name": "person", + "width": 0.3264600933, + "xcenter": 0.7438579798, + "ycenter": 0.5207948685 + }, + { + "class": 0, + "confidence": 0.8440024257, + "height": 0.7155083418, + "name": "person", + "width": 0.6546785235, + "xcenter": 0.427829951, + "ycenter": 0.6334488392 + }, + { + "class": 27, + "confidence": 0.3771208823, + "height": 0.3902671337, + "name": "tie", + "width": 0.0696444362, + "xcenter": 0.3675483763, + "ycenter": 0.7991207838 + }, + { + "class": 27, + "confidence": 0.3527112305, + "height": 0.1540903747, + "name": "tie", + "width": 0.0336618312, + "xcenter": 0.7814827561, + "ycenter": 0.5065554976 + } ] ``` diff --git a/utils/loggers/clearml/README.md b/utils/loggers/clearml/README.md index 2810c92a6a88..a61e3025445f 100644 --- a/utils/loggers/clearml/README.md +++ b/utils/loggers/clearml/README.md @@ -28,15 +28,15 @@ Either sign up for free to the [ClearML Hosted Service](https://cutt.ly/yolov5-t 1. Install the `clearml` python package: - ```bash - pip install clearml - ``` + ```bash + pip install clearml + ``` 2. Connect the ClearML SDK to the server by [creating credentials](https://app.clear.ml/settings/workspace-configuration) (go right top to Settings -> Workspace -> Create new credentials), then execute the command below and follow the instructions: - ```bash - clearml-init - ``` + ```bash + clearml-init + ``` That's it! 
You're done 😎 From b20fa8027732a471053b547938e2bb5ddd8a3811 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 20 Jun 2024 22:30:35 +0200 Subject: [PATCH 1908/1976] Ultralytics Refactor https://ultralytics.com/actions (#13116) Refactor code for speed and clarity --- README.md | 4 ++-- README.zh-CN.md | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index b3b58995550b..707f051985a3 100644 --- a/README.md +++ b/README.md @@ -39,7 +39,7 @@ To request an Enterprise License please complete the form at [Ultralytics Licens Ultralytics TikTok - Ultralytics Instagram + Ultralytics Instagram Ultralytics Discord @@ -464,7 +464,7 @@ For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https:/ Ultralytics TikTok - Ultralytics Instagram + Ultralytics Instagram Ultralytics Discord diff --git a/README.zh-CN.md b/README.zh-CN.md index f0cacaeb0356..99ecf07b0a54 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -38,7 +38,7 @@ YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表 Ultralytics TikTok - Ultralytics Instagram + Ultralytics Instagram Ultralytics Discord @@ -463,7 +463,7 @@ Ultralytics 提供两种许可证选项以适应各种使用场景: Ultralytics TikTok - Ultralytics Instagram + Ultralytics Instagram Ultralytics Discord From ab364c984b9fdace147fcde339ae4afccc31b642 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 22 Jun 2024 13:17:45 +0200 Subject: [PATCH 1909/1976] Fix HUB link https://ultralytics.com/hub (#13120) Refactor code for speed and clarity --- README.md | 10 +++++----- README.zh-CN.md | 10 +++++----- tutorial.ipynb | 2 +- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index 707f051985a3..e59d57889ced 100644 --- a/README.md +++ b/README.md @@ -39,7 +39,7 @@ To request an Enterprise License please complete the form at [Ultralytics Licens Ultralytics TikTok - Ultralytics Instagram + Ultralytics BiliBili Ultralytics Discord @@ -166,7 +166,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - ##
Integrations

- +

@@ -191,9 +191,9 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - ##
Ultralytics HUB
-Experience seamless AI with [Ultralytics HUB](https://bit.ly/ultralytics_hub) ⭐, the all-in-one solution for data visualization, YOLOv5 and YOLOv8 🚀 model training and deployment, without any coding. Transform images into actionable insights and bring your AI visions to life with ease using our cutting-edge platform and user-friendly [Ultralytics App](https://ultralytics.com/app_install). Start your journey for **Free** now! +Experience seamless AI with [Ultralytics HUB](https://ultralytics.com/hub) ⭐, the all-in-one solution for data visualization, YOLOv5 and YOLOv8 🚀 model training and deployment, without any coding. Transform images into actionable insights and bring your AI visions to life with ease using our cutting-edge platform and user-friendly [Ultralytics App](https://ultralytics.com/app_install). Start your journey for **Free** now! - + ##
Why YOLOv5
@@ -464,7 +464,7 @@ For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https:/ Ultralytics TikTok - Ultralytics Instagram + Ultralytics BiliBili Ultralytics Discord diff --git a/README.zh-CN.md b/README.zh-CN.md index 99ecf07b0a54..338b9ca689b6 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -38,7 +38,7 @@ YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表 Ultralytics TikTok - Ultralytics Instagram + Ultralytics BiliBili Ultralytics Discord @@ -164,7 +164,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - ##
模块集成

- +

@@ -189,9 +189,9 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - ##
Ultralytics HUB
-[Ultralytics HUB](https://bit.ly/ultralytics_hub) 是我们的⭐**新的**用于可视化数据集、训练 YOLOv5 🚀 模型并以无缝体验部署到现实世界的无代码解决方案。现在开始 **免费** 使用他! +[Ultralytics HUB](https://ultralytics.com/hub) 是我们的⭐**新的**用于可视化数据集、训练 YOLOv5 🚀 模型并以无缝体验部署到现实世界的无代码解决方案。现在开始 **免费** 使用他! - + ##
为什么选择 YOLOv5
@@ -463,7 +463,7 @@ Ultralytics 提供两种许可证选项以适应各种使用场景: Ultralytics TikTok - Ultralytics Instagram + Ultralytics BiliBili Ultralytics Discord diff --git a/tutorial.ipynb b/tutorial.ipynb index 1657c67965b0..d7953a06599f 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -253,7 +253,7 @@ "source": [ "# 3. Train\n", "\n", - "

\n", + "

\n", "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", "

\n", "\n", From 05e59bdec1a63b5bd70635fb33ab315b6e69fbfd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 30 Jun 2024 03:20:12 +0200 Subject: [PATCH 1910/1976] Bump docker/build-push-action from 5 to 6 in /.github/workflows (#13123) Bumps [docker/build-push-action](https://github.com/docker/build-push-action) from 5 to 6. - [Release notes](https://github.com/docker/build-push-action/releases) - [Commits](https://github.com/docker/build-push-action/compare/v5...v6) --- updated-dependencies: - dependency-name: docker/build-push-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/docker.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 0decb4ce65b5..2cd1c13f78b0 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -32,7 +32,7 @@ jobs: password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Build and push arm64 image - uses: docker/build-push-action@v5 + uses: docker/build-push-action@v6 continue-on-error: true with: context: . @@ -42,7 +42,7 @@ jobs: tags: ultralytics/yolov5:latest-arm64 - name: Build and push CPU image - uses: docker/build-push-action@v5 + uses: docker/build-push-action@v6 continue-on-error: true with: context: . @@ -51,7 +51,7 @@ jobs: tags: ultralytics/yolov5:latest-cpu - name: Build and push GPU image - uses: docker/build-push-action@v5 + uses: docker/build-push-action@v6 continue-on-error: true with: context: . From b9019671f440d2f4204caacd10a34c862e18ed56 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 30 Jun 2024 12:34:04 +0200 Subject: [PATCH 1911/1976] Ultralytics Code Refactor https://ultralytics.com/actions (#13152) Refactor code for speed and clarity --- classify/train.py | 1 + export.py | 4 +++- segment/train.py | 1 + segment/val.py | 1 + train.py | 1 + utils/__init__.py | 1 + utils/autoanchor.py | 4 ++++ utils/downloads.py | 2 +- utils/general.py | 6 +++--- utils/loggers/comet/__init__.py | 3 +++ utils/plots.py | 3 +++ utils/segment/metrics.py | 6 ++++++ utils/torch_utils.py | 1 + 13 files changed, 29 insertions(+), 5 deletions(-) diff --git a/classify/train.py b/classify/train.py index 5ae6980716a4..8ff9d1582d2a 100644 --- a/classify/train.py +++ b/classify/train.py @@ -180,6 +180,7 @@ def train(opt, device): # lf = lambda x: ((1 + math.cos(x * math.pi / epochs)) / 2) * (1 - lrf) + lrf # cosine def lf(x): + """Linear learning rate scheduler function, scaling learning rate from initial value to `lrf` over `epochs`.""" return (1 - x / epochs) * (1 - lrf) + lrf # linear scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) diff --git a/export.py b/export.py index 32f6d303acce..7d79e8bf6a4b 100644 --- a/export.py +++ b/export.py @@ -134,6 +134,7 @@ def try_export(inner_func): inner_args = get_default_args(inner_func) def outer_func(*args, **kwargs): + """Logs success/failure and execution details of model export functions wrapped with @try_export decorator.""" prefix = inner_args["prefix"] try: with Profile() as dt: @@ -224,7 +225,7 @@ def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr("ONNX @try_export def export_openvino(file, metadata, half, int8, data, prefix=colorstr("OpenVINO:")): - # YOLOv5 OpenVINO export + """Exports a YOLOv5 model to OpenVINO format with optional FP16 and INT8 
quantization; see https://pypi.org/project/openvino-dev/.""" check_requirements("openvino-dev>=2023.0") # requires openvino-dev: https://pypi.org/project/openvino-dev/ import openvino.runtime as ov # noqa from openvino.tools import mo # noqa @@ -244,6 +245,7 @@ def export_openvino(file, metadata, half, int8, data, prefix=colorstr("OpenVINO: from utils.dataloaders import create_dataloader def gen_dataloader(yaml_path, task="train", imgsz=640, workers=4): + """Generates a DataLoader for model training or validation based on the given YAML dataset configuration.""" data_yaml = check_yaml(yaml_path) data = check_dataset(data_yaml) dataloader = create_dataloader( diff --git a/segment/train.py b/segment/train.py index ffd1746ade0f..379fed0b2f14 100644 --- a/segment/train.py +++ b/segment/train.py @@ -216,6 +216,7 @@ def train(hyp, opt, device, callbacks): else: def lf(x): + """Linear learning rate scheduler decreasing from 1 to hyp['lrf'] over 'epochs'.""" return (1 - x / epochs) * (1.0 - hyp["lrf"]) + hyp["lrf"] # linear scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) # plot_lr_scheduler(optimizer, scheduler, epochs) diff --git a/segment/val.py b/segment/val.py index b0a941faa151..b5e9f7557ec1 100644 --- a/segment/val.py +++ b/segment/val.py @@ -91,6 +91,7 @@ def save_one_json(predn, jdict, path, class_map, pred_masks): from pycocotools.mask import encode def single_encode(x): + """Encodes binary mask arrays into RLE (Run-Length Encoding) format for JSON serialization.""" rle = encode(np.asarray(x[:, :, None], order="F", dtype="uint8"))[0] rle["counts"] = rle["counts"].decode("utf-8") return rle diff --git a/train.py b/train.py index 44cbd1ac0622..472c1d395981 100644 --- a/train.py +++ b/train.py @@ -226,6 +226,7 @@ def train(hyp, opt, device, callbacks): else: def lf(x): + """Linear learning rate scheduler function with decay calculated by epoch proportion.""" return (1 - x / epochs) * (1.0 - hyp["lrf"]) + hyp["lrf"] # linear scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) # plot_lr_scheduler(optimizer, scheduler, epochs) diff --git a/utils/__init__.py b/utils/__init__.py index 91fc7694676f..c7ece49fae10 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -34,6 +34,7 @@ def threaded(func): """Decorator @threaded to run a function in a separate thread, returning the thread instance.""" def wrapper(*args, **kwargs): + """Runs the decorated function in a separate daemon thread and returns the thread instance.""" thread = threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True) thread.start() return thread diff --git a/utils/autoanchor.py b/utils/autoanchor.py index 79b79db0fc12..00eee2eb776d 100644 --- a/utils/autoanchor.py +++ b/utils/autoanchor.py @@ -33,6 +33,7 @@ def check_anchors(dataset, model, thr=4.0, imgsz=640): wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float() # wh def metric(k): # compute metric + """Computes ratio metric, anchors above threshold, and best possible recall for YOLOv5 anchor evaluation.""" r = wh[:, None] / k[None] x = torch.min(r, 1 / r).min(2)[0] # ratio metric best = x.max(1)[0] # best_x @@ -86,16 +87,19 @@ def kmean_anchors(dataset="./data/coco128.yaml", n=9, img_size=640, thr=4.0, gen thr = 1 / thr def metric(k, wh): # compute metrics + """Computes ratio metric, anchors above threshold, and best possible recall for YOLOv5 anchor evaluation.""" r = wh[:, None] / k[None] x = torch.min(r, 1 / r).min(2)[0] # ratio metric # x = wh_iou(wh, torch.tensor(k)) # iou metric return 
x, x.max(1)[0] # x, best_x def anchor_fitness(k): # mutation fitness + """Evaluates fitness of YOLOv5 anchors by computing recall and ratio metrics for an anchor evolution process.""" _, best = metric(torch.tensor(k, dtype=torch.float32), wh) return (best * (best > thr).float()).mean() # fitness def print_results(k, verbose=True): + """Sorts and logs kmeans-evolved anchor metrics and best possible recall values for YOLOv5 anchor evaluation.""" k = k[np.argsort(k.prod(1))] # sort small to large x, best = metric(k, wh0) bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n # best possible recall, anch > thr diff --git a/utils/downloads.py b/utils/downloads.py index a7b599efad20..c7e2273c794e 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -92,7 +92,7 @@ def attempt_download(file, repo="ultralytics/yolov5", release="v7.0"): from utils.general import LOGGER def github_assets(repository, version="latest"): - # Return GitHub repo tag (i.e. 'v7.0') and assets (i.e. ['yolov5s.pt', 'yolov5m.pt', ...]) + """Fetches GitHub repository release tag and asset names using the GitHub API.""" if version != "latest": version = f"tags/{version}" # i.e. tags/v7.0 response = requests.get(f"https://api.github.com/repos/{repository}/releases/{version}").json() # github api diff --git a/utils/general.py b/utils/general.py index ed38cc0d60cf..95a76644776f 100644 --- a/utils/general.py +++ b/utils/general.py @@ -343,7 +343,7 @@ def check_online(): import socket def run_once(): - # Check once + """Checks internet connectivity by attempting to create a connection to "1.1.1.1" on port 443.""" try: socket.create_connection(("1.1.1.1", 443), 5) # check host accessibility return True @@ -587,7 +587,7 @@ def check_amp(model): from models.common import AutoShape, DetectMultiBackend def amp_allclose(model, im): - # All close FP32 vs AMP results + """Compares FP32 and AMP model inference outputs, ensuring they are close within a 10% absolute tolerance.""" m = AutoShape(model, verbose=False) # model a = m(im).xywhn[0] # FP32 inference m.amp = True @@ -652,7 +652,7 @@ def download(url, dir=".", unzip=True, delete=True, curl=False, threads=1, retry """Downloads and optionally unzips files concurrently, supporting retries and curl fallback.""" def download_one(url, dir): - # Download 1 file + """Downloads a single file from `url` to `dir`, with retry support and optional curl fallback.""" success = True if os.path.isfile(url): f = Path(url) # filename diff --git a/utils/loggers/comet/__init__.py b/utils/loggers/comet/__init__.py index 3a91c49258a8..846dcb42a225 100644 --- a/utils/loggers/comet/__init__.py +++ b/utils/loggers/comet/__init__.py @@ -67,6 +67,9 @@ class CometLogger: """Log metrics, parameters, source code, models and much more with Comet.""" def __init__(self, opt, hyp, run_id=None, job_type="Training", **experiment_kwargs) -> None: + """Initializes CometLogger with given options, hyperparameters, run ID, job type, and additional experiment + arguments. + """ self.job_type = job_type self.opt = opt self.hyp = hyp diff --git a/utils/plots.py b/utils/plots.py index 062658cda979..9bec34a159fb 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -124,6 +124,9 @@ def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5): # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy def butter_lowpass(cutoff, fs, order): + """Applies a low-pass Butterworth filter to a signal with specified cutoff frequency, sample rate, and filter + order. 
+ """ nyq = 0.5 * fs normal_cutoff = cutoff / nyq return butter(order, normal_cutoff, btype="low", analog=False) diff --git a/utils/segment/metrics.py b/utils/segment/metrics.py index 7bdf3258abb7..6f57dec132e2 100644 --- a/utils/segment/metrics.py +++ b/utils/segment/metrics.py @@ -55,6 +55,9 @@ def ap_per_class_box_and_mask( class Metric: def __init__(self) -> None: + """Initializes performance metric attributes for precision, recall, F1 score, average precision, and class + indices. + """ self.p = [] # (nc, ) self.r = [] # (nc, ) self.f1 = [] # (nc, ) @@ -151,6 +154,9 @@ class Metrics: """Metric for boxes and masks.""" def __init__(self) -> None: + """Initializes Metric objects for bounding boxes and masks to compute performance metrics in the Metrics + class. + """ self.metric_box = Metric() self.metric_mask = Metric() diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 0b006d80562b..d15f1f73f6c3 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -37,6 +37,7 @@ def smart_inference_mode(torch_1_9=check_version(torch.__version__, "1.9.0")): """Applies torch.inference_mode() if torch>=1.9.0, else torch.no_grad() as a decorator for functions.""" def decorate(fn): + """Applies torch.inference_mode() if torch>=1.9.0, else torch.no_grad() to the decorated function.""" return (torch.inference_mode if torch_1_9 else torch.no_grad)()(fn) return decorate From 100a423b66fee81e0a2915d5da934d7872f12c8c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 1 Jul 2024 00:17:24 +0200 Subject: [PATCH 1912/1976] Ultralytics Code Refactor https://ultralytics.com/actions (#13153) * Refactor code for speed and clarity * Update dataloaders.py Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher --- benchmarks.py | 2 ++ classify/predict.py | 1 + classify/val.py | 1 + detect.py | 1 + export.py | 5 +++++ models/common.py | 3 +++ models/tf.py | 2 ++ segment/predict.py | 1 + segment/val.py | 3 +++ utils/augmentations.py | 4 ++++ utils/dataloaders.py | 3 +++ utils/segment/augmentations.py | 1 + utils/segment/dataloaders.py | 2 ++ val.py | 1 + 14 files changed, 30 insertions(+) diff --git a/benchmarks.py b/benchmarks.py index 4ca7122551e0..c849eed6f078 100644 --- a/benchmarks.py +++ b/benchmarks.py @@ -60,6 +60,7 @@ def run( pt_only=False, # test PyTorch only hard_fail=False, # throw error on benchmark failure ): + """Run YOLOv5 benchmarks on multiple export formats and log results for model performance evaluation.""" y, t = [], time.time() device = select_device(device) model_type = type(attempt_load(weights, fuse=False)) # DetectionModel, SegmentationModel, etc. 
@@ -124,6 +125,7 @@ def test( pt_only=False, # test PyTorch only hard_fail=False, # throw error on benchmark failure ): + """Run YOLOv5 export tests for all supported formats and log the results, including inference speed and mAP.""" y, t = [], time.time() device = select_device(device) for i, (name, f, suffix, gpu) in export.export_formats().iterrows(): # index, (name, file, suffix, gpu-capable) diff --git a/classify/predict.py b/classify/predict.py index 4dc3735f3a71..33140e9b56ca 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -84,6 +84,7 @@ def run( dnn=False, # use OpenCV DNN for ONNX inference vid_stride=1, # video frame-rate stride ): + """Conducts YOLOv5 classification inference on diverse input sources and saves results.""" source = str(source) save_img = not nosave and not source.endswith(".txt") # save inference images is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) diff --git a/classify/val.py b/classify/val.py index 23dbe7bfa420..8ce48f0645bf 100644 --- a/classify/val.py +++ b/classify/val.py @@ -68,6 +68,7 @@ def run( criterion=None, pbar=None, ): + """Validates a YOLOv5 classification model on a dataset, computing metrics like top1 and top5 accuracy.""" # Initialize/load model and set device training = model is not None if training: # called by train.py diff --git a/detect.py b/detect.py index 8d1cc56d998a..f791faa09087 100644 --- a/detect.py +++ b/detect.py @@ -97,6 +97,7 @@ def run( dnn=False, # use OpenCV DNN for ONNX inference vid_stride=1, # video frame-rate stride ): + """Runs YOLOv5 detection inference on various sources like images, videos, directories, streams, etc.""" source = str(source) save_img = not nosave and not source.endswith(".txt") # save inference images is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) diff --git a/export.py b/export.py index 7d79e8bf6a4b..77c972ba27f6 100644 --- a/export.py +++ b/export.py @@ -407,6 +407,9 @@ def export_saved_model( keras=False, prefix=colorstr("TensorFlow SavedModel:"), ): + """Exports a YOLOv5 model to TensorFlow SavedModel format, supporting dynamic axes and non-maximum suppression + (NMS). + """ # YOLOv5 TensorFlow SavedModel export try: import tensorflow as tf @@ -477,6 +480,7 @@ def export_tflite( keras_model, im, file, int8, per_tensor, data, nms, agnostic_nms, prefix=colorstr("TensorFlow Lite:") ): # YOLOv5 TensorFlow Lite export + """Exports YOLOv5 model to TensorFlow Lite format with optional FP16, INT8, and NMS support.""" import tensorflow as tf LOGGER.info(f"\n{prefix} starting export with tensorflow {tf.__version__}...") @@ -784,6 +788,7 @@ def run( iou_thres=0.45, # TF.js NMS: IoU threshold conf_thres=0.25, # TF.js NMS: confidence threshold ): + """Exports YOLOv5 model to specified formats including ONNX, TensorRT, CoreML, and TensorFlow; see https://github.com/ultralytics/yolov5.""" t = time.time() include = [x.lower() for x in include] # to lowercase fmts = tuple(export_formats()["Argument"][1:]) # --include arguments diff --git a/models/common.py b/models/common.py index 781f999445db..049dfc0b9e00 100644 --- a/models/common.py +++ b/models/common.py @@ -1066,6 +1066,9 @@ class Classify(nn.Module): def __init__( self, c1, c2, k=1, s=1, p=None, g=1, dropout_p=0.0 ): # ch_in, ch_out, kernel, stride, padding, groups, dropout probability + """Initializes YOLOv5 classification head with convolution, pooling, and dropout layers for input to output + channel transformation. 
+ """ super().__init__() c_ = 1280 # efficientnet_b0 size self.conv = Conv(c1, c_, k, s, autopad(k, p), g) diff --git a/models/tf.py b/models/tf.py index c65938c4b31b..9884ec3db355 100644 --- a/models/tf.py +++ b/models/tf.py @@ -612,6 +612,7 @@ def predict( iou_thres=0.45, conf_thres=0.25, ): + """Runs inference on input data, with an option for TensorFlow NMS.""" y = [] # outputs x = inputs for m in self.model.layers: @@ -730,6 +731,7 @@ def run( dynamic=False, # dynamic batch size ): # PyTorch model + """Exports YOLOv5 model from PyTorch to TensorFlow and Keras formats, performing inference for validation.""" im = torch.zeros((batch_size, 3, *imgsz)) # BCHW image model = attempt_load(weights, device=torch.device("cpu"), inplace=True, fuse=False) _ = model(im) # inference diff --git a/segment/predict.py b/segment/predict.py index 109a68415b0d..0bccaaaae9f7 100644 --- a/segment/predict.py +++ b/segment/predict.py @@ -97,6 +97,7 @@ def run( vid_stride=1, # video frame-rate stride retina_masks=False, ): + """Run YOLOv5 segmentation inference on diverse sources including images, videos, directories, and streams.""" source = str(source) save_img = not nosave and not source.endswith(".txt") # save inference images is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) diff --git a/segment/val.py b/segment/val.py index b5e9f7557ec1..ab8a66a90c34 100644 --- a/segment/val.py +++ b/segment/val.py @@ -184,6 +184,9 @@ def run( compute_loss=None, callbacks=Callbacks(), ): + """Validates a YOLOv5 segmentation model on specified dataset, producing metrics, plots, and optional JSON + output. + """ if save_json: check_requirements("pycocotools>=2.0.6") process = process_mask_native # more accurate diff --git a/utils/augmentations.py b/utils/augmentations.py index 1840d47d46c9..4a6e441d7c45 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -157,6 +157,7 @@ def random_perspective( # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(0.1, 0.1), scale=(0.9, 1.1), shear=(-10, 10)) # targets = [cls, xyxy] + """Applies random perspective transformation to an image, modifying the image and corresponding labels.""" height = im.shape[0] + border[0] * 2 # shape(h,w,c) width = im.shape[1] + border[1] * 2 @@ -336,6 +337,9 @@ def classify_albumentations( auto_aug=False, ): # YOLOv5 classification Albumentations (optional, only used if package is installed) + """Sets up and returns Albumentations transforms for YOLOv5 classification tasks depending on augmentation + settings. 
+ """ prefix = colorstr("albumentations: ") try: import albumentations as A diff --git a/utils/dataloaders.py b/utils/dataloaders.py index dacb0e0b33d7..21308f0cedbd 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -174,6 +174,7 @@ def create_dataloader( shuffle=False, seed=0, ): + """Creates and returns a configured DataLoader instance for loading and processing image datasets.""" if rect and shuffle: LOGGER.warning("WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False") shuffle = False @@ -552,6 +553,7 @@ def __init__( rank=-1, seed=0, ): + """Initializes the YOLOv5 dataset loader, handling images and their labels, caching, and preprocessing.""" self.img_size = img_size self.augment = augment self.hyp = hyp @@ -1351,6 +1353,7 @@ def create_classification_dataloader( path, imgsz=224, batch_size=16, augment=True, cache=False, rank=-1, workers=8, shuffle=True ): # Returns Dataloader object to be used with YOLOv5 Classifier + """Creates a DataLoader for image classification, supporting caching, augmentation, and distributed training.""" with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP dataset = ClassificationDataset(root=path, imgsz=imgsz, augment=augment, cache=cache) batch_size = min(batch_size, len(dataset)) diff --git a/utils/segment/augmentations.py b/utils/segment/augmentations.py index 5773b56f4d7f..d7dd8aec6691 100644 --- a/utils/segment/augmentations.py +++ b/utils/segment/augmentations.py @@ -30,6 +30,7 @@ def random_perspective( # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10)) # targets = [cls, xyxy] + """Applies random perspective, rotation, scale, shear, and translation augmentations to an image and targets.""" height = im.shape[0] + border[0] * 2 # shape(h,w,c) width = im.shape[1] + border[1] * 2 diff --git a/utils/segment/dataloaders.py b/utils/segment/dataloaders.py index 0804818deca7..c2be5f0dfe9a 100644 --- a/utils/segment/dataloaders.py +++ b/utils/segment/dataloaders.py @@ -39,6 +39,7 @@ def create_dataloader( overlap_mask=False, seed=0, ): + """Creates a dataloader for training, validating, or testing YOLO models with various dataset options.""" if rect and shuffle: LOGGER.warning("WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False") shuffle = False @@ -102,6 +103,7 @@ def __init__( rank=-1, seed=0, ): + """Initializes the dataset with image, label, and mask loading capabilities for training/testing.""" super().__init__( path, img_size, diff --git a/val.py b/val.py index 221226b4a45a..c1e8a6aa3094 100644 --- a/val.py +++ b/val.py @@ -148,6 +148,7 @@ def run( callbacks=Callbacks(), compute_loss=None, ): + """Evaluates model on a dataset and logs performance metrics, results are saved to specific directories.""" # Initialize/load model and set device training = model is not None if training: # called by train.py From 40f490d9b0d102d3dea832fbabb300d18ab1ec87 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 5 Jul 2024 14:32:06 +0200 Subject: [PATCH 1913/1976] Add Discourse at https://community.ultralytics.com (#13168) Refactor code for speed and clarity --- .github/workflows/greetings.yml | 2 +- README.md | 4 ++-- README.zh-CN.md | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index 9a938b3fb008..212211d2445e 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -27,7 +27,7 @@ jobs: If 
this is a 🐛 Bug Report, please provide a **minimum reproducible example** to help us debug it. - If this is a custom training ❓ Question, please provide as much information as possible, including dataset image examples and training logs, and verify you are following our [Tips for Best Training Results](https://docs.ultralytics.com/yolov5/tutorials/tips_for_best_training_results/). + If this is a custom training ❓ Question, please provide as much information as possible, including dataset image examples and training logs, and verify you are following our [Tips for Best Training Results](https://docs.ultralytics.com/guides/model-training-tips/). ## Requirements diff --git a/README.md b/README.md index e59d57889ced..0440a920775d 100644 --- a/README.md +++ b/README.md @@ -14,7 +14,7 @@ YOLOv5 CI YOLOv5 Citation Docker Pulls - Discord + Discord Ultralytics Forums
Run on Gradient Open In Colab @@ -145,7 +145,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - Tutorials - [Train Custom Data](https://docs.ultralytics.com/yolov5/tutorials/train_custom_data) 🚀 RECOMMENDED -- [Tips for Best Training Results](https://docs.ultralytics.com/yolov5/tutorials/tips_for_best_training_results) ☘️ +- [Tips for Best Training Results](https://docs.ultralytics.com/guides/model-training-tips/) ☘️ - [Multi-GPU Training](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training) - [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading) 🌟 NEW - [TFLite, ONNX, CoreML, TensorRT Export](https://docs.ultralytics.com/yolov5/tutorials/model_export) 🚀 diff --git a/README.zh-CN.md b/README.zh-CN.md index 338b9ca689b6..cea3c26f28d1 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -143,7 +143,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - 教程 - [训练自定义数据](https://docs.ultralytics.com/yolov5/tutorials/train_custom_data) 🚀 推荐 -- [获得最佳训练结果的技巧](https://docs.ultralytics.com/yolov5/tutorials/tips_for_best_training_results) ☘️ +- [获得最佳训练结果的技巧](https://docs.ultralytics.com/guides/model-training-tips/) ☘️ - [多GPU训练](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training) - [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading) 🌟 新 - [TFLite,ONNX,CoreML,TensorRT导出](https://docs.ultralytics.com/yolov5/tutorials/model_export) 🚀 From 900a1262cd17aae7da176f02ad4ff4fef0b76ce2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 Jul 2024 19:41:45 +0200 Subject: [PATCH 1914/1976] Update tensorflow requirement from <=2.16.1 to <=2.16.2 (#13155) * Update tensorflow requirement from <=2.16.1 to <=2.16.2 Updates the requirements on [tensorflow](https://github.com/tensorflow/tensorflow) to permit the latest version. - [Release notes](https://github.com/tensorflow/tensorflow/releases) - [Changelog](https://github.com/tensorflow/tensorflow/blob/v2.16.2/RELEASE.md) - [Commits](https://github.com/tensorflow/tensorflow/compare/tflite-v0.1.7...v2.16.2) --- updated-dependencies: - dependency-name: tensorflow dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] * Update pyproject.toml Signed-off-by: Glenn Jocher --------- Signed-off-by: dependabot[bot] Signed-off-by: Glenn Jocher Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Ultralytics Assistant <135830346+UltralyticsAssistant@users.noreply.github.com> Co-authored-by: Glenn Jocher --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 8905a28fc702..2bcf6592988d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -99,7 +99,7 @@ export = [ "onnx>=1.12.0", # ONNX export "coremltools>=7.0; platform_system != 'Windows'", # CoreML only supported on macOS and Linux "openvino-dev>=2023.0", # OpenVINO export - "tensorflow<=2.16.1", # TF bug https://github.com/ultralytics/ultralytics/issues/5161 + "tensorflow>=2.0.0", # TF bug https://github.com/ultralytics/ultralytics/issues/5161 "tensorflowjs>=3.9.0", # TF.js export, automatically installs tensorflow ] # tensorflow>=2.4.1,<=2.13.1 # TF exports (-cpu, -aarch64, -macos) From 8257e0a68cc1d192c6ae1392bce1ed3917119795 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 8 Jul 2024 21:19:04 +0200 Subject: [PATCH 1915/1976] Ultralytics Code Refactor https://ultralytics.com/actions (#13174) Refactor code for speed and clarity --- benchmarks.py | 106 ++++++++- detect.py | 117 +++++++++- export.py | 621 ++++++++++++++++++++++++++++++++++++++++++++++++-- hubconf.py | 346 ++++++++++++++++++++++++++-- train.py | 144 +++++++++++- val.py | 191 +++++++++++++++- 6 files changed, 1448 insertions(+), 77 deletions(-) diff --git a/benchmarks.py b/benchmarks.py index c849eed6f078..90cce4b3ff3c 100644 --- a/benchmarks.py +++ b/benchmarks.py @@ -60,7 +60,41 @@ def run( pt_only=False, # test PyTorch only hard_fail=False, # throw error on benchmark failure ): - """Run YOLOv5 benchmarks on multiple export formats and log results for model performance evaluation.""" + """ + Run YOLOv5 benchmarks on multiple export formats and log results for model performance evaluation. + + Args: + weights (Path | str): Path to the model weights file (default: ROOT / "yolov5s.pt"). + imgsz (int): Inference size in pixels (default: 640). + batch_size (int): Batch size for inference (default: 1). + data (Path | str): Path to the dataset.yaml file (default: ROOT / "data/coco128.yaml"). + device (str): CUDA device, e.g., '0' or '0,1,2,3' or 'cpu' (default: None). + half (bool): Use FP16 half-precision inference (default: False). + test (bool): Test export formats only (default: False). + pt_only (bool): Test PyTorch format only (default: False). + hard_fail (bool): Throw an error on benchmark failure if True (default: False). + + Returns: + None. Logs information about the benchmark results, including the format, size, mAP50-95, and inference time. + + Notes: + Supported export formats and models include PyTorch, TorchScript, ONNX, OpenVINO, TensorRT, CoreML, TensorFlow + SavedModel, TensorFlow GraphDef, TensorFlow Lite, and TensorFlow Edge TPU. Edge TPU and TF.js are unsupported. 
+ + Examples: + ```bash + $ python benchmarks.py --weights yolov5s.pt --img 640 + ``` + + Usage: + Install required packages: + $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu # CPU support + $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow # GPU support + $ pip install -U nvidia-tensorrt --index-url https://pypi.ngc.nvidia.com # TensorRT + + Run benchmarks: + $ python benchmarks.py --weights yolov5s.pt --img 640 + """ y, t = [], time.time() device = select_device(device) model_type = type(attempt_load(weights, fuse=False)) # DetectionModel, SegmentationModel, etc. @@ -125,7 +159,23 @@ def test( pt_only=False, # test PyTorch only hard_fail=False, # throw error on benchmark failure ): - """Run YOLOv5 export tests for all supported formats and log the results, including inference speed and mAP.""" + """ + Run YOLOv5 export tests for all supported formats and log the results, including export statuses. + + Args: + weights (Path | str): Path to the model weights file (.pt format). Default is 'ROOT / "yolov5s.pt"'. + imgsz (int): Inference image size (in pixels). Default is 640. + batch_size (int): Batch size for testing. Default is 1. + data (Path | str): Path to the dataset configuration file (.yaml format). Default is 'ROOT / "data/coco128.yaml"'. + device (str): Device for running the tests, can be 'cpu' or a specific CUDA device ('0', '0,1,2,3', etc.). Default is an empty string. + half (bool): Use FP16 half-precision for inference if True. Default is False. + test (bool): Test export formats only without running inference. Default is False. + pt_only (bool): Test only the PyTorch model if True. Default is False. + hard_fail (bool): Raise error on export or test failure if True. Default is False. + + Returns: + pd.DataFrame: DataFrame containing the results of the export tests, including format names and export statuses. + """ y, t = [], time.time() device = select_device(device) for i, (name, f, suffix, gpu) in export.export_formats().iterrows(): # index, (name, file, suffix, gpu-capable) @@ -151,7 +201,28 @@ def test( def parse_opt(): - """Parses command-line arguments for YOLOv5 model inference configuration.""" + """ + Parses command-line arguments for YOLOv5 model inference configuration. + + Args: + weights (str): The path to the weights file. Defaults to 'ROOT / "yolov5s.pt"'. + imgsz (int): Inference size in pixels. Defaults to 640. + batch_size (int): Batch size. Defaults to 1. + data (str): Path to the dataset YAML file. Defaults to 'ROOT / "data/coco128.yaml"'. + device (str): CUDA device, e.g., '0' or '0,1,2,3' or 'cpu'. Defaults to an empty string (auto-select). + half (bool): Use FP16 half-precision inference. This is a flag and defaults to False. + test (bool): Test exports only. This is a flag and defaults to False. + pt_only (bool): Test PyTorch only. This is a flag and defaults to False. + hard_fail (bool|str): Throw an error on benchmark failure. Can be a boolean or a string representing a minimum metric + floor, i.e., '0.29'. Defaults to False. + + Returns: + argparse.Namespace: Parsed command-line arguments encapsulated in an argparse Namespace object. + + Notes: + The function modifies the 'opt.data' by checking and validating the YAML path using 'check_yaml()'. + The parsed arguments are printed for reference using 'print_args()'.
+ """ parser = argparse.ArgumentParser() parser.add_argument("--weights", type=str, default=ROOT / "yolov5s.pt", help="weights path") parser.add_argument("--imgsz", "--img", "--img-size", type=int, default=640, help="inference size (pixels)") @@ -169,7 +240,34 @@ def parse_opt(): def main(opt): - """Executes a test run if `opt.test` is True, otherwise starts training or inference with provided options.""" + """ + Executes YOLOv5 benchmark tests or main training/inference routines based on the provided command-line arguments. + + Args: + opt (argparse.Namespace): Parsed command-line arguments including options for weights, image size, batch size, data + configuration, device, and other flags for inference settings. + + Returns: + None: This function does not return any value. It leverages side-effects such as logging and running benchmarks. + + Example: + ```python + if __name__ == "__main__": + opt = parse_opt() + main(opt) + ``` + + Notes: + - For a complete list of supported export formats and their respective requirements, refer to the + [Ultralytics YOLOv5 Export Formats](https://github.com/ultralytics/yolov5#export-formats). + - Ensure that you have installed all necessary dependencies by following the installation instructions detailed in + the [main repository](https://github.com/ultralytics/yolov5#installation). + + ```shell + # Running benchmarks on default weights and image size + $ python benchmarks.py --weights yolov5s.pt --img 640 + ``` + """ test(**vars(opt)) if opt.test else run(**vars(opt)) diff --git a/detect.py b/detect.py index f791faa09087..b774370f54ed 100644 --- a/detect.py +++ b/detect.py @@ -97,7 +97,56 @@ def run( dnn=False, # use OpenCV DNN for ONNX inference vid_stride=1, # video frame-rate stride ): - """Runs YOLOv5 detection inference on various sources like images, videos, directories, streams, etc.""" + """ + Runs YOLOv5 detection inference on various sources like images, videos, directories, streams, etc. + + Args: + weights (str | Path): Path to the model weights file or a Triton URL. Default is 'yolov5s.pt'. + source (str | Path): Input source, which can be a file, directory, URL, glob pattern, screen capture, or webcam index. + Default is 'data/images'. + data (str | Path): Path to the dataset YAML file. Default is 'data/coco128.yaml'. + imgsz (tuple[int, int]): Inference image size as a tuple (height, width). Default is (640, 640). + conf_thres (float): Confidence threshold for detections. Default is 0.25. + iou_thres (float): Intersection Over Union (IOU) threshold for non-max suppression. Default is 0.45. + max_det (int): Maximum number of detections per image. Default is 1000. + device (str): CUDA device identifier (e.g., '0' or '0,1,2,3') or 'cpu'. Default is an empty string, which + uses the best available device. + view_img (bool): If True, display inference results using OpenCV. Default is False. + save_txt (bool): If True, save results in a text file. Default is False. + save_csv (bool): If True, save results in a CSV file. Default is False. + save_conf (bool): If True, include confidence scores in the saved results. Default is False. + save_crop (bool): If True, save cropped prediction boxes. Default is False. + nosave (bool): If True, do not save inference images or videos. Default is False. + classes (list[int]): List of class indices to filter detections by. Default is None. + agnostic_nms (bool): If True, perform class-agnostic non-max suppression. Default is False. + augment (bool): If True, use augmented inference. Default is False. 
+ visualize (bool): If True, visualize feature maps. Default is False. + update (bool): If True, update all models' weights. Default is False. + project (str | Path): Directory to save results. Default is 'runs/detect'. + name (str): Name of the current experiment; used to create a subdirectory within 'project'. Default is 'exp'. + exist_ok (bool): If True, existing directories with the same name are reused instead of being incremented. Default is + False. + line_thickness (int): Thickness of bounding box lines in pixels. Default is 3. + hide_labels (bool): If True, do not display labels on bounding boxes. Default is False. + hide_conf (bool): If True, do not display confidence scores on bounding boxes. Default is False. + half (bool): If True, use FP16 half-precision inference. Default is False. + dnn (bool): If True, use OpenCV DNN backend for ONNX inference. Default is False. + vid_stride (int): Stride for processing video frames, to skip frames between processing. Default is 1. + + Returns: + None + + Examples: + ```python + from ultralytics import run + + # Run inference on an image + run(source='data/images/example.jpg', weights='yolov5s.pt', device='0') + + # Run inference on a video with specific confidence threshold + run(source='data/videos/example.mp4', weights='yolov5s.pt', conf_thres=0.4, device='0') + ``` + """ source = str(source) save_img = not nosave and not source.endswith(".txt") # save inference images is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) @@ -266,7 +315,48 @@ def write_to_csv(image_name, prediction, confidence): def parse_opt(): - """Parses command-line arguments for YOLOv5 detection, setting inference options and model configurations.""" + """ + Parses command-line arguments for YOLOv5 detection, setting inference options and model configurations. + + Args: + --weights (str | list[str], optional): Model path or Triton URL. Defaults to ROOT / 'yolov5s.pt'. + --source (str, optional): File/dir/URL/glob/screen/0(webcam). Defaults to ROOT / 'data/images'. + --data (str, optional): Dataset YAML path. Provides dataset configuration information. + --imgsz (list[int], optional): Inference size (height, width). Defaults to [640]. + --conf-thres (float, optional): Confidence threshold. Defaults to 0.25. + --iou-thres (float, optional): NMS IoU threshold. Defaults to 0.45. + --max-det (int, optional): Maximum number of detections per image. Defaults to 1000. + --device (str, optional): CUDA device, i.e., '0' or '0,1,2,3' or 'cpu'. Defaults to "". + --view-img (bool, optional): Flag to display results. Defaults to False. + --save-txt (bool, optional): Flag to save results to *.txt files. Defaults to False. + --save-csv (bool, optional): Flag to save results in CSV format. Defaults to False. + --save-conf (bool, optional): Flag to save confidences in labels saved via --save-txt. Defaults to False. + --save-crop (bool, optional): Flag to save cropped prediction boxes. Defaults to False. + --nosave (bool, optional): Flag to prevent saving images/videos. Defaults to False. + --classes (list[int], optional): List of classes to filter results by, e.g., '--classes 0 2 3'. Defaults to None. + --agnostic-nms (bool, optional): Flag for class-agnostic NMS. Defaults to False. + --augment (bool, optional): Flag for augmented inference. Defaults to False. + --visualize (bool, optional): Flag for visualizing features. Defaults to False. + --update (bool, optional): Flag to update all models in the model directory. Defaults to False. 
+ --project (str, optional): Directory to save results. Defaults to ROOT / 'runs/detect'. + --name (str, optional): Sub-directory name for saving results within --project. Defaults to 'exp'. + --exist-ok (bool, optional): Flag to allow overwriting if the project/name already exists. Defaults to False. + --line-thickness (int, optional): Thickness (in pixels) of bounding boxes. Defaults to 3. + --hide-labels (bool, optional): Flag to hide labels in the output. Defaults to False. + --hide-conf (bool, optional): Flag to hide confidences in the output. Defaults to False. + --half (bool, optional): Flag to use FP16 half-precision inference. Defaults to False. + --dnn (bool, optional): Flag to use OpenCV DNN for ONNX inference. Defaults to False. + --vid-stride (int, optional): Video frame-rate stride, determining the number of frames to skip in between consecutive frames. Defaults to 1. + + Returns: + argparse.Namespace: Parsed command-line arguments as an argparse.Namespace object. + + Example: + ```python + from ultralytics import YOLOv5 + args = YOLOv5.parse_opt() + ``` + """ parser = argparse.ArgumentParser() parser.add_argument("--weights", nargs="+", type=str, default=ROOT / "yolov5s.pt", help="model path or triton URL") parser.add_argument("--source", type=str, default=ROOT / "data/images", help="file/dir/URL/glob/screen/0(webcam)") @@ -303,7 +393,28 @@ def parse_opt(): def main(opt): - """Executes YOLOv5 model inference with given options, checking requirements before running the model.""" + """ + Executes YOLOv5 model inference based on provided command-line arguments, validating dependencies before running. + + Args: + opt (argparse.Namespace): Command-line arguments for YOLOv5 detection. See function `parse_opt` for details. + + Returns: + None + + Note: + This function performs essential pre-execution checks and initiates the YOLOv5 detection process based on user-specified options. + Refer to the usage guide and examples for more information about different sources and formats at: + https://github.com/ultralytics/ultralytics + + Example usage: + + ```python + if __name__ == "__main__": + opt = parse_opt() + main(opt) + ``` + """ check_requirements(ROOT / "requirements.txt", exclude=("tensorboard", "thop")) run(**vars(opt)) diff --git a/export.py b/export.py index 77c972ba27f6..bf68ae123ab3 100644 --- a/export.py +++ b/export.py @@ -92,7 +92,22 @@ class iOSModel(torch.nn.Module): def __init__(self, model, im): - """Initializes an iOS compatible model with normalization based on image dimensions.""" + """ + Initializes an iOS compatible model with normalization based on image dimensions. + + Args: + model (torch.nn.Module): The PyTorch model to be adapted for iOS compatibility. + im (torch.Tensor): An input tensor representing a batch of images with shape (batch, channel, height, width). + + Returns: + None: This method does not return any value. + + Notes: + This initializer configures normalization based on the input image dimensions, which is critical for + ensuring the model's compatibility and proper functionality on iOS devices. The normalization step + divides box coordinates by the image width when the input is square; non-square inputs need per-dimension + scale factors, as in the commented-out explicit normalization in the code below.
/ h]).expand(np, 4) # explicit (faster, larger) def forward(self, x): - """Runs forward pass on the input tensor, returning class confidences and normalized coordinates.""" + """ + Runs a forward pass on the input tensor, returning class confidences and normalized coordinates. + + Args: + x (torch.Tensor): Input tensor containing the image data. + + Returns: + torch.Tensor: Concatenated tensor with normalized coordinates (xywh), confidence scores (conf), and class probabilities (cls). + + Examples: + ```python + model = iOSModel(pretrained_model, input_image) + output = model.forward(torch_input_tensor) + ``` + """ xywh, conf, cls = self.model(x)[0].squeeze().split((4, 1, self.nc), 1) return cls * conf, xywh * self.normalize # confidence (3780, 80), coordinates (3780, 4) def export_formats(): - """Returns a DataFrame of supported YOLOv5 model export formats and their properties.""" + """ + Returns a DataFrame of supported YOLOv5 model export formats and their properties. + + Returns: + pd.DataFrame: A pandas DataFrame containing supported export formats and their properties. The DataFrame includes + columns for format name, CLI argument suffix, file extension or directory name, and boolean flags indicating if the + export format supports training and detection. + + Examples: + ```python + formats = export_formats() + print(formats) + ``` + + Notes: + The DataFrame contains the following columns: + - Format: The name of the model format (e.g., PyTorch, TorchScript, ONNX, etc.) + - Include Argument: The argument to use with the export script to include this format. + - File Suffix: File extension or directory name associated with the format. + - Supports Training: Whether the format supports training. + - Supports Detection: Whether the format supports detection. + """ x = [ ["PyTorch", "-", ".pt", True, True], ["TorchScript", "torchscript", ".torchscript", True, True], @@ -130,7 +180,28 @@ def export_formats(): def try_export(inner_func): - """Decorator @try_export for YOLOv5 model export functions that logs success/failure, time taken, and file size.""" + """ + Logs success or failure, execution time, and file size for YOLOv5 model export functions wrapped with @try_export. + + Args: + inner_func (Callable): The model export function to be wrapped by the decorator. + + Returns: + Callable: The wrapped function that logs execution details. When executed, this wrapper function returns either: + - Tuple (str, torch.nn.Module): On success — the file path of the exported model and the model instance. + - Tuple (None, None): On failure — None values indicating export failed. + + Examples: + @try_export + def export_onnx(model, filepath): + # implementation here + pass + + exported_file, exported_model = export_onnx(yolo_model, 'path/to/save/model.onnx') + + Notes: + For additional requirements and model export formats, refer to the [Ultralytics YOLOv5 GitHub repository](https://github.com/ultralytics/ultralytics). + """ inner_args = get_default_args(inner_func) def outer_func(*args, **kwargs): @@ -150,8 +221,46 @@ def outer_func(*args, **kwargs): @try_export def export_torchscript(model, im, file, optimize, prefix=colorstr("TorchScript:")): - """Exports YOLOv5 model to TorchScript format, optionally optimized for mobile, with image shape and stride - metadata. + """ + Exports a YOLOv5 model to the TorchScript format. + + Args: + model (torch.nn.Module): The YOLOv5 model to be exported. + im (torch.Tensor): Example input tensor to be used for tracing the TorchScript model. 
+ file (Path): File path where the exported TorchScript model will be saved. + optimize (bool): If True, applies optimizations for mobile deployment. + prefix (str): Optional prefix for log messages. Default is 'TorchScript:'. + + Returns: + (str | None, torch.jit.ScriptModule | None): A tuple containing the file path of the exported model + (as a string) and the TorchScript model (as a torch.jit.ScriptModule). If the export fails, both elements + of the tuple will be None. + + Notes: + - This function uses tracing to create the TorchScript model. + - Metadata, including the input image shape, model stride, and class names, is saved in an extra file (`config.txt`) + within the TorchScript model package. + - For mobile optimization, refer to the PyTorch tutorial: https://pytorch.org/tutorials/recipes/mobile_interpreter.html + + Example: + ```python + from pathlib import Path + import torch + from models.experimental import attempt_load + from utils.torch_utils import select_device + + # Load model + weights = 'yolov5s.pt' + device = select_device('') + model = attempt_load(weights, map_location=device) + + # Example input tensor + im = torch.zeros(1, 3, 640, 640).to(device) + + # Export model + file = Path('yolov5s.torchscript') + export_torchscript(model, im, file, optimize=False) + ``` """ LOGGER.info(f"\n{prefix} starting export with torch {torch.__version__}...") f = file.with_suffix(".torchscript") @@ -168,7 +277,31 @@ def export_torchscript(model, im, file, optimize, prefix=colorstr("TorchScript:" @try_export def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr("ONNX:")): - """Exports a YOLOv5 model to ONNX format with dynamic axes and optional simplification.""" + """ + Export a YOLOv5 model to ONNX format with dynamic axes support and optional model simplification. + + Args: + model (torch.nn.Module): The YOLOv5 model to be exported. + im (torch.Tensor): A sample input tensor for model tracing, usually the shape is (1, 3, height, width). + file (pathlib.Path | str): The output file path where the ONNX model will be saved. + opset (int): The ONNX opset version to use for export. + dynamic (bool): If True, enables dynamic axes for batch, height, and width dimensions. + simplify (bool): If True, applies ONNX model simplification for optimization. + prefix (str): A prefix string for logging messages, defaults to 'ONNX:'. + + Returns: + tuple[pathlib.Path | str, None]: The path to the saved ONNX model file and None (consistent with decorator). + + Raises: + ImportError: If required libraries for export (e.g., 'onnx', 'onnx-simplifier') are not installed. + AssertionError: If the simplification check fails. + + Notes: + The required packages for this function can be installed via: + ``` + pip install onnx onnx-simplifier onnxruntime onnxruntime-gpu + ``` + """ check_requirements("onnx>=1.12.0") import onnx @@ -225,7 +358,41 @@ def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr("ONNX @try_export def export_openvino(file, metadata, half, int8, data, prefix=colorstr("OpenVINO:")): - """Exports a YOLOv5 model to OpenVINO format with optional FP16 and INT8 quantization; see https://pypi.org/project/openvino-dev/.""" + """ + Exports a YOLOv5 model to OpenVINO format with optional FP16 and INT8 quantization; see + https://pypi.org/project/openvino-dev/. + + Args: + file (Path): The path to the output file where the OpenVINO model will be saved. + metadata (dict): Dictionary including model metadata such as names and strides. 
+ half (bool): If True, export the model with FP16 precision. + int8 (bool): If True, export the model with INT8 quantization. + data (str): Path to the dataset YAML file required for INT8 quantization. + prefix (str): Prefix string for logging purposes (default is "OpenVINO:"). + + Returns: + (str, openvino.runtime.Model | None): Returns the OpenVINO model file path and openvino.runtime.Model object if + export is successful; otherwise, returns None. + + Notes: + - Requires `openvino-dev` package version 2023.0 or higher. Install with: + `$ pip install openvino-dev>=2023.0` + - For INT8 quantization, also requires `nncf` library version 2.5.0 or higher. Install with: + `$ pip install nncf>=2.5.0` + + Examples: + ```python + from pathlib import Path + from ultralytics import YOLOv5 + + model = YOLOv5('yolov5s.pt') + export_openvino(Path('yolov5s.onnx'), metadata={'names': model.names, 'stride': model.stride}, half=True, + int8=False, data='data.yaml') + ``` + + This will export the YOLOv5 model to OpenVINO with FP16 precision but without INT8 quantization, saving it to + the specified file path. + """ check_requirements("openvino-dev>=2023.0") # requires openvino-dev: https://pypi.org/project/openvino-dev/ import openvino.runtime as ov # noqa from openvino.tools import mo # noqa @@ -282,8 +449,39 @@ def transform_fn(data_item): @try_export def export_paddle(model, im, file, metadata, prefix=colorstr("PaddlePaddle:")): - """Exports a YOLOv5 model to PaddlePaddle format using X2Paddle, saving to `save_dir` and adding a metadata.yaml - file. + """ + Exports a YOLOv5 model to PaddlePaddle format using X2Paddle, saving the converted model and metadata. + + Args: + model (torch.nn.Module): The YOLOv5 model to be exported. + im (torch.Tensor): Input tensor used for model tracing during export. + file (pathlib.Path): Path to the source file to be converted. + metadata (dict): Additional metadata to be saved alongside the model. + prefix (str): Prefix for logging information. + + Returns: + tuple (str, None): A tuple where the first element is the path to the saved PaddlePaddle model, and the + second element is None. + + Examples: + ```python + from pathlib import Path + import torch + + # Assume 'model' is a pre-trained YOLOv5 model and 'im' is an example input tensor + model = ... # Load your model here + im = torch.randn((1, 3, 640, 640)) # Dummy input tensor for tracing + file = Path("yolov5s.pt") + metadata = {"stride": 32, "names": ["person", "bicycle", "car", "motorbike"]} + + export_paddle(model=model, im=im, file=file, metadata=metadata) + ``` + Notes: + Ensure that `paddlepaddle` and `x2paddle` are installed, as these are required for the export function. You can + install them via pip: + ``` + $ pip install paddlepaddle x2paddle + ``` """ check_requirements(("paddlepaddle", "x2paddle")) import x2paddle @@ -299,7 +497,36 @@ def export_paddle(model, im, file, metadata, prefix=colorstr("PaddlePaddle:")): @try_export def export_coreml(model, im, file, int8, half, nms, prefix=colorstr("CoreML:")): - """Exports YOLOv5 model to CoreML format with optional NMS, INT8, and FP16 support; requires coremltools.""" + """ + Export a YOLOv5 model to CoreML format with optional NMS, INT8, and FP16 support. + + Args: + model (torch.nn.Module): The YOLOv5 model to be exported. + im (torch.Tensor): Example input tensor to trace the model. + file (pathlib.Path): Path object where the CoreML model will be saved. + int8 (bool): Flag indicating whether to use INT8 quantization (default is False). 
+ half (bool): Flag indicating whether to use FP16 quantization (default is False). + nms (bool): Flag indicating whether to include Non-Maximum Suppression (default is False). + prefix (str): Prefix string for logging purposes (default is 'CoreML:'). + + Returns: + tuple[pathlib.Path | None, None]: The path to the saved CoreML model file, or (None, None) if there is an error. + + Notes: + The exported CoreML model will be saved with a .mlmodel extension. + Quantization is supported only on macOS. + + Example: + ```python + from pathlib import Path + import torch + from models.yolo import Model + model = Model(cfg, ch=3, nc=80) + im = torch.randn(1, 3, 640, 640) + file = Path("yolov5s_coreml") + export_coreml(model, im, file, int8=False, half=False, nms=True) + ``` + """ check_requirements("coremltools") import coremltools as ct @@ -327,7 +554,36 @@ def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose """ Exports a YOLOv5 model to TensorRT engine format, requiring GPU and TensorRT>=7.0.0. - https://developer.nvidia.com/tensorrt + Args: + model (torch.nn.Module): YOLOv5 model to be exported. + im (torch.Tensor): Input tensor of shape (B,C,H,W). + file (Path): Path to save the exported model. + half (bool): Set to True to export with FP16 precision. + dynamic (bool): Set to True to enable dynamic input shapes. + simplify (bool): Set to True to simplify the model during export. + workspace (int): Workspace size in GB (default is 4). + verbose (bool): Set to True for verbose logging output. + prefix (str): Log message prefix. + + Returns: + (Path, None): Tuple containing the path to the exported model and None. + + Raises: + AssertionError: If executed on CPU instead of GPU. + RuntimeError: If there is a failure in parsing the ONNX file. + + Example: + ```python + from ultralytics import YOLOv5 + import torch + from pathlib import Path + + model = YOLOv5('yolov5s.pt') # Load a pre-trained YOLOv5 model + input_tensor = torch.randn(1, 3, 640, 640).cuda() # example input tensor on GPU + export_path = Path('yolov5s.engine') # export destination + + export_engine(model.model, input_tensor, export_path, half=True, dynamic=True, simplify=True, workspace=8, verbose=True) + ``` """ assert im.device.type != "cpu", "export running on CPU but must be on GPU, i.e. `python export.py --device 0`" try: @@ -407,8 +663,40 @@ def export_saved_model( keras=False, prefix=colorstr("TensorFlow SavedModel:"), ): - """Exports a YOLOv5 model to TensorFlow SavedModel format, supporting dynamic axes and non-maximum suppression - (NMS). + """ + Exports a YOLOv5 model to TensorFlow SavedModel format, supporting dynamic axes and non-maximum suppression (NMS). + + Args: + model (torch.nn.Module): The PyTorch model to convert. + im (torch.Tensor): Sample input tensor with shape (B, C, H, W) for tracing. + file (pathlib.Path): File path to save the exported model. + dynamic (bool): Flag to indicate whether dynamic axes should be used. + tf_nms (bool, optional): Enable TensorFlow non-maximum suppression (NMS). Default is False. + agnostic_nms (bool, optional): Enable class-agnostic NMS. Default is False. + topk_per_class (int, optional): Top K detections per class to keep before applying NMS. Default is 100. + topk_all (int, optional): Top K detections across all classes to keep before applying NMS. Default is 100. + iou_thres (float, optional): IoU threshold for NMS. Default is 0.45. + conf_thres (float, optional): Confidence threshold for detections. Default is 0.25. 
+ keras (bool, optional): Save the model in Keras format if True. Default is False. + prefix (str, optional): Prefix for logging messages. Default is "TensorFlow SavedModel:". + + Returns: + tuple: A tuple containing the path to the saved model folder (str) and the Keras model instance (tf.keras.Model | None). + + Notes: + - The method supports TensorFlow versions up to 2.15.1. + - TensorFlow NMS may not be supported in older TensorFlow versions. + - If the TensorFlow version exceeds 2.13.1, it might cause issues when exporting to TFLite. + Refer to: https://github.com/ultralytics/yolov5/issues/12489 + + Raises: + Exception: If TensorFlow is not installed. + + Example: + ```python + model, im = ... # Initialize your PyTorch model and input tensor + export_saved_model(model, im, Path("yolov5_saved_model"), dynamic=True) + ``` """ # YOLOv5 TensorFlow SavedModel export try: @@ -460,7 +748,28 @@ def export_saved_model( @try_export def export_pb(keras_model, file, prefix=colorstr("TensorFlow GraphDef:")): - """Exports YOLOv5 model to TensorFlow GraphDef *.pb format; see https://github.com/leimao/Frozen_Graph_TensorFlow for details.""" + """ + Exports YOLOv5 model to TensorFlow GraphDef (*.pb) format. + + Args: + keras_model (tf.keras.Model): The Keras model to be converted. + file (Path): The output file path where the GraphDef will be saved. + prefix (str): Optional prefix string; defaults to a colored string indicating TensorFlow GraphDef export status. + + Returns: + Tuple[Path, None]: The file path where the GraphDef model was saved and a None placeholder. + + Notes: + For more details, refer to the guide on frozen graphs: https://github.com/leimao/Frozen_Graph_TensorFlow + + Example: + ```python + from pathlib import Path + keras_model = ... # assume an existing Keras model + file = Path("model.pb") + export_pb(keras_model, file) + ``` + """ import tensorflow as tf from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 @@ -480,7 +789,46 @@ def export_tflite( keras_model, im, file, int8, per_tensor, data, nms, agnostic_nms, prefix=colorstr("TensorFlow Lite:") ): # YOLOv5 TensorFlow Lite export - """Exports YOLOv5 model to TensorFlow Lite format with optional FP16, INT8, and NMS support.""" + """ + Exports YOLOv5 model to TensorFlow Lite format with optional FP16, INT8, and NMS support. + + Args: + keras_model (tf.keras.Model): The Keras model to be exported. + im (torch.Tensor): Image tensor for normalization and model tracing. + file (Path): The file path to save the exported TensorFlow Lite model. + int8 (bool): Enables INT8 quantization if True. + per_tensor (bool): If True, disable per-channel quantization (applicable when int8 is True). + data (str): Path to dataset for representative dataset generation in INT8 quantization. + nms (bool): Enables Non-Maximum Suppression (NMS) support if True. + agnostic_nms (bool): Enables class-agnostic NMS support if True. + prefix (str): Prefix for logging messages. + + Returns: + (str | None, tf.lite.Model | None): The file path of the saved TFLite model, and the TFLite model instance if successful. 
+ + Example: + ```python + from pathlib import Path + import torch + import tensorflow as tf + from torchvision import models + + # Load a pre-trained model from torchvision + model = models.yolov5() # Placeholder for actual YOLOv5 model loading + im = torch.zeros(1, 3, 640, 640) # Example image tensor + + # Provide the Keras model wrapping the PyTorch YOLOv5 model + keras_model = tf.keras.models.load_model('path/to/keras_model.h5') + + # Export the model to TensorFlow Lite format + file_path = export_tflite(keras_model, im, Path('model.tflite'), int8=False, per_tensor=False, + data='path/to/dataset.yaml', nms=False, agnostic_nms=False) + ``` + + Notes: + Ensure the TensorFlow and TensorFlow Lite dependencies are installed. The exported TFLite model can be used for + efficient inference on mobile and edge devices. + """ import tensorflow as tf LOGGER.info(f"\n{prefix} starting export with tensorflow {tf.__version__}...") @@ -515,9 +863,31 @@ def export_tflite( @try_export def export_edgetpu(file, prefix=colorstr("Edge TPU:")): """ Exports a YOLOv5 model to Edge TPU compatible TFLite format; requires Linux and Edge TPU compiler. - https://coral.ai/docs/edgetpu/models-intro/ + Args: + file (Path): Path to the YOLOv5 model file to be exported (.pt format). + prefix (str, optional): Prefix for logging messages. Defaults to colorstr("Edge TPU:"). + + Returns: + tuple[Path, None]: Path to the exported Edge TPU compatible TFLite model, None. + + Raises: + AssertionError: If the system is not Linux. + subprocess.CalledProcessError: If any subprocess call to install or run the Edge TPU compiler fails. + + Notes: + To use this function, ensure you have the Edge TPU compiler installed on your Linux system. You can find + installation instructions here: https://coral.ai/docs/edgetpu/compiler/. + + Example: + ```python + from pathlib import Path + file = Path('yolov5s.pt') + export_edgetpu(file) + ``` + """ cmd = "edgetpu_compiler --version" help_url = "https://coral.ai/docs/edgetpu/compiler/" @@ -556,7 +926,30 @@ def export_edgetpu(file, prefix=colorstr("Edge TPU:")): @try_export def export_tfjs(file, int8, prefix=colorstr("TensorFlow.js:")): - """Exports a YOLOv5 model to TensorFlow.js format, optionally with uint8 quantization.""" + """ + Exports a YOLOv5 model to TensorFlow.js format, optionally with uint8 quantization. + + Args: + file (Path): Path to the input model file. + int8 (bool): If True, applies uint8 quantization. + prefix (str): Prefix for logging information (default: colorstr("TensorFlow.js:")). + + Returns: + tuple: Output directory path (str), None + + Notes: + This function requires `tensorflowjs` to be installed. You can install it using: + ```shell + pip install tensorflowjs + ``` + + Example usage: + ```python + export_tfjs(Path('yolov5s.onnx'), int8=False) + ``` + + The TensorFlow.js converted model is saved in the directory specified by `file` with "_web_model" suffix. + """ check_requirements("tensorflowjs") import tensorflowjs as tfjs @@ -596,7 +989,30 @@ def export_tfjs(file, int8, prefix=colorstr("TensorFlow.js:")): def add_tflite_metadata(file, metadata, num_outputs): """ Adds TFLite metadata to a model file, supporting multiple outputs, as specified by TensorFlow guidelines. - https://www.tensorflow.org/lite/models/convert/metadata + Args: + file (str): The path to the TensorFlow Lite model file to which metadata will be added. + metadata (dict): Metadata information to be added to the model, structured as required by TFLite metadata schema.
+ num_outputs (int): Number of output tensors the model has, to properly configure the metadata. + + Returns: + None + + Example: + ```python + metadata = { + "name": "yolov5", + "description": "YOLOv5 object detection model", + "version": "1.0", + "author": "Ultralytics", + "license": "Apache License 2.0" + } + add_tflite_metadata("model.tflite", metadata, num_outputs=4) + ``` + + Note: + TFLite metadata can include information such as model name, version, author, and other relevant details. + For more details and structure of the metadata, refer to the TensorFlow Lite + [metadata guidelines](https://www.tensorflow.org/lite/models/convert/metadata). """ with contextlib.suppress(ImportError): # check_requirements('tflite_support') @@ -630,8 +1046,49 @@ def add_tflite_metadata(file, metadata, num_outputs): def pipeline_coreml(model, im, file, names, y, prefix=colorstr("CoreML Pipeline:")): - """Converts a PyTorch YOLOv5 model to CoreML format with NMS, handling different input/output shapes and saving the - model. + """ + Converts a PyTorch YOLOv5 model to CoreML format with Non-Maximum Suppression (NMS), handling different input/output + shapes and saving the model. + + Args: + model (torch.nn.Module): The YOLOv5 PyTorch model. + im (torch.Tensor): Input tensor example with shape [N, C, H, W], where N is the batch size, C is the number of + channels, H is the height, and W is the width. + file (Path): Path to save the converted CoreML model. + names (dict[int, str]): Dictionary mapping class indices to class names. + y (torch.Tensor): Output tensor from the PyTorch model's forward pass. + prefix (str): Custom prefix for logging messages. + + Returns: + Path: Path to the saved CoreML model (.mlmodel). + + Raises: + AssertionError: If the number of class names does not match the number of classes in the model. + + Notes: + - This function requires `coremltools` to be installed. + - Running this function on a non-macOS environment might not support some features. + - Flexible input shapes and additional NMS options can be customized within the function. + + Examples: + ```python + from pathlib import Path + import torch + + # Load YOLOv5 model and an example input tensor + model = torch.load("yolov5s.pt") + im = torch.zeros(1, 3, 640, 640) # Example input tensor + + # Define class names + names = {0: "person", 1: "bicycle", 2: "car", ...} + + # Perform forward pass to get model output + y = model(im) + + # Convert to CoreML + output_file = Path("yolov5s.mlmodel") + pipeline_coreml(model, im, output_file, names, y) + ``` """ import coremltools as ct from PIL import Image @@ -788,7 +1245,70 @@ def run( iou_thres=0.45, # TF.js NMS: IoU threshold conf_thres=0.25, # TF.js NMS: confidence threshold ): - """Exports YOLOv5 model to specified formats including ONNX, TensorRT, CoreML, and TensorFlow; see https://github.com/ultralytics/yolov5.""" + """ + Exports a YOLOv5 model to specified formats including ONNX, TensorRT, CoreML, and TensorFlow. + + Args: + data (str | Path): Path to the dataset YAML configuration file. Default is 'data/coco128.yaml'. + weights (str | Path): Path to the pretrained model weights file. Default is 'yolov5s.pt'. + imgsz (tuple): Image size as (height, width). Default is (640, 640). + batch_size (int): Batch size for exporting the model. Default is 1. + device (str): Device to run the export on, e.g., '0' for GPU, 'cpu' for CPU. Default is 'cpu'. + include (tuple): Formats to include in the export. Default is ('torchscript', 'onnx'). 
+ half (bool): Flag to export model with FP16 half-precision. Default is False. + inplace (bool): Set the YOLOv5 Detect() module inplace=True. Default is False. + keras (bool): Flag to use Keras for TensorFlow SavedModel export. Default is False. + optimize (bool): Optimize TorchScript model for mobile deployment. Default is False. + int8 (bool): Apply INT8 quantization for CoreML or TensorFlow models. Default is False. + per_tensor (bool): Apply per tensor quantization for TensorFlow models. Default is False. + dynamic (bool): Enable dynamic axes for ONNX, TensorFlow, or TensorRT exports. Default is False. + simplify (bool): Simplify the ONNX model during export. Default is False. + opset (int): ONNX opset version. Default is 12. + verbose (bool): Enable verbose logging for TensorRT export. Default is False. + workspace (int): TensorRT workspace size in GB. Default is 4. + nms (bool): Add non-maximum suppression (NMS) to the TensorFlow model. Default is False. + agnostic_nms (bool): Add class-agnostic NMS to the TensorFlow model. Default is False. + topk_per_class (int): Top-K boxes per class to keep for TensorFlow.js NMS. Default is 100. + topk_all (int): Top-K boxes for all classes to keep for TensorFlow.js NMS. Default is 100. + iou_thres (float): IoU threshold for NMS. Default is 0.45. + conf_thres (float): Confidence threshold for NMS. Default is 0.25. + + Returns: + None + + Notes: + - Model export is based on the specified formats in the 'include' argument. + - Be cautious of combinations where certain flags are mutually exclusive, such as `--half` and `--dynamic`. + + Example: + ```python + run( + data="data/coco128.yaml", + weights="yolov5s.pt", + imgsz=(640, 640), + batch_size=1, + device="cpu", + include=("torchscript", "onnx"), + half=False, + inplace=False, + keras=False, + optimize=False, + int8=False, + per_tensor=False, + dynamic=False, + simplify=False, + opset=12, + verbose=False, + workspace=4, + nms=False, + agnostic_nms=False, + topk_per_class=100, + topk_all=100, + iou_thres=0.45, + conf_thres=0.25, + ) + ``` + """ t = time.time() include = [x.lower() for x in include] # to lowercase fmts = tuple(export_formats()["Argument"][1:]) # --include arguments @@ -901,7 +1421,23 @@ def run( def parse_opt(known=False): - """Parses command-line arguments for YOLOv5 model export configurations, returning the parsed options.""" + """ + Parses command-line arguments for YOLOv5 model export configurations. + + Args: + known (bool): If True, `argparse.ArgumentParser.parse_known_args` is used to parse command-line arguments; otherwise, + `argparse.ArgumentParser.parse_args` is used. Defaults to False. + + Returns: + argparse.Namespace: An object containing parsed command-line arguments. + + Example: + ```python + opts = parse_opt() + print(opts.data) + print(opts.weights) + ``` + """ parser = argparse.ArgumentParser() parser.add_argument("--data", type=str, default=ROOT / "data/coco128.yaml", help="dataset.yaml path") parser.add_argument("--weights", nargs="+", type=str, default=ROOT / "yolov5s.pt", help="model.pt path(s)") @@ -937,7 +1473,44 @@ def parse_opt(known=False): def main(opt): - """Executes the YOLOv5 model inference or export with specified weights and options.""" + """ + ```python Exports the YOLOv5 model to specified formats, including ONNX, TensorRT, CoreML, and TensorFlow. + + Args: + opt (argparse.Namespace): Parsed command-line arguments containing the export configurations. + - data (str): Path to the dataset.yaml. 
+            - weights (list[str]): Paths to model (.pt) file(s).
+            - imgsz (list[int]): Image size (height, width).
+            - batch_size (int): Batch size.
+            - device (str): CUDA device, e.g., '0' or '0,1,2,3' or 'cpu'.
+            - half (bool): FP16 half-precision export flag.
+            - inplace (bool): Set YOLOv5 Detect() inplace to True.
+            - keras (bool): Use Keras for TensorFlow models.
+            - optimize (bool): Optimize TorchScript model for mobile.
+            - int8 (bool): INT8 quantization flag.
+            - per_tensor (bool): Per tensor quantization for TensorFlow.
+            - dynamic (bool): Dynamic axes for ONNX/TF/TensorRT.
+            - simplify (bool): Simplify ONNX model.
+            - opset (int): ONNX opset version.
+            - verbose (bool): Verbose logging for TensorRT.
+            - workspace (int): Workspace size for TensorRT (in GB).
+            - nms (bool): Add NMS to TensorFlow model.
+            - agnostic_nms (bool): Add agnostic NMS to TensorFlow model.
+            - topk_per_class (int): Top-k per class for TensorFlow.js NMS.
+            - topk_all (int): Top-k for all classes for TensorFlow.js NMS.
+            - iou_thres (float): IoU threshold for TensorFlow.js NMS.
+            - conf_thres (float): Confidence threshold for TensorFlow.js NMS.
+            - include (list[str]): List of formats to include in export, e.g., ['torchscript', 'onnx'].
+
+    Returns:
+        None
+
+    Example:
+        ```python
+        opt = parse_opt()
+        main(opt)
+        ```
+    """
     for opt.weights in opt.weights if isinstance(opt.weights, list) else [opt.weights]:
         run(**vars(opt))

diff --git a/hubconf.py b/hubconf.py
index 4b0c36b8daed..de93c79bfae5 100644
--- a/hubconf.py
+++ b/hubconf.py
@@ -15,19 +15,38 @@

 def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
     """
-    Creates or loads a YOLOv5 model.
+    Creates or loads a YOLOv5 model, with options for pretrained weights and model customization.

-    Arguments:
-        name (str): model name 'yolov5s' or path 'path/to/best.pt'
-        pretrained (bool): load pretrained weights into the model
-        channels (int): number of input channels
-        classes (int): number of model classes
-        autoshape (bool): apply YOLOv5 .autoshape() wrapper to model
-        verbose (bool): print all information to screen
-        device (str, torch.device, None): device to use for model parameters
+    Args:
+        name (str): Model name (e.g., 'yolov5s') or path to the model checkpoint (e.g., 'path/to/best.pt').
+        pretrained (bool, optional): If True, loads pretrained weights into the model. Defaults to True.
+        channels (int, optional): Number of input channels the model expects. Defaults to 3.
+        classes (int, optional): Number of classes the model is expected to detect. Defaults to 80.
+        autoshape (bool, optional): If True, applies the YOLOv5 .autoshape() wrapper for various input formats. Defaults to True.
+        verbose (bool, optional): If True, prints detailed information during the model creation/loading process. Defaults to True.
+        device (str | torch.device | None, optional): Device to use for model parameters (e.g., 'cpu', 'cuda'). If None, selects the best available device. Defaults to None.

     Returns:
-        YOLOv5 model
+        DetectMultiBackend | AutoShape: The loaded YOLOv5 model, potentially wrapped with AutoShape if specified.
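+
+    A minimal inference sketch (illustrative only; the sample image URL is the one used throughout the YOLOv5 docs,
+    and the default AutoShape wrapper is assumed to be kept):
+    ```python
+    model = _create('yolov5s')  # pretrained and AutoShape-wrapped by default
+    results = model('https://ultralytics.com/images/zidane.jpg')  # accepts file/URI/PIL/cv2/np inputs
+    results.print()  # print a detection summary
+    ```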
+
+    Examples:
+        ```python
+        import torch
+        from hubconf import _create  # hubconf.py lives at the YOLOv5 repository root
+
+        # Load an official YOLOv5s model with pretrained weights
+        model = _create('yolov5s')
+
+        # Load a custom model from a local checkpoint
+        model = _create('path/to/custom_model.pt', pretrained=False)
+
+        # Load a model with specific input channels and classes
+        model = _create('yolov5s', channels=1, classes=10)
+        ```
+
+    Notes:
+        For more information on model loading and customization, visit the
+        [YOLOv5 PyTorch Hub Documentation](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading).
     """
     from pathlib import Path

@@ -84,76 +103,355 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo


 def custom(path="path/to/model.pt", autoshape=True, _verbose=True, device=None):
-    """Loads a custom or local YOLOv5 model from a given path with optional autoshaping and device specification."""
+    """
+    Loads a custom or local YOLOv5 model from a given path with optional autoshaping and device specification.
+
+    Args:
+        path (str): Path to the custom model file (e.g., 'path/to/model.pt').
+        autoshape (bool): Apply YOLOv5 .autoshape() wrapper to model if True, enabling compatibility with various input types
+            (default is True).
+        _verbose (bool): If True, prints all informational messages to the screen; otherwise, operates silently
+            (default is True).
+        device (str | torch.device | None): Device to load the model on, e.g., 'cpu', 'cuda', torch.device('cuda:0'), etc.
+            (default is None, which automatically selects the best available device).
+
+    Returns:
+        torch.nn.Module: A YOLOv5 model loaded with the specified parameters.
+
+    Notes:
+        For more details on loading models from PyTorch Hub:
+        https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading
+
+    Examples:
+        ```python
+        # Load model from a given path with autoshape enabled on the best available device
+        model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s.pt')
+
+        # Load model from a local path without autoshape on the CPU device
+        model = torch.hub.load('.', 'custom', 'yolov5s.pt', source='local', autoshape=False, device='cpu')
+        ```
+    """
     return _create(path, autoshape=autoshape, verbose=_verbose, device=device)


 def yolov5n(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
-    """Instantiates the YOLOv5-nano model with options for pretraining, input channels, class count, autoshaping,
+    """
+    Instantiates the YOLOv5-nano model with options for pretraining, input channels, class count, autoshaping,
     verbosity, and device.
+
+    Args:
+        pretrained (bool): If True, loads pretrained weights into the model. Defaults to True.
+        channels (int): Number of input channels for the model. Defaults to 3.
+        classes (int): Number of classes for object detection. Defaults to 80.
+        autoshape (bool): If True, applies the YOLOv5 .autoshape() wrapper to the model for various formats (file/URI/PIL/
+            cv2/np) and non-maximum suppression (NMS) during inference. Defaults to True.
+        _verbose (bool): If True, prints detailed information to the screen. Defaults to True.
+        device (str | torch.device | None): Specifies the device to use for model computation. If None, uses the best device
+            available (i.e., GPU if available, otherwise CPU). Defaults to None.
+
+    Returns:
+        DetectionModel | ClassificationModel | SegmentationModel: The instantiated YOLOv5-nano model, potentially with
+            pretrained weights and autoshaping applied.
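+
+    A results-handling sketch (illustrative only; assumes the default AutoShape wrapper is kept):
+    ```python
+    import torch
+    model = torch.hub.load('ultralytics/yolov5', 'yolov5n')
+    results = model('https://ultralytics.com/images/zidane.jpg')
+    results.xyxy[0]  # per-image tensor of [xmin, ymin, xmax, ymax, confidence, class]
+    results.pandas().xyxy[0]  # the same detections as a pandas DataFrame
+    ```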
+
+    Notes:
+        For further details on loading models from PyTorch Hub, refer to [PyTorch Hub models](https://pytorch.org/hub/
+        ultralytics_yolov5).
+
+    Examples:
+        ```python
+        import torch
+        from hubconf import yolov5n  # hubconf.py lives at the YOLOv5 repository root
+
+        # Load the YOLOv5-nano model with defaults
+        model = yolov5n()
+
+        # Load the YOLOv5-nano model with a specific device
+        model = yolov5n(device='cuda')
+        ```
     """
     return _create("yolov5n", pretrained, channels, classes, autoshape, _verbose, device)


 def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
-    """Creates YOLOv5-small model with options for pretraining, input channels, class count, autoshaping, verbosity, and
+    """
+    Creates YOLOv5-small model with options for pretraining, input channels, class count, autoshaping, verbosity, and
     device.
+
+    Args:
+        pretrained (bool, optional): Flag to load pretrained weights into the model. Defaults to True.
+        channels (int, optional): Number of input channels. Defaults to 3.
+        classes (int, optional): Number of model classes. Defaults to 80.
+        autoshape (bool, optional): Whether to apply YOLOv5 .autoshape() wrapper to the model for preprocessed inputs.
+            Defaults to True.
+        _verbose (bool, optional): Flag to print detailed information on model loading. Defaults to True.
+        device (str | torch.device | None, optional): Device to use for model parameters, e.g., 'cpu', 'cuda'. If None,
+            auto-select the best available device. Defaults to None.
+
+    Returns:
+        torch.nn.Module: The YOLOv5-small model loaded with specified configurations and optionally pretrained
+            weights.
+
+    Usage:
+        ```python
+        import torch
+        model = torch.hub.load('ultralytics/yolov5', 'yolov5s')  # official model
+        model = torch.hub.load('ultralytics/yolov5:master', 'yolov5s')  # from branch
+        model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s.pt')  # custom/local model
+        model = torch.hub.load('.', 'custom', 'yolov5s.pt', source='local')  # local repo
+        ```
+
+    For more information, visit https://pytorch.org/hub/ultralytics_yolov5.
     """
     return _create("yolov5s", pretrained, channels, classes, autoshape, _verbose, device)


 def yolov5m(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
-    """Instantiates the YOLOv5-medium model with customizable pretraining, channel count, class count, autoshaping,
+    """
+    Instantiates the YOLOv5-medium model with customizable pretraining, channel count, class count, autoshaping,
     verbosity, and device.
+
+    Args:
+        pretrained (bool, optional): Whether to load pretrained weights into the model. Default is True.
+        channels (int, optional): Number of input channels. Default is 3.
+        classes (int, optional): Number of model classes. Default is 80.
+        autoshape (bool, optional): Apply YOLOv5 .autoshape() wrapper to the model for handling various input formats. Default is True.
+        _verbose (bool, optional): Whether to print detailed information to the screen. Default is True.
+        device (str | torch.device | None, optional): Device specification to use for model parameters (e.g., 'cpu', 'cuda'). Default is None.
+
+    Returns:
+        torch.nn.Module: The instantiated YOLOv5-medium model.
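+
+    A batched-inference sketch (illustrative only; the image paths assume the YOLOv5 repository layout):
+    ```python
+    import torch
+    model = torch.hub.load('ultralytics/yolov5', 'yolov5m')
+    results = model(['data/images/zidane.jpg', 'data/images/bus.jpg'])  # a list input is processed as a batch
+    results.save()  # save annotated copies, by default under runs/detect/exp
+    ```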
+ + Usage Example: + ```python + import torch + + model = torch.hub.load('ultralytics/yolov5', 'yolov5m') # Load YOLOv5-medium from Ultralytics repository + model = torch.hub.load('ultralytics/yolov5:master', 'yolov5m') # Load from the master branch + model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5m.pt') # Load a custom/local YOLOv5-medium model + model = torch.hub.load('.', 'custom', 'yolov5m.pt', source='local') # Load from a local repository + ``` """ return _create("yolov5m", pretrained, channels, classes, autoshape, _verbose, device) def yolov5l(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - """Creates YOLOv5-large model with options for pretraining, channels, classes, autoshaping, verbosity, and device + """ + Creates YOLOv5-large model with options for pretraining, channels, classes, autoshaping, verbosity, and device selection. + + Args: + pretrained (bool): Load pretrained weights into the model. Default is True. + channels (int): Number of input channels. Default is 3. + classes (int): Number of model classes. Default is 80. + autoshape (bool): Apply YOLOv5 .autoshape() wrapper to model. Default is True. + _verbose (bool): Print all information to screen. Default is True. + device (str | torch.device | None): Device to use for model parameters, e.g., 'cpu', 'cuda', or a torch.device instance. Default is None. + + Returns: + YOLOv5 model (torch.nn.Module). + + Example: + ```python + import torch + model = torch.hub.load('ultralytics/yolov5', 'yolov5l') + ``` + + Notes: + For additional details, refer to the PyTorch Hub models documentation: + https://pytorch.org/hub/ultralytics_yolov5 """ return _create("yolov5l", pretrained, channels, classes, autoshape, _verbose, device) def yolov5x(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - """Instantiates the YOLOv5-xlarge model with customizable pretraining, channel count, class count, autoshaping, + """ + Instantiates the YOLOv5-xlarge model with customizable pretraining, channel count, class count, autoshaping, verbosity, and device. + + Args: + pretrained (bool): If True, loads pretrained weights into the model. Defaults to True. + channels (int): Number of input channels. Defaults to 3. + classes (int): Number of model classes. Defaults to 80. + autoshape (bool): If True, applies YOLOv5 .autoshape() wrapper to the model for easier image handling. Defaults to + True. + _verbose (bool): If True, prints detailed information to the screen. Defaults to True. + device (str | torch.device | None): Device for model parameters, e.g., 'cpu', 'cuda:0', or a torch.device object. + Defaults to None. + + Returns: + torch.nn.Module: The instantiated YOLOv5-xlarge model. + + Example: + ```python + import torch + model = torch.hub.load('ultralytics/yolov5', 'yolov5x') + ``` + + For more details and usage, refer to the official YOLOv5 PyTorch Hub models documentation: + https://pytorch.org/hub/ultralytics_yolov5 """ return _create("yolov5x", pretrained, channels, classes, autoshape, _verbose, device) def yolov5n6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - """Creates YOLOv5-nano-P6 model with options for pretraining, channels, classes, autoshaping, verbosity, and - device. + """ + Creates YOLOv5-nano-P6 model with options for pretraining, channels, classes, autoshaping, verbosity, and device. + + Args: + pretrained (bool, optional): If True, loads pretrained weights into the model. Default is True. 
+ channels (int, optional): Number of input channels. Default is 3. + classes (int, optional): Number of model classes. Default is 80. + autoshape (bool, optional): If True, applies the YOLOv5 .autoshape() wrapper to the model. Default is True. + _verbose (bool, optional): If True, prints all information to screen. Default is True. + device (str | torch.device | None, optional): Device to use for model parameters. Can be 'cpu', 'cuda', or None. + Default is None. + + Returns: + torch.nn.Module: YOLOv5 model loaded with the specified configurations. + + Example: + ```python + import torch + model = yolov5n6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device='cuda') + ``` + + Notes: + For more information on PyTorch Hub models, visit: https://pytorch.org/hub/ultralytics_yolov5 """ return _create("yolov5n6", pretrained, channels, classes, autoshape, _verbose, device) def yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - """Instantiate YOLOv5-small-P6 model with options for pretraining, input channels, number of classes, autoshaping, + """ + Instantiate the YOLOv5-small-P6 model with options for pretraining, input channels, number of classes, autoshaping, verbosity, and device selection. + + Args: + pretrained (bool): If True, loads pretrained weights. Default is True. + channels (int): Number of input channels. Default is 3. + classes (int): Number of object detection classes. Default is 80. + autoshape (bool): If True, applies YOLOv5 .autoshape() wrapper to the model, allowing for varied input formats. + Default is True. + _verbose (bool): If True, prints detailed information during model loading. Default is True. + device (str | torch.device | None): Device specification for model parameters (e.g., 'cpu', 'cuda', or torch.device). + Default is None, which selects an available device automatically. + + Returns: + torch.nn.Module: The YOLOv5-small-P6 model instance. + + Usage: + ```python + import torch + + model = torch.hub.load('ultralytics/yolov5', 'yolov5s6') + model = torch.hub.load('ultralytics/yolov5:master', 'yolov5s6') # load from a specific branch + model = torch.hub.load('ultralytics/yolov5', 'custom', 'path/to/yolov5s6.pt') # custom/local model + model = torch.hub.load('.', 'custom', 'path/to/yolov5s6.pt', source='local') # local repo model + ``` + + Notes: + - For more information, refer to the PyTorch Hub models documentation at https://pytorch.org/hub/ultralytics_yolov5 + + Raises: + Exception: If there is an error during model creation or loading, with a suggestion to visit the YOLOv5 + tutorials for help. """ return _create("yolov5s6", pretrained, channels, classes, autoshape, _verbose, device) def yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - """Creates YOLOv5-medium-P6 model with options for pretraining, channel count, class count, autoshaping, verbosity, - and device. + """ + Creates YOLOv5-medium-P6 model with options for pretraining, channel count, class count, autoshaping, verbosity, and + device. + + Args: + pretrained (bool): If True, loads pretrained weights. Default is True. + channels (int): Number of input channels. Default is 3. + classes (int): Number of model classes. Default is 80. + autoshape (bool): Apply YOLOv5 .autoshape() wrapper to the model for file/URI/PIL/cv2/np inputs and NMS. Default is True. + _verbose (bool): If True, prints detailed information to the screen. Default is True. 
+ device (str | torch.device | None): Device to use for model parameters. Default is None, which uses the best available device. + + Returns: + torch.nn.Module: The YOLOv5-medium-P6 model. + + Refer to the PyTorch Hub models documentation: https://pytorch.org/hub/ultralytics_yolov5 for additional details. + + Example: + ```python + import torch + + # Load YOLOv5-medium-P6 model + model = torch.hub.load('ultralytics/yolov5', 'yolov5m6') + ``` + + Notes: + - The model can be loaded with pre-trained weights for better performance on specific tasks. + - The autoshape feature simplifies input handling by allowing various popular data formats. """ return _create("yolov5m6", pretrained, channels, classes, autoshape, _verbose, device) def yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - """Instantiates the YOLOv5-large-P6 model with customizable pretraining, channel and class counts, autoshaping, + """ + Instantiates the YOLOv5-large-P6 model with customizable pretraining, channel and class counts, autoshaping, verbosity, and device selection. + + Args: + pretrained (bool, optional): If True, load pretrained weights into the model. Default is True. + channels (int, optional): Number of input channels. Default is 3. + classes (int, optional): Number of model classes. Default is 80. + autoshape (bool, optional): If True, apply YOLOv5 .autoshape() wrapper to the model for input flexibility. + Default is True. + _verbose (bool, optional): If True, print all information to the screen. Default is True. + device (str | torch.device | None, optional): Device to use for model parameters, e.g., 'cpu', 'cuda', or + torch.device. If None, automatically selects the best available + device. Default is None. + + Returns: + torch.nn.Module: The instantiated YOLOv5-large-P6 model. + + Usage: + ```python + import torch + model = torch.hub.load('ultralytics/yolov5', 'yolov5l6') # official model + model = torch.hub.load('ultralytics/yolov5:master', 'yolov5l6') # from specific branch + model = torch.hub.load('ultralytics/yolov5', 'custom', 'path/to/yolov5l6.pt') # custom/local model + model = torch.hub.load('.', 'custom', 'path/to/yolov5l6.pt', source='local') # local repository + ``` + + Note: + Refer to [PyTorch Hub Documentation](https://pytorch.org/hub/ultralytics_yolov5) for additional usage instructions. """ return _create("yolov5l6", pretrained, channels, classes, autoshape, _verbose, device) def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - """Creates YOLOv5-xlarge-P6 model with options for pretraining, channels, classes, autoshaping, verbosity, and - device. + """ + Creates the YOLOv5-xlarge-P6 model with options for pretraining, number of input channels, class count, autoshaping, + verbosity, and device selection. + + Args: + pretrained (bool): If True, loads pretrained weights into the model. Default is True. + channels (int): Number of input channels. Default is 3. + classes (int): Number of model classes. Default is 80. + autoshape (bool): If True, applies YOLOv5 .autoshape() wrapper to the model. Default is True. + _verbose (bool): If True, prints all information to the screen. Default is True. + device (str | torch.device | None): Device to use for model parameters, can be a string, torch.device object, or + None for default device selection. Default is None. + + Returns: + torch.nn.Module: The instantiated YOLOv5-xlarge-P6 model. 
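+
+    P6 models are trained at 1280-pixel resolution, so inference at a matching size usually performs best. A short
+    sketch (illustrative only; `size` is the AutoShape resize argument, and the image path assumes the repository
+    layout):
+    ```python
+    import torch
+    model = torch.hub.load('ultralytics/yolov5', 'yolov5x6')
+    results = model('data/images/zidane.jpg', size=1280)  # larger inference size suits P6 models
+    ```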
+
+    Example:
+        ```python
+        import torch
+        model = torch.hub.load('ultralytics/yolov5', 'yolov5x6')  # load the YOLOv5-xlarge-P6 model
+        ```
+
+    Note:
+        For more information on YOLOv5 models, visit the official documentation:
+        https://docs.ultralytics.com/yolov5
     """
     return _create("yolov5x6", pretrained, channels, classes, autoshape, _verbose, device)

diff --git a/train.py b/train.py
index 472c1d395981..65e6806b22e0 100644
--- a/train.py
+++ b/train.py
@@ -102,10 +102,37 @@

 def train(hyp, opt, device, callbacks):
     """
-    Trains YOLOv5 model with given hyperparameters, options, and device, managing datasets, model architecture, loss
-    computation, and optimizer steps.
-
-    `hyp` argument is path/to/hyp.yaml or hyp dictionary.
+    Trains a YOLOv5 model on a custom dataset using specified hyperparameters, options, and device, managing datasets,
+    model architecture, loss computation, and optimizer steps.
+
+    Args:
+        hyp (str | dict): Path to the hyperparameters YAML file or a dictionary of hyperparameters.
+        opt (argparse.Namespace): Parsed command-line arguments containing training options.
+        device (torch.device): Device on which training occurs, e.g., 'cuda' or 'cpu'.
+        callbacks (Callbacks): Callback functions for various training events.
+
+    Returns:
+        None
+
+    Models and datasets download automatically from the latest YOLOv5 release.
+
+    Example:
+        Single-GPU training:
+        ```bash
+        $ python train.py --data coco128.yaml --weights yolov5s.pt --img 640  # from pretrained (recommended)
+        $ python train.py --data coco128.yaml --weights '' --cfg yolov5s.yaml --img 640  # from scratch
+        ```
+
+        Multi-GPU DDP training:
+        ```bash
+        $ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 train.py --data coco128.yaml \
+            --weights yolov5s.pt --img 640 --device 0,1,2,3
+        ```
+
+    For more usage details, refer to:
+        - Models: https://github.com/ultralytics/yolov5/tree/master/models
+        - Datasets: https://github.com/ultralytics/yolov5/tree/master/data
+        - Tutorial: https://docs.ultralytics.com/yolov5/tutorials/train_custom_data
     """
     save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze = (
         Path(opt.save_dir),
@@ -515,7 +542,27 @@ def lf(x):


 def parse_opt(known=False):
-    """Parses command-line arguments for YOLOv5 training, validation, and testing."""
+    """
+    Parses command-line arguments for YOLOv5 training, validation, and testing.
+
+    Args:
+        known (bool, optional): If True, parses known arguments, ignoring the unknown. Defaults to False.
+
+    Returns:
+        argparse.Namespace: Parsed command-line arguments.
+
+    Example:
+        ```python
+        from train import parse_opt  # parse_opt is defined in this train.py
+        opt = parse_opt()
+        print(opt)
+        ```
+
+    Links:
+        Models: https://github.com/ultralytics/yolov5/tree/master/models
+        Datasets: https://github.com/ultralytics/yolov5/tree/master/data
+        Tutorial: https://docs.ultralytics.com/yolov5/tutorials/train_custom_data
+    """
     parser = argparse.ArgumentParser()
     parser.add_argument("--weights", type=str, default=ROOT / "yolov5s.pt", help="initial weights path")
     parser.add_argument("--cfg", type=str, default="", help="model.yaml path")
@@ -570,7 +617,21 @@ def parse_opt(known=False):


 def main(opt, callbacks=Callbacks()):
-    """Runs training or hyperparameter evolution with specified options and optional callbacks."""
+    """
+    Runs training or hyperparameter evolution with specified options and optional callbacks.
+
+    Args:
+        opt (argparse.Namespace): The command-line arguments parsed for YOLOv5 training and evolution.
+ callbacks (ultralytics.utils.callbacks.Callbacks, optional): Callback functions for various training stages. + Defaults to Callbacks(). + + Returns: + None + + Note: + For detailed usage, visit: + https://github.com/ultralytics/yolov5/tree/master/models + """ if RANK in {-1, 0}: print_args(vars(opt)) check_git_status() @@ -826,7 +887,25 @@ def main(opt, callbacks=Callbacks()): def generate_individual(input_ranges, individual_length): - """Generates a list of random values within specified input ranges for each gene in the individual.""" + """ + Generate a random individual with gene values within specified input ranges. + + Args: + input_ranges (list[tuple[float, float]]): List of tuples where each tuple contains the lower and upper bounds + for the corresponding gene. + individual_length (int): The number of genes in the individual. + + Returns: + list[float]: A list representing a generated individual with random gene values within the specified ranges. + + Examples: + ```python + input_ranges = [(0.01, 0.1), (0.1, 1.0), (0.9, 2.0)] + individual_length = 3 + individual = generate_individual(input_ranges, individual_length) + print(individual) # Output: [0.035, 0.678, 1.456] (example output) + ``` + """ individual = [] for i in range(individual_length): lower_bound, upper_bound = input_ranges[i] @@ -836,9 +915,54 @@ def generate_individual(input_ranges, individual_length): def run(**kwargs): """ - Executes YOLOv5 training with given options, overriding with any kwargs provided. - - Example: import train; train.run(data='coco128.yaml', imgsz=320, weights='yolov5m.pt') + Executes YOLOv5 training with given options, allowing optional overrides through keyword arguments. + + Args: + weights (str): Path to initial weights. Defaults to ROOT / 'yolov5s.pt'. + cfg (str): Path to model YAML configuration. Defaults to an empty string. + data (str): Path to dataset YAML configuration. Defaults to ROOT / 'data/coco128.yaml'. + hyp (str): Path to hyperparameters YAML configuration. Defaults to ROOT / 'data/hyps/hyp.scratch-low.yaml'. + epochs (int): Total number of training epochs. Defaults to 100. + batch_size (int): Total batch size for all GPUs. Use -1 for automatic batch size determination. Defaults to 16. + imgsz (int): Image size (pixels) for training and validation. Defaults to 640. + rect (bool): Use rectangular training. Defaults to False. + resume (bool | str): Resume most recent training with an optional path. Defaults to False. + nosave (bool): Only save final checkpoint. Defaults to False. + noval (bool): Only validate at the final epoch. Defaults to False. + noautoanchor (bool): Disable AutoAnchor. Defaults to False. + noplots (bool): Do not save plot files. Defaults to False. + evolve (int): Evolve hyperparameters for a specified number of generations. Use 300 if provided without a value. + evolve_population (str): Directory for loading population during evolution. Defaults to ROOT / 'data/hyps'. + resume_evolve (str): Resume hyperparameter evolution from the last generation. Defaults to None. + bucket (str): gsutil bucket for saving checkpoints. Defaults to an empty string. + cache (str): Cache image data in 'ram' or 'disk'. Defaults to None. + image_weights (bool): Use weighted image selection for training. Defaults to False. + device (str): CUDA device identifier, e.g., '0', '0,1,2,3', or 'cpu'. Defaults to an empty string. + multi_scale (bool): Use multi-scale training, varying image size by ±50%. Defaults to False. + single_cls (bool): Train with multi-class data as single-class. 
Defaults to False. + optimizer (str): Optimizer type, choices are ['SGD', 'Adam', 'AdamW']. Defaults to 'SGD'. + sync_bn (bool): Use synchronized BatchNorm, only available in DDP mode. Defaults to False. + workers (int): Maximum dataloader workers per rank in DDP mode. Defaults to 8. + project (str): Directory for saving training runs. Defaults to ROOT / 'runs/train'. + name (str): Name for saving the training run. Defaults to 'exp'. + exist_ok (bool): Allow existing project/name without incrementing. Defaults to False. + quad (bool): Use quad dataloader. Defaults to False. + cos_lr (bool): Use cosine learning rate scheduler. Defaults to False. + label_smoothing (float): Label smoothing epsilon value. Defaults to 0.0. + patience (int): Patience for early stopping, measured in epochs without improvement. Defaults to 100. + freeze (list): Layers to freeze, e.g., backbone=10, first 3 layers = [0, 1, 2]. Defaults to [0]. + save_period (int): Frequency in epochs to save checkpoints. Disabled if < 1. Defaults to -1. + seed (int): Global training random seed. Defaults to 0. + local_rank (int): Automatic DDP Multi-GPU argument. Do not modify. Defaults to -1. + + Returns: + None: The function initiates YOLOv5 training or hyperparameter evolution based on the provided options. + + Examples: + ```python + import train + train.run(data='coco128.yaml', imgsz=320, weights='yolov5m.pt') + ``` """ opt = parse_opt(True) for k, v in kwargs.items(): diff --git a/val.py b/val.py index c1e8a6aa3094..deb12624c32f 100644 --- a/val.py +++ b/val.py @@ -62,7 +62,30 @@ def save_one_txt(predn, save_conf, shape, file): - """Saves one detection result to a txt file in normalized xywh format, optionally including confidence.""" + """ + Saves one detection result to a txt file in normalized xywh format, optionally including confidence. + + Args: + predn (torch.Tensor): Predicted bounding boxes and associated confidence scores and classes + in xyxy format, tensor of shape (N, 6) where N is the number of detections. + save_conf (bool): If True, saves the confidence scores along with the bounding box coordinates. + shape (tuple): Shape of the original image as (height, width). + file (str | Path): File path where the result will be saved. + + Returns: + None + + Notes: + The xyxy bounding box format represents the coordinates (xmin, ymin, xmax, ymax). + The xywh format represents the coordinates (center_x, center_y, width, height) and is + normalized by the width and height of the image. + + Example: + ```python + predn = torch.tensor([[10, 20, 30, 40, 0.9, 1]]) # example prediction + save_one_txt(predn, save_conf=True, shape=(640, 480), file="output.txt") + ``` + """ gn = torch.tensor(shape)[[1, 0, 1, 0]] # normalization gain whwh for *xyxy, conf, cls in predn.tolist(): xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh @@ -73,9 +96,36 @@ def save_one_txt(predn, save_conf, shape, file): def save_one_json(predn, jdict, path, class_map): """ - Saves one JSON detection result with image ID, category ID, bounding box, and score. + Saves a single JSON detection result, including image ID, category ID, bounding box, and confidence score. + + Args: + predn (torch.Tensor): Predicted detections in xyxy format with shape (n, 6) where n is the number of detections. + The tensor should contain [x_min, y_min, x_max, y_max, confidence, class_id] for each detection. + jdict (list[dict]): List to collect JSON formatted detection results. 
+        path (pathlib.Path): Path object of the image file, used to extract image_id.
+        class_map (dict[int, int]): Mapping from model class indices to dataset-specific category IDs.

-    Example: {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}
+    Returns:
+        None: Appends detection results as dictionaries to `jdict` list in-place.
+
+    Example:
+        ```python
+        predn = torch.tensor([[100, 50, 200, 150, 0.9, 0], [50, 30, 100, 80, 0.8, 1]])
+        jdict = []
+        path = Path("42.jpg")
+        class_map = {0: 18, 1: 19}
+        save_one_json(predn, jdict, path, class_map)
+        ```
+        This will append to `jdict`:
+        ```
+        [
+            {'image_id': 42, 'category_id': 18, 'bbox': [100.0, 50.0, 100.0, 100.0], 'score': 0.9},
+            {'image_id': 42, 'category_id': 19, 'bbox': [50.0, 30.0, 50.0, 50.0], 'score': 0.8}
+        ]
+        ```
+
+    Notes:
+        The `bbox` values are formatted as [x, y, width, height], where x and y represent the top-left corner of the box.
     """
     image_id = int(path.stem) if path.stem.isnumeric() else path.stem
     box = xyxy2xywh(predn[:, :4])  # xywh
@@ -93,13 +143,30 @@ def save_one_json(predn, jdict, path, class_map):


 def process_batch(detections, labels, iouv):
     """
-    Return correct prediction matrix.
+    Return a correct prediction matrix given detections and labels at various IoU thresholds.
+
+    Args:
+        detections (torch.Tensor): Tensor of shape (N, 6) where each row corresponds to a detection with
+            format [x1, y1, x2, y2, conf, class].
+        labels (torch.Tensor): Tensor of shape (M, 5) where each row corresponds to a ground truth label with
+            format [class, x1, y1, x2, y2].
+        iouv (torch.Tensor): Tensor of IoU thresholds to evaluate at.

-    Arguments:
-        detections (array[N, 6]), x1, y1, x2, y2, conf, class
-        labels (array[M, 5]), class, x1, y1, x2, y2
     Returns:
-        correct (array[N, 10]), for 10 IoU levels
+        correct (torch.Tensor): A boolean matrix of shape (N, len(iouv)) indicating whether each detection
+            is a true positive at each IoU threshold (typically 10 levels from 0.5 to 0.95).
+
+    Example:
+        ```python
+        detections = torch.tensor([[50, 50, 200, 200, 0.9, 1], [30, 30, 150, 150, 0.7, 0]])
+        labels = torch.tensor([[1, 50, 50, 200, 200]])
+        iouv = torch.linspace(0.5, 0.95, 10)
+        correct = process_batch(detections, labels, iouv)
+        ```
+
+    Notes:
+        - This function is used as part of the evaluation pipeline for object detection models.
+        - IoU (Intersection over Union) is a common evaluation metric for object detection performance.
     """
     correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool)
     iou = box_iou(labels[:, 1:], detections[:, :4])
@@ -148,7 +215,44 @@ def run(
     callbacks=Callbacks(),
     compute_loss=None,
 ):
-    """Evaluates model on a dataset and logs performance metrics, results are saved to specific directories."""
+    """
+    Evaluates a YOLOv5 model on a dataset and logs performance metrics.
+
+    Args:
+        data (str | dict): Path to a dataset yaml file or a dataset dictionary.
+        weights (str | list[str], optional): Path to the model weights file(s). Supports various formats: PyTorch,
+            TorchScript, ONNX, OpenVINO, TensorRT, CoreML, TensorFlow SavedModel, TensorFlow GraphDef, TensorFlow Lite,
+            TensorFlow Edge TPU, and PaddlePaddle.
+        batch_size (int, optional): Batch size for inference. Default is 32.
+        imgsz (int, optional): Input image size (pixels). Default is 640.
+        conf_thres (float, optional): Confidence threshold for object detection. Default is 0.001.
+        iou_thres (float, optional): IoU threshold for Non-Maximum Suppression (NMS). Default is 0.6.
+        max_det (int, optional): Maximum number of detections per image. Default is 300.
+        task (str, optional): Task type - 'train', 'val', 'test', 'speed', or 'study'. Default is 'val'.
+        device (str, optional): Device to use for computation, e.g., '0' or '0,1,2,3' for CUDA or 'cpu' for CPU. Default is ''.
+        workers (int, optional): Number of dataloader workers. Default is 8.
+        single_cls (bool, optional): Treat dataset as a single class. Default is False.
+        augment (bool, optional): Enable augmented inference. Default is False.
+        verbose (bool, optional): Enable verbose output. Default is False.
+        save_txt (bool, optional): Save results to *.txt files. Default is False.
+        save_hybrid (bool, optional): Save label and prediction hybrid results to *.txt files. Default is False.
+        save_conf (bool, optional): Save confidences in --save-txt labels. Default is False.
+        save_json (bool, optional): Save a COCO-JSON results file. Default is False.
+        project (str | Path, optional): Directory to save results. Default is ROOT/'runs/val'.
+        name (str, optional): Name of the run. Default is 'exp'.
+        exist_ok (bool, optional): Overwrite existing project/name without incrementing. Default is False.
+        half (bool, optional): Use FP16 half-precision inference. Default is True.
+        dnn (bool, optional): Use OpenCV DNN for ONNX inference. Default is False.
+        model (torch.nn.Module, optional): Model object for training. Default is None.
+        dataloader (torch.utils.data.DataLoader, optional): Dataloader object. Default is None.
+        save_dir (Path, optional): Directory to save results. Default is Path('').
+        plots (bool, optional): Plot validation images and metrics. Default is True.
+        callbacks (utils.callbacks.Callbacks, optional): Callbacks for logging and monitoring. Default is Callbacks().
+        compute_loss (function, optional): Loss function for training. Default is None.
+
+    Returns:
+        tuple: A tuple of overall metrics (precision, recall, mAP@0.5, mAP@0.5:0.95 and validation losses), per-class
+            mAPs, and timing information.
+    """
     # Initialize/load model and set device
     training = model is not None
     if training:  # called by train.py
@@ -364,7 +468,53 @@ def run(


 def parse_opt():
-    """Parses command-line options for YOLOv5 model inference configuration."""
+    """
+    Parses command-line options for YOLOv5 model inference configuration.
+
+    Args:
+        data (str): Path to the dataset YAML file, default is 'data/coco128.yaml'.
+        weights (List[str]): List of paths to the model weight files, default is 'yolov5s.pt'.
+        batch_size (int): Batch size for inference, default is 32.
+        imgsz (int): Inference image size in pixels, default is 640.
+        conf_thres (float): Confidence threshold for predictions, default is 0.001.
+        iou_thres (float): IoU threshold for Non-Max Suppression (NMS), default is 0.6.
+        max_det (int): Maximum number of detections per image, default is 300.
+        task (str): Task type - options are 'train', 'val', 'test', 'speed', or 'study'. Default is 'val'.
+        device (str): Device to run the model on, e.g., '0' or '0,1,2,3' or 'cpu'. Default is empty to let
+            the system choose automatically.
+        workers (int): Maximum number of dataloader workers per rank in DDP mode, default is 8.
+        single_cls (bool): If set, treats the dataset as a single-class dataset. Default is False.
+        augment (bool): If set, performs augmented inference. Default is False.
+        verbose (bool): If set, reports mAP by class. Default is False.
+        save_txt (bool): If set, saves results to *.txt files. Default is False.
+        save_hybrid (bool): If set, saves label+prediction hybrid results to *.txt files. Default is False.
+        save_conf (bool): If set, saves confidences in --save-txt labels. Default is False.
+        save_json (bool): If set, saves results to a COCO-JSON file. Default is False.
+        project (str): Project directory to save results to. Default is 'runs/val'.
+        name (str): Name of the directory to save results to. Default is 'exp'.
+        exist_ok (bool): If set, existing directory will not be incremented. Default is False.
+        half (bool): If set, uses FP16 half-precision inference. Default is False.
+        dnn (bool): If set, uses OpenCV DNN for ONNX inference. Default is False.
+
+    Returns:
+        argparse.Namespace: Parsed command-line options
+
+    Notes:
+        - The '--data' parameter is checked to ensure it ends with 'coco.yaml' if '--save-json' is set.
+        - The '--save-txt' option is set to True if '--save-hybrid' is enabled.
+        - Args are printed using `print_args` to facilitate debugging.
+
+    Example:
+        To validate a trained YOLOv5 model on a COCO dataset:
+        ```bash
+        $ python val.py --weights yolov5s.pt --data coco128.yaml --img 640
+        ```
+        Different model formats could be used instead of yolov5s.pt:
+        ```bash
+        $ python val.py --weights yolov5s.pt yolov5s.torchscript yolov5s.onnx yolov5s_openvino_model yolov5s.engine
+        ```
+        Additional options include saving results in different formats, selecting devices, and more.
+    """
     parser = argparse.ArgumentParser()
     parser.add_argument("--data", type=str, default=ROOT / "data/coco128.yaml", help="dataset.yaml path")
     parser.add_argument("--weights", nargs="+", type=str, default=ROOT / "yolov5s.pt", help="model path(s)")
@@ -397,8 +547,25 @@ def parse_opt():


 def main(opt):
-    """Executes YOLOv5 tasks like training, validation, testing, speed, and study benchmarks based on provided
-    options.
+    """
+    Executes YOLOv5 tasks like training, validation, testing, speed, and study benchmarks based on provided options.
+
+    Args:
+        opt (argparse.Namespace): Parsed command-line options.
+            - This includes values for parameters like 'data', 'weights', 'batch_size', 'imgsz', 'conf_thres', 'iou_thres',
+            'max_det', 'task', 'device', 'workers', 'single_cls', 'augment', 'verbose', 'save_txt', 'save_hybrid',
+            'save_conf', 'save_json', 'project', 'name', 'exist_ok', 'half', and 'dnn', essential for configuring
+            the YOLOv5 tasks.
+
+    Returns:
+        None
+
+    Examples:
+        To validate a trained YOLOv5 model on the COCO dataset with a specific weights file, use:
+
+        ```bash
+        $ python val.py --weights yolov5s.pt --data coco128.yaml --img 640
+        ```
     """
     check_requirements(ROOT / "requirements.txt", exclude=("tensorboard", "thop"))

From ff06328498373fded9eb326dd99dc36a51cd3c9b Mon Sep 17 00:00:00 2001
From: Glenn Jocher 
Date: Mon, 8 Jul 2024 22:05:56 +0200
Subject: [PATCH 1916/1976] Ultralytics Code Refactor https://ultralytics.com/actions
 (#13175)

* Refactor code for speed and clarity

* Update export.py

Signed-off-by: Glenn Jocher 

* Update export.py

Signed-off-by: Glenn Jocher 

* Add missing Python function docstrings

* Auto-format by https://ultralytics.com/actions

* Update export.py

Signed-off-by: Glenn Jocher 

* Add missing Python function docstrings

* Auto-format by https://ultralytics.com/actions

---------

Signed-off-by: Glenn Jocher 
Co-authored-by: UltralyticsAssistant 
---
 benchmarks.py |  24 +++++-
 detect.py     |  74 +++++++++---------
 export.py     | 202 +++++++++++++++++++++++++-------------------------
 hubconf.py    |  41 +++++-----
 train.py      | 107 +++++++++++++------------
 val.py        |  77 ++++++++++---------
 6 files changed, 275 insertions(+), 250 deletions(-)

diff --git a/benchmarks.py b/benchmarks.py
index 90cce4b3ff3c..ea11ca1f8590 100644
--- a/benchmarks.py
+++ b/benchmarks.py
@@ -68,7 +68,7 @@ def run(
         imgsz (int): Inference size in pixels (default: 640).
         batch_size (int): Batch size for inference (default: 1).
         data (Path | str): Path to the dataset.yaml file (default: ROOT / "data/coco128.yaml").
-        device (str): CUDA device, e.g., '0' or '0,1,2,3' or 'cpu' (default: None).
+        device (str): CUDA device, e.g., '0' or '0,1,2,3' or 'cpu' (default: "").
         half (bool): Use FP16 half-precision inference (default: False).
         test (bool): Test export formats only (default: False).
         pt_only (bool): Test PyTorch format only (default: False).
@@ -175,6 +175,24 @@ def test(

     Returns:
         pd.DataFrame: DataFrame containing the results of the export tests, including format names and export statuses.
+
+    Examples:
+        ```bash
+        $ python benchmarks.py --weights yolov5s.pt --img 640
+        ```
+
+    Notes:
+        Supported export formats and models include PyTorch, TorchScript, ONNX, OpenVINO, TensorRT, CoreML, TensorFlow
+        SavedModel, TensorFlow GraphDef, TensorFlow Lite, and TensorFlow Edge TPU. Edge TPU and TF.js are unsupported.
+
+    Usage:
+        Install required packages:
+            $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu  # CPU support
+            $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow  # GPU support
+            $ pip install -U nvidia-tensorrt --index-url https://pypi.ngc.nvidia.com  # TensorRT
+
+    Run export tests:
+        $ python benchmarks.py --weights yolov5s.pt --img 640
     """
     y, t = [], time.time()
     device = select_device(device)
@@ -213,8 +231,8 @@ def parse_opt():
         half (bool): Use FP16 half-precision inference. This is a flag and defaults to False.
         test (bool): Test exports only. This is a flag and defaults to False.
         pt_only (bool): Test PyTorch only. This is a flag and defaults to False.
-        hard_fail (bool|str): Throw an error on benchmark failure. Can be a boolean or a string representing a minimum metric
-            floor, i.e., '0.29'. Defaults to False.
+        hard_fail (bool | str): Throw an error on benchmark failure. Can be a boolean or a string representing a minimum
+            metric floor, e.g., '0.29'. Defaults to False.
Returns: argparse.Namespace: Parsed command-line arguments encapsulated in an argparse Namespace object. diff --git a/detect.py b/detect.py index b774370f54ed..e35d220c23f4 100644 --- a/detect.py +++ b/detect.py @@ -101,40 +101,40 @@ def run( Runs YOLOv5 detection inference on various sources like images, videos, directories, streams, etc. Args: - weights (str | Path): Path to the model weights file or a Triton URL. Default is 'yolov5s.pt'. - source (str | Path): Input source, which can be a file, directory, URL, glob pattern, screen capture, or webcam index. - Default is 'data/images'. - data (str | Path): Path to the dataset YAML file. Default is 'data/coco128.yaml'. - imgsz (tuple[int, int]): Inference image size as a tuple (height, width). Default is (640, 640). - conf_thres (float): Confidence threshold for detections. Default is 0.25. - iou_thres (float): Intersection Over Union (IOU) threshold for non-max suppression. Default is 0.45. - max_det (int): Maximum number of detections per image. Default is 1000. - device (str): CUDA device identifier (e.g., '0' or '0,1,2,3') or 'cpu'. Default is an empty string, which - uses the best available device. - view_img (bool): If True, display inference results using OpenCV. Default is False. - save_txt (bool): If True, save results in a text file. Default is False. - save_csv (bool): If True, save results in a CSV file. Default is False. - save_conf (bool): If True, include confidence scores in the saved results. Default is False. - save_crop (bool): If True, save cropped prediction boxes. Default is False. - nosave (bool): If True, do not save inference images or videos. Default is False. - classes (list[int]): List of class indices to filter detections by. Default is None. - agnostic_nms (bool): If True, perform class-agnostic non-max suppression. Default is False. - augment (bool): If True, use augmented inference. Default is False. - visualize (bool): If True, visualize feature maps. Default is False. - update (bool): If True, update all models' weights. Default is False. - project (str | Path): Directory to save results. Default is 'runs/detect'. - name (str): Name of the current experiment; used to create a subdirectory within 'project'. Default is 'exp'. - exist_ok (bool): If True, existing directories with the same name are reused instead of being incremented. Default is - False. - line_thickness (int): Thickness of bounding box lines in pixels. Default is 3. - hide_labels (bool): If True, do not display labels on bounding boxes. Default is False. - hide_conf (bool): If True, do not display confidence scores on bounding boxes. Default is False. - half (bool): If True, use FP16 half-precision inference. Default is False. - dnn (bool): If True, use OpenCV DNN backend for ONNX inference. Default is False. - vid_stride (int): Stride for processing video frames, to skip frames between processing. Default is 1. + weights (str | Path): Path to the model weights file or a Triton URL. Default is 'yolov5s.pt'. + source (str | Path): Input source, which can be a file, directory, URL, glob pattern, screen capture, or webcam index. + Default is 'data/images'. + data (str | Path): Path to the dataset YAML file. Default is 'data/coco128.yaml'. + imgsz (tuple[int, int]): Inference image size as a tuple (height, width). Default is (640, 640). + conf_thres (float): Confidence threshold for detections. Default is 0.25. + iou_thres (float): Intersection Over Union (IOU) threshold for non-max suppression. Default is 0.45. 
+ max_det (int): Maximum number of detections per image. Default is 1000. + device (str): CUDA device identifier (e.g., '0' or '0,1,2,3') or 'cpu'. Default is an empty string, which + uses the best available device. + view_img (bool): If True, display inference results using OpenCV. Default is False. + save_txt (bool): If True, save results in a text file. Default is False. + save_csv (bool): If True, save results in a CSV file. Default is False. + save_conf (bool): If True, include confidence scores in the saved results. Default is False. + save_crop (bool): If True, save cropped prediction boxes. Default is False. + nosave (bool): If True, do not save inference images or videos. Default is False. + classes (list[int]): List of class indices to filter detections by. Default is None. + agnostic_nms (bool): If True, perform class-agnostic non-max suppression. Default is False. + augment (bool): If True, use augmented inference. Default is False. + visualize (bool): If True, visualize feature maps. Default is False. + update (bool): If True, update all models' weights. Default is False. + project (str | Path): Directory to save results. Default is 'runs/detect'. + name (str): Name of the current experiment; used to create a subdirectory within 'project'. Default is 'exp'. + exist_ok (bool): If True, existing directories with the same name are reused instead of being incremented. Default is + False. + line_thickness (int): Thickness of bounding box lines in pixels. Default is 3. + hide_labels (bool): If True, do not display labels on bounding boxes. Default is False. + hide_conf (bool): If True, do not display confidence scores on bounding boxes. Default is False. + half (bool): If True, use FP16 half-precision inference. Default is False. + dnn (bool): If True, use OpenCV DNN backend for ONNX inference. Default is False. + vid_stride (int): Stride for processing video frames, to skip frames between processing. Default is 1. Returns: - None + None Examples: ```python @@ -397,15 +397,15 @@ def main(opt): Executes YOLOv5 model inference based on provided command-line arguments, validating dependencies before running. Args: - opt (argparse.Namespace): Command-line arguments for YOLOv5 detection. See function `parse_opt` for details. + opt (argparse.Namespace): Command-line arguments for YOLOv5 detection. See function `parse_opt` for details. Returns: - None + None Note: - This function performs essential pre-execution checks and initiates the YOLOv5 detection process based on user-specified options. - Refer to the usage guide and examples for more information about different sources and formats at: - https://github.com/ultralytics/ultralytics + This function performs essential pre-execution checks and initiates the YOLOv5 detection process based on user-specified options. + Refer to the usage guide and examples for more information about different sources and formats at: + https://github.com/ultralytics/ultralytics Example usage: diff --git a/export.py b/export.py index bf68ae123ab3..3688dfd848a6 100644 --- a/export.py +++ b/export.py @@ -103,10 +103,9 @@ def __init__(self, model, im): None: This method does not return any value. Notes: - This initializer configures normalization based on the input image dimensions, which is critical for - ensuring the model's compatibility and proper functionality on iOS devices. The normalization step - involves dividing by the image width if the image is square; otherwise, additional conditions might - apply (trimmed for brevity). 
+ This initializer configures normalization based on the input image dimensions, which is critical for ensuring the + model's compatibility and proper functionality on iOS devices. The normalization step involves dividing by the image + width if the image is square; otherwise, additional conditions might apply (trimmed for brevity). """ super().__init__() b, c, h, w = im.shape # batch, channel, height, width @@ -124,16 +123,17 @@ def forward(self, x): Runs a forward pass on the input tensor, returning class confidences and normalized coordinates. Args: - x (torch.Tensor): Input tensor containing the image data. + x (torch.Tensor): Input tensor containing the image data. Returns: - torch.Tensor: Concatenated tensor with normalized coordinates (xywh), confidence scores (conf), and class probabilities (cls). + torch.Tensor: Concatenated tensor with normalized coordinates (xywh), confidence scores (conf), + and class probabilities (cls). Examples: - ```python - model = iOSModel(pretrained_model, input_image) - output = model.forward(torch_input_tensor) - ``` + ```python + model = iOSModel(pretrained_model, input_image) + output = model.forward(torch_input_tensor) + ``` """ xywh, conf, cls = self.model(x)[0].squeeze().split((4, 1, self.nc), 1) return cls * conf, xywh * self.normalize # confidence (3780, 80), coordinates (3780, 4) @@ -151,7 +151,7 @@ def export_formats(): Examples: ```python formats = export_formats() - print(formats) + print(f"Supported export formats:\n{formats}") ``` Notes: @@ -188,16 +188,18 @@ def try_export(inner_func): Returns: Callable: The wrapped function that logs execution details. When executed, this wrapper function returns either: - - Tuple (str, torch.nn.Module): On success — the file path of the exported model and the model instance. - - Tuple (None, None): On failure — None values indicating export failed. + - Tuple (str | torch.nn.Module): On success — the file path of the exported model and the model instance. + - Tuple (None, None): On failure — None values indicating export failure. Examples: + ```python @try_export def export_onnx(model, filepath): # implementation here pass exported_file, exported_model = export_onnx(yolo_model, 'path/to/save/model.onnx') + ``` Notes: For additional requirements and model export formats, refer to the [Ultralytics YOLOv5 GitHub repository](https://github.com/ultralytics/ultralytics). @@ -239,7 +241,7 @@ def export_torchscript(model, im, file, optimize, prefix=colorstr("TorchScript:" Notes: - This function uses tracing to create the TorchScript model. - Metadata, including the input image shape, model stride, and class names, is saved in an extra file (`config.txt`) - within the TorchScript model package. + within the TorchScript model package. - For mobile optimization, refer to the PyTorch tutorial: https://pytorch.org/tutorials/recipes/mobile_interpreter.html Example: @@ -359,8 +361,7 @@ def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr("ONNX @try_export def export_openvino(file, metadata, half, int8, data, prefix=colorstr("OpenVINO:")): """ - Exports a YOLOv5 model to OpenVINO format with optional FP16 and INT8 quantization; see - https://pypi.org/project/openvino-dev/. + Exports a YOLOv5 model to OpenVINO format with optional FP16 and INT8 quantization. Args: file (Path): The path to the output file where the OpenVINO model will be saved. 
@@ -450,7 +451,7 @@ def transform_fn(data_item): @try_export def export_paddle(model, im, file, metadata, prefix=colorstr("PaddlePaddle:")): """ - Exports a YOLOv5 model to PaddlePaddle format using X2Paddle, saving the converted model and metadata. + Export a YOLOv5 model to PaddlePaddle format using X2Paddle, saving the converted model and metadata. Args: model (torch.nn.Module): The YOLOv5 model to be exported. @@ -476,6 +477,7 @@ def export_paddle(model, im, file, metadata, prefix=colorstr("PaddlePaddle:")): export_paddle(model=model, im=im, file=file, metadata=metadata) ``` + Notes: Ensure that `paddlepaddle` and `x2paddle` are installed, as these are required for the export function. You can install them via pip: @@ -556,7 +558,7 @@ def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose Args: model (torch.nn.Module): YOLOv5 model to be exported. - im (torch.Tensor): Input tensor of shape (B,C,H,W). + im (torch.Tensor): Input tensor of shape (B, C, H, W). file (Path): Path to save the exported model. half (bool): Set to True to export with FP16 precision. dynamic (bool): Set to True to enable dynamic input shapes. @@ -664,7 +666,8 @@ def export_saved_model( prefix=colorstr("TensorFlow SavedModel:"), ): """ - Exports a YOLOv5 model to TensorFlow SavedModel format, supporting dynamic axes and non-maximum suppression (NMS). + Exports a YOLOv5 model to the TensorFlow SavedModel format, supporting dynamic axes and non-maximum suppression + (NMS). Args: model (torch.nn.Module): The PyTorch model to convert. @@ -681,7 +684,8 @@ def export_saved_model( prefix (str, optional): Prefix for logging messages. Default is "TensorFlow SavedModel:". Returns: - tuple: A tuple containing the path to the saved model folder (str) and the Keras model instance (tf.keras.Model | None). + tuple[str, tf.keras.Model | None]: A tuple containing the path to the saved model folder and the Keras model instance, + or None if TensorFlow export fails. Notes: - The method supports TensorFlow versions up to 2.15.1. @@ -689,9 +693,6 @@ def export_saved_model( - If the TensorFlow version exceeds 2.13.1, it might cause issues when exporting to TFLite. Refer to: https://github.com/ultralytics/yolov5/issues/12489 - Raises: - Exception: If TensorFlow is not installed. - Example: ```python model, im = ... # Initialize your PyTorch model and input tensor @@ -749,7 +750,7 @@ def export_saved_model( @try_export def export_pb(keras_model, file, prefix=colorstr("TensorFlow GraphDef:")): """ - Exports YOLOv5 model to TensorFlow GraphDef (*.pb) format. + Export YOLOv5 model to TensorFlow GraphDef (*.pb) format. Args: keras_model (tf.keras.Model): The Keras model to be converted. @@ -790,44 +791,43 @@ def export_tflite( ): # YOLOv5 TensorFlow Lite export """ - Exports YOLOv5 model to TensorFlow Lite format with optional FP16, INT8, and NMS support. + Exports a YOLOv5 model to TensorFlow Lite format with optional INT8 quantization and NMS support. Args: keras_model (tf.keras.Model): The Keras model to be exported. - im (torch.Tensor): Image tensor for normalization and model tracing. - file (Path): The file path to save the exported TensorFlow Lite model. + im (torch.Tensor): An input image tensor for normalization and model tracing. + file (Path): The file path to save the TensorFlow Lite model. int8 (bool): Enables INT8 quantization if True. - per_tensor (bool): If True, disable per-channel quantization (applicable when int8 is True). 
- data (str): Path to dataset for representative dataset generation in INT8 quantization. - nms (bool): Enables Non-Maximum Suppression (NMS) support if True. - agnostic_nms (bool): Enables class-agnostic NMS support if True. - prefix (str): Prefix for logging messages. + per_tensor (bool): If True, disables per-channel quantization. + data (str): Path to the dataset for representative dataset generation in INT8 quantization. + nms (bool): Enables Non-Maximum Suppression (NMS) if True. + agnostic_nms (bool): Enables class-agnostic NMS if True. + prefix (str): Prefix for log messages. Returns: - (str | None, tf.lite.Model | None): The file path of the saved TFLite model, and the TFLite model instance if successful. + (str | None, tflite.Model | None): The file path of the exported TFLite model and the TFLite model instance, or None if export failed. Example: ```python from pathlib import Path import torch import tensorflow as tf - from torchvision import models - # Load a pre-trained model from torchvision - model = models.yolov5() # Placeholder for actual YOLOv5 model loading - im = torch.zeros(1, 3, 640, 640) # Example image tensor - - # Provide the Keras model wrapping the PyTorch YOLOv5 model + # Load a Keras model wrapping a YOLOv5 model keras_model = tf.keras.models.load_model('path/to/keras_model.h5') - # Export the model to TensorFlow Lite format - file_path = export_tflite(keras_model, im, Path('model.tflite'), int8=False, per_tensor=False, - data='path/to/dataset.yaml', nms=False, agnostic_nms=False) + # Example input tensor + im = torch.zeros(1, 3, 640, 640) + + # Export the model + export_tflite(keras_model, im, Path('model.tflite'), int8=True, per_tensor=False, data='data/coco.yaml', + nms=True, agnostic_nms=False) ``` Notes: - Ensure the TensorFlow and TensorFlow Lite dependencies are installed. The exported TFLite model can be used for - efficient inference on mobile and edge devices. + - Ensure TensorFlow and TensorFlow Lite dependencies are installed. + - INT8 quantization requires a representative dataset to achieve optimal accuracy. + - TensorFlow Lite models are suitable for efficient inference on mobile and edge devices. """ import tensorflow as tf @@ -935,18 +935,18 @@ def export_tfjs(file, int8, prefix=colorstr("TensorFlow.js:")): prefix (str): Prefix for logging information (default: colorstr("TensorFlow.js:")). Returns: - tuple: Output directory path (str), None + (str, None): The output directory path as a string and None. Notes: This function requires `tensorflowjs` to be installed. You can install it using: - ```shell - pip install tensorflowjs - ``` + ```shell + pip install tensorflowjs + ``` Example usage: - ```python - export_tfjs(Path('yolov5s.onnx'), int8=False) - ``` + ```python + export_tfjs(Path('yolov5s.onnx'), int8=False) + ``` The TensorFlow.js converted model is saved in the directory specified by `file` with "_web_model" suffix. """ @@ -987,12 +987,12 @@ def export_tfjs(file, int8, prefix=colorstr("TensorFlow.js:")): def add_tflite_metadata(file, metadata, num_outputs): """ - Adds TFLite metadata to a model file, supporting multiple outputs, as specified by TensorFlow guidelines. + Adds metadata to a TensorFlow Lite (TFLite) model file, supporting multiple outputs, based on TensorFlow guidelines. Args: - file (str): The path to the TensorFlow Lite model file to which metadata will be added. - metadata (dict): Metadata information to be added to the model, structured as required by TFLite metadata schema. 
- num_outputs (int): Number of output tensors the model has, to properly configure the metadata. + file (str): Path to the TFLite model file to which metadata will be added. + metadata (dict): Metadata information to be added to the model, structured as required by the TFLite metadata schema. + num_outputs (int): Number of output tensors the model has, used to configure the metadata properly. Returns: None @@ -1011,7 +1011,7 @@ def add_tflite_metadata(file, metadata, num_outputs): Note: TFLite metadata can include information such as model name, version, author, and other relevant details. - For more details and structure of the metadata, refer to the TensorFlow Lite + For more details on the structure of the metadata, refer to TensorFlow Lite [metadata guidelines](https://www.tensorflow.org/lite/models/convert/metadata). """ with contextlib.suppress(ImportError): @@ -1071,24 +1071,24 @@ def pipeline_coreml(model, im, file, names, y, prefix=colorstr("CoreML Pipeline: - Flexible input shapes and additional NMS options can be customized within the function. Examples: - ```python - from pathlib import Path - import torch + ```python + from pathlib import Path + import torch - # Load YOLOv5 model and an example input tensor - model = torch.load("yolov5s.pt") - im = torch.zeros(1, 3, 640, 640) # Example input tensor + # Load YOLOv5 model and an example input tensor + model = torch.load("yolov5s.pt") + im = torch.zeros(1, 3, 640, 640) # Example input tensor - # Define class names - names = {0: "person", 1: "bicycle", 2: "car", ...} + # Define class names + names = {0: "person", 1: "bicycle", 2: "car", ...} - # Perform forward pass to get model output - y = model(im) + # Perform forward pass to get model output + y = model(im) - # Convert to CoreML - output_file = Path("yolov5s.mlmodel") - pipeline_coreml(model, im, output_file, names, y) - ``` + # Convert to CoreML + output_file = Path("yolov5s.mlmodel") + pipeline_coreml(model, im, output_file, names, y) + ``` """ import coremltools as ct from PIL import Image @@ -1246,6 +1246,8 @@ def run( conf_thres=0.25, # TF.js NMS: confidence threshold ): """ + Clear and concise summary line describing the function's purpose: + Exports a YOLOv5 model to specified formats including ONNX, TensorRT, CoreML, and TensorFlow. Args: @@ -1474,42 +1476,42 @@ def parse_opt(known=False): def main(opt): """ - ```python Exports the YOLOv5 model to specified formats, including ONNX, TensorRT, CoreML, and TensorFlow. + Exports the YOLOv5 model to specified formats, including ONNX, TensorRT, CoreML, and TensorFlow. Args: opt (argparse.Namespace): Parsed command-line arguments containing the export configurations. - - data (str): Path to the dataset.yaml. - - weights (list[str]): Paths to model (.pt) file(s). - - imgsz (list[int]): Image size (height, width). - - batch_size (int): Batch size. - - device (str): CUDA device, e.g., '0' or '0,1,2,3' or 'cpu'. - - half (bool): FP16 half-precision export flag. - - inplace (bool): Set YOLOv5 Detect() inplace to True. - - keras (bool): Use Keras for TensorFlow models. - - optimize (bool): Optimize TorchScript model for mobile. - - int8 (bool): INT8 quantization flag. - - per_tensor (bool): Per tensor quantization for TensorFlow. - - dynamic (bool): Dynamic axes for ONNX/TF/TensorRT. - - simplify (bool): Simplify ONNX model. - - opset (int): ONNX opset version. - - verbose (bool): Verbose logging for TensorRT. - - workspace (int): Workspace size for TensorRT (in GB). - - nms (bool): Add NMS to TensorFlow model. 
- - agnostic_nms (bool): Add agnostic NMS to TensorFlow model. - - topk_per_class (int): Top-k per class for TensorFlow.js NMS. - - topk_all (int): Top-k for all classes for TensorFlow.js NMS. - - iou_thres (float): IoU threshold for TensorFlow.js NMS. - - conf_thres (float): Confidence threshold for TensorFlow.js NMS. - - include (list[str]): List of formats to include in export, e.g., ['torchscript', 'onnx']. + - data (str): Path to the dataset YAML configuration file (e.g., 'data/coco128.yaml'). + - weights (list[str] | str): Paths to the pretrained model weights file(s) (e.g., 'yolov5s.pt'). + - imgsz (list[int]): Image size as a list [height, width]. + - batch_size (int): Batch size for exporting the model. + - device (str): Device to run the export on, such as '0' for GPU, or 'cpu' for CPU. + - half (bool): Flag to export the model with FP16 half-precision. + - inplace (bool): Set the YOLOv5 Detect() module inplace mode to True. + - keras (bool): Flag to use Keras for TensorFlow SavedModel export. + - optimize (bool): Optimize TorchScript model for mobile deployment. + - int8 (bool): Apply INT8 quantization for CoreML or TensorFlow models. + - per_tensor (bool): Apply per-tensor quantization for TensorFlow models. + - dynamic (bool): Enable dynamic axes for ONNX, TensorFlow, or TensorRT exports. + - simplify (bool): Simplify ONNX model during export. + - opset (int): ONNX opset version. + - verbose (bool): Enable verbose logging for TensorRT export. + - workspace (int): TensorRT workspace size in GB. + - nms (bool): Add non-maximum suppression (NMS) to the TensorFlow model. + - agnostic_nms (bool): Add class-agnostic NMS to the TensorFlow model. + - topk_per_class (int): Top-K boxes per class to keep for TensorFlow.js NMS. + - topk_all (int): Top-K boxes for all classes to keep for TensorFlow.js NMS. + - iou_thres (float): IoU threshold for NMS. + - conf_thres (float): Confidence threshold for NMS. + - include (list[str]): List of formats to include in export (e.g., ['torchscript', 'onnx']). Returns: - list[str]: List of exported file paths. - ```python + list[str]: List of paths to the exported model files. - # Example usage: - # opt = parse_opt() - # main(opt) - ``` + Example: + ```python + opt = parse_opt() + main(opt) + ``` """ for opt.weights in opt.weights if isinstance(opt.weights, list) else [opt.weights]: run(**vars(opt)) diff --git a/hubconf.py b/hubconf.py index de93c79bfae5..5815d9e67b84 100644 --- a/hubconf.py +++ b/hubconf.py @@ -188,8 +188,7 @@ def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=Tr auto-select the best available device. Defaults to None. Returns: - YOLOv5 model (torch.nn.Module): The YOLOv5-small model loaded with specified configurations and optionally - pretrained weights. + torch.nn.Module: The YOLOv5-small model loaded with specified configurations and optionally pretrained weights. Usage: ```python @@ -245,10 +244,12 @@ def yolov5l(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=Tr classes (int): Number of model classes. Default is 80. autoshape (bool): Apply YOLOv5 .autoshape() wrapper to model. Default is True. _verbose (bool): Print all information to screen. Default is True. - device (str | torch.device | None): Device to use for model parameters, e.g., 'cpu', 'cuda', or a torch.device instance. Default is None. + device (str | torch.device | None): Device to use for model parameters, e.g., 'cpu', 'cuda', or a torch.device instance. + Default is None. Returns: - YOLOv5 model (torch.nn.Module). 
+ YOLOv5 model (torch.nn.Module): The YOLOv5-large model instantiated with specified configurations and possibly + pretrained weights. Example: ```python @@ -368,9 +369,11 @@ def yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=T pretrained (bool): If True, loads pretrained weights. Default is True. channels (int): Number of input channels. Default is 3. classes (int): Number of model classes. Default is 80. - autoshape (bool): Apply YOLOv5 .autoshape() wrapper to the model for file/URI/PIL/cv2/np inputs and NMS. Default is True. + autoshape (bool): Apply YOLOv5 .autoshape() wrapper to the model for file/URI/PIL/cv2/np inputs and NMS. + Default is True. _verbose (bool): If True, prints detailed information to the screen. Default is True. - device (str | torch.device | None): Device to use for model parameters. Default is None, which uses the best available device. + device (str | torch.device | None): Device to use for model parameters. Default is None, which uses the + best available device. Returns: torch.nn.Module: The YOLOv5-medium-P6 model. @@ -378,12 +381,12 @@ def yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=T Refer to the PyTorch Hub models documentation: https://pytorch.org/hub/ultralytics_yolov5 for additional details. Example: - ```python - import torch + ```python + import torch - # Load YOLOv5-medium-P6 model - model = torch.hub.load('ultralytics/yolov5', 'yolov5m6') - ``` + # Load YOLOv5-medium-P6 model + model = torch.hub.load('ultralytics/yolov5', 'yolov5m6') + ``` Notes: - The model can be loaded with pre-trained weights for better performance on specific tasks. @@ -401,12 +404,10 @@ def yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=T pretrained (bool, optional): If True, load pretrained weights into the model. Default is True. channels (int, optional): Number of input channels. Default is 3. classes (int, optional): Number of model classes. Default is 80. - autoshape (bool, optional): If True, apply YOLOv5 .autoshape() wrapper to the model for input flexibility. - Default is True. + autoshape (bool, optional): If True, apply YOLOv5 .autoshape() wrapper to the model for input flexibility. Default is True. _verbose (bool, optional): If True, print all information to the screen. Default is True. - device (str | torch.device | None, optional): Device to use for model parameters, e.g., 'cpu', 'cuda', or - torch.device. If None, automatically selects the best available - device. Default is None. + device (str | torch.device | None, optional): Device to use for model parameters, e.g., 'cpu', 'cuda', or torch.device. + If None, automatically selects the best available device. Default is None. Returns: torch.nn.Module: The instantiated YOLOv5-large-P6 model. @@ -444,10 +445,10 @@ def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=T torch.nn.Module: The instantiated YOLOv5-xlarge-P6 model. Example: - ```python - import torch - model = torch.hub.load('ultralytics/yolov5', 'yolov5x6') # load the YOLOv5-xlarge-P6 model - ``` + ```python + import torch + model = torch.hub.load('ultralytics/yolov5', 'yolov5x6') # load the YOLOv5-xlarge-P6 model + ``` Note: For more information on YOLOv5 models, visit the official documentation: diff --git a/train.py b/train.py index 65e6806b22e0..9e3382787695 100644 --- a/train.py +++ b/train.py @@ -546,10 +546,10 @@ def parse_opt(known=False): Parses command-line arguments for YOLOv5 training, validation, and testing. 
Args: - known (bool, optional): If True, parses known arguments, ignoring the unknown. Defaults to False. + known (bool, optional): If True, parses known arguments, ignoring the unknown. Defaults to False. Returns: - argparse.Namespace: Parsed command-line arguments. + argparse.Namespace: Parsed command-line arguments. Example: ```python @@ -559,9 +559,9 @@ def parse_opt(known=False): ``` Links: - Models: https://github.com/ultralytics/yolov5/tree/master/models - Datasets: https://github.com/ultralytics/yolov5/tree/master/data - Tutorial: https://docs.ultralytics.com/yolov5/tutorials/train_custom_data + Models: https://github.com/ultralytics/yolov5/tree/master/models + Datasets: https://github.com/ultralytics/yolov5/tree/master/data + Tutorial: https://docs.ultralytics.com/yolov5/tutorials/train_custom_data """ parser = argparse.ArgumentParser() parser.add_argument("--weights", type=str, default=ROOT / "yolov5s.pt", help="initial weights path") @@ -621,15 +621,15 @@ def main(opt, callbacks=Callbacks()): Runs training or hyperparameter evolution with specified options and optional callbacks. Args: - opt (argparse.Namespace): The command-line arguments parsed for YOLOv5 training and evolution. - callbacks (ultralytics.utils.callbacks.Callbacks, optional): Callback functions for various training stages. - Defaults to Callbacks(). + opt (argparse.Namespace): The command-line arguments parsed for YOLOv5 training and evolution. + callbacks (ultralytics.utils.callbacks.Callbacks, optional): Callback functions for various training stages. + Defaults to Callbacks(). Returns: - None + None Note: - For detailed usage, visit: + For detailed usage, visit: https://github.com/ultralytics/yolov5/tree/master/models """ if RANK in {-1, 0}: @@ -918,51 +918,56 @@ def run(**kwargs): Executes YOLOv5 training with given options, allowing optional overrides through keyword arguments. Args: - weights (str): Path to initial weights. Defaults to ROOT / 'yolov5s.pt'. - cfg (str): Path to model YAML configuration. Defaults to an empty string. - data (str): Path to dataset YAML configuration. Defaults to ROOT / 'data/coco128.yaml'. - hyp (str): Path to hyperparameters YAML configuration. Defaults to ROOT / 'data/hyps/hyp.scratch-low.yaml'. - epochs (int): Total number of training epochs. Defaults to 100. - batch_size (int): Total batch size for all GPUs. Use -1 for automatic batch size determination. Defaults to 16. - imgsz (int): Image size (pixels) for training and validation. Defaults to 640. - rect (bool): Use rectangular training. Defaults to False. - resume (bool | str): Resume most recent training with an optional path. Defaults to False. - nosave (bool): Only save final checkpoint. Defaults to False. - noval (bool): Only validate at the final epoch. Defaults to False. - noautoanchor (bool): Disable AutoAnchor. Defaults to False. - noplots (bool): Do not save plot files. Defaults to False. - evolve (int): Evolve hyperparameters for a specified number of generations. Use 300 if provided without a value. - evolve_population (str): Directory for loading population during evolution. Defaults to ROOT / 'data/hyps'. - resume_evolve (str): Resume hyperparameter evolution from the last generation. Defaults to None. - bucket (str): gsutil bucket for saving checkpoints. Defaults to an empty string. - cache (str): Cache image data in 'ram' or 'disk'. Defaults to None. - image_weights (bool): Use weighted image selection for training. Defaults to False. - device (str): CUDA device identifier, e.g., '0', '0,1,2,3', or 'cpu'. 
Defaults to an empty string. - multi_scale (bool): Use multi-scale training, varying image size by ±50%. Defaults to False. - single_cls (bool): Train with multi-class data as single-class. Defaults to False. - optimizer (str): Optimizer type, choices are ['SGD', 'Adam', 'AdamW']. Defaults to 'SGD'. - sync_bn (bool): Use synchronized BatchNorm, only available in DDP mode. Defaults to False. - workers (int): Maximum dataloader workers per rank in DDP mode. Defaults to 8. - project (str): Directory for saving training runs. Defaults to ROOT / 'runs/train'. - name (str): Name for saving the training run. Defaults to 'exp'. - exist_ok (bool): Allow existing project/name without incrementing. Defaults to False. - quad (bool): Use quad dataloader. Defaults to False. - cos_lr (bool): Use cosine learning rate scheduler. Defaults to False. - label_smoothing (float): Label smoothing epsilon value. Defaults to 0.0. - patience (int): Patience for early stopping, measured in epochs without improvement. Defaults to 100. - freeze (list): Layers to freeze, e.g., backbone=10, first 3 layers = [0, 1, 2]. Defaults to [0]. - save_period (int): Frequency in epochs to save checkpoints. Disabled if < 1. Defaults to -1. - seed (int): Global training random seed. Defaults to 0. - local_rank (int): Automatic DDP Multi-GPU argument. Do not modify. Defaults to -1. + weights (str, optional): Path to initial weights. Defaults to ROOT / 'yolov5s.pt'. + cfg (str, optional): Path to model YAML configuration. Defaults to an empty string. + data (str, optional): Path to dataset YAML configuration. Defaults to ROOT / 'data/coco128.yaml'. + hyp (str, optional): Path to hyperparameters YAML configuration. Defaults to ROOT / 'data/hyps/hyp.scratch-low.yaml'. + epochs (int, optional): Total number of training epochs. Defaults to 100. + batch_size (int, optional): Total batch size for all GPUs. Use -1 for automatic batch size determination. Defaults to 16. + imgsz (int, optional): Image size (pixels) for training and validation. Defaults to 640. + rect (bool, optional): Use rectangular training. Defaults to False. + resume (bool | str, optional): Resume most recent training with an optional path. Defaults to False. + nosave (bool, optional): Only save the final checkpoint. Defaults to False. + noval (bool, optional): Only validate at the final epoch. Defaults to False. + noautoanchor (bool, optional): Disable AutoAnchor. Defaults to False. + noplots (bool, optional): Do not save plot files. Defaults to False. + evolve (int, optional): Evolve hyperparameters for a specified number of generations. Use 300 if provided without a value. + evolve_population (str, optional): Directory for loading population during evolution. Defaults to ROOT / 'data/hyps'. + resume_evolve (str, optional): Resume hyperparameter evolution from the last generation. Defaults to None. + bucket (str, optional): gsutil bucket for saving checkpoints. Defaults to an empty string. + cache (str, optional): Cache image data in 'ram' or 'disk'. Defaults to None. + image_weights (bool, optional): Use weighted image selection for training. Defaults to False. + device (str, optional): CUDA device identifier, e.g., '0', '0,1,2,3', or 'cpu'. Defaults to an empty string. + multi_scale (bool, optional): Use multi-scale training, varying image size by ±50%. Defaults to False. + single_cls (bool, optional): Train with multi-class data as single-class. Defaults to False. + optimizer (str, optional): Optimizer type, choices are ['SGD', 'Adam', 'AdamW']. Defaults to 'SGD'. 
+ sync_bn (bool, optional): Use synchronized BatchNorm, only available in DDP mode. Defaults to False. + workers (int, optional): Maximum dataloader workers per rank in DDP mode. Defaults to 8. + project (str, optional): Directory for saving training runs. Defaults to ROOT / 'runs/train'. + name (str, optional): Name for saving the training run. Defaults to 'exp'. + exist_ok (bool, optional): Allow existing project/name without incrementing. Defaults to False. + quad (bool, optional): Use quad dataloader. Defaults to False. + cos_lr (bool, optional): Use cosine learning rate scheduler. Defaults to False. + label_smoothing (float, optional): Label smoothing epsilon value. Defaults to 0.0. + patience (int, optional): Patience for early stopping, measured in epochs without improvement. Defaults to 100. + freeze (list, optional): Layers to freeze, e.g., backbone=10, first 3 layers = [0, 1, 2]. Defaults to [0]. + save_period (int, optional): Frequency in epochs to save checkpoints. Disabled if < 1. Defaults to -1. + seed (int, optional): Global training random seed. Defaults to 0. + local_rank (int, optional): Automatic DDP Multi-GPU argument. Do not modify. Defaults to -1. Returns: - None: The function initiates YOLOv5 training or hyperparameter evolution based on the provided options. + None: The function initiates YOLOv5 training or hyperparameter evolution based on the provided options. Examples: - ```python - import train - train.run(data='coco128.yaml', imgsz=320, weights='yolov5m.pt') - ``` + ```python + import train + train.run(data='coco128.yaml', imgsz=320, weights='yolov5m.pt') + ``` + + Notes: + - Models: https://github.com/ultralytics/yolov5/tree/master/models + - Datasets: https://github.com/ultralytics/yolov5/tree/master/data + - Tutorial: https://docs.ultralytics.com/yolov5/tutorials/train_custom_data """ opt = parse_opt(True) for k, v in kwargs.items(): diff --git a/val.py b/val.py index deb12624c32f..218e0748dd5d 100644 --- a/val.py +++ b/val.py @@ -66,8 +66,8 @@ def save_one_txt(predn, save_conf, shape, file): Saves one detection result to a txt file in normalized xywh format, optionally including confidence. Args: - predn (torch.Tensor): Predicted bounding boxes and associated confidence scores and classes - in xyxy format, tensor of shape (N, 6) where N is the number of detections. + predn (torch.Tensor): Predicted bounding boxes and associated confidence scores and classes in xyxy format, + tensor of shape (N, 6) where N is the number of detections. save_conf (bool): If True, saves the confidence scores along with the bounding box coordinates. shape (tuple): Shape of the original image as (height, width). file (str | Path): File path where the result will be saved. @@ -77,8 +77,8 @@ def save_one_txt(predn, save_conf, shape, file): Notes: The xyxy bounding box format represents the coordinates (xmin, ymin, xmax, ymax). - The xywh format represents the coordinates (center_x, center_y, width, height) and is - normalized by the width and height of the image. + The xywh format represents the coordinates (center_x, center_y, width, height) and is normalized by the width and + height of the image. Example: ```python @@ -99,33 +99,33 @@ def save_one_json(predn, jdict, path, class_map): Saves a single JSON detection result, including image ID, category ID, bounding box, and confidence score. Args: - predn (torch.Tensor): Predicted detections in xyxy format with shape (n, 6) where n is the number of detections. 
- The tensor should contain [x_min, y_min, x_max, y_max, confidence, class_id] for each detection. - jdict (list[dict]): List to collect JSON formatted detection results. - path (pathlib.Path): Path object of the image file, used to extract image_id. - class_map (dict[int, int]): Mapping from model class indices to dataset-specific category IDs. + predn (torch.Tensor): Predicted detections in xyxy format with shape (n, 6) where n is the number of detections. + The tensor should contain [x_min, y_min, x_max, y_max, confidence, class_id] for each detection. + jdict (list[dict]): List to collect JSON formatted detection results. + path (pathlib.Path): Path object of the image file, used to extract image_id. + class_map (dict[int, int]): Mapping from model class indices to dataset-specific category IDs. Returns: - None: Appends detection results as dictionaries to `jdict` list in-place. + None: Appends detection results as dictionaries to `jdict` list in-place. Example: - ```python - predn = torch.tensor([[100, 50, 200, 150, 0.9, 0], [50, 30, 100, 80, 0.8, 1]]) - jdict = [] - path = Path("42.jpg") - class_map = {0: 18, 1: 19} - save_one_json(predn, jdict, path, class_map) - ``` - This will append to `jdict`: - ``` - [ - {'image_id': 42, 'category_id': 18, 'bbox': [125.0, 75.0, 100.0, 100.0], 'score': 0.9}, - {'image_id': 42, 'category_id': 19, 'bbox': [75.0, 55.0, 50.0, 50.0], 'score': 0.8} - ] - ``` + ```python + predn = torch.tensor([[100, 50, 200, 150, 0.9, 0], [50, 30, 100, 80, 0.8, 1]]) + jdict = [] + path = Path("42.jpg") + class_map = {0: 18, 1: 19} + save_one_json(predn, jdict, path, class_map) + ``` + This will append to `jdict`: + ``` + [ + {'image_id': 42, 'category_id': 18, 'bbox': [125.0, 75.0, 100.0, 100.0], 'score': 0.9}, + {'image_id': 42, 'category_id': 19, 'bbox': [75.0, 55.0, 50.0, 50.0], 'score': 0.8} + ] + ``` Notes: - The `bbox` values are formatted as [x, y, width, height], where x and y represent the top-left corner of the box. + The `bbox` values are formatted as [x, y, width, height], where x and y represent the top-left corner of the box. """ image_id = int(path.stem) if path.stem.isnumeric() else path.stem box = xyxy2xywh(predn[:, :4]) # xywh @@ -146,15 +146,15 @@ def process_batch(detections, labels, iouv): Return a correct prediction matrix given detections and labels at various IoU thresholds. Args: - detections (np.ndarray): Array of shape (N, 6) where each row corresponds to a detection with - format [x1, y1, x2, y2, conf, class]. - labels (np.ndarray): Array of shape (M, 5) where each row corresponds to a ground truth label with - format [class, x1, y1, x2, y2]. + detections (np.ndarray): Array of shape (N, 6) where each row corresponds to a detection with format + [x1, y1, x2, y2, conf, class]. + labels (np.ndarray): Array of shape (M, 5) where each row corresponds to a ground truth label with format + [class, x1, y1, x2, y2]. iouv (np.ndarray): Array of IoU thresholds to evaluate at. Returns: - correct (np.ndarray): A binary array of shape (N, len(iouv)) indicating whether each detection - is a true positive for each IoU threshold. There are 10 IoU levels used in the evaluation. + correct (np.ndarray): A binary array of shape (N, len(iouv)) indicating whether each detection is a true positive + for each IoU threshold. There are 10 IoU levels used in the evaluation. Example: ```python @@ -220,7 +220,7 @@ def run( Args: data (str | dict): Path to a dataset yaml file or a dataset dictionary. 
- weights (str | list[str], optional): Path to the model weights file(s). Supports various formats: PyTorch, + weights (str | list[str], optional): Path to the model weights file(s). Supports various formats including PyTorch, TorchScript, ONNX, OpenVINO, TensorRT, CoreML, TensorFlow SavedModel, TensorFlow GraphDef, TensorFlow Lite, TensorFlow Edge TPU, and PaddlePaddle. batch_size (int, optional): Batch size for inference. Default is 32. @@ -473,15 +473,14 @@ def parse_opt(): Args: data (str): Path to the dataset YAML file, default is 'data/coco128.yaml'. - weights (List[str]): List of paths to the model weight files, default is 'yolov5s.pt'. + weights (list[str]): List of paths to the model weight files, default is 'yolov5s.pt'. batch_size (int): Batch size for inference, default is 32. imgsz (int): Inference image size in pixels, default is 640. conf_thres (float): Confidence threshold for predictions, default is 0.001. iou_thres (float): IoU threshold for Non-Max Suppression (NMS), default is 0.6. max_det (int): Maximum number of detections per image, default is 300. task (str): Task type - options are 'train', 'val', 'test', 'speed', or 'study'. Default is 'val'. - device (str): Device to run the model on, e.g., '0' or '0,1,2,3' or 'cpu'. Default is empty to let - the system choose automatically. + device (str): Device to run the model on, e.g., '0' or '0,1,2,3' or 'cpu'. Default is empty to let the system choose automatically. workers (int): Maximum number of dataloader workers per rank in DDP mode, default is 8. single_cls (bool): If set, treats the dataset as a single-class dataset. Default is False. augment (bool): If set, performs augmented inference. Default is False. @@ -552,10 +551,10 @@ def main(opt): Args: opt (argparse.Namespace): Parsed command-line options. - - This includes values for parameters like 'data', 'weights', 'batch_size', 'imgsz', 'conf_thres', 'iou_thres', - 'max_det', 'task', 'device', 'workers', 'single_cls', 'augment', 'verbose', 'save_txt', 'save_hybrid', - 'save_conf', 'save_json', 'project', 'name', 'exist_ok', 'half', and 'dnn', essential for configuring - the YOLOv5 tasks. + This includes values for parameters like 'data', 'weights', 'batch_size', 'imgsz', 'conf_thres', 'iou_thres', + 'max_det', 'task', 'device', 'workers', 'single_cls', 'augment', 'verbose', 'save_txt', 'save_hybrid', + 'save_conf', 'save_json', 'project', 'name', 'exist_ok', 'half', and 'dnn', essential for configuring + the YOLOv5 tasks. Returns: None From 150a1a318d2a09ad8269d91c594f4a2fd208a842 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 10 Jul 2024 20:47:16 +0200 Subject: [PATCH 1917/1976] Ultralytics Asset URL Update (#13181) Refactor code for speed and clarity --- README.md | 12 ++++++------ README.zh-CN.md | 12 ++++++------ classify/train.py | 2 +- classify/tutorial.ipynb | 2 +- data/GlobalWheat2020.yaml | 2 +- data/VOC.yaml | 2 +- data/VisDrone.yaml | 8 ++++---- data/coco.yaml | 2 +- data/coco128-seg.yaml | 2 +- data/coco128.yaml | 2 +- segment/tutorial.ipynb | 4 ++-- tutorial.ipynb | 4 ++-- utils/general.py | 4 ++-- 13 files changed, 29 insertions(+), 29 deletions(-) diff --git a/README.md b/README.md index 0440a920775d..394aeacdbbaf 100644 --- a/README.md +++ b/README.md @@ -414,22 +414,22 @@ Get started in seconds with our verified environments. Click each icon below for ##
Contribute
diff --git a/README.zh-CN.md b/README.zh-CN.md index cea3c26f28d1..a24d7a2d44e3 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -413,22 +413,22 @@ python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --inclu ##
贡献
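The remaining hunks in this commit apply one mechanical rewrite across the repository: release assets hosted under the legacy `https://github.com/ultralytics/yolov5/releases/download/v1.0/` and `https://ultralytics.com/assets/` prefixes move to the consolidated `https://github.com/ultralytics/assets/releases/download/v0.0.0/` location. A minimal sketch of that mapping follows; `migrate_asset_url` is a hypothetical helper shown only for illustration, since the patch itself edits each URL string in place.

```python
# Minimal sketch of the asset URL migration applied by hand in the hunks below.
# `migrate_asset_url` is hypothetical and not part of this patch.
LEGACY_PREFIXES = (
    "https://github.com/ultralytics/yolov5/releases/download/v1.0/",
    "https://ultralytics.com/assets/",
)
NEW_PREFIX = "https://github.com/ultralytics/assets/releases/download/v0.0.0/"


def migrate_asset_url(url: str) -> str:
    """Rewrite a legacy Ultralytics asset URL to the consolidated assets repo."""
    for prefix in LEGACY_PREFIXES:
        if url.startswith(prefix):
            return NEW_PREFIX + url[len(prefix):]
    return url  # unrelated or already-migrated URLs pass through unchanged


print(migrate_asset_url("https://ultralytics.com/assets/coco128.zip"))
# https://github.com/ultralytics/assets/releases/download/v0.0.0/coco128.zip
```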
diff --git a/classify/train.py b/classify/train.py index 8ff9d1582d2a..9c12a66c326f 100644 --- a/classify/train.py +++ b/classify/train.py @@ -109,7 +109,7 @@ def train(opt, device): if str(data) == "imagenet": subprocess.run(["bash", str(ROOT / "data/scripts/get_imagenet.sh")], shell=True, check=True) else: - url = f"https://github.com/ultralytics/yolov5/releases/download/v1.0/{data}.zip" + url = f"https://github.com/ultralytics/assets/releases/download/v0.0.0/{data}.zip" download(url, dir=data_dir.parent) s = f"Dataset download success ✅ ({time.time() - t:.1f}s), saved to {colorstr('bold', data_dir)}\n" LOGGER.info(s) diff --git a/classify/tutorial.ipynb b/classify/tutorial.ipynb index f85715ca844e..77c2b94a1b18 100644 --- a/classify/tutorial.ipynb +++ b/classify/tutorial.ipynb @@ -1290,7 +1290,7 @@ "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train-cls', view at http://localhost:6006/\n", "\n", "Dataset not found ⚠️, missing path /content/datasets/imagenette160, attempting download...\n", - "Downloading https://github.com/ultralytics/yolov5/releases/download/v1.0/imagenette160.zip to /content/datasets/imagenette160.zip...\n", + "Downloading https://github.com/ultralytics/assets/releases/download/v0.0.0/imagenette160.zip to /content/datasets/imagenette160.zip...\n", "100% 103M/103M [00:00<00:00, 347MB/s] \n", "Unzipping /content/datasets/imagenette160.zip...\n", "Dataset download success ✅ (3.3s), saved to \u001b[1m/content/datasets/imagenette160\u001b[0m\n", diff --git a/data/GlobalWheat2020.yaml b/data/GlobalWheat2020.yaml index 2039ccd3cce8..acb88290f7e3 100644 --- a/data/GlobalWheat2020.yaml +++ b/data/GlobalWheat2020.yaml @@ -36,7 +36,7 @@ download: | # Download dir = Path(yaml['path']) # dataset root dir urls = ['https://zenodo.org/record/4298502/files/global-wheat-codalab-official.zip', - 'https://github.com/ultralytics/yolov5/releases/download/v1.0/GlobalWheat2020_labels.zip'] + 'https://github.com/ultralytics/assets/releases/download/v0.0.0/GlobalWheat2020_labels.zip'] download(urls, dir=dir) # Make Directories diff --git a/data/VOC.yaml b/data/VOC.yaml index 6929f015dcbf..227d91d76e10 100644 --- a/data/VOC.yaml +++ b/data/VOC.yaml @@ -75,7 +75,7 @@ download: | # Download dir = Path(yaml['path']) # dataset root dir - url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/' + url = 'https://github.com/ultralytics/assets/releases/download/v0.0.0/' urls = [f'{url}VOCtrainval_06-Nov-2007.zip', # 446MB, 5012 images f'{url}VOCtest_06-Nov-2007.zip', # 438MB, 4953 images f'{url}VOCtrainval_11-May-2012.zip'] # 1.95GB, 17126 images diff --git a/data/VisDrone.yaml b/data/VisDrone.yaml index 1dc4e3e0d054..20ff1d39cdd8 100644 --- a/data/VisDrone.yaml +++ b/data/VisDrone.yaml @@ -57,10 +57,10 @@ download: | # Download dir = Path(yaml['path']) # dataset root dir - urls = ['https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-train.zip', - 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-val.zip', - 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-dev.zip', - 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-challenge.zip'] + urls = ['https://github.com/ultralytics/assets/releases/download/v0.0.0/VisDrone2019-DET-train.zip', + 'https://github.com/ultralytics/assets/releases/download/v0.0.0/VisDrone2019-DET-val.zip', + 'https://github.com/ultralytics/assets/releases/download/v0.0.0/VisDrone2019-DET-test-dev.zip', + 
'https://github.com/ultralytics/assets/releases/download/v0.0.0/VisDrone2019-DET-test-challenge.zip'] download(urls, dir=dir, curl=True, threads=4) # Convert diff --git a/data/coco.yaml b/data/coco.yaml index 4bfd52a9d536..816efa5cf180 100644 --- a/data/coco.yaml +++ b/data/coco.yaml @@ -103,7 +103,7 @@ download: | # Download labels segments = False # segment or box labels dir = Path(yaml['path']) # dataset root dir - url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/' + url = 'https://github.com/ultralytics/assets/releases/download/v0.0.0/' urls = [url + ('coco2017labels-segments.zip' if segments else 'coco2017labels.zip')] # labels download(urls, dir=dir.parent) diff --git a/data/coco128-seg.yaml b/data/coco128-seg.yaml index a96ee8ff6696..aea711c98396 100644 --- a/data/coco128-seg.yaml +++ b/data/coco128-seg.yaml @@ -96,4 +96,4 @@ names: 79: toothbrush # Download script/URL (optional) -download: https://ultralytics.com/assets/coco128-seg.zip +download: https://github.com/ultralytics/assets/releases/download/v0.0.0/coco128-seg.zip diff --git a/data/coco128.yaml b/data/coco128.yaml index 074903dd0ddf..2ed35c06ea7e 100644 --- a/data/coco128.yaml +++ b/data/coco128.yaml @@ -96,4 +96,4 @@ names: 79: toothbrush # Download script/URL (optional) -download: https://ultralytics.com/assets/coco128.zip +download: https://github.com/ultralytics/assets/releases/download/v0.0.0/coco128.zip diff --git a/segment/tutorial.ipynb b/segment/tutorial.ipynb index 0e6091869b7c..c96876801adb 100644 --- a/segment/tutorial.ipynb +++ b/segment/tutorial.ipynb @@ -162,7 +162,7 @@ "output_type": "stream", "name": "stdout", "text": [ - "Downloading https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017labels-segments.zip ...\n", + "Downloading https://github.com/ultralytics/assets/releases/download/v0.0.0/coco2017labels-segments.zip ...\n", "Downloading http://images.cocodataset.org/zips/val2017.zip ...\n", "######################################################################## 100.0%\n", "######################################################################## 100.0%\n" @@ -286,7 +286,7 @@ "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train-seg', view at http://localhost:6006/\n", "\n", "Dataset not found ⚠️, missing paths ['/content/datasets/coco128-seg/images/train2017']\n", - "Downloading https://ultralytics.com/assets/coco128-seg.zip to coco128-seg.zip...\n", + "Downloading https://github.com/ultralytics/assets/releases/download/v0.0.0/coco128-seg.zip to coco128-seg.zip...\n", "100% 6.79M/6.79M [00:01<00:00, 6.73MB/s]\n", "Dataset download success ✅ (1.9s), saved to \u001b[1m/content/datasets\u001b[0m\n", "\n", diff --git a/tutorial.ipynb b/tutorial.ipynb index d7953a06599f..cb47d27e7e69 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -170,7 +170,7 @@ }, "source": [ "# Download COCO val\n", - "torch.hub.download_url_to_file('https://ultralytics.com/assets/coco2017val.zip', 'tmp.zip') # download (780M - 5000 images)\n", + "torch.hub.download_url_to_file('https://github.com/ultralytics/assets/releases/download/v0.0.0/coco2017val.zip', 'tmp.zip') # download (780M - 5000 images)\n", "!unzip -q tmp.zip -d ../datasets && rm tmp.zip # unzip" ], "execution_count": null, @@ -326,7 +326,7 @@ "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n", "\n", "Dataset not found ⚠️, missing paths ['/content/datasets/coco128/images/train2017']\n", - "Downloading 
https://ultralytics.com/assets/coco128.zip to coco128.zip...\n", + "Downloading https://github.com/ultralytics/assets/releases/download/v0.0.0/coco128.zip to coco128.zip...\n", "100% 6.66M/6.66M [00:00<00:00, 75.6MB/s]\n", "Dataset download success ✅ (0.6s), saved to \u001b[1m/content/datasets\u001b[0m\n", "\n", diff --git a/utils/general.py b/utils/general.py index 95a76644776f..e311504b3031 100644 --- a/utils/general.py +++ b/utils/general.py @@ -59,7 +59,7 @@ AUTOINSTALL = str(os.getenv("YOLOv5_AUTOINSTALL", True)).lower() == "true" # global auto-install mode VERBOSE = str(os.getenv("YOLOv5_VERBOSE", True)).lower() == "true" # global verbose mode TQDM_BAR_FORMAT = "{l_bar}{bar:10}{r_bar}" # tqdm bar format -FONT = "Arial.ttf" # https://ultralytics.com/assets/Arial.ttf +FONT = "Arial.ttf" # https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.ttf torch.set_printoptions(linewidth=320, precision=5, profile="long") np.set_printoptions(linewidth=320, formatter={"float_kind": "{:11.5g}".format}) # format short g, %precision=5 @@ -511,7 +511,7 @@ def check_font(font=FONT, progress=False): font = Path(font) file = CONFIG_DIR / font.name if not font.exists() and not file.exists(): - url = f"https://ultralytics.com/assets/{font.name}" + url = f"https://github.com/ultralytics/assets/releases/download/v0.0.0/{font.name}" LOGGER.info(f"Downloading {url} to {file}...") torch.hub.download_url_to_file(url, str(file), progress=progress) From 17c500461d7b14a24133d91bc6437af62914074c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 13 Jul 2024 16:06:43 +0200 Subject: [PATCH 1918/1976] [Snyk] Security upgrade zipp from 3.15.0 to 3.19.1 (#13183) fix: utils/google_app_engine/additional_requirements.txt to reduce vulnerabilities The following vulnerabilities are fixed by pinning transitive dependencies: - https://snyk.io/vuln/SNYK-PYTHON-ZIPP-7430899 Co-authored-by: snyk-bot --- utils/google_app_engine/additional_requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/utils/google_app_engine/additional_requirements.txt b/utils/google_app_engine/additional_requirements.txt index 821c3caf3cbf..08c276f7b46d 100644 --- a/utils/google_app_engine/additional_requirements.txt +++ b/utils/google_app_engine/additional_requirements.txt @@ -3,3 +3,4 @@ pip==23.3 Flask==2.3.2 gunicorn==22.0.0 werkzeug>=3.0.1 # not directly required, pinned by Snyk to avoid a vulnerability +zipp>=3.19.1 # not directly required, pinned by Snyk to avoid a vulnerability From 41e26a7f836576af3695f2045746d8bf7c996fb4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 16 Jul 2024 01:20:38 +0200 Subject: [PATCH 1919/1976] [Snyk] Security upgrade zipp from 3.15.0 to 3.19.1 (#13188) fix: utils/google_app_engine/additional_requirements.txt to reduce vulnerabilities The following vulnerabilities are fixed by pinning transitive dependencies: - https://snyk.io/vuln/SNYK-PYTHON-ZIPP-7430899 Co-authored-by: snyk-bot From 12be49963c1695e83dea059f2ab90750e5da5ca4 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 16 Jul 2024 01:20:57 +0200 Subject: [PATCH 1920/1976] Ultralytics Code Refactor https://ultralytics.com/actions (#13191) * Refactor code for speed and clarity * Auto-format by https://ultralytics.com/actions --------- Co-authored-by: UltralyticsAssistant --- benchmarks.py | 10 +-- detect.py | 31 +++++---- export.py | 186 +++++++++++++++++++++++--------------------------- hubconf.py | 102 +++++++++++++++------------ train.py | 35 ++++++---- val.py | 65 +++++++++--------- 6 files changed, 216 insertions(+), 
213 deletions(-) diff --git a/benchmarks.py b/benchmarks.py index ea11ca1f8590..e92a645fbe84 100644 --- a/benchmarks.py +++ b/benchmarks.py @@ -78,10 +78,11 @@ def run( None. Logs information about the benchmark results, including the format, size, mAP50-95, and inference time. Notes: - Supported export formats and models include PyTorch, TorchScript, ONNX, OpenVINO, TensorRT, CoreML, TensorFlow - SavedModel, TensorFlow GraphDef, TensorFlow Lite, and TensorFlow Edge TPU. Edge TPU and TF.js are unsupported. + Supported export formats and models include PyTorch, TorchScript, ONNX, OpenVINO, TensorRT, CoreML, + TensorFlow SavedModel, TensorFlow GraphDef, TensorFlow Lite, and TensorFlow Edge TPU. Edge TPU and TF.js + are unsupported. - Examples: + Example: ```python $ python benchmarks.py --weights yolov5s.pt --img 640 ``` @@ -190,7 +191,6 @@ def test( $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu # CPU support $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow # GPU support $ pip install -U nvidia-tensorrt --index-url https://pypi.ngc.nvidia.com # TensorRT - Run export tests: $ python benchmarks.py --weights yolov5s.pt --img 640 """ @@ -263,7 +263,7 @@ def main(opt): Args: opt (argparse.Namespace): Parsed command-line arguments including options for weights, image size, batch size, data - configuration, device, and other flags for inference settings. + configuration, device, and other flags for inference settings. Returns: None: This function does not return any value. It leverages side-effects such as logging and running benchmarks. diff --git a/detect.py b/detect.py index e35d220c23f4..61dd8d5499cf 100644 --- a/detect.py +++ b/detect.py @@ -102,15 +102,15 @@ def run( Args: weights (str | Path): Path to the model weights file or a Triton URL. Default is 'yolov5s.pt'. - source (str | Path): Input source, which can be a file, directory, URL, glob pattern, screen capture, or webcam index. - Default is 'data/images'. + source (str | Path): Input source, which can be a file, directory, URL, glob pattern, screen capture, or webcam + index. Default is 'data/images'. data (str | Path): Path to the dataset YAML file. Default is 'data/coco128.yaml'. imgsz (tuple[int, int]): Inference image size as a tuple (height, width). Default is (640, 640). conf_thres (float): Confidence threshold for detections. Default is 0.25. iou_thres (float): Intersection Over Union (IOU) threshold for non-max suppression. Default is 0.45. max_det (int): Maximum number of detections per image. Default is 1000. - device (str): CUDA device identifier (e.g., '0' or '0,1,2,3') or 'cpu'. Default is an empty string, which - uses the best available device. + device (str): CUDA device identifier (e.g., '0' or '0,1,2,3') or 'cpu'. Default is an empty string, which uses the + best available device. view_img (bool): If True, display inference results using OpenCV. Default is False. save_txt (bool): If True, save results in a text file. Default is False. save_csv (bool): If True, save results in a CSV file. Default is False. 
@@ -137,15 +137,15 @@ def run( None Examples: - ```python - from ultralytics import run + ```python + from ultralytics import run - # Run inference on an image - run(source='data/images/example.jpg', weights='yolov5s.pt', device='0') + # Run inference on an image + run(source='data/images/example.jpg', weights='yolov5s.pt', device='0') - # Run inference on a video with specific confidence threshold - run(source='data/videos/example.mp4', weights='yolov5s.pt', conf_thres=0.4, device='0') - ``` + # Run inference on a video with specific confidence threshold + run(source='data/videos/example.mp4', weights='yolov5s.pt', conf_thres=0.4, device='0') + ``` """ source = str(source) save_img = not nosave and not source.endswith(".txt") # save inference images @@ -316,7 +316,7 @@ def write_to_csv(image_name, prediction, confidence): def parse_opt(): """ - Parses command-line arguments for YOLOv5 detection, setting inference options and model configurations. + Parse command-line arguments for YOLOv5 detection, allowing custom inference options and model configurations. Args: --weights (str | list[str], optional): Model path or Triton URL. Defaults to ROOT / 'yolov5s.pt'. @@ -346,7 +346,8 @@ def parse_opt(): --hide-conf (bool, optional): Flag to hide confidences in the output. Defaults to False. --half (bool, optional): Flag to use FP16 half-precision inference. Defaults to False. --dnn (bool, optional): Flag to use OpenCV DNN for ONNX inference. Defaults to False. - --vid-stride (int, optional): Video frame-rate stride, determining the number of frames to skip in between consecutive frames. Defaults to 1. + --vid-stride (int, optional): Video frame-rate stride, determining the number of frames to skip in between + consecutive frames. Defaults to 1. Returns: argparse.Namespace: Parsed command-line arguments as an argparse.Namespace object. @@ -403,8 +404,8 @@ def main(opt): None Note: - This function performs essential pre-execution checks and initiates the YOLOv5 detection process based on user-specified options. - Refer to the usage guide and examples for more information about different sources and formats at: + This function performs essential pre-execution checks and initiates the YOLOv5 detection process based on user-specified + options. Refer to the usage guide and examples for more information about different sources and formats at: https://github.com/ultralytics/ultralytics Example usage: diff --git a/export.py b/export.py index 3688dfd848a6..30944a93aff8 100644 --- a/export.py +++ b/export.py @@ -97,15 +97,15 @@ def __init__(self, model, im): Args: model (torch.nn.Module): The PyTorch model to be adapted for iOS compatibility. - im (torch.Tensor): An input tensor representing a batch of images with shape (batch, channel, height, width). + im (torch.Tensor): An input tensor representing a batch of images with shape (B, C, H, W). Returns: None: This method does not return any value. Notes: - This initializer configures normalization based on the input image dimensions, which is critical for ensuring the - model's compatibility and proper functionality on iOS devices. The normalization step involves dividing by the image - width if the image is square; otherwise, additional conditions might apply (trimmed for brevity). + This initializer configures normalization based on the input image dimensions, which is critical for + ensuring the model's compatibility and proper functionality on iOS devices. 
The normalization step + involves dividing by the image width if the image is square; otherwise, additional conditions might apply. """ super().__init__() b, c, h, w = im.shape # batch, channel, height, width @@ -120,14 +120,15 @@ def __init__(self, model, im): def forward(self, x): """ - Runs a forward pass on the input tensor, returning class confidences and normalized coordinates. + Run a forward pass on the input tensor, returning class confidences and normalized coordinates. Args: - x (torch.Tensor): Input tensor containing the image data. + x (torch.Tensor): Input tensor containing the image data with shape (batch, channels, height, width). Returns: torch.Tensor: Concatenated tensor with normalized coordinates (xywh), confidence scores (conf), - and class probabilities (cls). + and class probabilities (cls), having shape (N, 4 + 1 + C), where N is the number of predictions, + and C is the number of classes. Examples: ```python @@ -144,9 +145,9 @@ def export_formats(): Returns a DataFrame of supported YOLOv5 model export formats and their properties. Returns: - pd.DataFrame: A pandas DataFrame containing supported export formats and their properties. The DataFrame includes - columns for format name, CLI argument suffix, file extension or directory name, and boolean flags indicating if the - export format supports training and detection. + pandas.DataFrame: A DataFrame containing supported export formats and their properties. The DataFrame + includes columns for format name, CLI argument suffix, file extension or directory name, and boolean flags + indicating if the export format supports training and detection. Examples: ```python @@ -156,7 +157,7 @@ def export_formats(): Notes: The DataFrame contains the following columns: - - Format: The name of the model format (e.g., PyTorch, TorchScript, ONNX, etc.) + - Format: The name of the model format (e.g., PyTorch, TorchScript, ONNX, etc.). - Include Argument: The argument to use with the export script to include this format. - File Suffix: File extension or directory name associated with the format. - Supports Training: Whether the format supports training. @@ -181,7 +182,7 @@ def export_formats(): def try_export(inner_func): """ - Logs success or failure, execution time, and file size for YOLOv5 model export functions wrapped with @try_export. + Log success or failure, execution time, and file size for YOLOv5 model export functions wrapped with @try_export. Args: inner_func (Callable): The model export function to be wrapped by the decorator. @@ -202,7 +203,8 @@ def export_onnx(model, filepath): ``` Notes: - For additional requirements and model export formats, refer to the [Ultralytics YOLOv5 GitHub repository](https://github.com/ultralytics/ultralytics). + For additional requirements and model export formats, refer to the + [Ultralytics YOLOv5 GitHub repository](https://github.com/ultralytics/ultralytics). """ inner_args = get_default_args(inner_func) @@ -224,7 +226,7 @@ def outer_func(*args, **kwargs): @try_export def export_torchscript(model, im, file, optimize, prefix=colorstr("TorchScript:")): """ - Exports a YOLOv5 model to the TorchScript format. + Export a YOLOv5 model to the TorchScript format. Args: model (torch.nn.Module): The YOLOv5 model to be exported. 
@@ -235,8 +237,8 @@ def export_torchscript(model, im, file, optimize, prefix=colorstr("TorchScript:" Returns: (str | None, torch.jit.ScriptModule | None): A tuple containing the file path of the exported model - (as a string) and the TorchScript model (as a torch.jit.ScriptModule). If the export fails, both elements - of the tuple will be None. + (as a string) and the TorchScript model (as a torch.jit.ScriptModule). If the export fails, both elements + of the tuple will be None. Notes: - This function uses tracing to create the TorchScript model. @@ -303,6 +305,26 @@ def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr("ONNX ``` pip install onnx onnx-simplifier onnxruntime onnxruntime-gpu ``` + + Example: + ```python + from pathlib import Path + import torch + from models.experimental import attempt_load + from utils.torch_utils import select_device + + # Load model + weights = 'yolov5s.pt' + device = select_device('') + model = attempt_load(weights, device=device) + + # Example input tensor + im = torch.zeros(1, 3, 640, 640).to(device) + + # Export model + file_path = Path('yolov5s.onnx') + export_onnx(model, im, file_path, opset=12, dynamic=True, simplify=True) + ``` """ check_requirements("onnx>=1.12.0") import onnx @@ -361,10 +383,10 @@ def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr("ONNX @try_export def export_openvino(file, metadata, half, int8, data, prefix=colorstr("OpenVINO:")): """ - Exports a YOLOv5 model to OpenVINO format with optional FP16 and INT8 quantization. + Export a YOLOv5 model to OpenVINO format with optional FP16 and INT8 quantization. Args: - file (Path): The path to the output file where the OpenVINO model will be saved. + file (Path): Path to the output file where the OpenVINO model will be saved. metadata (dict): Dictionary including model metadata such as names and strides. half (bool): If True, export the model with FP16 precision. int8 (bool): If True, export the model with INT8 quantization. @@ -372,8 +394,8 @@ def export_openvino(file, metadata, half, int8, data, prefix=colorstr("OpenVINO: prefix (str): Prefix string for logging purposes (default is "OpenVINO:"). Returns: - (str, openvino.runtime.Model | None): Returns the OpenVINO model file path and openvino.runtime.Model object if - export is successful; otherwise, returns None. + (str, openvino.runtime.Model | None): The OpenVINO model file path and openvino.runtime.Model object if export is + successful; otherwise, None. Notes: - Requires `openvino-dev` package version 2023.0 or higher. Install with: @@ -451,7 +473,7 @@ def transform_fn(data_item): @try_export def export_paddle(model, im, file, metadata, prefix=colorstr("PaddlePaddle:")): """ - Export a YOLOv5 model to PaddlePaddle format using X2Paddle, saving the converted model and metadata. + Export a YOLOv5 PyTorch model to PaddlePaddle format using X2Paddle, saving the converted model and metadata. Args: model (torch.nn.Module): The YOLOv5 model to be exported. @@ -554,12 +576,12 @@ def export_coreml(model, im, file, int8, half, nms, prefix=colorstr("CoreML:")): @try_export def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose=False, prefix=colorstr("TensorRT:")): """ - Exports a YOLOv5 model to TensorRT engine format, requiring GPU and TensorRT>=7.0.0. + Export a YOLOv5 model to TensorRT engine format, requiring GPU and TensorRT>=7.0.0. Args: model (torch.nn.Module): YOLOv5 model to be exported. im (torch.Tensor): Input tensor of shape (B, C, H, W).
- file (Path): Path to save the exported model. + file (pathlib.Path): Path to save the exported model. half (bool): Set to True to export with FP16 precision. dynamic (bool): Set to True to enable dynamic input shapes. simplify (bool): Set to True to simplify the model during export. @@ -568,7 +590,7 @@ def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose prefix (str): Log message prefix. Returns: - (Path, None): Tuple containing the path to the exported model and None. + (pathlib.Path, None): Tuple containing the path to the exported model and None. Raises: AssertionError: If executed on CPU instead of GPU. @@ -666,7 +688,7 @@ def export_saved_model( prefix=colorstr("TensorFlow SavedModel:"), ): """ - Exports a YOLOv5 model to the TensorFlow SavedModel format, supporting dynamic axes and non-maximum suppression + Export a YOLOv5 model to the TensorFlow SavedModel format, supporting dynamic axes and non-maximum suppression (NMS). Args: @@ -791,7 +813,7 @@ def export_tflite( ): # YOLOv5 TensorFlow Lite export """ - Exports a YOLOv5 model to TensorFlow Lite format with optional INT8 quantization and NMS support. + Export a YOLOv5 model to TensorFlow Lite format with optional INT8 quantization and NMS support. Args: keras_model (tf.keras.Model): The Keras model to be exported. @@ -805,7 +827,8 @@ def export_tflite( prefix (str): Prefix for log messages. Returns: - (str | None, tflite.Model | None): The file path of the exported TFLite model and the TFLite model instance, or None if export failed. + (str | None, tflite.Model | None): The file path of the exported TFLite model and the TFLite model instance, or None + if the export failed. Example: ```python @@ -863,8 +886,6 @@ def export_tflite( @try_export def export_edgetpu(file, prefix=colorstr("Edge TPU:")): """ - Clear and concise summary line describing the function's purpose: - Exports a YOLOv5 model to Edge TPU compatible TFLite format; requires Linux and Edge TPU compiler. Args: @@ -927,28 +948,30 @@ def export_edgetpu(file, prefix=colorstr("Edge TPU:")): @try_export def export_tfjs(file, int8, prefix=colorstr("TensorFlow.js:")): """ - Exports a YOLOv5 model to TensorFlow.js format, optionally with uint8 quantization. + Convert a YOLOv5 model to TensorFlow.js format with optional uint8 quantization. Args: - file (Path): Path to the input model file. - int8 (bool): If True, applies uint8 quantization. - prefix (str): Prefix for logging information (default: colorstr("TensorFlow.js:")). + file (Path): Path to the YOLOv5 model file to be converted, typically having a ".pt" or ".onnx" extension. + int8 (bool): If True, applies uint8 quantization during the conversion process. + prefix (str): Optional prefix for logging messages, default is 'TensorFlow.js:' with color formatting. Returns: - (str, None): The output directory path as a string and None. + (str, None): Tuple containing the output directory path as a string and None. Notes: - This function requires `tensorflowjs` to be installed. You can install it using: - ```shell - pip install tensorflowjs - ``` + - This function requires the `tensorflowjs` package. Install it using: + ```shell + pip install tensorflowjs + ``` + - The converted TensorFlow.js model will be saved in a directory with the "_web_model" suffix appended to the original file name. + - The conversion involves running shell commands that invoke the TensorFlow.js converter tool. 
- Example usage: + Example: ```python - export_tfjs(Path('yolov5s.onnx'), int8=False) + from pathlib import Path + file = Path('yolov5.onnx') + export_tfjs(file, int8=False) ``` - - The TensorFlow.js converted model is saved in the directory specified by `file` with "_web_model" suffix. """ check_requirements("tensorflowjs") import tensorflowjs as tfjs @@ -987,11 +1010,13 @@ def export_tfjs(file, int8, prefix=colorstr("TensorFlow.js:")): def add_tflite_metadata(file, metadata, num_outputs): """ - Adds metadata to a TensorFlow Lite (TFLite) model file, supporting multiple outputs, based on TensorFlow guidelines. + Adds metadata to a TensorFlow Lite (TFLite) model file, supporting multiple outputs according to TensorFlow + guidelines. Args: file (str): Path to the TFLite model file to which metadata will be added. metadata (dict): Metadata information to be added to the model, structured as required by the TFLite metadata schema. + Common keys include "name", "description", "version", "author", and "license". num_outputs (int): Number of output tensors the model has, used to configure the metadata properly. Returns: @@ -1047,20 +1072,20 @@ def add_tflite_metadata(file, metadata, num_outputs): def pipeline_coreml(model, im, file, names, y, prefix=colorstr("CoreML Pipeline:")): """ - Converts a PyTorch YOLOv5 model to CoreML format with Non-Maximum Suppression (NMS), handling different input/output - shapes and saving the model. + Convert a PyTorch YOLOv5 model to CoreML format with Non-Maximum Suppression (NMS), handling different input/output + shapes, and saving the model. Args: - model (torch.nn.Module): The YOLOv5 PyTorch model. - im (torch.Tensor): Input tensor example with shape [N, C, H, W], where N is the batch size, C is the number of - channels, H is the height, and W is the width. + model (torch.nn.Module): The YOLOv5 PyTorch model to be converted. + im (torch.Tensor): Example input tensor with shape (N, C, H, W), where N is the batch size, C is the number of channels, + H is the height, and W is the width. file (Path): Path to save the converted CoreML model. names (dict[int, str]): Dictionary mapping class indices to class names. y (torch.Tensor): Output tensor from the PyTorch model's forward pass. prefix (str): Custom prefix for logging messages. Returns: - Path: Path to the saved CoreML model (.mlmodel). + (Path): Path to the saved CoreML model (.mlmodel). Raises: AssertionError: If the number of class names does not match the number of classes in the model. 
@@ -1075,18 +1100,14 @@ def pipeline_coreml(model, im, file, names, y, prefix=colorstr("CoreML Pipeline: from pathlib import Path import torch - # Load YOLOv5 model and an example input tensor - model = torch.load("yolov5s.pt") - im = torch.zeros(1, 3, 640, 640) # Example input tensor + model = torch.load('yolov5s.pt') # Load YOLOv5 model + im = torch.zeros((1, 3, 640, 640)) # Example input tensor - # Define class names - names = {0: "person", 1: "bicycle", 2: "car", ...} + names = {0: "person", 1: "bicycle", 2: "car", ...} # Define class names - # Perform forward pass to get model output - y = model(im) + y = model(im) # Perform forward pass to get model output - # Convert to CoreML - output_file = Path("yolov5s.mlmodel") + output_file = Path('yolov5s.mlmodel') # Convert to CoreML pipeline_coreml(model, im, output_file, names, y) ``` """ @@ -1246,8 +1267,6 @@ def run( conf_thres=0.25, # TF.js NMS: confidence threshold ): """ - Clear and concise summary line describing the function's purpose: - Exports a YOLOv5 model to specified formats including ONNX, TensorRT, CoreML, and TensorFlow. Args: @@ -1424,14 +1443,14 @@ def run( def parse_opt(known=False): """ - Parses command-line arguments for YOLOv5 model export configurations. + Parse command-line options for YOLOv5 model export configurations. Args: - known (bool): If True, `argparse.ArgumentParser.parse_known_args` is used to parse command-line arguments; otherwise, - `argparse.ArgumentParser.parse_args` is used. Defaults to False. + known (bool): If True, uses `argparse.ArgumentParser.parse_known_args`; otherwise, uses `argparse.ArgumentParser.parse_args`. + Default is False. Returns: - argparse.Namespace: An object containing parsed command-line arguments. + argparse.Namespace: Object containing parsed command-line arguments. Example: ```python @@ -1475,44 +1494,7 @@ def parse_opt(known=False): def main(opt): - """ - Exports the YOLOv5 model to specified formats, including ONNX, TensorRT, CoreML, and TensorFlow. - - Args: - opt (argparse.Namespace): Parsed command-line arguments containing the export configurations. - - data (str): Path to the dataset YAML configuration file (e.g., 'data/coco128.yaml'). - - weights (list[str] | str): Paths to the pretrained model weights file(s) (e.g., 'yolov5s.pt'). - - imgsz (list[int]): Image size as a list [height, width]. - - batch_size (int): Batch size for exporting the model. - - device (str): Device to run the export on, such as '0' for GPU, or 'cpu' for CPU. - - half (bool): Flag to export the model with FP16 half-precision. - - inplace (bool): Set the YOLOv5 Detect() module inplace mode to True. - - keras (bool): Flag to use Keras for TensorFlow SavedModel export. - - optimize (bool): Optimize TorchScript model for mobile deployment. - - int8 (bool): Apply INT8 quantization for CoreML or TensorFlow models. - - per_tensor (bool): Apply per-tensor quantization for TensorFlow models. - - dynamic (bool): Enable dynamic axes for ONNX, TensorFlow, or TensorRT exports. - - simplify (bool): Simplify ONNX model during export. - - opset (int): ONNX opset version. - - verbose (bool): Enable verbose logging for TensorRT export. - - workspace (int): TensorRT workspace size in GB. - - nms (bool): Add non-maximum suppression (NMS) to the TensorFlow model. - - agnostic_nms (bool): Add class-agnostic NMS to the TensorFlow model. - - topk_per_class (int): Top-K boxes per class to keep for TensorFlow.js NMS. - - topk_all (int): Top-K boxes for all classes to keep for TensorFlow.js NMS. 
-        iou_thres (float): IoU threshold for NMS.
-        conf_thres (float): Confidence threshold for NMS.
-        include (list[str]): List of formats to include in export (e.g., ['torchscript', 'onnx']).
-
-    Returns:
-        list[str]: List of paths to the exported model files.
-
-    Example:
-        ```python
-        opt = parse_opt()
-        main(opt)
-        ```
-    """
+    """Run the YOLOv5 model export for each weights path in the parsed command-line options."""
     for opt.weights in opt.weights if isinstance(opt.weights, list) else [opt.weights]:
         run(**vars(opt))

diff --git a/hubconf.py b/hubconf.py
index 5815d9e67b84..98e399421809 100644
--- a/hubconf.py
+++ b/hubconf.py
@@ -24,10 +24,11 @@ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbo
         classes (int, optional): Number of classes the model is expected to detect. Defaults to 80.
         autoshape (bool, optional): If True, applies the YOLOv5 .autoshape() wrapper for various input formats. Defaults to True.
         verbose (bool, optional): If True, prints detailed information during the model creation/loading process. Defaults to True.
-        device (str | torch.device | None, optional): Device to use for model parameters (e.g., 'cpu', 'cuda'). If None, selects the best available device. Defaults to None.
+        device (str | torch.device | None, optional): Device to use for model parameters (e.g., 'cpu', 'cuda'). If None, selects
+            the best available device. Defaults to None.

     Returns:
-        DetectMultiBackend | AutoShape: The loaded YOLOv5 model, potentially wrapped with AutoShape if specified.
+        (DetectMultiBackend | AutoShape): The loaded YOLOv5 model, potentially wrapped with AutoShape if specified.

     Examples:
         ```python
@@ -108,8 +109,8 @@ def custom(path="path/to/model.pt", autoshape=True, _verbose=True, device=None):
     Args:
         path (str): Path to the custom model file (e.g., 'path/to/model.pt').
-        autoshape (bool): Apply YOLOv5 .autoshape() wrapper to model if True, enabling compatibility with various input types
-            (default is True).
+        autoshape (bool): Apply YOLOv5 .autoshape() wrapper to model if True, enabling compatibility with various input
+            types (default is True).
         _verbose (bool): If True, prints all informational messages to the screen; otherwise, operates silently (default is True).
         device (str | torch.device | None): Device to load the model on, e.g., 'cpu', 'cuda', torch.device('cuda:0'), etc.
@@ -174,32 +175,42 @@ def yolov5n(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=Tr
 def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
     """
-    Creates YOLOv5-small model with options for pretraining, input channels, class count, autoshaping, verbosity, and
-    device.
+    Create a YOLOv5-small (yolov5s) model with options for pretraining, input channels, class count, autoshaping,
+    verbosity, and device configuration.

     Args:
         pretrained (bool, optional): Flag to load pretrained weights into the model. Defaults to True.
         channels (int, optional): Number of input channels. Defaults to 3.
         classes (int, optional): Number of model classes. Defaults to 80.
-        autoshape (bool, optional): Whether to apply YOLOv5 .autoshape() wrapper to the model for preprocessed inputs.
+        autoshape (bool, optional): Whether to wrap the model with YOLOv5's .autoshape() for handling various input formats.
             Defaults to True.
-        _verbose (bool, optional): Flag to print detailed information on model loading. Defaults to True.
-        device (str | torch.device | None, optional): Device to use for model parameters, e.g., 'cpu', 'cuda'.
If None, - auto-select the best available device. Defaults to None. + _verbose (bool, optional): Flag to print detailed information regarding model loading. Defaults to True. + device (str | torch.device | None, optional): Device to use for model computation, can be 'cpu', 'cuda', or + torch.device instances. If None, automatically selects the best available device. Defaults to None. Returns: - torch.nn.Module: The YOLOv5-small model loaded with specified configurations and optionally pretrained weights. + torch.nn.Module: The YOLOv5-small model configured and loaded according to the specified parameters. - Usage: + Example: ```python import torch - model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # official model - model = torch.hub.load('ultralytics/yolov5:master', 'yolov5s') # from branch - model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s.pt') # custom/local model - model = torch.hub.load('.', 'custom', 'yolov5s.pt', source='local') # local repo + + # Load the official YOLOv5-small model with pretrained weights + model = torch.hub.load('ultralytics/yolov5', 'yolov5s') + + # Load the YOLOv5-small model from a specific branch + model = torch.hub.load('ultralytics/yolov5:master', 'yolov5s') + + # Load a custom YOLOv5-small model from a local checkpoint + model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s.pt') + + # Load a local YOLOv5-small model specifying source as local repository + model = torch.hub.load('.', 'custom', 'yolov5s.pt', source='local') ``` - For more information, visit https://pytorch.org/hub/ultralytics_yolov5. + Notes: + For more details on model loading and customization, visit + the [YOLOv5 PyTorch Hub Documentation](https://pytorch.org/hub/ultralytics_yolov5). """ return _create("yolov5s", pretrained, channels, classes, autoshape, _verbose, device) @@ -213,22 +224,26 @@ def yolov5m(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=Tr pretrained (bool, optional): Whether to load pretrained weights into the model. Default is True. channels (int, optional): Number of input channels. Default is 3. classes (int, optional): Number of model classes. Default is 80. - autoshape (bool, optional): Apply YOLOv5 .autoshape() wrapper to the model for handling various input formats. Default is True. + autoshape (bool, optional): Apply YOLOv5 .autoshape() wrapper to the model for handling various input formats. + Default is True. _verbose (bool, optional): Whether to print detailed information to the screen. Default is True. - device (str | torch.device | None, optional): Device specification to use for model parameters (e.g., 'cpu', 'cuda'). Default is None. + device (str | torch.device | None, optional): Device specification to use for model parameters (e.g., 'cpu', 'cuda'). + Default is None. Returns: torch.nn.Module: The instantiated YOLOv5-medium model. 
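To complement the loading calls in the Usage Example that follows, here is a brief, hedged sketch of inference with a hub-loaded, AutoShape-wrapped model; the image URL is illustrative:

```python
# Hedged sketch: inference with a hub-loaded YOLOv5 model. The AutoShape wrapper
# accepts URLs, file paths, PIL images, OpenCV/numpy arrays and torch tensors.
import torch

model = torch.hub.load("ultralytics/yolov5", "yolov5m")  # YOLOv5-medium, pretrained
results = model("https://ultralytics.com/images/zidane.jpg")  # illustrative image URL
results.print()  # per-image detection summary
boxes = results.xyxy[0]  # detections for image 0: (x1, y1, x2, y2, confidence, class)
```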
     Usage Example:
-    ```python
-    import torch
+        ```python
+        import torch

-    model = torch.hub.load('ultralytics/yolov5', 'yolov5m')  # Load YOLOv5-medium from Ultralytics repository
-    model = torch.hub.load('ultralytics/yolov5:master', 'yolov5m')  # Load from the master branch
-    model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5m.pt')  # Load a custom/local YOLOv5-medium model
-    model = torch.hub.load('.', 'custom', 'yolov5m.pt', source='local')  # Load from a local repository
-    ```
+        model = torch.hub.load('ultralytics/yolov5', 'yolov5m')  # Load YOLOv5-medium from Ultralytics repository
+        model = torch.hub.load('ultralytics/yolov5:master', 'yolov5m')  # Load from the master branch
+        model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5m.pt')  # Load a custom/local YOLOv5-medium model
+        model = torch.hub.load('.', 'custom', 'yolov5m.pt', source='local')  # Load from a local repository
+        ```
+
+    For more information, visit https://pytorch.org/hub/ultralytics_yolov5.
     """
     return _create("yolov5m", pretrained, channels, classes, autoshape, _verbose, device)

@@ -251,7 +266,7 @@ def yolov5l(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=Tr
         YOLOv5 model (torch.nn.Module): The YOLOv5-large model instantiated with specified configurations and possibly
             pretrained weights.

-    Example:
+    Examples:
         ```python
         import torch
         model = torch.hub.load('ultralytics/yolov5', 'yolov5l')
@@ -266,21 +281,22 @@ def yolov5x(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=Tr
 def yolov5x(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
     """
-    Instantiates the YOLOv5-xlarge model with customizable pretraining, channel count, class count, autoshaping,
-    verbosity, and device.
+    Instantiate the YOLOv5-xlarge model with options for pretraining, input channels, class count, autoshaping,
+    verbosity, and device specification.

     Args:
         pretrained (bool): If True, loads pretrained weights into the model. Defaults to True.
-        channels (int): Number of input channels. Defaults to 3.
-        classes (int): Number of model classes. Defaults to 80.
-        autoshape (bool): If True, applies YOLOv5 .autoshape() wrapper to the model for easier image handling. Defaults to
+        channels (int): Number of input channels for the model. Defaults to 3.
+        classes (int): Number of model classes for object detection. Defaults to 80.
+        autoshape (bool): If True, applies the YOLOv5 .autoshape() wrapper for handling different input formats. Defaults to
             True.
-        _verbose (bool): If True, prints detailed information to the screen. Defaults to True.
-        device (str | torch.device | None): Device for model parameters, e.g., 'cpu', 'cuda:0', or a torch.device object.
+        _verbose (bool): If True, prints detailed information during model loading. Defaults to True.
+        device (str | torch.device | None): Device specification for running the model, e.g., 'cpu', 'cuda:0', torch.device('cuda').
             Defaults to None.

     Returns:
-        torch.nn.Module: The instantiated YOLOv5-xlarge model.
+        torch.nn.Module: The YOLOv5-xlarge model loaded with the specified parameters, optionally with pretrained weights and
+            autoshaping applied.
Example: ```python @@ -288,7 +304,7 @@ def yolov5x(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=Tr model = torch.hub.load('ultralytics/yolov5', 'yolov5x') ``` - For more details and usage, refer to the official YOLOv5 PyTorch Hub models documentation: + For additional details, refer to the official YOLOv5 PyTorch Hub models documentation: https://pytorch.org/hub/ultralytics_yolov5 """ return _create("yolov5x", pretrained, channels, classes, autoshape, _verbose, device) @@ -308,7 +324,7 @@ def yolov5n6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=T Default is None. Returns: - torch.nn.Module: YOLOv5 model loaded with the specified configurations. + torch.nn.Module: YOLOv5-nano-P6 model loaded with the specified configurations. Example: ```python @@ -362,7 +378,7 @@ def yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=T def yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): """ - Creates YOLOv5-medium-P6 model with options for pretraining, channel count, class count, autoshaping, verbosity, and + Create YOLOv5-medium-P6 model with options for pretraining, channel count, class count, autoshaping, verbosity, and device. Args: @@ -389,15 +405,15 @@ def yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=T ``` Notes: - - The model can be loaded with pre-trained weights for better performance on specific tasks. - - The autoshape feature simplifies input handling by allowing various popular data formats. + - The model can be loaded with pre-trained weights for better performance on specific tasks. + - The autoshape feature simplifies input handling by allowing various popular data formats. """ return _create("yolov5m6", pretrained, channels, classes, autoshape, _verbose, device) def yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): """ - Instantiates the YOLOv5-large-P6 model with customizable pretraining, channel and class counts, autoshaping, + Instantiate the YOLOv5-large-P6 model with options for pretraining, channel and class counts, autoshaping, verbosity, and device selection. Args: @@ -407,12 +423,12 @@ def yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=T autoshape (bool, optional): If True, apply YOLOv5 .autoshape() wrapper to the model for input flexibility. Default is True. _verbose (bool, optional): If True, print all information to the screen. Default is True. device (str | torch.device | None, optional): Device to use for model parameters, e.g., 'cpu', 'cuda', or torch.device. - If None, automatically selects the best available device. Default is None. + If None, automatically selects the best available device. Default is None. Returns: torch.nn.Module: The instantiated YOLOv5-large-P6 model. - Usage: + Example: ```python import torch model = torch.hub.load('ultralytics/yolov5', 'yolov5l6') # official model diff --git a/train.py b/train.py index 9e3382787695..b4395d7e8d15 100644 --- a/train.py +++ b/train.py @@ -102,7 +102,7 @@ def train(hyp, opt, device, callbacks): """ - Trains a YOLOv5 model on a custom dataset using specified hyperparameters, options, and device, managing datasets, + Train a YOLOv5 model on a custom dataset using specified hyperparameters, options, and device, managing datasets, model architecture, loss computation, and optimizer steps. 
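As a quick aside before the argument reference below, this entry point can also be driven programmatically; a minimal sketch, assuming the YOLOv5 repository root is on `sys.path` (argument values are illustrative):

```python
# Hedged sketch: programmatic training via train.run(), which forwards keyword
# overrides into train() as described above. Argument values are illustrative.
import train  # assumes the YOLOv5 repository root is on sys.path

train.run(
    data="coco128.yaml",   # small COCO subset shipped with the repository
    weights="yolov5s.pt",  # initial weights, auto-downloaded when missing
    imgsz=640,             # train/val image size in pixels
    epochs=3,              # deliberately short run for demonstration
)
```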
Args: @@ -543,13 +543,13 @@ def lf(x): def parse_opt(known=False): """ - Parses command-line arguments for YOLOv5 training, validation, and testing. + Parse command-line arguments for YOLOv5 training, validation, and testing. Args: known (bool, optional): If True, parses known arguments, ignoring the unknown. Defaults to False. Returns: - argparse.Namespace: Parsed command-line arguments. + (argparse.Namespace): Parsed command-line arguments containing options for YOLOv5 execution. Example: ```python @@ -559,9 +559,9 @@ def parse_opt(known=False): ``` Links: - Models: https://github.com/ultralytics/yolov5/tree/master/models - Datasets: https://github.com/ultralytics/yolov5/tree/master/data - Tutorial: https://docs.ultralytics.com/yolov5/tutorials/train_custom_data + - Models: https://github.com/ultralytics/yolov5/tree/master/models + - Datasets: https://github.com/ultralytics/yolov5/tree/master/data + - Tutorial: https://docs.ultralytics.com/yolov5/tutorials/train_custom_data """ parser = argparse.ArgumentParser() parser.add_argument("--weights", type=str, default=ROOT / "yolov5s.pt", help="initial weights path") @@ -618,7 +618,7 @@ def parse_opt(known=False): def main(opt, callbacks=Callbacks()): """ - Runs training or hyperparameter evolution with specified options and optional callbacks. + Runs the main entry point for training or hyperparameter evolution with specified options and optional callbacks. Args: opt (argparse.Namespace): The command-line arguments parsed for YOLOv5 training and evolution. @@ -629,7 +629,7 @@ def main(opt, callbacks=Callbacks()): None Note: - For detailed usage, visit: + For detailed usage, refer to: https://github.com/ultralytics/yolov5/tree/master/models """ if RANK in {-1, 0}: @@ -888,23 +888,27 @@ def main(opt, callbacks=Callbacks()): def generate_individual(input_ranges, individual_length): """ - Generate a random individual with gene values within specified input ranges. + Generate an individual with random hyperparameters within specified ranges. Args: input_ranges (list[tuple[float, float]]): List of tuples where each tuple contains the lower and upper bounds - for the corresponding gene. - individual_length (int): The number of genes in the individual. + for the corresponding gene (hyperparameter). + individual_length (int): The number of genes (hyperparameters) in the individual. Returns: list[float]: A list representing a generated individual with random gene values within the specified ranges. - Examples: + Example: ```python input_ranges = [(0.01, 0.1), (0.1, 1.0), (0.9, 2.0)] individual_length = 3 individual = generate_individual(input_ranges, individual_length) print(individual) # Output: [0.035, 0.678, 1.456] (example output) ``` + + Note: + The individual returned will have a length equal to `individual_length`, with each gene value being a floating-point + number within its specified range in `input_ranges`. """ individual = [] for i in range(individual_length): @@ -915,7 +919,7 @@ def generate_individual(input_ranges, individual_length): def run(**kwargs): """ - Executes YOLOv5 training with given options, allowing optional overrides through keyword arguments. + Execute YOLOv5 training with specified options, allowing optional overrides through keyword arguments. Args: weights (str, optional): Path to initial weights. Defaults to ROOT / 'yolov5s.pt'. @@ -931,8 +935,9 @@ def run(**kwargs): noval (bool, optional): Only validate at the final epoch. Defaults to False. noautoanchor (bool, optional): Disable AutoAnchor. Defaults to False. 
         noplots (bool, optional): Do not save plot files. Defaults to False.
-        evolve (int, optional): Evolve hyperparameters for a specified number of generations. Use 300 if provided without a value.
-        evolve_population (str, optional): Directory for loading population during evolution. Defaults to ROOT / 'data/hyps'.
+        evolve (int, optional): Evolve hyperparameters for a specified number of generations. Use 300 if provided without a
+            value.
+        evolve_population (str, optional): Directory for loading population during evolution. Defaults to ROOT / 'data/hyps'.
         resume_evolve (str, optional): Resume hyperparameter evolution from the last generation. Defaults to None.
         bucket (str, optional): gsutil bucket for saving checkpoints. Defaults to an empty string.
         cache (str, optional): Cache image data in 'ram' or 'disk'. Defaults to None.
diff --git a/val.py b/val.py
index 218e0748dd5d..c4e1e402e445 100644
--- a/val.py
+++ b/val.py
@@ -66,8 +66,8 @@ def save_one_txt(predn, save_conf, shape, file):
     Saves one detection result to a txt file in normalized xywh format, optionally including confidence.

     Args:
-        predn (torch.Tensor): Predicted bounding boxes and associated confidence scores and classes in xyxy format,
-            tensor of shape (N, 6) where N is the number of detections.
+        predn (torch.Tensor): Predicted bounding boxes and associated confidence scores and classes in xyxy format, tensor
+            of shape (N, 6) where N is the number of detections.
         save_conf (bool): If True, saves the confidence scores along with the bounding box coordinates.
         shape (tuple): Shape of the original image as (height, width).
         file (str | Path): File path where the result will be saved.
@@ -219,7 +219,7 @@ def run(
     Evaluates a YOLOv5 model on a dataset and logs performance metrics.

     Args:
-        data (str | dict): Path to a dataset yaml file or a dataset dictionary.
+        data (str | dict): Path to a dataset YAML file or a dataset dictionary.
         weights (str | list[str], optional): Path to the model weights file(s). Supports various formats including PyTorch,
             TorchScript, ONNX, OpenVINO, TensorRT, CoreML, TensorFlow SavedModel, TensorFlow GraphDef, TensorFlow Lite,
             TensorFlow Edge TPU, and PaddlePaddle.
@@ -469,34 +469,34 @@ def run(
 def parse_opt():
     """
-    Parses command-line options for YOLOv5 model inference configuration.
+    Parse command-line options for configuring YOLOv5 model inference.

     Args:
-        data (str): Path to the dataset YAML file, default is 'data/coco128.yaml'.
-        weights (list[str]): List of paths to the model weight files, default is 'yolov5s.pt'.
-        batch_size (int): Batch size for inference, default is 32.
-        imgsz (int): Inference image size in pixels, default is 640.
-        conf_thres (float): Confidence threshold for predictions, default is 0.001.
-        iou_thres (float): IoU threshold for Non-Max Suppression (NMS), default is 0.6.
-        max_det (int): Maximum number of detections per image, default is 300.
-        task (str): Task type - options are 'train', 'val', 'test', 'speed', or 'study'. Default is 'val'.
-        device (str): Device to run the model on, e.g., '0' or '0,1,2,3' or 'cpu'. Default is empty to let the system choose automatically.
-        workers (int): Maximum number of dataloader workers per rank in DDP mode, default is 8.
-        single_cls (bool): If set, treats the dataset as a single-class dataset. Default is False.
-        augment (bool): If set, performs augmented inference. Default is False.
-        verbose (bool): If set, reports mAP by class. Default is False.
-        save_txt (bool): If set, saves results to *.txt files. Default is False.
- save_hybrid (bool): If set, saves label+prediction hybrid results to *.txt files. Default is False. - save_conf (bool): If set, saves confidences in --save-txt labels. Default is False. - save_json (bool): If set, saves results to a COCO-JSON file. Default is False. - project (str): Project directory to save results to. Default is 'runs/val'. - name (str): Name of the directory to save results to. Default is 'exp'. - exist_ok (bool): If set, existing directory will not be incremented. Default is False. - half (bool): If set, uses FP16 half-precision inference. Default is False. - dnn (bool): If set, uses OpenCV DNN for ONNX inference. Default is False. + data (str, optional): Path to the dataset YAML file. Default is 'data/coco128.yaml'. + weights (list[str], optional): List of paths to model weight files. Default is 'yolov5s.pt'. + batch_size (int, optional): Batch size for inference. Default is 32. + imgsz (int, optional): Inference image size in pixels. Default is 640. + conf_thres (float, optional): Confidence threshold for predictions. Default is 0.001. + iou_thres (float, optional): IoU threshold for Non-Max Suppression (NMS). Default is 0.6. + max_det (int, optional): Maximum number of detections per image. Default is 300. + task (str, optional): Task type - options are 'train', 'val', 'test', 'speed', or 'study'. Default is 'val'. + device (str, optional): Device to run the model on. e.g., '0' or '0,1,2,3' or 'cpu'. Default is empty to let the system choose automatically. + workers (int, optional): Maximum number of dataloader workers per rank in DDP mode. Default is 8. + single_cls (bool, optional): If set, treats the dataset as a single-class dataset. Default is False. + augment (bool, optional): If set, performs augmented inference. Default is False. + verbose (bool, optional): If set, reports mAP by class. Default is False. + save_txt (bool, optional): If set, saves results to *.txt files. Default is False. + save_hybrid (bool, optional): If set, saves label+prediction hybrid results to *.txt files. Default is False. + save_conf (bool, optional): If set, saves confidences in --save-txt labels. Default is False. + save_json (bool, optional): If set, saves results to a COCO-JSON file. Default is False. + project (str, optional): Project directory to save results to. Default is 'runs/val'. + name (str, optional): Name of the directory to save results to. Default is 'exp'. + exist_ok (bool, optional): If set, existing directory will not be incremented. Default is False. + half (bool, optional): If set, uses FP16 half-precision inference. Default is False. + dnn (bool, optional): If set, uses OpenCV DNN for ONNX inference. Default is False. Returns: - argparse.Namespace: Parsed command-line options + argparse.Namespace: Parsed command-line options. Notes: - The '--data' parameter is checked to ensure it ends with 'coco.yaml' if '--save-json' is set. @@ -508,7 +508,7 @@ def parse_opt(): ```python $ python val.py --weights yolov5s.pt --data coco128.yaml --img 640 ``` - Different model formats could be used instead of yolov5s.pt: + Different model formats could be used instead of `yolov5s.pt`: ```python $ python val.py --weights yolov5s.pt yolov5s.torchscript yolov5s.onnx yolov5s_openvino_model yolov5s.engine ``` @@ -551,17 +551,16 @@ def main(opt): Args: opt (argparse.Namespace): Parsed command-line options. 
- This includes values for parameters like 'data', 'weights', 'batch_size', 'imgsz', 'conf_thres', 'iou_thres', - 'max_det', 'task', 'device', 'workers', 'single_cls', 'augment', 'verbose', 'save_txt', 'save_hybrid', - 'save_conf', 'save_json', 'project', 'name', 'exist_ok', 'half', and 'dnn', essential for configuring - the YOLOv5 tasks. + This includes values for parameters like 'data', 'weights', 'batch_size', 'imgsz', 'conf_thres', + 'iou_thres', 'max_det', 'task', 'device', 'workers', 'single_cls', 'augment', 'verbose', 'save_txt', + 'save_hybrid', 'save_conf', 'save_json', 'project', 'name', 'exist_ok', 'half', and 'dnn', essential + for configuring the YOLOv5 tasks. Returns: None Examples: To validate a trained YOLOv5 model on the COCO dataset with a specific weights file, use: - ```python $ python val.py --weights yolov5s.pt --data coco128.yaml --img 640 ``` From 9223b2dacdba7885bc6d8a878572ba7f48b6fc0a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 17 Jul 2024 17:29:00 +0200 Subject: [PATCH 1921/1976] [Snyk] Security upgrade zipp from 3.15.0 to 3.19.1 (#13192) fix: utils/google_app_engine/additional_requirements.txt to reduce vulnerabilities The following vulnerabilities are fixed by pinning transitive dependencies: - https://snyk.io/vuln/SNYK-PYTHON-ZIPP-7430899 Co-authored-by: snyk-bot From 1d282a9346c0e7ffe595b65399b74cef492a5576 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 17 Jul 2024 17:29:10 +0200 Subject: [PATCH 1922/1976] [Snyk] Security upgrade setuptools from 68.0.0 to 70.0.0 (#13193) fix: requirements.txt to reduce vulnerabilities The following vulnerabilities are fixed by pinning transitive dependencies: - https://snyk.io/vuln/SNYK-PYTHON-SETUPTOOLS-7448482 Co-authored-by: snyk-bot --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 3b2674596258..e10fd0cfd7d9 100644 --- a/requirements.txt +++ b/requirements.txt @@ -39,7 +39,7 @@ seaborn>=0.11.0 # openvino-dev>=2023.0 # OpenVINO export # Deploy ---------------------------------------------------------------------- -setuptools>=65.5.1 # Snyk vulnerability fix +setuptools>=70.0.0 # Snyk vulnerability fix # tritonclient[all]~=2.24.0 # Extras ---------------------------------------------------------------------- From 8003649c79369c914692dd0277fc37ac17828aa2 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 17 Jul 2024 18:07:57 +0200 Subject: [PATCH 1923/1976] [Snyk] Security upgrade setuptools from 40.5.0 to 65.5.1 (#13197) fix: requirements.txt to reduce vulnerabilities The following vulnerabilities are fixed by pinning transitive dependencies: - https://snyk.io/vuln/SNYK-PYTHON-SETUPTOOLS-3180412 Co-authored-by: snyk-bot From ab64c8814a0b700786ae79b4dd431951af9da3fb Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 23 Jul 2024 00:44:02 +0200 Subject: [PATCH 1924/1976] Update format.yml with Issue autolabel (#13210) Update format.yml Signed-off-by: Glenn Jocher --- .github/workflows/format.yml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml index ee176abee996..8ded9d727ffc 100644 --- a/.github/workflows/format.yml +++ b/.github/workflows/format.yml @@ -5,8 +5,8 @@ name: Ultralytics Actions on: - push: - branches: [main, master] + issues: + types: [opened, edited] pull_request_target: branches: [main, master] @@ -17,7 +17,8 @@ jobs: - name: Run Ultralytics Formatting uses: ultralytics/actions@main with: - token: ${{ secrets.GITHUB_TOKEN 
}} # automatically generated, do not modify + token: ${{ secrets.PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }} # note GITHUB_TOKEN automatically generated + labels: true # autolabel issues and PRs python: true # format Python code and docstrings markdown: true # format Markdown prettier: true # format YAML From d6112173f5b2b809ec7f3ff1f8048ac0e465092c Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 23 Jul 2024 09:39:55 +0200 Subject: [PATCH 1925/1976] Update links.yml (#13206) Signed-off-by: Glenn Jocher Co-authored-by: Ultralytics Assistant <135830346+UltralyticsAssistant@users.noreply.github.com> --- .github/workflows/links.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/links.yml b/.github/workflows/links.yml index 80e2a48be71f..82c1f3f2348c 100644 --- a/.github/workflows/links.yml +++ b/.github/workflows/links.yml @@ -43,6 +43,7 @@ jobs: --exclude 'https?://(www\.)?(linkedin\.com|twitter\.com|instagram\.com|kaggle\.com|fonts\.gstatic\.com|url\.com)' \ --exclude-path '**/ci.yaml' \ --github-token ${{ secrets.GITHUB_TOKEN }} \ + --header "User-Agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.6478.183 Safari/537.36" \ './**/*.md' \ './**/*.html' @@ -63,6 +64,7 @@ jobs: --exclude 'https?://(www\.)?(linkedin\.com|twitter\.com|instagram\.com|kaggle\.com|fonts\.gstatic\.com|url\.com)' \ --exclude-path '**/ci.yaml' \ --github-token ${{ secrets.GITHUB_TOKEN }} \ + --header "User-Agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.6478.183 Safari/537.36" \ './**/*.md' \ './**/*.html' \ './**/*.yml' \ From 6deb2d75cb6b518111cadec8dd04b8b9018ee000 Mon Sep 17 00:00:00 2001 From: Sangbum Daniel Choi <34004152+SangbumChoi@users.noreply.github.com> Date: Thu, 25 Jul 2024 21:02:17 +0900 Subject: [PATCH 1926/1976] Remove deprecated `map_location` in export.py (#13217) Signed-off-by: Sangbum Daniel Choi <34004152+SangbumChoi@users.noreply.github.com> --- export.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/export.py b/export.py index 30944a93aff8..f5db08096025 100644 --- a/export.py +++ b/export.py @@ -256,7 +256,7 @@ def export_torchscript(model, im, file, optimize, prefix=colorstr("TorchScript:" # Load model weights = 'yolov5s.pt' device = select_device('') - model = attempt_load(weights, map_location=device) + model = attempt_load(weights, device=device) # Example input tensor im = torch.zeros(1, 3, 640, 640).to(device) From dcf124255839f1817321857951bb4abbb593c57a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 29 Jul 2024 02:08:29 +0200 Subject: [PATCH 1927/1976] [Snyk] Security upgrade setuptools from 40.5.0 to 70.0.0 (#13223) fix: requirements.txt to reduce vulnerabilities The following vulnerabilities are fixed by pinning transitive dependencies: - https://snyk.io/vuln/SNYK-PYTHON-SETUPTOOLS-7448482 Co-authored-by: snyk-bot From 6096750fcc85ee9c45896e81d93d80fd5e33dd2c Mon Sep 17 00:00:00 2001 From: Ryan Hirasaki <4690732+RyanHir@users.noreply.github.com> Date: Sun, 28 Jul 2024 19:09:57 -0500 Subject: [PATCH 1928/1976] Update CoreML exports to support newer *.mlpackage outputs (#13222) * Implement and default mlpackage generation for CoreML model exports Signed-off-by: Ryan Hirasaki * Provide command line argument to export as *.mlmodel instead of *.mlpackage for CoreML Signed-off-by: Ryan Hirasaki * Remove macOS check for CoreML quantization Requirements for macOS during quantization was removed from coremltools 6.0 Signed-off-by: 
Ryan Hirasaki * Undo removal of warning catching Signed-off-by: Ryan Hirasaki * Change file extension references from mlmodel to mlpackage Signed-off-by: Ryan Hirasaki * Auto-format by https://ultralytics.com/actions --------- Signed-off-by: Ryan Hirasaki Co-authored-by: UltralyticsAssistant Co-authored-by: Glenn Jocher Co-authored-by: Ultralytics Assistant <135830346+UltralyticsAssistant@users.noreply.github.com> --- .gitignore | 1 + benchmarks.py | 2 +- detect.py | 2 +- export.py | 66 ++++++++++++++++++++++++++++++++++++------------ models/common.py | 2 +- val.py | 2 +- 6 files changed, 55 insertions(+), 20 deletions(-) diff --git a/.gitignore b/.gitignore index 6bcedfac610d..d8b9c068bf0a 100755 --- a/.gitignore +++ b/.gitignore @@ -54,6 +54,7 @@ VOC/ *.onnx *.engine *.mlmodel +*.mlpackage *.torchscript *.tflite *.h5 diff --git a/benchmarks.py b/benchmarks.py index e92a645fbe84..996b8d438053 100644 --- a/benchmarks.py +++ b/benchmarks.py @@ -9,7 +9,7 @@ ONNX | `onnx` | yolov5s.onnx OpenVINO | `openvino` | yolov5s_openvino_model/ TensorRT | `engine` | yolov5s.engine -CoreML | `coreml` | yolov5s.mlmodel +CoreML | `coreml` | yolov5s.mlpackage TensorFlow SavedModel | `saved_model` | yolov5s_saved_model/ TensorFlow GraphDef | `pb` | yolov5s.pb TensorFlow Lite | `tflite` | yolov5s.tflite diff --git a/detect.py b/detect.py index 61dd8d5499cf..8a25ac235a3c 100644 --- a/detect.py +++ b/detect.py @@ -20,7 +20,7 @@ yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn yolov5s_openvino_model # OpenVINO yolov5s.engine # TensorRT - yolov5s.mlmodel # CoreML (macOS-only) + yolov5s.mlpackage # CoreML (macOS-only) yolov5s_saved_model # TensorFlow SavedModel yolov5s.pb # TensorFlow GraphDef yolov5s.tflite # TensorFlow Lite diff --git a/export.py b/export.py index f5db08096025..c1524ec2d48a 100644 --- a/export.py +++ b/export.py @@ -169,7 +169,7 @@ def export_formats(): ["ONNX", "onnx", ".onnx", True, True], ["OpenVINO", "openvino", "_openvino_model", True, False], ["TensorRT", "engine", ".engine", False, True], - ["CoreML", "coreml", ".mlmodel", True, False], + ["CoreML", "coreml", ".mlpackage", True, False], ["TensorFlow SavedModel", "saved_model", "_saved_model", True, True], ["TensorFlow GraphDef", "pb", ".pb", True, True], ["TensorFlow Lite", "tflite", ".tflite", True, False], @@ -520,7 +520,7 @@ def export_paddle(model, im, file, metadata, prefix=colorstr("PaddlePaddle:")): @try_export -def export_coreml(model, im, file, int8, half, nms, prefix=colorstr("CoreML:")): +def export_coreml(model, im, file, int8, half, nms, mlmodel, prefix=colorstr("CoreML:")): """ Export a YOLOv5 model to CoreML format with optional NMS, INT8, and FP16 support. @@ -531,6 +531,7 @@ def export_coreml(model, im, file, int8, half, nms, prefix=colorstr("CoreML:")): int8 (bool): Flag indicating whether to use INT8 quantization (default is False). half (bool): Flag indicating whether to use FP16 quantization (default is False). nms (bool): Flag indicating whether to include Non-Maximum Suppression (default is False). + mlmodel (bool): Flag indicating whether to export as older *.mlmodel format (default is False). prefix (str): Prefix string for logging purposes (default is 'CoreML:'). 
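For orientation, the effect of the new `mlmodel` flag can be distilled into a small, hedged sketch mirroring the selection logic this commit adds below; the helper name `coreml_target` is ours, not the repository's:

```python
# Hedged sketch of the container/precision selection introduced here. Assumes
# coremltools is installed and `file` is a pathlib.Path; helper name is ours.
import coremltools as ct

def coreml_target(file, half, mlmodel):
    """Return (output_path, convert_to, compute_precision) for ct.convert()."""
    if mlmodel:  # legacy *.mlmodel (neuralnetwork) container
        return file.with_suffix(".mlmodel"), "neuralnetwork", None
    precision = ct.precision.FLOAT16 if half else ct.precision.FLOAT32
    return file.with_suffix(".mlpackage"), "mlprogram", precision  # modern ML Program
```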
Returns: @@ -548,27 +549,46 @@ def export_coreml(model, im, file, int8, half, nms, prefix=colorstr("CoreML:")): model = Model(cfg, ch=3, nc=80) im = torch.randn(1, 3, 640, 640) file = Path("yolov5s_coreml") - export_coreml(model, im, file, int8=False, half=False, nms=True) + export_coreml(model, im, file, int8=False, half=False, nms=True, mlmodel=False) ``` """ check_requirements("coremltools") import coremltools as ct LOGGER.info(f"\n{prefix} starting export with coremltools {ct.__version__}...") - f = file.with_suffix(".mlmodel") + if mlmodel: + f = file.with_suffix(".mlmodel") + convert_to = "neuralnetwork" + precision = None + else: + f = file.with_suffix(".mlpackage") + convert_to = "mlprogram" + if half: + precision = ct.precision.FLOAT16 + else: + precision = ct.precision.FLOAT32 if nms: model = iOSModel(model, im) ts = torch.jit.trace(model, im, strict=False) # TorchScript model - ct_model = ct.convert(ts, inputs=[ct.ImageType("image", shape=im.shape, scale=1 / 255, bias=[0, 0, 0])]) - bits, mode = (8, "kmeans_lut") if int8 else (16, "linear") if half else (32, None) + ct_model = ct.convert( + ts, + inputs=[ct.ImageType("image", shape=im.shape, scale=1 / 255, bias=[0, 0, 0])], + convert_to=convert_to, + compute_precision=precision, + ) + bits, mode = (8, "kmeans") if int8 else (16, "linear") if half else (32, None) if bits < 32: - if MACOS: # quantization only supported on macOS + if mlmodel: with warnings.catch_warnings(): - warnings.filterwarnings("ignore", category=DeprecationWarning) # suppress numpy==1.20 float warning + warnings.filterwarnings( + "ignore", category=DeprecationWarning + ) # suppress numpy==1.20 float warning, fixed in coremltools==7.0 ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode) - else: - print(f"{prefix} quantization only supported on macOS, skipping...") + elif bits == 8: + op_config = ct.optimize.coreml.OpPalettizerConfig(mode=mode, nbits=bits, weight_threshold=512) + config = ct.optimize.coreml.OptimizationConfig(global_config=op_config) + ct_model = ct.optimize.coreml.palettize_weights(ct_model, config) ct_model.save(f) return f, ct_model @@ -1070,7 +1090,7 @@ def add_tflite_metadata(file, metadata, num_outputs): tmp_file.unlink() -def pipeline_coreml(model, im, file, names, y, prefix=colorstr("CoreML Pipeline:")): +def pipeline_coreml(model, im, file, names, y, mlmodel, prefix=colorstr("CoreML Pipeline:")): """ Convert a PyTorch YOLOv5 model to CoreML format with Non-Maximum Suppression (NMS), handling different input/output shapes, and saving the model. @@ -1082,6 +1102,7 @@ def pipeline_coreml(model, im, file, names, y, prefix=colorstr("CoreML Pipeline: file (Path): Path to save the converted CoreML model. names (dict[int, str]): Dictionary mapping class indices to class names. y (torch.Tensor): Output tensor from the PyTorch model's forward pass. + mlmodel (bool): Flag indicating whether to export as older *.mlmodel format (default is False). prefix (str): Custom prefix for logging messages. 
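Stepping back to the quantization branch added in `export_coreml` above, a hedged sketch of its two paths (legacy weight quantization versus ML Program weight palettization); the helper name is ours and `coremltools>=7.0` is assumed:

```python
# Hedged sketch of the INT8/FP16 handling added above. `ct_model` is assumed to
# be an already-converted CoreML model; helper name `quantize_coreml` is ours.
import coremltools as ct

def quantize_coreml(ct_model, bits, mode, mlmodel):
    if mlmodel:  # legacy container: classic weight quantization
        return ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode)
    if bits == 8:  # ML Program: 8-bit k-means weight palettization
        op_config = ct.optimize.coreml.OpPalettizerConfig(mode=mode, nbits=bits, weight_threshold=512)
        config = ct.optimize.coreml.OptimizationConfig(global_config=op_config)
        return ct.optimize.coreml.palettize_weights(ct_model, config)
    return ct_model  # FP16 is handled at convert time via compute_precision
```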
Returns: @@ -1114,6 +1135,11 @@ def pipeline_coreml(model, im, file, names, y, prefix=colorstr("CoreML Pipeline: import coremltools as ct from PIL import Image + if mlmodel: + f = file.with_suffix(".mlmodel") # filename + else: + f = file.with_suffix(".mlpackage") # filename + print(f"{prefix} starting pipeline with coremltools {ct.__version__}...") batch_size, ch, h, w = list(im.shape) # BCHW t = time.time() @@ -1156,7 +1182,12 @@ def pipeline_coreml(model, im, file, names, y, prefix=colorstr("CoreML Pipeline: print(spec.description) # Model from spec - model = ct.models.MLModel(spec) + weights_dir = None + if mlmodel: + weights_dir = None + else: + weights_dir = str(f / "Data/com.apple.CoreML/weights") + model = ct.models.MLModel(spec, weights_dir=weights_dir) # 3. Create NMS protobuf nms_spec = ct.proto.Model_pb2.Model() @@ -1227,8 +1258,7 @@ def pipeline_coreml(model, im, file, names, y, prefix=colorstr("CoreML Pipeline: ) # Save the model - f = file.with_suffix(".mlmodel") # filename - model = ct.models.MLModel(pipeline.spec) + model = ct.models.MLModel(pipeline.spec, weights_dir=weights_dir) model.input_description["image"] = "Input image" model.input_description["iouThreshold"] = f"(optional) IOU Threshold override (default: {nms.iouThreshold})" model.input_description["confidenceThreshold"] = ( @@ -1256,6 +1286,7 @@ def run( per_tensor=False, # TF per tensor quantization dynamic=False, # ONNX/TF/TensorRT: dynamic axes simplify=False, # ONNX: simplify model + mlmodel=False, # CoreML: Export in *.mlmodel format opset=12, # ONNX: opset version verbose=False, # TensorRT: verbose log workspace=4, # TensorRT: workspace size (GB) @@ -1293,6 +1324,7 @@ def run( topk_all (int): Top-K boxes for all classes to keep for TensorFlow.js NMS. Default is 100. iou_thres (float): IoU threshold for NMS. Default is 0.45. conf_thres (float): Confidence threshold for NMS. Default is 0.25. + mlmodel (bool): Flag to use *.mlmodel for CoreML export. Default is False. Returns: None @@ -1320,6 +1352,7 @@ def run( simplify=False, opset=12, verbose=False, + mlmodel=False, workspace=4, nms=False, agnostic_nms=False, @@ -1383,9 +1416,9 @@ def run( if xml: # OpenVINO f[3], _ = export_openvino(file, metadata, half, int8, data) if coreml: # CoreML - f[4], ct_model = export_coreml(model, im, file, int8, half, nms) + f[4], ct_model = export_coreml(model, im, file, int8, half, nms, mlmodel) if nms: - pipeline_coreml(ct_model, im, file, model.names, y) + pipeline_coreml(ct_model, im, file, model.names, y, mlmodel) if any((saved_model, pb, tflite, edgetpu, tfjs)): # TensorFlow formats assert not tflite or not tfjs, "TFLite and TF.js models must be exported separately, please pass only one type." assert not isinstance(model, ClassificationModel), "ClassificationModel export to TF formats not yet supported." 
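Putting the pieces together, a hedged usage sketch of requesting a CoreML export through the updated `run()` shown above (a macOS host with a working CoreML toolchain is assumed):

```python
# Hedged sketch: CoreML export via export.run() with the new `mlmodel` keyword.
# Assumes the YOLOv5 repository root is on sys.path and CoreML deps installed.
from export import run

run(
    weights="yolov5s.pt",  # PyTorch weights to convert
    include=("coreml",),   # request only the CoreML exporter
    nms=True,              # also attach the NMS pipeline
    mlmodel=False,         # False -> modern *.mlpackage output
)
```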
@@ -1473,6 +1506,7 @@ def parse_opt(known=False): parser.add_argument("--per-tensor", action="store_true", help="TF per-tensor quantization") parser.add_argument("--dynamic", action="store_true", help="ONNX/TF/TensorRT: dynamic axes") parser.add_argument("--simplify", action="store_true", help="ONNX: simplify model") + parser.add_argument("--mlmodel", action="store_true", help="CoreML: Export in *.mlmodel format") parser.add_argument("--opset", type=int, default=17, help="ONNX: opset version") parser.add_argument("--verbose", action="store_true", help="TensorRT: verbose log") parser.add_argument("--workspace", type=int, default=4, help="TensorRT: workspace size (GB)") diff --git a/models/common.py b/models/common.py index 049dfc0b9e00..1e0ffdd3abdb 100644 --- a/models/common.py +++ b/models/common.py @@ -444,7 +444,7 @@ def __init__(self, weights="yolov5s.pt", device=torch.device("cpu"), dnn=False, # ONNX Runtime: *.onnx # ONNX OpenCV DNN: *.onnx --dnn # OpenVINO: *_openvino_model - # CoreML: *.mlmodel + # CoreML: *.mlpackage # TensorRT: *.engine # TensorFlow SavedModel: *_saved_model # TensorFlow GraphDef: *.pb diff --git a/val.py b/val.py index c4e1e402e445..b8db6122f54a 100644 --- a/val.py +++ b/val.py @@ -11,7 +11,7 @@ yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn yolov5s_openvino_model # OpenVINO yolov5s.engine # TensorRT - yolov5s.mlmodel # CoreML (macOS-only) + yolov5s.mlpackage # CoreML (macOS-only) yolov5s_saved_model # TensorFlow SavedModel yolov5s.pb # TensorFlow GraphDef yolov5s.tflite # TensorFlow Lite From 19ce90290c6bd013b69dbe037e8ef1bc3d3c32d0 Mon Sep 17 00:00:00 2001 From: Kayzwer <68285002+Kayzwer@users.noreply.github.com> Date: Sun, 11 Aug 2024 16:31:53 +0800 Subject: [PATCH 1929/1976] Replace ONNXSim with ONNXSlim (#13236) Signed-off-by: Kayzwer <68285002+Kayzwer@users.noreply.github.com> --- export.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/export.py b/export.py index c1524ec2d48a..dfb1c06fb5e2 100644 --- a/export.py +++ b/export.py @@ -368,12 +368,11 @@ def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr("ONNX if simplify: try: cuda = torch.cuda.is_available() - check_requirements(("onnxruntime-gpu" if cuda else "onnxruntime", "onnx-simplifier>=0.4.1")) - import onnxsim + check_requirements(("onnxruntime-gpu" if cuda else "onnxruntime", "onnxslim")) + import onnxslim - LOGGER.info(f"{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...") - model_onnx, check = onnxsim.simplify(model_onnx) - assert check, "assert check failed" + LOGGER.info(f"{prefix} slimming with onnxslim {onnxslim.__version__}...") + model_onnx = onnxslim.slim(model_onnx) onnx.save(model_onnx, f) except Exception as e: LOGGER.info(f"{prefix} simplifier failure: {e}") From 6f3e69d92fd3ff9156e6790f7ce8ad5b5d5a306d Mon Sep 17 00:00:00 2001 From: Ultralytics Assistant <135830346+UltralyticsAssistant@users.noreply.github.com> Date: Wed, 14 Aug 2024 20:57:35 +0200 Subject: [PATCH 1930/1976] Ultralytics Actions JSON, CSS and autolabel support (#13260) * Update Ultralytics Actions * Update format.yml Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- .github/workflows/format.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml index 8ded9d727ffc..7ba7e5822a72 100644 --- a/.github/workflows/format.yml +++ b/.github/workflows/format.yml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# 
Ultralytics 🚀 - AGPL-3.0 License https://ultralytics.com/license # Ultralytics Actions https://github.com/ultralytics/actions # This workflow automatically formats code and documentation in PRs to official Ultralytics standards @@ -6,9 +6,10 @@ name: Ultralytics Actions on: issues: - types: [opened, edited] + types: [opened] pull_request_target: - branches: [main, master] + branches: [main,master] + types: [opened, closed, synchronize, review_requested] jobs: format: @@ -20,10 +21,9 @@ jobs: token: ${{ secrets.PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }} # note GITHUB_TOKEN automatically generated labels: true # autolabel issues and PRs python: true # format Python code and docstrings - markdown: true # format Markdown - prettier: true # format YAML + prettier: true # format YAML, JSON, Markdown and CSS spelling: true # check spelling links: false # check broken links - summary: true # print PR summary with GPT4 (requires 'openai_api_key' or 'openai_azure_api_key' and 'openai_azure_endpoint') + summary: true # print PR summary with GPT4o (requires 'openai_api_key') openai_azure_api_key: ${{ secrets.OPENAI_AZURE_API_KEY }} openai_azure_endpoint: ${{ secrets.OPENAI_AZURE_ENDPOINT }} From 5eca7b9cec69f6ee74627272ce5445959cb351db Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 15 Aug 2024 04:22:18 +0800 Subject: [PATCH 1931/1976] YOLO Vision 2024 updates https://ultralytics.com/events/yolovision (#13261) * Refactor code for speed and clarity * Update README.md Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.zh-CN.md Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher --- .github/workflows/format.yml | 2 +- README.md | 6 +----- README.zh-CN.md | 6 +----- 3 files changed, 3 insertions(+), 11 deletions(-) diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml index 7ba7e5822a72..b326e778da64 100644 --- a/.github/workflows/format.yml +++ b/.github/workflows/format.yml @@ -8,7 +8,7 @@ on: issues: types: [opened] pull_request_target: - branches: [main,master] + branches: [main, master] types: [opened, closed, synchronize, review_requested] jobs: diff --git a/README.md b/README.md index 394aeacdbbaf..766f30b89e85 100644 --- a/README.md +++ b/README.md @@ -1,11 +1,7 @@

- + -

[中文](https://docs.ultralytics.com/zh/) | [한국어](https://docs.ultralytics.com/ko/) | [日本語](https://docs.ultralytics.com/ja/) | [Русский](https://docs.ultralytics.com/ru/) | [Deutsch](https://docs.ultralytics.com/de/) | [Français](https://docs.ultralytics.com/fr/) | [Español](https://docs.ultralytics.com/es/) | [Português](https://docs.ultralytics.com/pt/) | [Türkçe](https://docs.ultralytics.com/tr/) | [Tiếng Việt](https://docs.ultralytics.com/vi/) | [हिन्दी](https://docs.ultralytics.com/hi/) | [العربية](https://docs.ultralytics.com/ar/) diff --git a/README.zh-CN.md b/README.zh-CN.md index a24d7a2d44e3..cc0d6fe1a738 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -1,11 +1,7 @@

- + -

[中文](https://docs.ultralytics.com/zh/) | [한국어](https://docs.ultralytics.com/ko/) | [日本語](https://docs.ultralytics.com/ja/) | [Русский](https://docs.ultralytics.com/ru/) | [Deutsch](https://docs.ultralytics.com/de/) | [Français](https://docs.ultralytics.com/fr/) | [Español](https://docs.ultralytics.com/es/) | [Português](https://docs.ultralytics.com/pt/) | [Türkçe](https://docs.ultralytics.com/tr/) | [Tiếng Việt](https://docs.ultralytics.com/vi/) | [हिन्दी](https://docs.ultralytics.com/hi/) | [العربية](https://docs.ultralytics.com/ar/) From b86f316eaf618ca0e0532283104dc5d5411ff82b Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 20 Aug 2024 05:02:37 +0800 Subject: [PATCH 1932/1976] Ultralytics Code Refactor https://ultralytics.com/actions (#13268) * Refactor code for speed and clarity * Auto-format by https://ultralytics.com/actions --------- Co-authored-by: UltralyticsAssistant --- README.md | 2 +- README.zh-CN.md | 2 +- classify/tutorial.ipynb | 2935 ++++++++++++++++++++------------------- segment/tutorial.ipynb | 1163 ++++++++-------- 4 files changed, 2058 insertions(+), 2044 deletions(-) diff --git a/README.md b/README.md index 766f30b89e85..ac8b63af87cf 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@

-[中文](https://docs.ultralytics.com/zh/) | [한국어](https://docs.ultralytics.com/ko/) | [日本語](https://docs.ultralytics.com/ja/) | [Русский](https://docs.ultralytics.com/ru/) | [Deutsch](https://docs.ultralytics.com/de/) | [Français](https://docs.ultralytics.com/fr/) | [Español](https://docs.ultralytics.com/es/) | [Português](https://docs.ultralytics.com/pt/) | [Türkçe](https://docs.ultralytics.com/tr/) | [Tiếng Việt](https://docs.ultralytics.com/vi/) | [हिन्दी](https://docs.ultralytics.com/hi/) | [العربية](https://docs.ultralytics.com/ar/) +[中文](https://docs.ultralytics.com/zh/) | [한국어](https://docs.ultralytics.com/ko/) | [日本語](https://docs.ultralytics.com/ja/) | [Русский](https://docs.ultralytics.com/ru/) | [Deutsch](https://docs.ultralytics.com/de/) | [Français](https://docs.ultralytics.com/fr/) | [Español](https://docs.ultralytics.com/es/) | [Português](https://docs.ultralytics.com/pt/) | [Türkçe](https://docs.ultralytics.com/tr/) | [Tiếng Việt](https://docs.ultralytics.com/vi/) | [العربية](https://docs.ultralytics.com/ar/)
YOLOv5 CI diff --git a/README.zh-CN.md b/README.zh-CN.md index cc0d6fe1a738..bb45872a0577 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -4,7 +4,7 @@

-[中文](https://docs.ultralytics.com/zh/) | [한국어](https://docs.ultralytics.com/ko/) | [日本語](https://docs.ultralytics.com/ja/) | [Русский](https://docs.ultralytics.com/ru/) | [Deutsch](https://docs.ultralytics.com/de/) | [Français](https://docs.ultralytics.com/fr/) | [Español](https://docs.ultralytics.com/es/) | [Português](https://docs.ultralytics.com/pt/) | [Türkçe](https://docs.ultralytics.com/tr/) | [Tiếng Việt](https://docs.ultralytics.com/vi/) | [हिन्दी](https://docs.ultralytics.com/hi/) | [العربية](https://docs.ultralytics.com/ar/) +[中文](https://docs.ultralytics.com/zh/) | [한국어](https://docs.ultralytics.com/ko/) | [日本語](https://docs.ultralytics.com/ja/) | [Русский](https://docs.ultralytics.com/ru/) | [Deutsch](https://docs.ultralytics.com/de/) | [Français](https://docs.ultralytics.com/fr/) | [Español](https://docs.ultralytics.com/es/) | [Português](https://docs.ultralytics.com/pt/) | [Türkçe](https://docs.ultralytics.com/tr/) | [Tiếng Việt](https://docs.ultralytics.com/vi/) | [العربية](https://docs.ultralytics.com/ar/)
YOLOv5 CI diff --git a/classify/tutorial.ipynb b/classify/tutorial.ipynb index 77c2b94a1b18..e3bfbf674d31 100644 --- a/classify/tutorial.ipynb +++ b/classify/tutorial.ipynb @@ -1,1481 +1,1488 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "id": "t6MPjfT5NrKQ" - }, - "source": [ - "
\n", - "\n", - " \n", - " \n", - "\n", - "\n", - "
\n", - " \"Run\n", - " \"Open\n", - " \"Open\n", - "
\n", - "\n", - "This YOLOv5 🚀 notebook by Ultralytics presents simple train, validate and predict examples to help start your AI adventure.
See GitHub for community support or contact us for professional support.\n", - "\n", - "
" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "7mGmQbAO5pQb" - }, - "source": [ - "# Setup\n", - "\n", - "Clone GitHub [repository](https://github.com/ultralytics/yolov5), install [dependencies](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) and check PyTorch and GPU." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "wbvMlHd_QwMG", - "outputId": "0806e375-610d-4ec0-c867-763dbb518279" - }, - "outputs": [ - { - "output_type": "stream", - "name": "stderr", - "text": [ - "YOLOv5 🚀 v7.0-3-g61ebf5e Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n" - ] - }, - { - "output_type": "stream", - "name": "stdout", - "text": [ - "Setup complete ✅ (2 CPUs, 12.7 GB RAM, 22.6/78.2 GB disk)\n" - ] - } - ], - "source": [ - "!git clone https://github.com/ultralytics/yolov5 # clone\n", - "%cd yolov5\n", - "%pip install -qr requirements.txt # install\n", - "\n", - "import torch\n", - "import utils\n", - "display = utils.notebook_init() # checks" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "4JnkELT0cIJg" - }, - "source": [ - "# 1. Predict\n", - "\n", - "`classify/predict.py` runs YOLOv5 Classification inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/predict-cls`. Example inference sources are:\n", - "\n", - "```shell\n", - "python classify/predict.py --source 0 # webcam\n", - " img.jpg # image \n", - " vid.mp4 # video\n", - " screen # screenshot\n", - " path/ # directory\n", - " 'path/*.jpg' # glob\n", - " 'https://youtu.be/LNwODJXcvt4' # YouTube\n", - " 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "zR9ZbuQCH7FX", - "outputId": "50504ef7-aa3e-4281-a4e3-d0c7df3c0ffe" - }, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "\u001b[34m\u001b[1mclassify/predict: \u001b[0mweights=['yolov5s-cls.pt'], source=data/images, data=data/coco128.yaml, imgsz=[224, 224], device=, view_img=False, save_txt=False, nosave=False, augment=False, visualize=False, update=False, project=runs/predict-cls, name=exp, exist_ok=False, half=False, dnn=False, vid_stride=1\n", - "YOLOv5 🚀 v7.0-3-g61ebf5e Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", - "\n", - "Downloading https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-cls.pt to yolov5s-cls.pt...\n", - "100% 10.5M/10.5M [00:00<00:00, 12.3MB/s]\n", - "\n", - "Fusing layers... 
\n", - "Model summary: 117 layers, 5447688 parameters, 0 gradients, 11.4 GFLOPs\n", - "image 1/2 /content/yolov5/data/images/bus.jpg: 224x224 minibus 0.39, police van 0.24, amphibious vehicle 0.05, recreational vehicle 0.04, trolleybus 0.03, 3.9ms\n", - "image 2/2 /content/yolov5/data/images/zidane.jpg: 224x224 suit 0.38, bow tie 0.19, bridegroom 0.18, rugby ball 0.04, stage 0.02, 4.6ms\n", - "Speed: 0.3ms pre-process, 4.3ms inference, 1.5ms NMS per image at shape (1, 3, 224, 224)\n", - "Results saved to \u001b[1mruns/predict-cls/exp\u001b[0m\n" - ] - } - ], - "source": [ - "!python classify/predict.py --weights yolov5s-cls.pt --img 224 --source data/images\n", - "# display.Image(filename='runs/predict-cls/exp/zidane.jpg', width=600)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "hkAzDWJ7cWTr" - }, - "source": [ - "        \n", - "" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "0eq1SMWl6Sfn" - }, - "source": [ - "# 2. Validate\n", - "Validate a model's accuracy on the [Imagenet](https://image-net.org/) dataset's `val` or `test` splits. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "WQPtK1QYVaD_", - "outputId": "20fc0630-141e-4a90-ea06-342cbd7ce496" - }, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "--2022-11-22 19:53:40-- https://image-net.org/data/ILSVRC/2012/ILSVRC2012_img_val.tar\n", - "Resolving image-net.org (image-net.org)... 171.64.68.16\n", - "Connecting to image-net.org (image-net.org)|171.64.68.16|:443... connected.\n", - "HTTP request sent, awaiting response... 200 OK\n", - "Length: 6744924160 (6.3G) [application/x-tar]\n", - "Saving to: ‘ILSVRC2012_img_val.tar’\n", - "\n", - "ILSVRC2012_img_val. 100%[===================>] 6.28G 16.1MB/s in 10m 52s \n", - "\n", - "2022-11-22 20:04:32 (9.87 MB/s) - ‘ILSVRC2012_img_val.tar’ saved [6744924160/6744924160]\n", - "\n" - ] - } - ], - "source": [ - "# Download Imagenet val (6.3G, 50000 images)\n", - "!bash data/scripts/get_imagenet.sh --val" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "X58w8JLpMnjH", - "outputId": "41843132-98e2-4c25-d474-4cd7b246fb8e" - }, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "\u001b[34m\u001b[1mclassify/val: \u001b[0mdata=../datasets/imagenet, weights=['yolov5s-cls.pt'], batch_size=128, imgsz=224, device=, workers=8, verbose=True, project=runs/val-cls, name=exp, exist_ok=False, half=True, dnn=False\n", - "YOLOv5 🚀 v7.0-3-g61ebf5e Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", - "\n", - "Fusing layers... 
\n", - "Model summary: 117 layers, 5447688 parameters, 0 gradients, 11.4 GFLOPs\n", - "validating: 100% 391/391 [04:57<00:00, 1.31it/s]\n", - " Class Images top1_acc top5_acc\n", - " all 50000 0.715 0.902\n", - " tench 50 0.94 0.98\n", - " goldfish 50 0.88 0.92\n", - " great white shark 50 0.78 0.96\n", - " tiger shark 50 0.68 0.96\n", - " hammerhead shark 50 0.82 0.92\n", - " electric ray 50 0.76 0.9\n", - " stingray 50 0.7 0.9\n", - " cock 50 0.78 0.92\n", - " hen 50 0.84 0.96\n", - " ostrich 50 0.98 1\n", - " brambling 50 0.9 0.96\n", - " goldfinch 50 0.92 0.98\n", - " house finch 50 0.88 0.96\n", - " junco 50 0.94 0.98\n", - " indigo bunting 50 0.86 0.88\n", - " American robin 50 0.9 0.96\n", - " bulbul 50 0.84 0.96\n", - " jay 50 0.9 0.96\n", - " magpie 50 0.84 0.96\n", - " chickadee 50 0.9 1\n", - " American dipper 50 0.82 0.92\n", - " kite 50 0.76 0.94\n", - " bald eagle 50 0.92 1\n", - " vulture 50 0.96 1\n", - " great grey owl 50 0.94 0.98\n", - " fire salamander 50 0.96 0.98\n", - " smooth newt 50 0.58 0.94\n", - " newt 50 0.74 0.9\n", - " spotted salamander 50 0.86 0.94\n", - " axolotl 50 0.86 0.96\n", - " American bullfrog 50 0.78 0.92\n", - " tree frog 50 0.84 0.96\n", - " tailed frog 50 0.48 0.8\n", - " loggerhead sea turtle 50 0.68 0.94\n", - " leatherback sea turtle 50 0.5 0.8\n", - " mud turtle 50 0.64 0.84\n", - " terrapin 50 0.52 0.98\n", - " box turtle 50 0.84 0.98\n", - " banded gecko 50 0.7 0.88\n", - " green iguana 50 0.76 0.94\n", - " Carolina anole 50 0.58 0.96\n", - "desert grassland whiptail lizard 50 0.82 0.94\n", - " agama 50 0.74 0.92\n", - " frilled-necked lizard 50 0.84 0.86\n", - " alligator lizard 50 0.58 0.78\n", - " Gila monster 50 0.72 0.8\n", - " European green lizard 50 0.42 0.9\n", - " chameleon 50 0.76 0.84\n", - " Komodo dragon 50 0.86 0.96\n", - " Nile crocodile 50 0.7 0.84\n", - " American alligator 50 0.76 0.96\n", - " triceratops 50 0.9 0.94\n", - " worm snake 50 0.76 0.88\n", - " ring-necked snake 50 0.8 0.92\n", - " eastern hog-nosed snake 50 0.58 0.88\n", - " smooth green snake 50 0.6 0.94\n", - " kingsnake 50 0.82 0.9\n", - " garter snake 50 0.88 0.94\n", - " water snake 50 0.7 0.94\n", - " vine snake 50 0.66 0.76\n", - " night snake 50 0.34 0.82\n", - " boa constrictor 50 0.8 0.96\n", - " African rock python 50 0.48 0.76\n", - " Indian cobra 50 0.82 0.94\n", - " green mamba 50 0.54 0.86\n", - " sea snake 50 0.62 0.9\n", - " Saharan horned viper 50 0.56 0.86\n", - "eastern diamondback rattlesnake 50 0.6 0.86\n", - " sidewinder 50 0.28 0.86\n", - " trilobite 50 0.98 0.98\n", - " harvestman 50 0.86 0.94\n", - " scorpion 50 0.86 0.94\n", - " yellow garden spider 50 0.92 0.96\n", - " barn spider 50 0.38 0.98\n", - " European garden spider 50 0.62 0.98\n", - " southern black widow 50 0.88 0.94\n", - " tarantula 50 0.94 1\n", - " wolf spider 50 0.82 0.92\n", - " tick 50 0.74 0.84\n", - " centipede 50 0.68 0.82\n", - " black grouse 50 0.88 0.98\n", - " ptarmigan 50 0.78 0.94\n", - " ruffed grouse 50 0.88 1\n", - " prairie grouse 50 0.92 1\n", - " peacock 50 0.88 0.9\n", - " quail 50 0.9 0.94\n", - " partridge 50 0.74 0.96\n", - " grey parrot 50 0.9 0.96\n", - " macaw 50 0.88 0.98\n", - "sulphur-crested cockatoo 50 0.86 0.92\n", - " lorikeet 50 0.96 1\n", - " coucal 50 0.82 0.88\n", - " bee eater 50 0.96 0.98\n", - " hornbill 50 0.9 0.96\n", - " hummingbird 50 0.88 0.96\n", - " jacamar 50 0.92 0.94\n", - " toucan 50 0.84 0.94\n", - " duck 50 0.76 0.94\n", - " red-breasted merganser 50 0.86 0.96\n", - " goose 50 0.74 0.96\n", - " black swan 50 0.94 
0.98\n", - " tusker 50 0.54 0.92\n", - " echidna 50 0.98 1\n", - " platypus 50 0.72 0.84\n", - " wallaby 50 0.78 0.88\n", - " koala 50 0.84 0.92\n", - " wombat 50 0.78 0.84\n", - " jellyfish 50 0.88 0.96\n", - " sea anemone 50 0.72 0.9\n", - " brain coral 50 0.88 0.96\n", - " flatworm 50 0.8 0.98\n", - " nematode 50 0.86 0.9\n", - " conch 50 0.74 0.88\n", - " snail 50 0.78 0.88\n", - " slug 50 0.74 0.82\n", - " sea slug 50 0.88 0.98\n", - " chiton 50 0.88 0.98\n", - " chambered nautilus 50 0.88 0.92\n", - " Dungeness crab 50 0.78 0.94\n", - " rock crab 50 0.68 0.86\n", - " fiddler crab 50 0.64 0.86\n", - " red king crab 50 0.76 0.96\n", - " American lobster 50 0.78 0.96\n", - " spiny lobster 50 0.74 0.88\n", - " crayfish 50 0.56 0.86\n", - " hermit crab 50 0.78 0.96\n", - " isopod 50 0.66 0.78\n", - " white stork 50 0.88 0.96\n", - " black stork 50 0.84 0.98\n", - " spoonbill 50 0.96 1\n", - " flamingo 50 0.94 1\n", - " little blue heron 50 0.92 0.98\n", - " great egret 50 0.9 0.96\n", - " bittern 50 0.86 0.94\n", - " crane (bird) 50 0.62 0.9\n", - " limpkin 50 0.98 1\n", - " common gallinule 50 0.92 0.96\n", - " American coot 50 0.9 0.98\n", - " bustard 50 0.92 0.96\n", - " ruddy turnstone 50 0.94 1\n", - " dunlin 50 0.86 0.94\n", - " common redshank 50 0.9 0.96\n", - " dowitcher 50 0.84 0.96\n", - " oystercatcher 50 0.86 0.94\n", - " pelican 50 0.92 0.96\n", - " king penguin 50 0.88 0.96\n", - " albatross 50 0.9 1\n", - " grey whale 50 0.84 0.92\n", - " killer whale 50 0.92 1\n", - " dugong 50 0.84 0.96\n", - " sea lion 50 0.82 0.92\n", - " Chihuahua 50 0.66 0.84\n", - " Japanese Chin 50 0.72 0.98\n", - " Maltese 50 0.76 0.94\n", - " Pekingese 50 0.84 0.94\n", - " Shih Tzu 50 0.74 0.96\n", - " King Charles Spaniel 50 0.88 0.98\n", - " Papillon 50 0.86 0.94\n", - " toy terrier 50 0.48 0.94\n", - " Rhodesian Ridgeback 50 0.76 0.98\n", - " Afghan Hound 50 0.84 1\n", - " Basset Hound 50 0.8 0.92\n", - " Beagle 50 0.82 0.96\n", - " Bloodhound 50 0.48 0.72\n", - " Bluetick Coonhound 50 0.86 0.94\n", - " Black and Tan Coonhound 50 0.54 0.8\n", - "Treeing Walker Coonhound 50 0.66 0.98\n", - " English foxhound 50 0.32 0.84\n", - " Redbone Coonhound 50 0.62 0.94\n", - " borzoi 50 0.92 1\n", - " Irish Wolfhound 50 0.48 0.88\n", - " Italian Greyhound 50 0.76 0.98\n", - " Whippet 50 0.74 0.92\n", - " Ibizan Hound 50 0.6 0.86\n", - " Norwegian Elkhound 50 0.88 0.98\n", - " Otterhound 50 0.62 0.9\n", - " Saluki 50 0.72 0.92\n", - " Scottish Deerhound 50 0.86 0.98\n", - " Weimaraner 50 0.88 0.94\n", - "Staffordshire Bull Terrier 50 0.66 0.98\n", - "American Staffordshire Terrier 50 0.64 0.92\n", - " Bedlington Terrier 50 0.9 0.92\n", - " Border Terrier 50 0.86 0.92\n", - " Kerry Blue Terrier 50 0.78 0.98\n", - " Irish Terrier 50 0.7 0.96\n", - " Norfolk Terrier 50 0.68 0.9\n", - " Norwich Terrier 50 0.72 1\n", - " Yorkshire Terrier 50 0.66 0.9\n", - " Wire Fox Terrier 50 0.64 0.98\n", - " Lakeland Terrier 50 0.74 0.92\n", - " Sealyham Terrier 50 0.76 0.9\n", - " Airedale Terrier 50 0.82 0.92\n", - " Cairn Terrier 50 0.76 0.9\n", - " Australian Terrier 50 0.48 0.84\n", - " Dandie Dinmont Terrier 50 0.82 0.92\n", - " Boston Terrier 50 0.92 1\n", - " Miniature Schnauzer 50 0.68 0.9\n", - " Giant Schnauzer 50 0.72 0.98\n", - " Standard Schnauzer 50 0.74 1\n", - " Scottish Terrier 50 0.76 0.96\n", - " Tibetan Terrier 50 0.48 1\n", - "Australian Silky Terrier 50 0.66 0.96\n", - "Soft-coated Wheaten Terrier 50 0.74 0.96\n", - "West Highland White Terrier 50 0.88 0.96\n", - " Lhasa Apso 50 0.68 0.96\n", - " 
Flat-Coated Retriever 50 0.72 0.94\n", - " Curly-coated Retriever 50 0.82 0.94\n", - " Golden Retriever 50 0.86 0.94\n", - " Labrador Retriever 50 0.82 0.94\n", - "Chesapeake Bay Retriever 50 0.76 0.96\n", - "German Shorthaired Pointer 50 0.8 0.96\n", - " Vizsla 50 0.68 0.96\n", - " English Setter 50 0.7 1\n", - " Irish Setter 50 0.8 0.9\n", - " Gordon Setter 50 0.84 0.92\n", - " Brittany 50 0.84 0.96\n", - " Clumber Spaniel 50 0.92 0.96\n", - "English Springer Spaniel 50 0.88 1\n", - " Welsh Springer Spaniel 50 0.92 1\n", - " Cocker Spaniels 50 0.7 0.94\n", - " Sussex Spaniel 50 0.72 0.92\n", - " Irish Water Spaniel 50 0.88 0.98\n", - " Kuvasz 50 0.66 0.9\n", - " Schipperke 50 0.9 0.98\n", - " Groenendael 50 0.8 0.94\n", - " Malinois 50 0.86 0.98\n", - " Briard 50 0.52 0.8\n", - " Australian Kelpie 50 0.6 0.88\n", - " Komondor 50 0.88 0.94\n", - " Old English Sheepdog 50 0.94 0.98\n", - " Shetland Sheepdog 50 0.74 0.9\n", - " collie 50 0.6 0.96\n", - " Border Collie 50 0.74 0.96\n", - " Bouvier des Flandres 50 0.78 0.94\n", - " Rottweiler 50 0.88 0.96\n", - " German Shepherd Dog 50 0.8 0.98\n", - " Dobermann 50 0.68 0.96\n", - " Miniature Pinscher 50 0.76 0.88\n", - "Greater Swiss Mountain Dog 50 0.68 0.94\n", - " Bernese Mountain Dog 50 0.96 1\n", - " Appenzeller Sennenhund 50 0.22 1\n", - " Entlebucher Sennenhund 50 0.64 0.98\n", - " Boxer 50 0.7 0.92\n", - " Bullmastiff 50 0.78 0.98\n", - " Tibetan Mastiff 50 0.88 0.96\n", - " French Bulldog 50 0.84 0.94\n", - " Great Dane 50 0.54 0.9\n", - " St. Bernard 50 0.92 1\n", - " husky 50 0.46 0.98\n", - " Alaskan Malamute 50 0.76 0.96\n", - " Siberian Husky 50 0.46 0.98\n", - " Dalmatian 50 0.94 0.98\n", - " Affenpinscher 50 0.78 0.9\n", - " Basenji 50 0.92 0.94\n", - " pug 50 0.94 0.98\n", - " Leonberger 50 1 1\n", - " Newfoundland 50 0.78 0.96\n", - " Pyrenean Mountain Dog 50 0.78 0.96\n", - " Samoyed 50 0.96 1\n", - " Pomeranian 50 0.98 1\n", - " Chow Chow 50 0.9 0.96\n", - " Keeshond 50 0.88 0.94\n", - " Griffon Bruxellois 50 0.84 0.98\n", - " Pembroke Welsh Corgi 50 0.82 0.94\n", - " Cardigan Welsh Corgi 50 0.66 0.98\n", - " Toy Poodle 50 0.52 0.88\n", - " Miniature Poodle 50 0.52 0.92\n", - " Standard Poodle 50 0.8 1\n", - " Mexican hairless dog 50 0.88 0.98\n", - " grey wolf 50 0.82 0.92\n", - " Alaskan tundra wolf 50 0.78 0.98\n", - " red wolf 50 0.48 0.9\n", - " coyote 50 0.64 0.86\n", - " dingo 50 0.76 0.88\n", - " dhole 50 0.9 0.98\n", - " African wild dog 50 0.98 1\n", - " hyena 50 0.88 0.96\n", - " red fox 50 0.54 0.92\n", - " kit fox 50 0.72 0.98\n", - " Arctic fox 50 0.94 1\n", - " grey fox 50 0.7 0.94\n", - " tabby cat 50 0.54 0.92\n", - " tiger cat 50 0.22 0.94\n", - " Persian cat 50 0.9 0.98\n", - " Siamese cat 50 0.96 1\n", - " Egyptian Mau 50 0.54 0.8\n", - " cougar 50 0.9 1\n", - " lynx 50 0.72 0.88\n", - " leopard 50 0.78 0.98\n", - " snow leopard 50 0.9 0.98\n", - " jaguar 50 0.7 0.94\n", - " lion 50 0.9 0.98\n", - " tiger 50 0.92 0.98\n", - " cheetah 50 0.94 0.98\n", - " brown bear 50 0.94 0.98\n", - " American black bear 50 0.8 1\n", - " polar bear 50 0.84 0.96\n", - " sloth bear 50 0.72 0.92\n", - " mongoose 50 0.7 0.92\n", - " meerkat 50 0.82 0.92\n", - " tiger beetle 50 0.92 0.94\n", - " ladybug 50 0.86 0.94\n", - " ground beetle 50 0.64 0.94\n", - " longhorn beetle 50 0.62 0.88\n", - " leaf beetle 50 0.64 0.98\n", - " dung beetle 50 0.86 0.98\n", - " rhinoceros beetle 50 0.86 0.94\n", - " weevil 50 0.9 1\n", - " fly 50 0.78 0.94\n", - " bee 50 0.68 0.94\n", - " ant 50 0.68 0.78\n", - " grasshopper 50 0.5 0.92\n", 
- " cricket 50 0.64 0.92\n", - " stick insect 50 0.64 0.92\n", - " cockroach 50 0.72 0.8\n", - " mantis 50 0.64 0.86\n", - " cicada 50 0.9 0.96\n", - " leafhopper 50 0.88 0.94\n", - " lacewing 50 0.78 0.92\n", - " dragonfly 50 0.82 0.98\n", - " damselfly 50 0.82 1\n", - " red admiral 50 0.94 0.96\n", - " ringlet 50 0.86 0.98\n", - " monarch butterfly 50 0.9 0.92\n", - " small white 50 0.9 1\n", - " sulphur butterfly 50 0.92 1\n", - "gossamer-winged butterfly 50 0.88 1\n", - " starfish 50 0.88 0.92\n", - " sea urchin 50 0.84 0.94\n", - " sea cucumber 50 0.66 0.84\n", - " cottontail rabbit 50 0.72 0.94\n", - " hare 50 0.84 0.96\n", - " Angora rabbit 50 0.94 0.98\n", - " hamster 50 0.96 1\n", - " porcupine 50 0.88 0.98\n", - " fox squirrel 50 0.76 0.94\n", - " marmot 50 0.92 0.96\n", - " beaver 50 0.78 0.94\n", - " guinea pig 50 0.78 0.94\n", - " common sorrel 50 0.96 0.98\n", - " zebra 50 0.94 0.96\n", - " pig 50 0.5 0.76\n", - " wild boar 50 0.84 0.96\n", - " warthog 50 0.84 0.96\n", - " hippopotamus 50 0.88 0.96\n", - " ox 50 0.48 0.94\n", - " water buffalo 50 0.78 0.94\n", - " bison 50 0.88 0.96\n", - " ram 50 0.58 0.92\n", - " bighorn sheep 50 0.66 1\n", - " Alpine ibex 50 0.92 0.98\n", - " hartebeest 50 0.94 1\n", - " impala 50 0.82 0.96\n", - " gazelle 50 0.7 0.96\n", - " dromedary 50 0.9 1\n", - " llama 50 0.82 0.94\n", - " weasel 50 0.44 0.92\n", - " mink 50 0.78 0.96\n", - " European polecat 50 0.46 0.9\n", - " black-footed ferret 50 0.68 0.96\n", - " otter 50 0.66 0.88\n", - " skunk 50 0.96 0.96\n", - " badger 50 0.86 0.92\n", - " armadillo 50 0.88 0.9\n", - " three-toed sloth 50 0.96 1\n", - " orangutan 50 0.78 0.92\n", - " gorilla 50 0.82 0.94\n", - " chimpanzee 50 0.84 0.94\n", - " gibbon 50 0.76 0.86\n", - " siamang 50 0.68 0.94\n", - " guenon 50 0.8 0.94\n", - " patas monkey 50 0.62 0.82\n", - " baboon 50 0.9 0.98\n", - " macaque 50 0.8 0.86\n", - " langur 50 0.6 0.82\n", - " black-and-white colobus 50 0.86 0.9\n", - " proboscis monkey 50 1 1\n", - " marmoset 50 0.74 0.98\n", - " white-headed capuchin 50 0.72 0.9\n", - " howler monkey 50 0.86 0.94\n", - " titi 50 0.5 0.9\n", - "Geoffroy's spider monkey 50 0.42 0.8\n", - " common squirrel monkey 50 0.76 0.92\n", - " ring-tailed lemur 50 0.72 0.94\n", - " indri 50 0.9 0.96\n", - " Asian elephant 50 0.58 0.92\n", - " African bush elephant 50 0.7 0.98\n", - " red panda 50 0.94 0.94\n", - " giant panda 50 0.94 0.98\n", - " snoek 50 0.74 0.9\n", - " eel 50 0.6 0.84\n", - " coho salmon 50 0.84 0.96\n", - " rock beauty 50 0.88 0.98\n", - " clownfish 50 0.78 0.98\n", - " sturgeon 50 0.68 0.94\n", - " garfish 50 0.62 0.8\n", - " lionfish 50 0.96 0.96\n", - " pufferfish 50 0.88 0.96\n", - " abacus 50 0.74 0.88\n", - " abaya 50 0.84 0.92\n", - " academic gown 50 0.42 0.86\n", - " accordion 50 0.8 0.9\n", - " acoustic guitar 50 0.5 0.76\n", - " aircraft carrier 50 0.8 0.96\n", - " airliner 50 0.92 1\n", - " airship 50 0.76 0.82\n", - " altar 50 0.64 0.98\n", - " ambulance 50 0.88 0.98\n", - " amphibious vehicle 50 0.64 0.94\n", - " analog clock 50 0.52 0.92\n", - " apiary 50 0.82 0.96\n", - " apron 50 0.7 0.84\n", - " waste container 50 0.4 0.8\n", - " assault rifle 50 0.42 0.84\n", - " backpack 50 0.34 0.64\n", - " bakery 50 0.4 0.68\n", - " balance beam 50 0.8 0.98\n", - " balloon 50 0.86 0.96\n", - " ballpoint pen 50 0.52 0.96\n", - " Band-Aid 50 0.7 0.9\n", - " banjo 50 0.84 1\n", - " baluster 50 0.68 0.94\n", - " barbell 50 0.56 0.9\n", - " barber chair 50 0.7 0.92\n", - " barbershop 50 0.54 0.86\n", - " barn 50 0.96 0.96\n", - " 
barometer 50 0.84 0.98\n", - " barrel 50 0.56 0.88\n", - " wheelbarrow 50 0.66 0.88\n", - " baseball 50 0.74 0.98\n", - " basketball 50 0.88 0.98\n", - " bassinet 50 0.66 0.92\n", - " bassoon 50 0.74 0.98\n", - " swimming cap 50 0.62 0.88\n", - " bath towel 50 0.54 0.78\n", - " bathtub 50 0.4 0.88\n", - " station wagon 50 0.66 0.84\n", - " lighthouse 50 0.78 0.94\n", - " beaker 50 0.52 0.68\n", - " military cap 50 0.84 0.96\n", - " beer bottle 50 0.66 0.88\n", - " beer glass 50 0.6 0.84\n", - " bell-cot 50 0.56 0.96\n", - " bib 50 0.58 0.82\n", - " tandem bicycle 50 0.86 0.96\n", - " bikini 50 0.56 0.88\n", - " ring binder 50 0.64 0.84\n", - " binoculars 50 0.54 0.78\n", - " birdhouse 50 0.86 0.94\n", - " boathouse 50 0.74 0.92\n", - " bobsleigh 50 0.92 0.96\n", - " bolo tie 50 0.8 0.94\n", - " poke bonnet 50 0.64 0.86\n", - " bookcase 50 0.66 0.92\n", - " bookstore 50 0.62 0.88\n", - " bottle cap 50 0.58 0.7\n", - " bow 50 0.72 0.86\n", - " bow tie 50 0.7 0.9\n", - " brass 50 0.92 0.96\n", - " bra 50 0.5 0.7\n", - " breakwater 50 0.62 0.86\n", - " breastplate 50 0.4 0.9\n", - " broom 50 0.6 0.86\n", - " bucket 50 0.66 0.8\n", - " buckle 50 0.5 0.68\n", - " bulletproof vest 50 0.5 0.78\n", - " high-speed train 50 0.94 0.96\n", - " butcher shop 50 0.74 0.94\n", - " taxicab 50 0.64 0.86\n", - " cauldron 50 0.44 0.66\n", - " candle 50 0.48 0.74\n", - " cannon 50 0.88 0.94\n", - " canoe 50 0.94 1\n", - " can opener 50 0.66 0.86\n", - " cardigan 50 0.68 0.8\n", - " car mirror 50 0.94 0.96\n", - " carousel 50 0.94 0.98\n", - " tool kit 50 0.56 0.78\n", - " carton 50 0.42 0.7\n", - " car wheel 50 0.38 0.74\n", - "automated teller machine 50 0.76 0.94\n", - " cassette 50 0.52 0.8\n", - " cassette player 50 0.28 0.9\n", - " castle 50 0.78 0.88\n", - " catamaran 50 0.78 1\n", - " CD player 50 0.52 0.82\n", - " cello 50 0.82 1\n", - " mobile phone 50 0.68 0.86\n", - " chain 50 0.38 0.66\n", - " chain-link fence 50 0.7 0.84\n", - " chain mail 50 0.64 0.9\n", - " chainsaw 50 0.84 0.92\n", - " chest 50 0.68 0.92\n", - " chiffonier 50 0.26 0.64\n", - " chime 50 0.62 0.84\n", - " china cabinet 50 0.82 0.96\n", - " Christmas stocking 50 0.92 0.94\n", - " church 50 0.62 0.9\n", - " movie theater 50 0.58 0.88\n", - " cleaver 50 0.32 0.62\n", - " cliff dwelling 50 0.88 1\n", - " cloak 50 0.32 0.64\n", - " clogs 50 0.58 0.88\n", - " cocktail shaker 50 0.62 0.7\n", - " coffee mug 50 0.44 0.72\n", - " coffeemaker 50 0.64 0.92\n", - " coil 50 0.66 0.84\n", - " combination lock 50 0.64 0.84\n", - " computer keyboard 50 0.7 0.82\n", - " confectionery store 50 0.54 0.86\n", - " container ship 50 0.82 0.98\n", - " convertible 50 0.78 0.98\n", - " corkscrew 50 0.82 0.92\n", - " cornet 50 0.46 0.88\n", - " cowboy boot 50 0.64 0.8\n", - " cowboy hat 50 0.64 0.82\n", - " cradle 50 0.38 0.8\n", - " crane (machine) 50 0.78 0.94\n", - " crash helmet 50 0.92 0.96\n", - " crate 50 0.52 0.82\n", - " infant bed 50 0.74 1\n", - " Crock Pot 50 0.78 0.9\n", - " croquet ball 50 0.9 0.96\n", - " crutch 50 0.46 0.7\n", - " cuirass 50 0.54 0.86\n", - " dam 50 0.74 0.92\n", - " desk 50 0.6 0.86\n", - " desktop computer 50 0.54 0.94\n", - " rotary dial telephone 50 0.88 0.94\n", - " diaper 50 0.68 0.84\n", - " digital clock 50 0.54 0.76\n", - " digital watch 50 0.58 0.86\n", - " dining table 50 0.76 0.9\n", - " dishcloth 50 0.94 1\n", - " dishwasher 50 0.44 0.78\n", - " disc brake 50 0.98 1\n", - " dock 50 0.54 0.94\n", - " dog sled 50 0.84 1\n", - " dome 50 0.72 0.92\n", - " doormat 50 0.56 0.82\n", - " drilling rig 50 0.84 0.96\n", - 
" drum 50 0.38 0.68\n", - " drumstick 50 0.56 0.72\n", - " dumbbell 50 0.62 0.9\n", - " Dutch oven 50 0.7 0.84\n", - " electric fan 50 0.82 0.86\n", - " electric guitar 50 0.62 0.84\n", - " electric locomotive 50 0.92 0.98\n", - " entertainment center 50 0.9 0.98\n", - " envelope 50 0.44 0.86\n", - " espresso machine 50 0.72 0.94\n", - " face powder 50 0.7 0.92\n", - " feather boa 50 0.7 0.84\n", - " filing cabinet 50 0.88 0.98\n", - " fireboat 50 0.94 0.98\n", - " fire engine 50 0.84 0.9\n", - " fire screen sheet 50 0.62 0.76\n", - " flagpole 50 0.74 0.88\n", - " flute 50 0.36 0.72\n", - " folding chair 50 0.62 0.84\n", - " football helmet 50 0.86 0.94\n", - " forklift 50 0.8 0.92\n", - " fountain 50 0.84 0.94\n", - " fountain pen 50 0.76 0.92\n", - " four-poster bed 50 0.78 0.94\n", - " freight car 50 0.96 1\n", - " French horn 50 0.76 0.92\n", - " frying pan 50 0.36 0.78\n", - " fur coat 50 0.84 0.96\n", - " garbage truck 50 0.9 0.98\n", - " gas mask 50 0.84 0.92\n", - " gas pump 50 0.9 0.98\n", - " goblet 50 0.68 0.82\n", - " go-kart 50 0.9 1\n", - " golf ball 50 0.84 0.9\n", - " golf cart 50 0.78 0.86\n", - " gondola 50 0.98 0.98\n", - " gong 50 0.74 0.92\n", - " gown 50 0.62 0.96\n", - " grand piano 50 0.7 0.96\n", - " greenhouse 50 0.8 0.98\n", - " grille 50 0.72 0.9\n", - " grocery store 50 0.66 0.94\n", - " guillotine 50 0.86 0.92\n", - " barrette 50 0.52 0.66\n", - " hair spray 50 0.5 0.74\n", - " half-track 50 0.78 0.9\n", - " hammer 50 0.56 0.76\n", - " hamper 50 0.64 0.84\n", - " hair dryer 50 0.56 0.74\n", - " hand-held computer 50 0.42 0.86\n", - " handkerchief 50 0.78 0.94\n", - " hard disk drive 50 0.76 0.84\n", - " harmonica 50 0.7 0.88\n", - " harp 50 0.88 0.96\n", - " harvester 50 0.78 1\n", - " hatchet 50 0.54 0.74\n", - " holster 50 0.66 0.84\n", - " home theater 50 0.64 0.94\n", - " honeycomb 50 0.56 0.88\n", - " hook 50 0.3 0.6\n", - " hoop skirt 50 0.64 0.86\n", - " horizontal bar 50 0.68 0.98\n", - " horse-drawn vehicle 50 0.88 0.94\n", - " hourglass 50 0.88 0.96\n", - " iPod 50 0.76 0.94\n", - " clothes iron 50 0.82 0.88\n", - " jack-o'-lantern 50 0.98 0.98\n", - " jeans 50 0.68 0.84\n", - " jeep 50 0.72 0.9\n", - " T-shirt 50 0.72 0.96\n", - " jigsaw puzzle 50 0.84 0.94\n", - " pulled rickshaw 50 0.86 0.94\n", - " joystick 50 0.8 0.9\n", - " kimono 50 0.84 0.96\n", - " knee pad 50 0.62 0.88\n", - " knot 50 0.66 0.8\n", - " lab coat 50 0.8 0.96\n", - " ladle 50 0.36 0.64\n", - " lampshade 50 0.48 0.84\n", - " laptop computer 50 0.26 0.88\n", - " lawn mower 50 0.78 0.96\n", - " lens cap 50 0.46 0.72\n", - " paper knife 50 0.26 0.5\n", - " library 50 0.54 0.9\n", - " lifeboat 50 0.92 0.98\n", - " lighter 50 0.56 0.78\n", - " limousine 50 0.76 0.92\n", - " ocean liner 50 0.88 0.94\n", - " lipstick 50 0.74 0.9\n", - " slip-on shoe 50 0.74 0.92\n", - " lotion 50 0.5 0.86\n", - " speaker 50 0.52 0.68\n", - " loupe 50 0.32 0.52\n", - " sawmill 50 0.72 0.9\n", - " magnetic compass 50 0.52 0.82\n", - " mail bag 50 0.68 0.92\n", - " mailbox 50 0.82 0.92\n", - " tights 50 0.22 0.94\n", - " tank suit 50 0.24 0.9\n", - " manhole cover 50 0.96 0.98\n", - " maraca 50 0.74 0.9\n", - " marimba 50 0.84 0.94\n", - " mask 50 0.44 0.82\n", - " match 50 0.66 0.9\n", - " maypole 50 0.96 1\n", - " maze 50 0.8 0.96\n", - " measuring cup 50 0.54 0.76\n", - " medicine chest 50 0.6 0.84\n", - " megalith 50 0.8 0.92\n", - " microphone 50 0.52 0.7\n", - " microwave oven 50 0.48 0.72\n", - " military uniform 50 0.62 0.84\n", - " milk can 50 0.68 0.82\n", - " minibus 50 0.7 1\n", - " miniskirt 
50 0.46 0.76\n", - " minivan 50 0.38 0.8\n", - " missile 50 0.4 0.84\n", - " mitten 50 0.76 0.88\n", - " mixing bowl 50 0.8 0.92\n", - " mobile home 50 0.54 0.78\n", - " Model T 50 0.92 0.96\n", - " modem 50 0.58 0.86\n", - " monastery 50 0.44 0.9\n", - " monitor 50 0.4 0.86\n", - " moped 50 0.56 0.94\n", - " mortar 50 0.68 0.94\n", - " square academic cap 50 0.5 0.84\n", - " mosque 50 0.9 1\n", - " mosquito net 50 0.9 0.98\n", - " scooter 50 0.9 0.98\n", - " mountain bike 50 0.78 0.96\n", - " tent 50 0.88 0.96\n", - " computer mouse 50 0.42 0.82\n", - " mousetrap 50 0.76 0.88\n", - " moving van 50 0.4 0.72\n", - " muzzle 50 0.5 0.72\n", - " nail 50 0.68 0.74\n", - " neck brace 50 0.56 0.68\n", - " necklace 50 0.86 1\n", - " nipple 50 0.7 0.88\n", - " notebook computer 50 0.34 0.84\n", - " obelisk 50 0.8 0.92\n", - " oboe 50 0.6 0.84\n", - " ocarina 50 0.8 0.86\n", - " odometer 50 0.96 1\n", - " oil filter 50 0.58 0.82\n", - " organ 50 0.82 0.9\n", - " oscilloscope 50 0.9 0.96\n", - " overskirt 50 0.2 0.7\n", - " bullock cart 50 0.7 0.94\n", - " oxygen mask 50 0.46 0.84\n", - " packet 50 0.5 0.78\n", - " paddle 50 0.56 0.94\n", - " paddle wheel 50 0.86 0.96\n", - " padlock 50 0.74 0.78\n", - " paintbrush 50 0.62 0.8\n", - " pajamas 50 0.56 0.92\n", - " palace 50 0.64 0.96\n", - " pan flute 50 0.84 0.86\n", - " paper towel 50 0.66 0.84\n", - " parachute 50 0.92 0.94\n", - " parallel bars 50 0.62 0.96\n", - " park bench 50 0.74 0.9\n", - " parking meter 50 0.84 0.92\n", - " passenger car 50 0.5 0.82\n", - " patio 50 0.58 0.84\n", - " payphone 50 0.74 0.92\n", - " pedestal 50 0.52 0.9\n", - " pencil case 50 0.64 0.92\n", - " pencil sharpener 50 0.52 0.78\n", - " perfume 50 0.7 0.9\n", - " Petri dish 50 0.6 0.8\n", - " photocopier 50 0.88 0.98\n", - " plectrum 50 0.7 0.84\n", - " Pickelhaube 50 0.72 0.86\n", - " picket fence 50 0.84 0.94\n", - " pickup truck 50 0.64 0.92\n", - " pier 50 0.52 0.82\n", - " piggy bank 50 0.82 0.94\n", - " pill bottle 50 0.76 0.86\n", - " pillow 50 0.76 0.9\n", - " ping-pong ball 50 0.84 0.88\n", - " pinwheel 50 0.76 0.88\n", - " pirate ship 50 0.76 0.94\n", - " pitcher 50 0.46 0.84\n", - " hand plane 50 0.84 0.94\n", - " planetarium 50 0.88 0.98\n", - " plastic bag 50 0.36 0.62\n", - " plate rack 50 0.52 0.78\n", - " plow 50 0.78 0.88\n", - " plunger 50 0.42 0.7\n", - " Polaroid camera 50 0.84 0.92\n", - " pole 50 0.38 0.74\n", - " police van 50 0.76 0.94\n", - " poncho 50 0.58 0.86\n", - " billiard table 50 0.8 0.88\n", - " soda bottle 50 0.56 0.94\n", - " pot 50 0.78 0.92\n", - " potter's wheel 50 0.9 0.94\n", - " power drill 50 0.42 0.72\n", - " prayer rug 50 0.7 0.86\n", - " printer 50 0.54 0.86\n", - " prison 50 0.7 0.9\n", - " projectile 50 0.28 0.9\n", - " projector 50 0.62 0.84\n", - " hockey puck 50 0.92 0.96\n", - " punching bag 50 0.6 0.68\n", - " purse 50 0.42 0.78\n", - " quill 50 0.68 0.84\n", - " quilt 50 0.64 0.9\n", - " race car 50 0.72 0.92\n", - " racket 50 0.72 0.9\n", - " radiator 50 0.66 0.76\n", - " radio 50 0.64 0.92\n", - " radio telescope 50 0.9 0.96\n", - " rain barrel 50 0.8 0.98\n", - " recreational vehicle 50 0.84 0.94\n", - " reel 50 0.72 0.82\n", - " reflex camera 50 0.72 0.92\n", - " refrigerator 50 0.7 0.9\n", - " remote control 50 0.7 0.88\n", - " restaurant 50 0.5 0.66\n", - " revolver 50 0.82 1\n", - " rifle 50 0.38 0.7\n", - " rocking chair 50 0.62 0.84\n", - " rotisserie 50 0.88 0.92\n", - " eraser 50 0.54 0.76\n", - " rugby ball 50 0.86 0.94\n", - " ruler 50 0.68 0.86\n", - " running shoe 50 0.78 0.94\n", - " safe 50 0.82 
0.92\n", - " safety pin 50 0.4 0.62\n", - " salt shaker 50 0.66 0.9\n", - " sandal 50 0.66 0.86\n", - " sarong 50 0.64 0.86\n", - " saxophone 50 0.66 0.88\n", - " scabbard 50 0.76 0.92\n", - " weighing scale 50 0.58 0.78\n", - " school bus 50 0.92 1\n", - " schooner 50 0.84 1\n", - " scoreboard 50 0.9 0.96\n", - " CRT screen 50 0.14 0.7\n", - " screw 50 0.9 0.98\n", - " screwdriver 50 0.3 0.58\n", - " seat belt 50 0.88 0.94\n", - " sewing machine 50 0.76 0.9\n", - " shield 50 0.56 0.82\n", - " shoe store 50 0.78 0.96\n", - " shoji 50 0.8 0.92\n", - " shopping basket 50 0.52 0.88\n", - " shopping cart 50 0.76 0.92\n", - " shovel 50 0.62 0.84\n", - " shower cap 50 0.7 0.84\n", - " shower curtain 50 0.64 0.82\n", - " ski 50 0.74 0.92\n", - " ski mask 50 0.72 0.88\n", - " sleeping bag 50 0.68 0.8\n", - " slide rule 50 0.72 0.88\n", - " sliding door 50 0.44 0.78\n", - " slot machine 50 0.94 0.98\n", - " snorkel 50 0.86 0.98\n", - " snowmobile 50 0.88 1\n", - " snowplow 50 0.84 0.98\n", - " soap dispenser 50 0.56 0.86\n", - " soccer ball 50 0.86 0.96\n", - " sock 50 0.62 0.76\n", - " solar thermal collector 50 0.72 0.96\n", - " sombrero 50 0.6 0.84\n", - " soup bowl 50 0.56 0.94\n", - " space bar 50 0.34 0.88\n", - " space heater 50 0.52 0.74\n", - " space shuttle 50 0.82 0.96\n", - " spatula 50 0.3 0.6\n", - " motorboat 50 0.86 1\n", - " spider web 50 0.7 0.9\n", - " spindle 50 0.86 0.98\n", - " sports car 50 0.6 0.94\n", - " spotlight 50 0.26 0.6\n", - " stage 50 0.68 0.86\n", - " steam locomotive 50 0.94 1\n", - " through arch bridge 50 0.84 0.96\n", - " steel drum 50 0.82 0.9\n", - " stethoscope 50 0.6 0.82\n", - " scarf 50 0.5 0.92\n", - " stone wall 50 0.76 0.9\n", - " stopwatch 50 0.58 0.9\n", - " stove 50 0.46 0.74\n", - " strainer 50 0.64 0.84\n", - " tram 50 0.88 0.96\n", - " stretcher 50 0.6 0.8\n", - " couch 50 0.8 0.96\n", - " stupa 50 0.88 0.88\n", - " submarine 50 0.72 0.92\n", - " suit 50 0.4 0.78\n", - " sundial 50 0.58 0.74\n", - " sunglass 50 0.14 0.58\n", - " sunglasses 50 0.28 0.58\n", - " sunscreen 50 0.32 0.7\n", - " suspension bridge 50 0.6 0.94\n", - " mop 50 0.74 0.92\n", - " sweatshirt 50 0.28 0.66\n", - " swimsuit 50 0.52 0.82\n", - " swing 50 0.76 0.84\n", - " switch 50 0.56 0.76\n", - " syringe 50 0.62 0.82\n", - " table lamp 50 0.6 0.88\n", - " tank 50 0.8 0.96\n", - " tape player 50 0.46 0.76\n", - " teapot 50 0.84 1\n", - " teddy bear 50 0.82 0.94\n", - " television 50 0.6 0.9\n", - " tennis ball 50 0.7 0.94\n", - " thatched roof 50 0.88 0.9\n", - " front curtain 50 0.8 0.92\n", - " thimble 50 0.6 0.8\n", - " threshing machine 50 0.56 0.88\n", - " throne 50 0.72 0.82\n", - " tile roof 50 0.72 0.94\n", - " toaster 50 0.66 0.84\n", - " tobacco shop 50 0.42 0.7\n", - " toilet seat 50 0.62 0.88\n", - " torch 50 0.64 0.84\n", - " totem pole 50 0.92 0.98\n", - " tow truck 50 0.62 0.88\n", - " toy store 50 0.6 0.94\n", - " tractor 50 0.76 0.98\n", - " semi-trailer truck 50 0.78 0.92\n", - " tray 50 0.46 0.64\n", - " trench coat 50 0.54 0.72\n", - " tricycle 50 0.72 0.94\n", - " trimaran 50 0.7 0.98\n", - " tripod 50 0.58 0.86\n", - " triumphal arch 50 0.92 0.98\n", - " trolleybus 50 0.9 1\n", - " trombone 50 0.54 0.88\n", - " tub 50 0.24 0.82\n", - " turnstile 50 0.84 0.94\n", - " typewriter keyboard 50 0.68 0.98\n", - " umbrella 50 0.52 0.7\n", - " unicycle 50 0.74 0.96\n", - " upright piano 50 0.76 0.9\n", - " vacuum cleaner 50 0.62 0.9\n", - " vase 50 0.5 0.78\n", - " vault 50 0.76 0.92\n", - " velvet 50 0.2 0.42\n", - " vending machine 50 0.9 1\n", - " vestment 50 
0.54 0.82\n", - " viaduct 50 0.78 0.86\n", - " violin 50 0.68 0.78\n", - " volleyball 50 0.86 1\n", - " waffle iron 50 0.72 0.88\n", - " wall clock 50 0.54 0.88\n", - " wallet 50 0.52 0.9\n", - " wardrobe 50 0.68 0.88\n", - " military aircraft 50 0.9 0.98\n", - " sink 50 0.72 0.96\n", - " washing machine 50 0.78 0.94\n", - " water bottle 50 0.54 0.74\n", - " water jug 50 0.22 0.74\n", - " water tower 50 0.9 0.96\n", - " whiskey jug 50 0.64 0.74\n", - " whistle 50 0.72 0.84\n", - " wig 50 0.84 0.9\n", - " window screen 50 0.68 0.8\n", - " window shade 50 0.52 0.76\n", - " Windsor tie 50 0.22 0.66\n", - " wine bottle 50 0.42 0.82\n", - " wing 50 0.54 0.96\n", - " wok 50 0.46 0.82\n", - " wooden spoon 50 0.58 0.8\n", - " wool 50 0.32 0.82\n", - " split-rail fence 50 0.74 0.9\n", - " shipwreck 50 0.84 0.96\n", - " yawl 50 0.78 0.96\n", - " yurt 50 0.84 1\n", - " website 50 0.98 1\n", - " comic book 50 0.62 0.9\n", - " crossword 50 0.84 0.88\n", - " traffic sign 50 0.78 0.9\n", - " traffic light 50 0.8 0.94\n", - " dust jacket 50 0.72 0.94\n", - " menu 50 0.82 0.96\n", - " plate 50 0.44 0.88\n", - " guacamole 50 0.8 0.92\n", - " consomme 50 0.54 0.88\n", - " hot pot 50 0.86 0.98\n", - " trifle 50 0.92 0.98\n", - " ice cream 50 0.68 0.94\n", - " ice pop 50 0.62 0.84\n", - " baguette 50 0.62 0.88\n", - " bagel 50 0.64 0.92\n", - " pretzel 50 0.72 0.88\n", - " cheeseburger 50 0.9 1\n", - " hot dog 50 0.74 0.94\n", - " mashed potato 50 0.74 0.9\n", - " cabbage 50 0.84 0.96\n", - " broccoli 50 0.9 0.96\n", - " cauliflower 50 0.82 1\n", - " zucchini 50 0.74 0.9\n", - " spaghetti squash 50 0.8 0.96\n", - " acorn squash 50 0.82 0.96\n", - " butternut squash 50 0.7 0.94\n", - " cucumber 50 0.6 0.96\n", - " artichoke 50 0.84 0.94\n", - " bell pepper 50 0.84 0.98\n", - " cardoon 50 0.88 0.94\n", - " mushroom 50 0.38 0.92\n", - " Granny Smith 50 0.9 0.96\n", - " strawberry 50 0.6 0.88\n", - " orange 50 0.7 0.92\n", - " lemon 50 0.78 0.98\n", - " fig 50 0.82 0.96\n", - " pineapple 50 0.86 0.96\n", - " banana 50 0.84 0.96\n", - " jackfruit 50 0.9 0.98\n", - " custard apple 50 0.86 0.96\n", - " pomegranate 50 0.82 0.98\n", - " hay 50 0.8 0.92\n", - " carbonara 50 0.88 0.94\n", - " chocolate syrup 50 0.46 0.84\n", - " dough 50 0.4 0.6\n", - " meatloaf 50 0.58 0.84\n", - " pizza 50 0.84 0.96\n", - " pot pie 50 0.68 0.9\n", - " burrito 50 0.8 0.98\n", - " red wine 50 0.54 0.82\n", - " espresso 50 0.64 0.88\n", - " cup 50 0.38 0.7\n", - " eggnog 50 0.38 0.7\n", - " alp 50 0.54 0.88\n", - " bubble 50 0.8 0.96\n", - " cliff 50 0.64 1\n", - " coral reef 50 0.72 0.96\n", - " geyser 50 0.94 1\n", - " lakeshore 50 0.54 0.88\n", - " promontory 50 0.58 0.94\n", - " shoal 50 0.6 0.96\n", - " seashore 50 0.44 0.78\n", - " valley 50 0.72 0.94\n", - " volcano 50 0.78 0.96\n", - " baseball player 50 0.72 0.94\n", - " bridegroom 50 0.72 0.88\n", - " scuba diver 50 0.8 1\n", - " rapeseed 50 0.94 0.98\n", - " daisy 50 0.96 0.98\n", - " yellow lady's slipper 50 1 1\n", - " corn 50 0.4 0.88\n", - " acorn 50 0.92 0.98\n", - " rose hip 50 0.92 0.98\n", - " horse chestnut seed 50 0.94 0.98\n", - " coral fungus 50 0.96 0.96\n", - " agaric 50 0.82 0.94\n", - " gyromitra 50 0.98 1\n", - " stinkhorn mushroom 50 0.8 0.94\n", - " earth star 50 0.98 1\n", - " hen-of-the-woods 50 0.8 0.96\n", - " bolete 50 0.74 0.94\n", - " ear 50 0.48 0.94\n", - " toilet paper 50 0.36 0.68\n", - "Speed: 0.1ms pre-process, 0.3ms inference, 0.0ms post-process per image at shape (1, 3, 224, 224)\n", - "Results saved to \u001b[1mruns/val-cls/exp\u001b[0m\n" - ] 
- } - ], - "source": [ - "# Validate YOLOv5s on Imagenet val\n", - "!python classify/val.py --weights yolov5s-cls.pt --data ../datasets/imagenet --img 224 --half" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "ZY2VXXXu74w5" - }, - "source": [ - "# 3. Train\n", - "\n", - "

\n", - "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", - "

\n", - "\n", - "Train a YOLOv5s Classification model on the [Imagenette](https://image-net.org/) dataset with `--data imagenette160`, starting from the pretrained `--pretrained yolov5s-cls.pt` weights.\n", - "\n", - "- **Pretrained [Models](https://github.com/ultralytics/yolov5/tree/master/models)** are downloaded\n", - "automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\n", - "- **Training Results** are saved to `runs/train-cls/` with incrementing run directories, e.g. `runs/train-cls/exp2`, `runs/train-cls/exp3`, etc. (see the sketch below)\n", - "
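A rough sketch of that incrementing run-directory behavior (an illustration only; YOLOv5's own helper for this is `increment_path()` in `utils/general.py`):

```python
from pathlib import Path


def next_run_dir(base="runs/train-cls/exp"):
    # Return `base` if it is still free, else base2, base3, ... (the first
    # path that does not exist yet). Approximates YOLOv5's increment_path().
    path, n = Path(base), 2
    while path.exists():
        path, n = Path(f"{base}{n}"), n + 1
    return path


print(next_run_dir())  # runs/train-cls/exp on a fresh machine, then exp2, exp3, ...
```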

\n", - "\n", - "A **Mosaic Dataloader** is used for training which combines 4 images into 1 mosaic.\n", - "\n", - "## Train on Custom Data with Roboflow 🌟 NEW\n", - "\n", - "[Roboflow](https://roboflow.com/?ref=ultralytics) enables you to easily **organize, label, and prepare** a high quality dataset with your own custom data. Roboflow also makes it easy to establish an active learning pipeline, collaborate with your team on dataset improvement, and integrate directly into your model building workflow with the `roboflow` pip package.\n", - "\n", - "- Custom Training Example: [https://blog.roboflow.com/train-yolov5-classification-custom-data/](https://blog.roboflow.com/train-yolov5-classification-custom-data/?ref=ultralytics)\n", - "- Custom Training Notebook: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1KZiKUAjtARHAfZCXbJRv14-pOnIsBLPV?usp=sharing)\n", - "
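To picture the 4-image mosaic described at the start of this section, here is a minimal NumPy sketch that pastes four images into a 2x2 canvas. This is an illustration only: YOLOv5's real `load_mosaic()` additionally randomizes the mosaic center and remaps each image's labels into mosaic coordinates.

```python
import numpy as np


def make_mosaic(imgs, s=320):
    # Paste up to 4 HxWx3 uint8 images into a 2s x 2s canvas, one per quadrant.
    canvas = np.full((2 * s, 2 * s, 3), 114, dtype=np.uint8)  # grey fill, as in letterboxing
    for k, im in enumerate(imgs[:4]):
        r, c = divmod(k, 2)  # quadrant row and column
        h, w = min(im.shape[0], s), min(im.shape[1], s)
        canvas[r * s:r * s + h, c * s:c * s + w] = im[:h, :w]
    return canvas


imgs = [np.random.randint(0, 255, (240, 320, 3), dtype=np.uint8) for _ in range(4)]
print(make_mosaic(imgs).shape)  # (640, 640, 3)
```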
\n", - "\n", - "

Label images lightning fast (including with model-assisted labeling)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "i3oKtE4g-aNn" - }, - "outputs": [], - "source": [ - "#@title Select YOLOv5 🚀 logger {run: 'auto'}\n", - "logger = 'Comet' #@param ['Comet', 'ClearML', 'TensorBoard']\n", - "\n", - "if logger == 'Comet':\n", - " %pip install -q comet_ml\n", - " import comet_ml; comet_ml.init()\n", - "elif logger == 'ClearML':\n", - " %pip install -q clearml\n", - " import clearml; clearml.browser_login()\n", - "elif logger == 'TensorBoard':\n", - " %load_ext tensorboard\n", - " %tensorboard --logdir runs/train" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "1NcFxRcFdJ_O", - "outputId": "77c8d487-16db-4073-b3ea-06cabf2e7766" - }, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "\u001b[34m\u001b[1mclassify/train: \u001b[0mmodel=yolov5s-cls.pt, data=imagenette160, epochs=5, batch_size=64, imgsz=224, nosave=False, cache=ram, device=, workers=8, project=runs/train-cls, name=exp, exist_ok=False, pretrained=True, optimizer=Adam, lr0=0.001, decay=5e-05, label_smoothing=0.1, cutoff=None, dropout=None, verbose=False, seed=0, local_rank=-1\n", - "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 🚀 v7.0-3-g61ebf5e Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", - "\n", - "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train-cls', view at http://localhost:6006/\n", - "\n", - "Dataset not found ⚠️, missing path /content/datasets/imagenette160, attempting download...\n", - "Downloading https://github.com/ultralytics/assets/releases/download/v0.0.0/imagenette160.zip to /content/datasets/imagenette160.zip...\n", - "100% 103M/103M [00:00<00:00, 347MB/s] \n", - "Unzipping /content/datasets/imagenette160.zip...\n", - "Dataset download success ✅ (3.3s), saved to \u001b[1m/content/datasets/imagenette160\u001b[0m\n", - "\n", - "\u001b[34m\u001b[1malbumentations: \u001b[0mRandomResizedCrop(p=1.0, height=224, width=224, scale=(0.08, 1.0), ratio=(0.75, 1.3333333333333333), interpolation=1), HorizontalFlip(p=0.5), ColorJitter(p=0.5, brightness=[0.6, 1.4], contrast=[0.6, 1.4], saturation=[0.6, 1.4], hue=[0, 0]), Normalize(p=1.0, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_pixel_value=255.0), ToTensorV2(always_apply=True, p=1.0, transpose_mask=False)\n", - "Model summary: 149 layers, 4185290 parameters, 4185290 gradients, 10.5 GFLOPs\n", - "\u001b[34m\u001b[1moptimizer:\u001b[0m Adam(lr=0.001) with parameter groups 32 weight(decay=0.0), 33 weight(decay=5e-05), 33 bias\n", - "Image sizes 224 train, 224 test\n", - "Using 1 dataloader workers\n", - "Logging results to \u001b[1mruns/train-cls/exp\u001b[0m\n", - "Starting yolov5s-cls.pt training on imagenette160 dataset with 10 classes for 5 epochs...\n", - "\n", - " Epoch GPU_mem train_loss val_loss top1_acc top5_acc\n", - " 1/5 1.47G 1.05 0.974 0.828 0.975: 100% 148/148 [00:38<00:00, 3.82it/s]\n", - " 2/5 1.73G 0.895 0.766 0.911 0.994: 100% 148/148 [00:36<00:00, 4.03it/s]\n", - " 3/5 1.73G 0.82 0.704 0.934 0.996: 100% 148/148 [00:35<00:00, 4.20it/s]\n", - " 4/5 1.73G 0.766 0.664 0.951 0.998: 100% 148/148 [00:36<00:00, 4.05it/s]\n", - " 5/5 1.73G 0.724 0.634 0.959 0.997: 100% 148/148 [00:37<00:00, 3.94it/s]\n", - "\n", - "Training complete (0.052 hours)\n", - "Results saved to 
\u001b[1mruns/train-cls/exp\u001b[0m\n", - "Predict: python classify/predict.py --weights runs/train-cls/exp/weights/best.pt --source im.jpg\n", - "Validate: python classify/val.py --weights runs/train-cls/exp/weights/best.pt --data /content/datasets/imagenette160\n", - "Export: python export.py --weights runs/train-cls/exp/weights/best.pt --include onnx\n", - "PyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', 'runs/train-cls/exp/weights/best.pt')\n", - "Visualize: https://netron.app\n", - "\n" - ] - } - ], - "source": [ - "# Train YOLOv5s Classification on Imagenette160 for 5 epochs\n", - "!python classify/train.py --model yolov5s-cls.pt --data imagenette160 --epochs 5 --img 224 --cache" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "15glLzbQx5u0" - }, - "source": [ - "# 4. Visualize" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "nWOsI5wJR1o3" - }, - "source": [ - "## Comet Logging and Visualization 🌟 NEW\n", - "\n", - "[Comet](https://www.comet.com/site/lp/yolov5-with-comet/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://www.comet.com/docs/v2/guides/comet-dashboard/code-panels/about-panels/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes!\n", - "\n", - "Getting started is easy:\n", - "```shell\n", - "pip install comet_ml # 1. install\n", - "export COMET_API_KEY= # 2. paste API key\n", - "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n", - "```\n", - "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://docs.ultralytics.com/yolov5/tutorials/comet_logging_integration). If you'd like to learn more about Comet, head over to our [documentation](https://www.comet.com/docs/v2/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab). Get started by trying out the Comet Colab Notebook:\n", - "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", - "\n", - "\n", - "\"Comet" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "Lay2WsTjNJzP" - }, - "source": [ - "## ClearML Logging and Automation 🌟 NEW\n", - "\n", - "[ClearML](https://cutt.ly/yolov5-notebook-clearml) is completely integrated into YOLOv5 to track your experimentation, manage dataset versions and even remotely execute training runs. To enable ClearML (check cells above):\n", - "\n", - "- `pip install clearml`\n", - "- run `clearml-init` to connect to a ClearML server (**deploy your own [open-source server](https://github.com/allegroai/clearml-server)**, or use our [free hosted server](https://cutt.ly/yolov5-notebook-clearml))\n", - "\n", - "You'll get all the expected features from an experiment manager: live updates, model upload, experiment comparison, etc., but ClearML also tracks, for example, uncommitted changes and installed packages. Thanks to that, ClearML Tasks (which is what we call experiments) are also reproducible on different machines! 
With only 1 extra line, we can schedule a YOLOv5 training task on a queue to be executed by any number of ClearML Agents (workers).\n", - "\n", - "You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration) for details!\n", - "\n", - "\n", - "\"ClearML" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "-WPvRbS5Swl6" - }, - "source": [ - "## Local Logging\n", - "\n", - "Training results are automatically logged with [Tensorboard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) loggers to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc.\n", - "\n", - "This directory contains train and val statistics, mosaics, labels, predictions and augmented mosaics, as well as metrics and charts including precision-recall (PR) curves and confusion matrices. \n", - "\n", - "\"Local\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "Zelyeqbyt3GD" - }, - "source": [ - "# Environments\n", - "\n", - "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n", - "\n", - "- **Notebooks** with free GPU: \"Run \"Open \"Open\n", - "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/)\n", - "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/)\n", - "- **Docker Image**. See [Docker Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/docker_image_quickstart_tutorial/) \"Docker\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "6Qu7Iesl0p54" - }, - "source": [ - "# Status\n", - "\n", - "![YOLOv5 CI](https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg)\n", - "\n", - "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "IEijrePND_2I" - }, - "source": [ - "# Appendix\n", - "\n", - "Additional content below." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "GMusP4OAxFu6" - }, - "outputs": [], - "source": [ - "# YOLOv5 PyTorch HUB Inference (DetectionModels only)\n", - "import torch\n", - "\n", - "model = torch.hub.load('ultralytics/yolov5', 'yolov5s', force_reload=True, trust_repo=True) # or yolov5n - yolov5x6 or custom\n", - "im = 'https://ultralytics.com/images/zidane.jpg' # file, Path, PIL.Image, OpenCV, nparray, list\n", - "results = model(im) # inference\n", - "results.print() # or .show(), .save(), .crop(), .pandas(), etc." - ] + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "t6MPjfT5NrKQ" + }, + "source": [ + "
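As a companion to the ClearML section above, here is a minimal sketch of the "1 extra line" remote-execution flow it describes, assuming the standard `clearml` package (`pip install clearml`); the project, task, and queue names are hypothetical placeholders:

```python
from clearml import Task

# Register this run as a ClearML Task (names are placeholder examples).
task = Task.init(project_name="YOLOv5", task_name="classify-train")

# The "1 extra line": stop executing locally and enqueue the task, so that any
# ClearML Agent (worker) listening on the queue picks it up and runs it there.
task.execute_remotely(queue_name="default")
```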
\n", + "\n", + " \n", + " \n", + "\n", + "\n", + "
\n", + " \"Run\n", + " \"Open\n", + " \"Open\n", + "
\n", + "\n", + "This YOLOv5 🚀 notebook by Ultralytics presents simple train, validate and predict examples to help start your AI adventure.
See GitHub for community support or contact us for professional support.\n", + "\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "7mGmQbAO5pQb" + }, + "source": [ + "# Setup\n", + "\n", + "Clone GitHub [repository](https://github.com/ultralytics/yolov5), install [dependencies](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) and check PyTorch and GPU." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" }, + "id": "wbvMlHd_QwMG", + "outputId": "0806e375-610d-4ec0-c867-763dbb518279" + }, + "outputs": [ { - "cell_type": "markdown", - "metadata": { - "id": "-WPvRbS5Swl6" - }, - "source": [ - "## Local Logging\n", - "\n", - "Training results are automatically logged with [Tensorboard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) loggers to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc.\n", - "\n", - "This directory contains train and val statistics, mosaics, labels, predictions and augmentated mosaics, as well as metrics and charts including precision-recall (PR) curves and confusion matrices. \n", - "\n", - "\"Local\n" - ] + "name": "stderr", + "output_type": "stream", + "text": [ + "YOLOv5 🚀 v7.0-3-g61ebf5e Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n" + ] }, { - "cell_type": "markdown", - "metadata": { - "id": "Zelyeqbyt3GD" - }, - "source": [ - "# Environments\n", - "\n", - "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n", - "\n", - "- **Notebooks** with free GPU: \"Run \"Open \"Open\n", - "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/)\n", - "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/)\n", - "- **Docker Image**. See [Docker Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/docker_image_quickstart_tutorial/) \"Docker\n" - ] + "name": "stdout", + "output_type": "stream", + "text": [ + "Setup complete ✅ (2 CPUs, 12.7 GB RAM, 22.6/78.2 GB disk)\n" + ] + } + ], + "source": [ + "!git clone https://github.com/ultralytics/yolov5 # clone\n", + "%cd yolov5\n", + "%pip install -qr requirements.txt # install\n", + "\n", + "import torch\n", + "\n", + "import utils\n", + "\n", + "display = utils.notebook_init() # checks" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "4JnkELT0cIJg" + }, + "source": [ + "# 1. Predict\n", + "\n", + "`classify/predict.py` runs YOLOv5 Classification inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/predict-cls`. 
Example inference sources are:\n", + "\n", + "```shell\n", + "python classify/predict.py --source 0 # webcam\n", + " img.jpg # image \n", + " vid.mp4 # video\n", + " screen # screenshot\n", + " path/ # directory\n", + " 'path/*.jpg' # glob\n", + " 'https://youtu.be/LNwODJXcvt4' # YouTube\n", + " 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" }, + "id": "zR9ZbuQCH7FX", + "outputId": "50504ef7-aa3e-4281-a4e3-d0c7df3c0ffe" + }, + "outputs": [ { - "cell_type": "markdown", - "metadata": { - "id": "6Qu7Iesl0p54" - }, - "source": [ - "# Status\n", - "\n", - "![YOLOv5 CI](https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg)\n", - "\n", - "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.\n" - ] + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[34m\u001b[1mclassify/predict: \u001b[0mweights=['yolov5s-cls.pt'], source=data/images, data=data/coco128.yaml, imgsz=[224, 224], device=, view_img=False, save_txt=False, nosave=False, augment=False, visualize=False, update=False, project=runs/predict-cls, name=exp, exist_ok=False, half=False, dnn=False, vid_stride=1\n", + "YOLOv5 🚀 v7.0-3-g61ebf5e Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "\n", + "Downloading https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-cls.pt to yolov5s-cls.pt...\n", + "100% 10.5M/10.5M [00:00<00:00, 12.3MB/s]\n", + "\n", + "Fusing layers... \n", + "Model summary: 117 layers, 5447688 parameters, 0 gradients, 11.4 GFLOPs\n", + "image 1/2 /content/yolov5/data/images/bus.jpg: 224x224 minibus 0.39, police van 0.24, amphibious vehicle 0.05, recreational vehicle 0.04, trolleybus 0.03, 3.9ms\n", + "image 2/2 /content/yolov5/data/images/zidane.jpg: 224x224 suit 0.38, bow tie 0.19, bridegroom 0.18, rugby ball 0.04, stage 0.02, 4.6ms\n", + "Speed: 0.3ms pre-process, 4.3ms inference, 1.5ms NMS per image at shape (1, 3, 224, 224)\n", + "Results saved to \u001b[1mruns/predict-cls/exp\u001b[0m\n" + ] + } + ], + "source": [ + "!python classify/predict.py --weights yolov5s-cls.pt --img 224 --source data/images\n", + "# display.Image(filename='runs/predict-cls/exp/zidane.jpg', width=600)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "hkAzDWJ7cWTr" + }, + "source": [ + "        \n", + "" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "0eq1SMWl6Sfn" + }, + "source": [ + "# 2. Validate\n", + "Validate a model's accuracy on the [Imagenet](https://image-net.org/) dataset's `val` or `test` splits. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" }, + "id": "WQPtK1QYVaD_", + "outputId": "20fc0630-141e-4a90-ea06-342cbd7ce496" + }, + "outputs": [ { - "cell_type": "markdown", - "metadata": { - "id": "IEijrePND_2I" - }, - "source": [ - "# Appendix\n", - "\n", - "Additional content below." - ] + "name": "stdout", + "output_type": "stream", + "text": [ + "--2022-11-22 19:53:40-- https://image-net.org/data/ILSVRC/2012/ILSVRC2012_img_val.tar\n", + "Resolving image-net.org (image-net.org)... 171.64.68.16\n", + "Connecting to image-net.org (image-net.org)|171.64.68.16|:443... connected.\n", + "HTTP request sent, awaiting response... 200 OK\n", + "Length: 6744924160 (6.3G) [application/x-tar]\n", + "Saving to: ‘ILSVRC2012_img_val.tar’\n", + "\n", + "ILSVRC2012_img_val. 100%[===================>] 6.28G 16.1MB/s in 10m 52s \n", + "\n", + "2022-11-22 20:04:32 (9.87 MB/s) - ‘ILSVRC2012_img_val.tar’ saved [6744924160/6744924160]\n", + "\n" + ] + } + ], + "source": [ + "# Download Imagenet val (6.3G, 50000 images)\n", + "!bash data/scripts/get_imagenet.sh --val" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" }, + "id": "X58w8JLpMnjH", + "outputId": "41843132-98e2-4c25-d474-4cd7b246fb8e" + }, + "outputs": [ { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "GMusP4OAxFu6" - }, - "outputs": [], - "source": [ - "# YOLOv5 PyTorch HUB Inference (DetectionModels only)\n", - "import torch\n", - "\n", - "model = torch.hub.load('ultralytics/yolov5', 'yolov5s', force_reload=True, trust_repo=True) # or yolov5n - yolov5x6 or custom\n", - "im = 'https://ultralytics.com/images/zidane.jpg' # file, Path, PIL.Image, OpenCV, nparray, list\n", - "results = model(im) # inference\n", - "results.print() # or .show(), .save(), .crop(), .pandas(), etc." - ] + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[34m\u001b[1mclassify/val: \u001b[0mdata=../datasets/imagenet, weights=['yolov5s-cls.pt'], batch_size=128, imgsz=224, device=, workers=8, verbose=True, project=runs/val-cls, name=exp, exist_ok=False, half=True, dnn=False\n", + "YOLOv5 🚀 v7.0-3-g61ebf5e Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "\n", + "Fusing layers... 
\n", + "Model summary: 117 layers, 5447688 parameters, 0 gradients, 11.4 GFLOPs\n", + "validating: 100% 391/391 [04:57<00:00, 1.31it/s]\n", + " Class Images top1_acc top5_acc\n", + " all 50000 0.715 0.902\n", + " tench 50 0.94 0.98\n", + " goldfish 50 0.88 0.92\n", + " great white shark 50 0.78 0.96\n", + " tiger shark 50 0.68 0.96\n", + " hammerhead shark 50 0.82 0.92\n", + " electric ray 50 0.76 0.9\n", + " stingray 50 0.7 0.9\n", + " cock 50 0.78 0.92\n", + " hen 50 0.84 0.96\n", + " ostrich 50 0.98 1\n", + " brambling 50 0.9 0.96\n", + " goldfinch 50 0.92 0.98\n", + " house finch 50 0.88 0.96\n", + " junco 50 0.94 0.98\n", + " indigo bunting 50 0.86 0.88\n", + " American robin 50 0.9 0.96\n", + " bulbul 50 0.84 0.96\n", + " jay 50 0.9 0.96\n", + " magpie 50 0.84 0.96\n", + " chickadee 50 0.9 1\n", + " American dipper 50 0.82 0.92\n", + " kite 50 0.76 0.94\n", + " bald eagle 50 0.92 1\n", + " vulture 50 0.96 1\n", + " great grey owl 50 0.94 0.98\n", + " fire salamander 50 0.96 0.98\n", + " smooth newt 50 0.58 0.94\n", + " newt 50 0.74 0.9\n", + " spotted salamander 50 0.86 0.94\n", + " axolotl 50 0.86 0.96\n", + " American bullfrog 50 0.78 0.92\n", + " tree frog 50 0.84 0.96\n", + " tailed frog 50 0.48 0.8\n", + " loggerhead sea turtle 50 0.68 0.94\n", + " leatherback sea turtle 50 0.5 0.8\n", + " mud turtle 50 0.64 0.84\n", + " terrapin 50 0.52 0.98\n", + " box turtle 50 0.84 0.98\n", + " banded gecko 50 0.7 0.88\n", + " green iguana 50 0.76 0.94\n", + " Carolina anole 50 0.58 0.96\n", + "desert grassland whiptail lizard 50 0.82 0.94\n", + " agama 50 0.74 0.92\n", + " frilled-necked lizard 50 0.84 0.86\n", + " alligator lizard 50 0.58 0.78\n", + " Gila monster 50 0.72 0.8\n", + " European green lizard 50 0.42 0.9\n", + " chameleon 50 0.76 0.84\n", + " Komodo dragon 50 0.86 0.96\n", + " Nile crocodile 50 0.7 0.84\n", + " American alligator 50 0.76 0.96\n", + " triceratops 50 0.9 0.94\n", + " worm snake 50 0.76 0.88\n", + " ring-necked snake 50 0.8 0.92\n", + " eastern hog-nosed snake 50 0.58 0.88\n", + " smooth green snake 50 0.6 0.94\n", + " kingsnake 50 0.82 0.9\n", + " garter snake 50 0.88 0.94\n", + " water snake 50 0.7 0.94\n", + " vine snake 50 0.66 0.76\n", + " night snake 50 0.34 0.82\n", + " boa constrictor 50 0.8 0.96\n", + " African rock python 50 0.48 0.76\n", + " Indian cobra 50 0.82 0.94\n", + " green mamba 50 0.54 0.86\n", + " sea snake 50 0.62 0.9\n", + " Saharan horned viper 50 0.56 0.86\n", + "eastern diamondback rattlesnake 50 0.6 0.86\n", + " sidewinder 50 0.28 0.86\n", + " trilobite 50 0.98 0.98\n", + " harvestman 50 0.86 0.94\n", + " scorpion 50 0.86 0.94\n", + " yellow garden spider 50 0.92 0.96\n", + " barn spider 50 0.38 0.98\n", + " European garden spider 50 0.62 0.98\n", + " southern black widow 50 0.88 0.94\n", + " tarantula 50 0.94 1\n", + " wolf spider 50 0.82 0.92\n", + " tick 50 0.74 0.84\n", + " centipede 50 0.68 0.82\n", + " black grouse 50 0.88 0.98\n", + " ptarmigan 50 0.78 0.94\n", + " ruffed grouse 50 0.88 1\n", + " prairie grouse 50 0.92 1\n", + " peacock 50 0.88 0.9\n", + " quail 50 0.9 0.94\n", + " partridge 50 0.74 0.96\n", + " grey parrot 50 0.9 0.96\n", + " macaw 50 0.88 0.98\n", + "sulphur-crested cockatoo 50 0.86 0.92\n", + " lorikeet 50 0.96 1\n", + " coucal 50 0.82 0.88\n", + " bee eater 50 0.96 0.98\n", + " hornbill 50 0.9 0.96\n", + " hummingbird 50 0.88 0.96\n", + " jacamar 50 0.92 0.94\n", + " toucan 50 0.84 0.94\n", + " duck 50 0.76 0.94\n", + " red-breasted merganser 50 0.86 0.96\n", + " goose 50 0.74 0.96\n", + " black swan 50 0.94 
0.98\n", + " tusker 50 0.54 0.92\n", + " echidna 50 0.98 1\n", + " platypus 50 0.72 0.84\n", + " wallaby 50 0.78 0.88\n", + " koala 50 0.84 0.92\n", + " wombat 50 0.78 0.84\n", + " jellyfish 50 0.88 0.96\n", + " sea anemone 50 0.72 0.9\n", + " brain coral 50 0.88 0.96\n", + " flatworm 50 0.8 0.98\n", + " nematode 50 0.86 0.9\n", + " conch 50 0.74 0.88\n", + " snail 50 0.78 0.88\n", + " slug 50 0.74 0.82\n", + " sea slug 50 0.88 0.98\n", + " chiton 50 0.88 0.98\n", + " chambered nautilus 50 0.88 0.92\n", + " Dungeness crab 50 0.78 0.94\n", + " rock crab 50 0.68 0.86\n", + " fiddler crab 50 0.64 0.86\n", + " red king crab 50 0.76 0.96\n", + " American lobster 50 0.78 0.96\n", + " spiny lobster 50 0.74 0.88\n", + " crayfish 50 0.56 0.86\n", + " hermit crab 50 0.78 0.96\n", + " isopod 50 0.66 0.78\n", + " white stork 50 0.88 0.96\n", + " black stork 50 0.84 0.98\n", + " spoonbill 50 0.96 1\n", + " flamingo 50 0.94 1\n", + " little blue heron 50 0.92 0.98\n", + " great egret 50 0.9 0.96\n", + " bittern 50 0.86 0.94\n", + " crane (bird) 50 0.62 0.9\n", + " limpkin 50 0.98 1\n", + " common gallinule 50 0.92 0.96\n", + " American coot 50 0.9 0.98\n", + " bustard 50 0.92 0.96\n", + " ruddy turnstone 50 0.94 1\n", + " dunlin 50 0.86 0.94\n", + " common redshank 50 0.9 0.96\n", + " dowitcher 50 0.84 0.96\n", + " oystercatcher 50 0.86 0.94\n", + " pelican 50 0.92 0.96\n", + " king penguin 50 0.88 0.96\n", + " albatross 50 0.9 1\n", + " grey whale 50 0.84 0.92\n", + " killer whale 50 0.92 1\n", + " dugong 50 0.84 0.96\n", + " sea lion 50 0.82 0.92\n", + " Chihuahua 50 0.66 0.84\n", + " Japanese Chin 50 0.72 0.98\n", + " Maltese 50 0.76 0.94\n", + " Pekingese 50 0.84 0.94\n", + " Shih Tzu 50 0.74 0.96\n", + " King Charles Spaniel 50 0.88 0.98\n", + " Papillon 50 0.86 0.94\n", + " toy terrier 50 0.48 0.94\n", + " Rhodesian Ridgeback 50 0.76 0.98\n", + " Afghan Hound 50 0.84 1\n", + " Basset Hound 50 0.8 0.92\n", + " Beagle 50 0.82 0.96\n", + " Bloodhound 50 0.48 0.72\n", + " Bluetick Coonhound 50 0.86 0.94\n", + " Black and Tan Coonhound 50 0.54 0.8\n", + "Treeing Walker Coonhound 50 0.66 0.98\n", + " English foxhound 50 0.32 0.84\n", + " Redbone Coonhound 50 0.62 0.94\n", + " borzoi 50 0.92 1\n", + " Irish Wolfhound 50 0.48 0.88\n", + " Italian Greyhound 50 0.76 0.98\n", + " Whippet 50 0.74 0.92\n", + " Ibizan Hound 50 0.6 0.86\n", + " Norwegian Elkhound 50 0.88 0.98\n", + " Otterhound 50 0.62 0.9\n", + " Saluki 50 0.72 0.92\n", + " Scottish Deerhound 50 0.86 0.98\n", + " Weimaraner 50 0.88 0.94\n", + "Staffordshire Bull Terrier 50 0.66 0.98\n", + "American Staffordshire Terrier 50 0.64 0.92\n", + " Bedlington Terrier 50 0.9 0.92\n", + " Border Terrier 50 0.86 0.92\n", + " Kerry Blue Terrier 50 0.78 0.98\n", + " Irish Terrier 50 0.7 0.96\n", + " Norfolk Terrier 50 0.68 0.9\n", + " Norwich Terrier 50 0.72 1\n", + " Yorkshire Terrier 50 0.66 0.9\n", + " Wire Fox Terrier 50 0.64 0.98\n", + " Lakeland Terrier 50 0.74 0.92\n", + " Sealyham Terrier 50 0.76 0.9\n", + " Airedale Terrier 50 0.82 0.92\n", + " Cairn Terrier 50 0.76 0.9\n", + " Australian Terrier 50 0.48 0.84\n", + " Dandie Dinmont Terrier 50 0.82 0.92\n", + " Boston Terrier 50 0.92 1\n", + " Miniature Schnauzer 50 0.68 0.9\n", + " Giant Schnauzer 50 0.72 0.98\n", + " Standard Schnauzer 50 0.74 1\n", + " Scottish Terrier 50 0.76 0.96\n", + " Tibetan Terrier 50 0.48 1\n", + "Australian Silky Terrier 50 0.66 0.96\n", + "Soft-coated Wheaten Terrier 50 0.74 0.96\n", + "West Highland White Terrier 50 0.88 0.96\n", + " Lhasa Apso 50 0.68 0.96\n", + " 
Flat-Coated Retriever 50 0.72 0.94\n", + " Curly-coated Retriever 50 0.82 0.94\n", + " Golden Retriever 50 0.86 0.94\n", + " Labrador Retriever 50 0.82 0.94\n", + "Chesapeake Bay Retriever 50 0.76 0.96\n", + "German Shorthaired Pointer 50 0.8 0.96\n", + " Vizsla 50 0.68 0.96\n", + " English Setter 50 0.7 1\n", + " Irish Setter 50 0.8 0.9\n", + " Gordon Setter 50 0.84 0.92\n", + " Brittany 50 0.84 0.96\n", + " Clumber Spaniel 50 0.92 0.96\n", + "English Springer Spaniel 50 0.88 1\n", + " Welsh Springer Spaniel 50 0.92 1\n", + " Cocker Spaniels 50 0.7 0.94\n", + " Sussex Spaniel 50 0.72 0.92\n", + " Irish Water Spaniel 50 0.88 0.98\n", + " Kuvasz 50 0.66 0.9\n", + " Schipperke 50 0.9 0.98\n", + " Groenendael 50 0.8 0.94\n", + " Malinois 50 0.86 0.98\n", + " Briard 50 0.52 0.8\n", + " Australian Kelpie 50 0.6 0.88\n", + " Komondor 50 0.88 0.94\n", + " Old English Sheepdog 50 0.94 0.98\n", + " Shetland Sheepdog 50 0.74 0.9\n", + " collie 50 0.6 0.96\n", + " Border Collie 50 0.74 0.96\n", + " Bouvier des Flandres 50 0.78 0.94\n", + " Rottweiler 50 0.88 0.96\n", + " German Shepherd Dog 50 0.8 0.98\n", + " Dobermann 50 0.68 0.96\n", + " Miniature Pinscher 50 0.76 0.88\n", + "Greater Swiss Mountain Dog 50 0.68 0.94\n", + " Bernese Mountain Dog 50 0.96 1\n", + " Appenzeller Sennenhund 50 0.22 1\n", + " Entlebucher Sennenhund 50 0.64 0.98\n", + " Boxer 50 0.7 0.92\n", + " Bullmastiff 50 0.78 0.98\n", + " Tibetan Mastiff 50 0.88 0.96\n", + " French Bulldog 50 0.84 0.94\n", + " Great Dane 50 0.54 0.9\n", + " St. Bernard 50 0.92 1\n", + " husky 50 0.46 0.98\n", + " Alaskan Malamute 50 0.76 0.96\n", + " Siberian Husky 50 0.46 0.98\n", + " Dalmatian 50 0.94 0.98\n", + " Affenpinscher 50 0.78 0.9\n", + " Basenji 50 0.92 0.94\n", + " pug 50 0.94 0.98\n", + " Leonberger 50 1 1\n", + " Newfoundland 50 0.78 0.96\n", + " Pyrenean Mountain Dog 50 0.78 0.96\n", + " Samoyed 50 0.96 1\n", + " Pomeranian 50 0.98 1\n", + " Chow Chow 50 0.9 0.96\n", + " Keeshond 50 0.88 0.94\n", + " Griffon Bruxellois 50 0.84 0.98\n", + " Pembroke Welsh Corgi 50 0.82 0.94\n", + " Cardigan Welsh Corgi 50 0.66 0.98\n", + " Toy Poodle 50 0.52 0.88\n", + " Miniature Poodle 50 0.52 0.92\n", + " Standard Poodle 50 0.8 1\n", + " Mexican hairless dog 50 0.88 0.98\n", + " grey wolf 50 0.82 0.92\n", + " Alaskan tundra wolf 50 0.78 0.98\n", + " red wolf 50 0.48 0.9\n", + " coyote 50 0.64 0.86\n", + " dingo 50 0.76 0.88\n", + " dhole 50 0.9 0.98\n", + " African wild dog 50 0.98 1\n", + " hyena 50 0.88 0.96\n", + " red fox 50 0.54 0.92\n", + " kit fox 50 0.72 0.98\n", + " Arctic fox 50 0.94 1\n", + " grey fox 50 0.7 0.94\n", + " tabby cat 50 0.54 0.92\n", + " tiger cat 50 0.22 0.94\n", + " Persian cat 50 0.9 0.98\n", + " Siamese cat 50 0.96 1\n", + " Egyptian Mau 50 0.54 0.8\n", + " cougar 50 0.9 1\n", + " lynx 50 0.72 0.88\n", + " leopard 50 0.78 0.98\n", + " snow leopard 50 0.9 0.98\n", + " jaguar 50 0.7 0.94\n", + " lion 50 0.9 0.98\n", + " tiger 50 0.92 0.98\n", + " cheetah 50 0.94 0.98\n", + " brown bear 50 0.94 0.98\n", + " American black bear 50 0.8 1\n", + " polar bear 50 0.84 0.96\n", + " sloth bear 50 0.72 0.92\n", + " mongoose 50 0.7 0.92\n", + " meerkat 50 0.82 0.92\n", + " tiger beetle 50 0.92 0.94\n", + " ladybug 50 0.86 0.94\n", + " ground beetle 50 0.64 0.94\n", + " longhorn beetle 50 0.62 0.88\n", + " leaf beetle 50 0.64 0.98\n", + " dung beetle 50 0.86 0.98\n", + " rhinoceros beetle 50 0.86 0.94\n", + " weevil 50 0.9 1\n", + " fly 50 0.78 0.94\n", + " bee 50 0.68 0.94\n", + " ant 50 0.68 0.78\n", + " grasshopper 50 0.5 0.92\n", 
+ " cricket 50 0.64 0.92\n", + " stick insect 50 0.64 0.92\n", + " cockroach 50 0.72 0.8\n", + " mantis 50 0.64 0.86\n", + " cicada 50 0.9 0.96\n", + " leafhopper 50 0.88 0.94\n", + " lacewing 50 0.78 0.92\n", + " dragonfly 50 0.82 0.98\n", + " damselfly 50 0.82 1\n", + " red admiral 50 0.94 0.96\n", + " ringlet 50 0.86 0.98\n", + " monarch butterfly 50 0.9 0.92\n", + " small white 50 0.9 1\n", + " sulphur butterfly 50 0.92 1\n", + "gossamer-winged butterfly 50 0.88 1\n", + " starfish 50 0.88 0.92\n", + " sea urchin 50 0.84 0.94\n", + " sea cucumber 50 0.66 0.84\n", + " cottontail rabbit 50 0.72 0.94\n", + " hare 50 0.84 0.96\n", + " Angora rabbit 50 0.94 0.98\n", + " hamster 50 0.96 1\n", + " porcupine 50 0.88 0.98\n", + " fox squirrel 50 0.76 0.94\n", + " marmot 50 0.92 0.96\n", + " beaver 50 0.78 0.94\n", + " guinea pig 50 0.78 0.94\n", + " common sorrel 50 0.96 0.98\n", + " zebra 50 0.94 0.96\n", + " pig 50 0.5 0.76\n", + " wild boar 50 0.84 0.96\n", + " warthog 50 0.84 0.96\n", + " hippopotamus 50 0.88 0.96\n", + " ox 50 0.48 0.94\n", + " water buffalo 50 0.78 0.94\n", + " bison 50 0.88 0.96\n", + " ram 50 0.58 0.92\n", + " bighorn sheep 50 0.66 1\n", + " Alpine ibex 50 0.92 0.98\n", + " hartebeest 50 0.94 1\n", + " impala 50 0.82 0.96\n", + " gazelle 50 0.7 0.96\n", + " dromedary 50 0.9 1\n", + " llama 50 0.82 0.94\n", + " weasel 50 0.44 0.92\n", + " mink 50 0.78 0.96\n", + " European polecat 50 0.46 0.9\n", + " black-footed ferret 50 0.68 0.96\n", + " otter 50 0.66 0.88\n", + " skunk 50 0.96 0.96\n", + " badger 50 0.86 0.92\n", + " armadillo 50 0.88 0.9\n", + " three-toed sloth 50 0.96 1\n", + " orangutan 50 0.78 0.92\n", + " gorilla 50 0.82 0.94\n", + " chimpanzee 50 0.84 0.94\n", + " gibbon 50 0.76 0.86\n", + " siamang 50 0.68 0.94\n", + " guenon 50 0.8 0.94\n", + " patas monkey 50 0.62 0.82\n", + " baboon 50 0.9 0.98\n", + " macaque 50 0.8 0.86\n", + " langur 50 0.6 0.82\n", + " black-and-white colobus 50 0.86 0.9\n", + " proboscis monkey 50 1 1\n", + " marmoset 50 0.74 0.98\n", + " white-headed capuchin 50 0.72 0.9\n", + " howler monkey 50 0.86 0.94\n", + " titi 50 0.5 0.9\n", + "Geoffroy's spider monkey 50 0.42 0.8\n", + " common squirrel monkey 50 0.76 0.92\n", + " ring-tailed lemur 50 0.72 0.94\n", + " indri 50 0.9 0.96\n", + " Asian elephant 50 0.58 0.92\n", + " African bush elephant 50 0.7 0.98\n", + " red panda 50 0.94 0.94\n", + " giant panda 50 0.94 0.98\n", + " snoek 50 0.74 0.9\n", + " eel 50 0.6 0.84\n", + " coho salmon 50 0.84 0.96\n", + " rock beauty 50 0.88 0.98\n", + " clownfish 50 0.78 0.98\n", + " sturgeon 50 0.68 0.94\n", + " garfish 50 0.62 0.8\n", + " lionfish 50 0.96 0.96\n", + " pufferfish 50 0.88 0.96\n", + " abacus 50 0.74 0.88\n", + " abaya 50 0.84 0.92\n", + " academic gown 50 0.42 0.86\n", + " accordion 50 0.8 0.9\n", + " acoustic guitar 50 0.5 0.76\n", + " aircraft carrier 50 0.8 0.96\n", + " airliner 50 0.92 1\n", + " airship 50 0.76 0.82\n", + " altar 50 0.64 0.98\n", + " ambulance 50 0.88 0.98\n", + " amphibious vehicle 50 0.64 0.94\n", + " analog clock 50 0.52 0.92\n", + " apiary 50 0.82 0.96\n", + " apron 50 0.7 0.84\n", + " waste container 50 0.4 0.8\n", + " assault rifle 50 0.42 0.84\n", + " backpack 50 0.34 0.64\n", + " bakery 50 0.4 0.68\n", + " balance beam 50 0.8 0.98\n", + " balloon 50 0.86 0.96\n", + " ballpoint pen 50 0.52 0.96\n", + " Band-Aid 50 0.7 0.9\n", + " banjo 50 0.84 1\n", + " baluster 50 0.68 0.94\n", + " barbell 50 0.56 0.9\n", + " barber chair 50 0.7 0.92\n", + " barbershop 50 0.54 0.86\n", + " barn 50 0.96 0.96\n", + " 
barometer 50 0.84 0.98\n", + " barrel 50 0.56 0.88\n", + " wheelbarrow 50 0.66 0.88\n", + " baseball 50 0.74 0.98\n", + " basketball 50 0.88 0.98\n", + " bassinet 50 0.66 0.92\n", + " bassoon 50 0.74 0.98\n", + " swimming cap 50 0.62 0.88\n", + " bath towel 50 0.54 0.78\n", + " bathtub 50 0.4 0.88\n", + " station wagon 50 0.66 0.84\n", + " lighthouse 50 0.78 0.94\n", + " beaker 50 0.52 0.68\n", + " military cap 50 0.84 0.96\n", + " beer bottle 50 0.66 0.88\n", + " beer glass 50 0.6 0.84\n", + " bell-cot 50 0.56 0.96\n", + " bib 50 0.58 0.82\n", + " tandem bicycle 50 0.86 0.96\n", + " bikini 50 0.56 0.88\n", + " ring binder 50 0.64 0.84\n", + " binoculars 50 0.54 0.78\n", + " birdhouse 50 0.86 0.94\n", + " boathouse 50 0.74 0.92\n", + " bobsleigh 50 0.92 0.96\n", + " bolo tie 50 0.8 0.94\n", + " poke bonnet 50 0.64 0.86\n", + " bookcase 50 0.66 0.92\n", + " bookstore 50 0.62 0.88\n", + " bottle cap 50 0.58 0.7\n", + " bow 50 0.72 0.86\n", + " bow tie 50 0.7 0.9\n", + " brass 50 0.92 0.96\n", + " bra 50 0.5 0.7\n", + " breakwater 50 0.62 0.86\n", + " breastplate 50 0.4 0.9\n", + " broom 50 0.6 0.86\n", + " bucket 50 0.66 0.8\n", + " buckle 50 0.5 0.68\n", + " bulletproof vest 50 0.5 0.78\n", + " high-speed train 50 0.94 0.96\n", + " butcher shop 50 0.74 0.94\n", + " taxicab 50 0.64 0.86\n", + " cauldron 50 0.44 0.66\n", + " candle 50 0.48 0.74\n", + " cannon 50 0.88 0.94\n", + " canoe 50 0.94 1\n", + " can opener 50 0.66 0.86\n", + " cardigan 50 0.68 0.8\n", + " car mirror 50 0.94 0.96\n", + " carousel 50 0.94 0.98\n", + " tool kit 50 0.56 0.78\n", + " carton 50 0.42 0.7\n", + " car wheel 50 0.38 0.74\n", + "automated teller machine 50 0.76 0.94\n", + " cassette 50 0.52 0.8\n", + " cassette player 50 0.28 0.9\n", + " castle 50 0.78 0.88\n", + " catamaran 50 0.78 1\n", + " CD player 50 0.52 0.82\n", + " cello 50 0.82 1\n", + " mobile phone 50 0.68 0.86\n", + " chain 50 0.38 0.66\n", + " chain-link fence 50 0.7 0.84\n", + " chain mail 50 0.64 0.9\n", + " chainsaw 50 0.84 0.92\n", + " chest 50 0.68 0.92\n", + " chiffonier 50 0.26 0.64\n", + " chime 50 0.62 0.84\n", + " china cabinet 50 0.82 0.96\n", + " Christmas stocking 50 0.92 0.94\n", + " church 50 0.62 0.9\n", + " movie theater 50 0.58 0.88\n", + " cleaver 50 0.32 0.62\n", + " cliff dwelling 50 0.88 1\n", + " cloak 50 0.32 0.64\n", + " clogs 50 0.58 0.88\n", + " cocktail shaker 50 0.62 0.7\n", + " coffee mug 50 0.44 0.72\n", + " coffeemaker 50 0.64 0.92\n", + " coil 50 0.66 0.84\n", + " combination lock 50 0.64 0.84\n", + " computer keyboard 50 0.7 0.82\n", + " confectionery store 50 0.54 0.86\n", + " container ship 50 0.82 0.98\n", + " convertible 50 0.78 0.98\n", + " corkscrew 50 0.82 0.92\n", + " cornet 50 0.46 0.88\n", + " cowboy boot 50 0.64 0.8\n", + " cowboy hat 50 0.64 0.82\n", + " cradle 50 0.38 0.8\n", + " crane (machine) 50 0.78 0.94\n", + " crash helmet 50 0.92 0.96\n", + " crate 50 0.52 0.82\n", + " infant bed 50 0.74 1\n", + " Crock Pot 50 0.78 0.9\n", + " croquet ball 50 0.9 0.96\n", + " crutch 50 0.46 0.7\n", + " cuirass 50 0.54 0.86\n", + " dam 50 0.74 0.92\n", + " desk 50 0.6 0.86\n", + " desktop computer 50 0.54 0.94\n", + " rotary dial telephone 50 0.88 0.94\n", + " diaper 50 0.68 0.84\n", + " digital clock 50 0.54 0.76\n", + " digital watch 50 0.58 0.86\n", + " dining table 50 0.76 0.9\n", + " dishcloth 50 0.94 1\n", + " dishwasher 50 0.44 0.78\n", + " disc brake 50 0.98 1\n", + " dock 50 0.54 0.94\n", + " dog sled 50 0.84 1\n", + " dome 50 0.72 0.92\n", + " doormat 50 0.56 0.82\n", + " drilling rig 50 0.84 0.96\n", + 
" drum 50 0.38 0.68\n", + " drumstick 50 0.56 0.72\n", + " dumbbell 50 0.62 0.9\n", + " Dutch oven 50 0.7 0.84\n", + " electric fan 50 0.82 0.86\n", + " electric guitar 50 0.62 0.84\n", + " electric locomotive 50 0.92 0.98\n", + " entertainment center 50 0.9 0.98\n", + " envelope 50 0.44 0.86\n", + " espresso machine 50 0.72 0.94\n", + " face powder 50 0.7 0.92\n", + " feather boa 50 0.7 0.84\n", + " filing cabinet 50 0.88 0.98\n", + " fireboat 50 0.94 0.98\n", + " fire engine 50 0.84 0.9\n", + " fire screen sheet 50 0.62 0.76\n", + " flagpole 50 0.74 0.88\n", + " flute 50 0.36 0.72\n", + " folding chair 50 0.62 0.84\n", + " football helmet 50 0.86 0.94\n", + " forklift 50 0.8 0.92\n", + " fountain 50 0.84 0.94\n", + " fountain pen 50 0.76 0.92\n", + " four-poster bed 50 0.78 0.94\n", + " freight car 50 0.96 1\n", + " French horn 50 0.76 0.92\n", + " frying pan 50 0.36 0.78\n", + " fur coat 50 0.84 0.96\n", + " garbage truck 50 0.9 0.98\n", + " gas mask 50 0.84 0.92\n", + " gas pump 50 0.9 0.98\n", + " goblet 50 0.68 0.82\n", + " go-kart 50 0.9 1\n", + " golf ball 50 0.84 0.9\n", + " golf cart 50 0.78 0.86\n", + " gondola 50 0.98 0.98\n", + " gong 50 0.74 0.92\n", + " gown 50 0.62 0.96\n", + " grand piano 50 0.7 0.96\n", + " greenhouse 50 0.8 0.98\n", + " grille 50 0.72 0.9\n", + " grocery store 50 0.66 0.94\n", + " guillotine 50 0.86 0.92\n", + " barrette 50 0.52 0.66\n", + " hair spray 50 0.5 0.74\n", + " half-track 50 0.78 0.9\n", + " hammer 50 0.56 0.76\n", + " hamper 50 0.64 0.84\n", + " hair dryer 50 0.56 0.74\n", + " hand-held computer 50 0.42 0.86\n", + " handkerchief 50 0.78 0.94\n", + " hard disk drive 50 0.76 0.84\n", + " harmonica 50 0.7 0.88\n", + " harp 50 0.88 0.96\n", + " harvester 50 0.78 1\n", + " hatchet 50 0.54 0.74\n", + " holster 50 0.66 0.84\n", + " home theater 50 0.64 0.94\n", + " honeycomb 50 0.56 0.88\n", + " hook 50 0.3 0.6\n", + " hoop skirt 50 0.64 0.86\n", + " horizontal bar 50 0.68 0.98\n", + " horse-drawn vehicle 50 0.88 0.94\n", + " hourglass 50 0.88 0.96\n", + " iPod 50 0.76 0.94\n", + " clothes iron 50 0.82 0.88\n", + " jack-o'-lantern 50 0.98 0.98\n", + " jeans 50 0.68 0.84\n", + " jeep 50 0.72 0.9\n", + " T-shirt 50 0.72 0.96\n", + " jigsaw puzzle 50 0.84 0.94\n", + " pulled rickshaw 50 0.86 0.94\n", + " joystick 50 0.8 0.9\n", + " kimono 50 0.84 0.96\n", + " knee pad 50 0.62 0.88\n", + " knot 50 0.66 0.8\n", + " lab coat 50 0.8 0.96\n", + " ladle 50 0.36 0.64\n", + " lampshade 50 0.48 0.84\n", + " laptop computer 50 0.26 0.88\n", + " lawn mower 50 0.78 0.96\n", + " lens cap 50 0.46 0.72\n", + " paper knife 50 0.26 0.5\n", + " library 50 0.54 0.9\n", + " lifeboat 50 0.92 0.98\n", + " lighter 50 0.56 0.78\n", + " limousine 50 0.76 0.92\n", + " ocean liner 50 0.88 0.94\n", + " lipstick 50 0.74 0.9\n", + " slip-on shoe 50 0.74 0.92\n", + " lotion 50 0.5 0.86\n", + " speaker 50 0.52 0.68\n", + " loupe 50 0.32 0.52\n", + " sawmill 50 0.72 0.9\n", + " magnetic compass 50 0.52 0.82\n", + " mail bag 50 0.68 0.92\n", + " mailbox 50 0.82 0.92\n", + " tights 50 0.22 0.94\n", + " tank suit 50 0.24 0.9\n", + " manhole cover 50 0.96 0.98\n", + " maraca 50 0.74 0.9\n", + " marimba 50 0.84 0.94\n", + " mask 50 0.44 0.82\n", + " match 50 0.66 0.9\n", + " maypole 50 0.96 1\n", + " maze 50 0.8 0.96\n", + " measuring cup 50 0.54 0.76\n", + " medicine chest 50 0.6 0.84\n", + " megalith 50 0.8 0.92\n", + " microphone 50 0.52 0.7\n", + " microwave oven 50 0.48 0.72\n", + " military uniform 50 0.62 0.84\n", + " milk can 50 0.68 0.82\n", + " minibus 50 0.7 1\n", + " miniskirt 
50 0.46 0.76\n", + " minivan 50 0.38 0.8\n", + " missile 50 0.4 0.84\n", + " mitten 50 0.76 0.88\n", + " mixing bowl 50 0.8 0.92\n", + " mobile home 50 0.54 0.78\n", + " Model T 50 0.92 0.96\n", + " modem 50 0.58 0.86\n", + " monastery 50 0.44 0.9\n", + " monitor 50 0.4 0.86\n", + " moped 50 0.56 0.94\n", + " mortar 50 0.68 0.94\n", + " square academic cap 50 0.5 0.84\n", + " mosque 50 0.9 1\n", + " mosquito net 50 0.9 0.98\n", + " scooter 50 0.9 0.98\n", + " mountain bike 50 0.78 0.96\n", + " tent 50 0.88 0.96\n", + " computer mouse 50 0.42 0.82\n", + " mousetrap 50 0.76 0.88\n", + " moving van 50 0.4 0.72\n", + " muzzle 50 0.5 0.72\n", + " nail 50 0.68 0.74\n", + " neck brace 50 0.56 0.68\n", + " necklace 50 0.86 1\n", + " nipple 50 0.7 0.88\n", + " notebook computer 50 0.34 0.84\n", + " obelisk 50 0.8 0.92\n", + " oboe 50 0.6 0.84\n", + " ocarina 50 0.8 0.86\n", + " odometer 50 0.96 1\n", + " oil filter 50 0.58 0.82\n", + " organ 50 0.82 0.9\n", + " oscilloscope 50 0.9 0.96\n", + " overskirt 50 0.2 0.7\n", + " bullock cart 50 0.7 0.94\n", + " oxygen mask 50 0.46 0.84\n", + " packet 50 0.5 0.78\n", + " paddle 50 0.56 0.94\n", + " paddle wheel 50 0.86 0.96\n", + " padlock 50 0.74 0.78\n", + " paintbrush 50 0.62 0.8\n", + " pajamas 50 0.56 0.92\n", + " palace 50 0.64 0.96\n", + " pan flute 50 0.84 0.86\n", + " paper towel 50 0.66 0.84\n", + " parachute 50 0.92 0.94\n", + " parallel bars 50 0.62 0.96\n", + " park bench 50 0.74 0.9\n", + " parking meter 50 0.84 0.92\n", + " passenger car 50 0.5 0.82\n", + " patio 50 0.58 0.84\n", + " payphone 50 0.74 0.92\n", + " pedestal 50 0.52 0.9\n", + " pencil case 50 0.64 0.92\n", + " pencil sharpener 50 0.52 0.78\n", + " perfume 50 0.7 0.9\n", + " Petri dish 50 0.6 0.8\n", + " photocopier 50 0.88 0.98\n", + " plectrum 50 0.7 0.84\n", + " Pickelhaube 50 0.72 0.86\n", + " picket fence 50 0.84 0.94\n", + " pickup truck 50 0.64 0.92\n", + " pier 50 0.52 0.82\n", + " piggy bank 50 0.82 0.94\n", + " pill bottle 50 0.76 0.86\n", + " pillow 50 0.76 0.9\n", + " ping-pong ball 50 0.84 0.88\n", + " pinwheel 50 0.76 0.88\n", + " pirate ship 50 0.76 0.94\n", + " pitcher 50 0.46 0.84\n", + " hand plane 50 0.84 0.94\n", + " planetarium 50 0.88 0.98\n", + " plastic bag 50 0.36 0.62\n", + " plate rack 50 0.52 0.78\n", + " plow 50 0.78 0.88\n", + " plunger 50 0.42 0.7\n", + " Polaroid camera 50 0.84 0.92\n", + " pole 50 0.38 0.74\n", + " police van 50 0.76 0.94\n", + " poncho 50 0.58 0.86\n", + " billiard table 50 0.8 0.88\n", + " soda bottle 50 0.56 0.94\n", + " pot 50 0.78 0.92\n", + " potter's wheel 50 0.9 0.94\n", + " power drill 50 0.42 0.72\n", + " prayer rug 50 0.7 0.86\n", + " printer 50 0.54 0.86\n", + " prison 50 0.7 0.9\n", + " projectile 50 0.28 0.9\n", + " projector 50 0.62 0.84\n", + " hockey puck 50 0.92 0.96\n", + " punching bag 50 0.6 0.68\n", + " purse 50 0.42 0.78\n", + " quill 50 0.68 0.84\n", + " quilt 50 0.64 0.9\n", + " race car 50 0.72 0.92\n", + " racket 50 0.72 0.9\n", + " radiator 50 0.66 0.76\n", + " radio 50 0.64 0.92\n", + " radio telescope 50 0.9 0.96\n", + " rain barrel 50 0.8 0.98\n", + " recreational vehicle 50 0.84 0.94\n", + " reel 50 0.72 0.82\n", + " reflex camera 50 0.72 0.92\n", + " refrigerator 50 0.7 0.9\n", + " remote control 50 0.7 0.88\n", + " restaurant 50 0.5 0.66\n", + " revolver 50 0.82 1\n", + " rifle 50 0.38 0.7\n", + " rocking chair 50 0.62 0.84\n", + " rotisserie 50 0.88 0.92\n", + " eraser 50 0.54 0.76\n", + " rugby ball 50 0.86 0.94\n", + " ruler 50 0.68 0.86\n", + " running shoe 50 0.78 0.94\n", + " safe 50 0.82 
0.92\n", + " safety pin 50 0.4 0.62\n", + " salt shaker 50 0.66 0.9\n", + " sandal 50 0.66 0.86\n", + " sarong 50 0.64 0.86\n", + " saxophone 50 0.66 0.88\n", + " scabbard 50 0.76 0.92\n", + " weighing scale 50 0.58 0.78\n", + " school bus 50 0.92 1\n", + " schooner 50 0.84 1\n", + " scoreboard 50 0.9 0.96\n", + " CRT screen 50 0.14 0.7\n", + " screw 50 0.9 0.98\n", + " screwdriver 50 0.3 0.58\n", + " seat belt 50 0.88 0.94\n", + " sewing machine 50 0.76 0.9\n", + " shield 50 0.56 0.82\n", + " shoe store 50 0.78 0.96\n", + " shoji 50 0.8 0.92\n", + " shopping basket 50 0.52 0.88\n", + " shopping cart 50 0.76 0.92\n", + " shovel 50 0.62 0.84\n", + " shower cap 50 0.7 0.84\n", + " shower curtain 50 0.64 0.82\n", + " ski 50 0.74 0.92\n", + " ski mask 50 0.72 0.88\n", + " sleeping bag 50 0.68 0.8\n", + " slide rule 50 0.72 0.88\n", + " sliding door 50 0.44 0.78\n", + " slot machine 50 0.94 0.98\n", + " snorkel 50 0.86 0.98\n", + " snowmobile 50 0.88 1\n", + " snowplow 50 0.84 0.98\n", + " soap dispenser 50 0.56 0.86\n", + " soccer ball 50 0.86 0.96\n", + " sock 50 0.62 0.76\n", + " solar thermal collector 50 0.72 0.96\n", + " sombrero 50 0.6 0.84\n", + " soup bowl 50 0.56 0.94\n", + " space bar 50 0.34 0.88\n", + " space heater 50 0.52 0.74\n", + " space shuttle 50 0.82 0.96\n", + " spatula 50 0.3 0.6\n", + " motorboat 50 0.86 1\n", + " spider web 50 0.7 0.9\n", + " spindle 50 0.86 0.98\n", + " sports car 50 0.6 0.94\n", + " spotlight 50 0.26 0.6\n", + " stage 50 0.68 0.86\n", + " steam locomotive 50 0.94 1\n", + " through arch bridge 50 0.84 0.96\n", + " steel drum 50 0.82 0.9\n", + " stethoscope 50 0.6 0.82\n", + " scarf 50 0.5 0.92\n", + " stone wall 50 0.76 0.9\n", + " stopwatch 50 0.58 0.9\n", + " stove 50 0.46 0.74\n", + " strainer 50 0.64 0.84\n", + " tram 50 0.88 0.96\n", + " stretcher 50 0.6 0.8\n", + " couch 50 0.8 0.96\n", + " stupa 50 0.88 0.88\n", + " submarine 50 0.72 0.92\n", + " suit 50 0.4 0.78\n", + " sundial 50 0.58 0.74\n", + " sunglass 50 0.14 0.58\n", + " sunglasses 50 0.28 0.58\n", + " sunscreen 50 0.32 0.7\n", + " suspension bridge 50 0.6 0.94\n", + " mop 50 0.74 0.92\n", + " sweatshirt 50 0.28 0.66\n", + " swimsuit 50 0.52 0.82\n", + " swing 50 0.76 0.84\n", + " switch 50 0.56 0.76\n", + " syringe 50 0.62 0.82\n", + " table lamp 50 0.6 0.88\n", + " tank 50 0.8 0.96\n", + " tape player 50 0.46 0.76\n", + " teapot 50 0.84 1\n", + " teddy bear 50 0.82 0.94\n", + " television 50 0.6 0.9\n", + " tennis ball 50 0.7 0.94\n", + " thatched roof 50 0.88 0.9\n", + " front curtain 50 0.8 0.92\n", + " thimble 50 0.6 0.8\n", + " threshing machine 50 0.56 0.88\n", + " throne 50 0.72 0.82\n", + " tile roof 50 0.72 0.94\n", + " toaster 50 0.66 0.84\n", + " tobacco shop 50 0.42 0.7\n", + " toilet seat 50 0.62 0.88\n", + " torch 50 0.64 0.84\n", + " totem pole 50 0.92 0.98\n", + " tow truck 50 0.62 0.88\n", + " toy store 50 0.6 0.94\n", + " tractor 50 0.76 0.98\n", + " semi-trailer truck 50 0.78 0.92\n", + " tray 50 0.46 0.64\n", + " trench coat 50 0.54 0.72\n", + " tricycle 50 0.72 0.94\n", + " trimaran 50 0.7 0.98\n", + " tripod 50 0.58 0.86\n", + " triumphal arch 50 0.92 0.98\n", + " trolleybus 50 0.9 1\n", + " trombone 50 0.54 0.88\n", + " tub 50 0.24 0.82\n", + " turnstile 50 0.84 0.94\n", + " typewriter keyboard 50 0.68 0.98\n", + " umbrella 50 0.52 0.7\n", + " unicycle 50 0.74 0.96\n", + " upright piano 50 0.76 0.9\n", + " vacuum cleaner 50 0.62 0.9\n", + " vase 50 0.5 0.78\n", + " vault 50 0.76 0.92\n", + " velvet 50 0.2 0.42\n", + " vending machine 50 0.9 1\n", + " vestment 50 
0.54 0.82\n", + " viaduct 50 0.78 0.86\n", + " violin 50 0.68 0.78\n", + " volleyball 50 0.86 1\n", + " waffle iron 50 0.72 0.88\n", + " wall clock 50 0.54 0.88\n", + " wallet 50 0.52 0.9\n", + " wardrobe 50 0.68 0.88\n", + " military aircraft 50 0.9 0.98\n", + " sink 50 0.72 0.96\n", + " washing machine 50 0.78 0.94\n", + " water bottle 50 0.54 0.74\n", + " water jug 50 0.22 0.74\n", + " water tower 50 0.9 0.96\n", + " whiskey jug 50 0.64 0.74\n", + " whistle 50 0.72 0.84\n", + " wig 50 0.84 0.9\n", + " window screen 50 0.68 0.8\n", + " window shade 50 0.52 0.76\n", + " Windsor tie 50 0.22 0.66\n", + " wine bottle 50 0.42 0.82\n", + " wing 50 0.54 0.96\n", + " wok 50 0.46 0.82\n", + " wooden spoon 50 0.58 0.8\n", + " wool 50 0.32 0.82\n", + " split-rail fence 50 0.74 0.9\n", + " shipwreck 50 0.84 0.96\n", + " yawl 50 0.78 0.96\n", + " yurt 50 0.84 1\n", + " website 50 0.98 1\n", + " comic book 50 0.62 0.9\n", + " crossword 50 0.84 0.88\n", + " traffic sign 50 0.78 0.9\n", + " traffic light 50 0.8 0.94\n", + " dust jacket 50 0.72 0.94\n", + " menu 50 0.82 0.96\n", + " plate 50 0.44 0.88\n", + " guacamole 50 0.8 0.92\n", + " consomme 50 0.54 0.88\n", + " hot pot 50 0.86 0.98\n", + " trifle 50 0.92 0.98\n", + " ice cream 50 0.68 0.94\n", + " ice pop 50 0.62 0.84\n", + " baguette 50 0.62 0.88\n", + " bagel 50 0.64 0.92\n", + " pretzel 50 0.72 0.88\n", + " cheeseburger 50 0.9 1\n", + " hot dog 50 0.74 0.94\n", + " mashed potato 50 0.74 0.9\n", + " cabbage 50 0.84 0.96\n", + " broccoli 50 0.9 0.96\n", + " cauliflower 50 0.82 1\n", + " zucchini 50 0.74 0.9\n", + " spaghetti squash 50 0.8 0.96\n", + " acorn squash 50 0.82 0.96\n", + " butternut squash 50 0.7 0.94\n", + " cucumber 50 0.6 0.96\n", + " artichoke 50 0.84 0.94\n", + " bell pepper 50 0.84 0.98\n", + " cardoon 50 0.88 0.94\n", + " mushroom 50 0.38 0.92\n", + " Granny Smith 50 0.9 0.96\n", + " strawberry 50 0.6 0.88\n", + " orange 50 0.7 0.92\n", + " lemon 50 0.78 0.98\n", + " fig 50 0.82 0.96\n", + " pineapple 50 0.86 0.96\n", + " banana 50 0.84 0.96\n", + " jackfruit 50 0.9 0.98\n", + " custard apple 50 0.86 0.96\n", + " pomegranate 50 0.82 0.98\n", + " hay 50 0.8 0.92\n", + " carbonara 50 0.88 0.94\n", + " chocolate syrup 50 0.46 0.84\n", + " dough 50 0.4 0.6\n", + " meatloaf 50 0.58 0.84\n", + " pizza 50 0.84 0.96\n", + " pot pie 50 0.68 0.9\n", + " burrito 50 0.8 0.98\n", + " red wine 50 0.54 0.82\n", + " espresso 50 0.64 0.88\n", + " cup 50 0.38 0.7\n", + " eggnog 50 0.38 0.7\n", + " alp 50 0.54 0.88\n", + " bubble 50 0.8 0.96\n", + " cliff 50 0.64 1\n", + " coral reef 50 0.72 0.96\n", + " geyser 50 0.94 1\n", + " lakeshore 50 0.54 0.88\n", + " promontory 50 0.58 0.94\n", + " shoal 50 0.6 0.96\n", + " seashore 50 0.44 0.78\n", + " valley 50 0.72 0.94\n", + " volcano 50 0.78 0.96\n", + " baseball player 50 0.72 0.94\n", + " bridegroom 50 0.72 0.88\n", + " scuba diver 50 0.8 1\n", + " rapeseed 50 0.94 0.98\n", + " daisy 50 0.96 0.98\n", + " yellow lady's slipper 50 1 1\n", + " corn 50 0.4 0.88\n", + " acorn 50 0.92 0.98\n", + " rose hip 50 0.92 0.98\n", + " horse chestnut seed 50 0.94 0.98\n", + " coral fungus 50 0.96 0.96\n", + " agaric 50 0.82 0.94\n", + " gyromitra 50 0.98 1\n", + " stinkhorn mushroom 50 0.8 0.94\n", + " earth star 50 0.98 1\n", + " hen-of-the-woods 50 0.8 0.96\n", + " bolete 50 0.74 0.94\n", + " ear 50 0.48 0.94\n", + " toilet paper 50 0.36 0.68\n", + "Speed: 0.1ms pre-process, 0.3ms inference, 0.0ms post-process per image at shape (1, 3, 224, 224)\n", + "Results saved to \u001b[1mruns/val-cls/exp\u001b[0m\n" + ] 
} - ], - "metadata": { - "accelerator": "GPU", + ], + "source": [ + "# Validate YOLOv5s-cls on ImageNet val\n", + "!python classify/val.py --weights yolov5s-cls.pt --data ../datasets/imagenet --img 224 --half" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ZY2VXXXu74w5" + }, + "source": [ + "# 3. Train\n", + "\n", + "

\n", + "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", + "

\n", + "\n", + "Train a YOLOv5s Classification model on the [Imagenette](https://image-net.org/) dataset with `--data imagenet`, starting from pretrained `--pretrained yolov5s-cls.pt`.\n", + "\n", + "- **Pretrained [Models](https://github.com/ultralytics/yolov5/tree/master/models)** are downloaded\n", + "automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\n", + "- **Training Results** are saved to `runs/train-cls/` with incrementing run directories, i.e. `runs/train-cls/exp2`, `runs/train-cls/exp3` etc.\n", + "

\n", + "\n", + "A **Mosaic Dataloader** is used for training which combines 4 images into 1 mosaic.\n", + "\n", + "## Train on Custom Data with Roboflow 🌟 NEW\n", + "\n", + "[Roboflow](https://roboflow.com/?ref=ultralytics) enables you to easily **organize, label, and prepare** a high quality dataset with your own custom data. Roboflow also makes it easy to establish an active learning pipeline, collaborate with your team on dataset improvement, and integrate directly into your model building workflow with the `roboflow` pip package.\n", + "\n", + "- Custom Training Example: [https://blog.roboflow.com/train-yolov5-classification-custom-data/](https://blog.roboflow.com/train-yolov5-classification-custom-data/?ref=ultralytics)\n", + "- Custom Training Notebook: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1KZiKUAjtARHAfZCXbJRv14-pOnIsBLPV?usp=sharing)\n", + "
\n", + "\n", + "

Label images lightning fast (including with model-assisted labeling)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "i3oKtE4g-aNn" + }, + "outputs": [], + "source": [ + "# @title Select YOLOv5 🚀 logger {run: 'auto'}\n", + "logger = \"Comet\" # @param ['Comet', 'ClearML', 'TensorBoard']\n", + "\n", + "if logger == \"Comet\":\n", + " %pip install -q comet_ml\n", + " import comet_ml\n", + "\n", + " comet_ml.init()\n", + "elif logger == \"ClearML\":\n", + " %pip install -q clearml\n", + " import clearml\n", + "\n", + " clearml.browser_login()\n", + "elif logger == \"TensorBoard\":\n", + " %load_ext tensorboard\n", + " %tensorboard --logdir runs/train" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { "colab": { - "name": "YOLOv5 Classification Tutorial", - "provenance": [] - }, - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" + "base_uri": "https://localhost:8080/" }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.12" + "id": "1NcFxRcFdJ_O", + "outputId": "77c8d487-16db-4073-b3ea-06cabf2e7766" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[34m\u001b[1mclassify/train: \u001b[0mmodel=yolov5s-cls.pt, data=imagenette160, epochs=5, batch_size=64, imgsz=224, nosave=False, cache=ram, device=, workers=8, project=runs/train-cls, name=exp, exist_ok=False, pretrained=True, optimizer=Adam, lr0=0.001, decay=5e-05, label_smoothing=0.1, cutoff=None, dropout=None, verbose=False, seed=0, local_rank=-1\n", + "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", + "YOLOv5 🚀 v7.0-3-g61ebf5e Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "\n", + "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train-cls', view at http://localhost:6006/\n", + "\n", + "Dataset not found ⚠️, missing path /content/datasets/imagenette160, attempting download...\n", + "Downloading https://github.com/ultralytics/assets/releases/download/v0.0.0/imagenette160.zip to /content/datasets/imagenette160.zip...\n", + "100% 103M/103M [00:00<00:00, 347MB/s] \n", + "Unzipping /content/datasets/imagenette160.zip...\n", + "Dataset download success ✅ (3.3s), saved to \u001b[1m/content/datasets/imagenette160\u001b[0m\n", + "\n", + "\u001b[34m\u001b[1malbumentations: \u001b[0mRandomResizedCrop(p=1.0, height=224, width=224, scale=(0.08, 1.0), ratio=(0.75, 1.3333333333333333), interpolation=1), HorizontalFlip(p=0.5), ColorJitter(p=0.5, brightness=[0.6, 1.4], contrast=[0.6, 1.4], saturation=[0.6, 1.4], hue=[0, 0]), Normalize(p=1.0, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_pixel_value=255.0), ToTensorV2(always_apply=True, p=1.0, transpose_mask=False)\n", + "Model summary: 149 layers, 4185290 parameters, 4185290 gradients, 10.5 GFLOPs\n", + "\u001b[34m\u001b[1moptimizer:\u001b[0m Adam(lr=0.001) with parameter groups 32 weight(decay=0.0), 33 weight(decay=5e-05), 33 bias\n", + "Image sizes 224 train, 224 test\n", + "Using 1 dataloader workers\n", + "Logging results to \u001b[1mruns/train-cls/exp\u001b[0m\n", + "Starting yolov5s-cls.pt training on imagenette160 dataset with 10 classes for 5 epochs...\n", + "\n", + " Epoch GPU_mem train_loss val_loss top1_acc top5_acc\n", + " 1/5 1.47G 
1.05 0.974 0.828 0.975: 100% 148/148 [00:38<00:00, 3.82it/s]\n", + " 2/5 1.73G 0.895 0.766 0.911 0.994: 100% 148/148 [00:36<00:00, 4.03it/s]\n", + " 3/5 1.73G 0.82 0.704 0.934 0.996: 100% 148/148 [00:35<00:00, 4.20it/s]\n", + " 4/5 1.73G 0.766 0.664 0.951 0.998: 100% 148/148 [00:36<00:00, 4.05it/s]\n", + " 5/5 1.73G 0.724 0.634 0.959 0.997: 100% 148/148 [00:37<00:00, 3.94it/s]\n", + "\n", + "Training complete (0.052 hours)\n", + "Results saved to \u001b[1mruns/train-cls/exp\u001b[0m\n", + "Predict: python classify/predict.py --weights runs/train-cls/exp/weights/best.pt --source im.jpg\n", + "Validate: python classify/val.py --weights runs/train-cls/exp/weights/best.pt --data /content/datasets/imagenette160\n", + "Export: python export.py --weights runs/train-cls/exp/weights/best.pt --include onnx\n", + "PyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', 'runs/train-cls/exp/weights/best.pt')\n", + "Visualize: https://netron.app\n", + "\n" + ] } + ], + "source": [ + "# Train YOLOv5s Classification on Imagenette160 for 3 epochs\n", + "!python classify/train.py --model yolov5s-cls.pt --data imagenette160 --epochs 5 --img 224 --cache" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "15glLzbQx5u0" + }, + "source": [ + "# 4. Visualize" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "nWOsI5wJR1o3" + }, + "source": [ + "## Comet Logging and Visualization 🌟 NEW\n", + "\n", + "[Comet](https://www.comet.com/site/lp/yolov5-with-comet/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://www.comet.com/docs/v2/guides/comet-dashboard/code-panels/about-panels/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes!\n", + "\n", + "Getting started is easy:\n", + "```shell\n", + "pip install comet_ml # 1. install\n", + "export COMET_API_KEY= # 2. paste API key\n", + "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n", + "```\n", + "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://docs.ultralytics.com/yolov5/tutorials/comet_logging_integration). If you'd like to learn more about Comet, head over to our [documentation](https://www.comet.com/docs/v2/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab). Get started by trying out the Comet Colab Notebook:\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", + "\n", + "\n", + "\"Comet" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Lay2WsTjNJzP" + }, + "source": [ + "## ClearML Logging and Automation 🌟 NEW\n", + "\n", + "[ClearML](https://cutt.ly/yolov5-notebook-clearml) is completely integrated into YOLOv5 to track your experimentation, manage dataset versions and even remotely execute training runs. 
To enable ClearML (check cells above):\n", + "\n", + "- `pip install clearml`\n", + "- run `clearml-init` to connect to a ClearML server (**deploy your own [open-source server](https://github.com/allegroai/clearml-server)**, or use our [free hosted server](https://cutt.ly/yolov5-notebook-clearml))\n", + "\n", + "You'll get all the great expected features from an experiment manager: live updates, model upload, experiment comparison etc. but ClearML also tracks uncommitted changes and installed packages for example. Thanks to that ClearML Tasks (which is what we call experiments) are also reproducible on different machines! With only 1 extra line, we can schedule a YOLOv5 training task on a queue to be executed by any number of ClearML Agents (workers).\n", + "\n", + "You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration) for details!\n", + "\n", + "\n", + "\"ClearML" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "-WPvRbS5Swl6" + }, + "source": [ + "## Local Logging\n", + "\n", + "Training results are automatically logged with [Tensorboard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) loggers to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc.\n", + "\n", + "This directory contains train and val statistics, mosaics, labels, predictions and augmentated mosaics, as well as metrics and charts including precision-recall (PR) curves and confusion matrices. \n", + "\n", + "\"Local\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Zelyeqbyt3GD" + }, + "source": [ + "# Environments\n", + "\n", + "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n", + "\n", + "- **Notebooks** with free GPU: \"Run \"Open \"Open\n", + "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/)\n", + "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/)\n", + "- **Docker Image**. See [Docker Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/docker_image_quickstart_tutorial/) \"Docker\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "6Qu7Iesl0p54" + }, + "source": [ + "# Status\n", + "\n", + "![YOLOv5 CI](https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg)\n", + "\n", + "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. 
CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "IEijrePND_2I" + }, + "source": [ + "# Appendix\n", + "\n", + "Additional content below." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "GMusP4OAxFu6" + }, + "outputs": [], + "source": [ + "# YOLOv5 PyTorch HUB Inference (DetectionModels only)\n", + "\n", + "model = torch.hub.load(\n", + " \"ultralytics/yolov5\", \"yolov5s\", force_reload=True, trust_repo=True\n", + ") # or yolov5n - yolov5x6 or custom\n", + "im = \"https://ultralytics.com/images/zidane.jpg\" # file, Path, PIL.Image, OpenCV, nparray, list\n", + "results = model(im) # inference\n", + "results.print() # or .show(), .save(), .crop(), .pandas(), etc." + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "name": "YOLOv5 Classification Tutorial", + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" }, - "nbformat": 4, - "nbformat_minor": 0 + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.12" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } diff --git a/segment/tutorial.ipynb b/segment/tutorial.ipynb index c96876801adb..56ea50500e66 100644 --- a/segment/tutorial.ipynb +++ b/segment/tutorial.ipynb @@ -1,595 +1,602 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "id": "t6MPjfT5NrKQ" - }, - "source": [ - "
\n", - "\n", - " \n", - " \n", - "\n", - "\n", - "
\n", - " \"Run\n", - " \"Open\n", - " \"Open\n", - "
\n", - "\n", - "This YOLOv5 🚀 notebook by Ultralytics presents simple train, validate and predict examples to help start your AI adventure.
See GitHub for community support or contact us for professional support.\n", - "\n", - "
" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "7mGmQbAO5pQb" - }, - "source": [ - "# Setup\n", - "\n", - "Clone GitHub [repository](https://github.com/ultralytics/yolov5), install [dependencies](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) and check PyTorch and GPU." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "wbvMlHd_QwMG", - "outputId": "171b23f0-71b9-4cbf-b666-6fa2ecef70c8" - }, - "outputs": [ - { - "output_type": "stream", - "name": "stderr", - "text": [ - "YOLOv5 🚀 v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n" - ] - }, - { - "output_type": "stream", - "name": "stdout", - "text": [ - "Setup complete ✅ (2 CPUs, 12.7 GB RAM, 22.6/78.2 GB disk)\n" - ] - } - ], - "source": [ - "!git clone https://github.com/ultralytics/yolov5 # clone\n", - "%cd yolov5\n", - "%pip install -qr requirements.txt comet_ml # install\n", - "\n", - "import torch\n", - "import utils\n", - "display = utils.notebook_init() # checks" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "4JnkELT0cIJg" - }, - "source": [ - "# 1. Predict\n", - "\n", - "`segment/predict.py` runs YOLOv5 instance segmentation inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/predict`. Example inference sources are:\n", - "\n", - "```shell\n", - "python segment/predict.py --source 0 # webcam\n", - " img.jpg # image \n", - " vid.mp4 # video\n", - " screen # screenshot\n", - " path/ # directory\n", - " 'path/*.jpg' # glob\n", - " 'https://youtu.be/LNwODJXcvt4' # YouTube\n", - " 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "zR9ZbuQCH7FX", - "outputId": "3f67f1c7-f15e-4fa5-d251-967c3b77eaad" - }, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "\u001b[34m\u001b[1msegment/predict: \u001b[0mweights=['yolov5s-seg.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/predict-seg, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False, vid_stride=1, retina_masks=False\n", - "YOLOv5 🚀 v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", - "\n", - "Downloading https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt to yolov5s-seg.pt...\n", - "100% 14.9M/14.9M [00:01<00:00, 12.0MB/s]\n", - "\n", - "Fusing layers... 
\n", - "YOLOv5s-seg summary: 224 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\n", - "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 18.2ms\n", - "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 1 tie, 13.4ms\n", - "Speed: 0.5ms pre-process, 15.8ms inference, 18.5ms NMS per image at shape (1, 3, 640, 640)\n", - "Results saved to \u001b[1mruns/predict-seg/exp\u001b[0m\n" - ] - } - ], - "source": [ - "!python segment/predict.py --weights yolov5s-seg.pt --img 640 --conf 0.25 --source data/images\n", - "#display.Image(filename='runs/predict-seg/exp/zidane.jpg', width=600)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "hkAzDWJ7cWTr" - }, - "source": [ - "        \n", - "" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "0eq1SMWl6Sfn" - }, - "source": [ - "# 2. Validate\n", - "Validate a model's accuracy on the [COCO](https://cocodataset.org/#home) dataset's `val` or `test` splits. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "WQPtK1QYVaD_", - "outputId": "9d751d8c-bee8-4339-cf30-9854ca530449" - }, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "Downloading https://github.com/ultralytics/assets/releases/download/v0.0.0/coco2017labels-segments.zip ...\n", - "Downloading http://images.cocodataset.org/zips/val2017.zip ...\n", - "######################################################################## 100.0%\n", - "######################################################################## 100.0%\n" - ] - } - ], - "source": [ - "# Download COCO val\n", - "!bash data/scripts/get_coco.sh --val --segments # download (780M - 5000 images)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "X58w8JLpMnjH", - "outputId": "a140d67a-02da-479e-9ddb-7d54bf9e407a" - }, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "\u001b[34m\u001b[1msegment/val: \u001b[0mdata=/content/yolov5/data/coco.yaml, weights=['yolov5s-seg.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=val, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val-seg, name=exp, exist_ok=False, half=True, dnn=False\n", - "YOLOv5 🚀 v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", - "\n", - "Fusing layers... \n", - "YOLOv5s-seg summary: 224 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco/val2017... 
4952 images, 48 backgrounds, 0 corrupt: 100% 5000/5000 [00:03<00:00, 1361.31it/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: /content/datasets/coco/val2017.cache\n", - " Class Images Instances Box(P R mAP50 mAP50-95) Mask(P R mAP50 mAP50-95): 100% 157/157 [01:54<00:00, 1.37it/s]\n", - " all 5000 36335 0.673 0.517 0.566 0.373 0.672 0.49 0.532 0.319\n", - "Speed: 0.6ms pre-process, 4.4ms inference, 2.9ms NMS per image at shape (32, 3, 640, 640)\n", - "Results saved to \u001b[1mruns/val-seg/exp\u001b[0m\n" - ] - } - ], - "source": [ - "# Validate YOLOv5s-seg on COCO val\n", - "!python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 --half" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "ZY2VXXXu74w5" - }, - "source": [ - "# 3. Train\n", - "\n", - "

\n", - "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", - "

\n", - "\n", - "Train a YOLOv5s-seg model on the [COCO128](https://www.kaggle.com/ultralytics/coco128) dataset with `--data coco128-seg.yaml`, starting from pretrained `--weights yolov5s-seg.pt`, or from randomly initialized `--weights '' --cfg yolov5s-seg.yaml`.\n", - "\n", - "- **Pretrained [Models](https://github.com/ultralytics/yolov5/tree/master/models)** are downloaded\n", - "automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\n", - "- **[Datasets](https://github.com/ultralytics/yolov5/tree/master/data)** available for autodownload include: [COCO](https://github.com/ultralytics/yolov5/blob/master/data/coco.yaml), [COCO128](https://github.com/ultralytics/yolov5/blob/master/data/coco128.yaml), [VOC](https://github.com/ultralytics/yolov5/blob/master/data/VOC.yaml), [Argoverse](https://github.com/ultralytics/yolov5/blob/master/data/Argoverse.yaml), [VisDrone](https://github.com/ultralytics/yolov5/blob/master/data/VisDrone.yaml), [GlobalWheat](https://github.com/ultralytics/yolov5/blob/master/data/GlobalWheat2020.yaml), [xView](https://github.com/ultralytics/yolov5/blob/master/data/xView.yaml), [Objects365](https://github.com/ultralytics/yolov5/blob/master/data/Objects365.yaml), [SKU-110K](https://github.com/ultralytics/yolov5/blob/master/data/SKU-110K.yaml).\n", - "- **Training Results** are saved to `runs/train-seg/` with incrementing run directories, i.e. `runs/train-seg/exp2`, `runs/train-seg/exp3` etc.\n", - "

\n", - "\n", - "A **Mosaic Dataloader** is used for training which combines 4 images into 1 mosaic.\n", - "\n", - "## Train on Custom Data with Roboflow 🌟 NEW\n", - "\n", - "[Roboflow](https://roboflow.com/?ref=ultralytics) enables you to easily **organize, label, and prepare** a high quality dataset with your own custom data. Roboflow also makes it easy to establish an active learning pipeline, collaborate with your team on dataset improvement, and integrate directly into your model building workflow with the `roboflow` pip package.\n", - "\n", - "- Custom Training Example: [https://blog.roboflow.com/train-yolov5-instance-segmentation-custom-dataset/](https://blog.roboflow.com/train-yolov5-instance-segmentation-custom-dataset/?ref=ultralytics)\n", - "- Custom Training Notebook: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1JTz7kpmHsg-5qwVz2d2IH3AaenI1tv0N?usp=sharing)\n", - "
\n", - "\n", - "

Label images lightning fast (including with model-assisted labeling)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "i3oKtE4g-aNn" - }, - "outputs": [], - "source": [ - "#@title Select YOLOv5 🚀 logger {run: 'auto'}\n", - "logger = 'Comet' #@param ['Comet', 'ClearML', 'TensorBoard']\n", - "\n", - "if logger == 'Comet':\n", - " %pip install -q comet_ml\n", - " import comet_ml; comet_ml.init()\n", - "elif logger == 'ClearML':\n", - " %pip install -q clearml\n", - " import clearml; clearml.browser_login()\n", - "elif logger == 'TensorBoard':\n", - " %load_ext tensorboard\n", - " %tensorboard --logdir runs/train" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "1NcFxRcFdJ_O", - "outputId": "3a3e0cf7-e79c-47a5-c8e7-2d26eeeab988" - }, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "\u001b[34m\u001b[1msegment/train: \u001b[0mweights=yolov5s-seg.pt, cfg=, data=coco128-seg.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train-seg, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, mask_ratio=4, no_overlap=False\n", - "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", - "YOLOv5 🚀 v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", - "\n", - "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n", - "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train-seg', view at http://localhost:6006/\n", - "\n", - "Dataset not found ⚠️, missing paths ['/content/datasets/coco128-seg/images/train2017']\n", - "Downloading https://github.com/ultralytics/assets/releases/download/v0.0.0/coco128-seg.zip to coco128-seg.zip...\n", - "100% 6.79M/6.79M [00:01<00:00, 6.73MB/s]\n", - "Dataset download success ✅ (1.9s), saved to \u001b[1m/content/datasets\u001b[0m\n", - "\n", - " from n params module arguments \n", - " 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2] \n", - " 1 -1 1 18560 models.common.Conv [32, 64, 3, 2] \n", - " 2 -1 1 18816 models.common.C3 [64, 64, 1] \n", - " 3 -1 1 73984 models.common.Conv [64, 128, 3, 2] \n", - " 4 -1 2 115712 models.common.C3 [128, 128, 2] \n", - " 5 -1 1 295424 models.common.Conv [128, 256, 3, 2] \n", - " 6 -1 3 625152 models.common.C3 [256, 256, 3] \n", - " 7 -1 1 1180672 models.common.Conv [256, 512, 3, 2] \n", - " 8 -1 1 1182720 models.common.C3 [512, 512, 1] \n", - " 9 -1 1 656896 models.common.SPPF [512, 512, 5] \n", - " 10 -1 1 131584 models.common.Conv [512, 256, 1, 1] \n", - " 11 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", - " 12 [-1, 6] 1 0 models.common.Concat [1] \n", - " 13 -1 1 361984 models.common.C3 [512, 256, 1, False] \n", - " 14 -1 1 33024 models.common.Conv 
[256, 128, 1, 1] \n", - " 15 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", - " 16 [-1, 4] 1 0 models.common.Concat [1] \n", - " 17 -1 1 90880 models.common.C3 [256, 128, 1, False] \n", - " 18 -1 1 147712 models.common.Conv [128, 128, 3, 2] \n", - " 19 [-1, 14] 1 0 models.common.Concat [1] \n", - " 20 -1 1 296448 models.common.C3 [256, 256, 1, False] \n", - " 21 -1 1 590336 models.common.Conv [256, 256, 3, 2] \n", - " 22 [-1, 10] 1 0 models.common.Concat [1] \n", - " 23 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n", - " 24 [17, 20, 23] 1 615133 models.yolo.Segment [80, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], 32, 128, [128, 256, 512]]\n", - "Model summary: 225 layers, 7621277 parameters, 7621277 gradients, 26.6 GFLOPs\n", - "\n", - "Transferred 367/367 items from yolov5s-seg.pt\n", - "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n", - "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 60 weight(decay=0.0), 63 weight(decay=0.0005), 63 bias\n", - "\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /content/datasets/coco128-seg/labels/train2017... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<00:00, 1389.59it/s]\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /content/datasets/coco128-seg/labels/train2017.cache\n", - "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 238.86it/s]\n", - "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco128-seg/labels/train2017.cache... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00 # 2. paste API key\n", - "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n", - "```\n", - "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://docs.ultralytics.com/yolov5/tutorials/comet_logging_integration). If you'd like to learn more about Comet, head over to our [documentation](https://www.comet.com/docs/v2/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab). Get started by trying out the Comet Colab Notebook:\n", - "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", - "\n", - "\n", - "\"Comet" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "Lay2WsTjNJzP" - }, - "source": [ - "## ClearML Logging and Automation 🌟 NEW\n", - "\n", - "[ClearML](https://cutt.ly/yolov5-notebook-clearml) is completely integrated into YOLOv5 to track your experimentation, manage dataset versions and even remotely execute training runs. To enable ClearML (check cells above):\n", - "\n", - "- `pip install clearml`\n", - "- run `clearml-init` to connect to a ClearML server (**deploy your own [open-source server](https://github.com/allegroai/clearml-server)**, or use our [free hosted server](https://cutt.ly/yolov5-notebook-clearml))\n", - "\n", - "You'll get all the great expected features from an experiment manager: live updates, model upload, experiment comparison etc. but ClearML also tracks uncommitted changes and installed packages for example. 
Thanks to that ClearML Tasks (which is what we call experiments) are also reproducible on different machines! With only 1 extra line, we can schedule a YOLOv5 training task on a queue to be executed by any number of ClearML Agents (workers).\n", - "\n", - "You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration) for details!\n", - "\n", - "\n", - "\"ClearML" - ] + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "t6MPjfT5NrKQ" + }, + "source": [ + "
\n", + "\n", + " \n", + " \n", + "\n", + "\n", + "
\n", + " \"Run\n", + " \"Open\n", + " \"Open\n", + "
\n", + "\n", + "This YOLOv5 🚀 notebook by Ultralytics presents simple train, validate and predict examples to help start your AI adventure.
See GitHub for community support or contact us for professional support.\n", + "\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "7mGmQbAO5pQb" + }, + "source": [ + "# Setup\n", + "\n", + "Clone GitHub [repository](https://github.com/ultralytics/yolov5), install [dependencies](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) and check PyTorch and GPU." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" }, + "id": "wbvMlHd_QwMG", + "outputId": "171b23f0-71b9-4cbf-b666-6fa2ecef70c8" + }, + "outputs": [ { - "cell_type": "markdown", - "metadata": { - "id": "-WPvRbS5Swl6" - }, - "source": [ - "## Local Logging\n", - "\n", - "Training results are automatically logged with [Tensorboard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) loggers to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc.\n", - "\n", - "This directory contains train and val statistics, mosaics, labels, predictions and augmentated mosaics, as well as metrics and charts including precision-recall (PR) curves and confusion matrices. \n", - "\n", - "\"Local\n" - ] + "name": "stderr", + "output_type": "stream", + "text": [ + "YOLOv5 🚀 v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n" + ] }, { - "cell_type": "markdown", - "metadata": { - "id": "Zelyeqbyt3GD" - }, - "source": [ - "# Environments\n", - "\n", - "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n", - "\n", - "- **Notebooks** with free GPU: \"Run \"Open \"Open\n", - "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/)\n", - "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/)\n", - "- **Docker Image**. See [Docker Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/docker_image_quickstart_tutorial/) \"Docker\n" - ] + "name": "stdout", + "output_type": "stream", + "text": [ + "Setup complete ✅ (2 CPUs, 12.7 GB RAM, 22.6/78.2 GB disk)\n" + ] + } + ], + "source": [ + "!git clone https://github.com/ultralytics/yolov5 # clone\n", + "%cd yolov5\n", + "%pip install -qr requirements.txt comet_ml # install\n", + "\n", + "import torch\n", + "\n", + "import utils\n", + "\n", + "display = utils.notebook_init() # checks" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "4JnkELT0cIJg" + }, + "source": [ + "# 1. Predict\n", + "\n", + "`segment/predict.py` runs YOLOv5 instance segmentation inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/predict`. 
Example inference sources are:\n", + "\n", + "```shell\n", + "python segment/predict.py --source 0 # webcam\n", + " img.jpg # image \n", + " vid.mp4 # video\n", + " screen # screenshot\n", + " path/ # directory\n", + " 'path/*.jpg' # glob\n", + " 'https://youtu.be/LNwODJXcvt4' # YouTube\n", + " 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" }, + "id": "zR9ZbuQCH7FX", + "outputId": "3f67f1c7-f15e-4fa5-d251-967c3b77eaad" + }, + "outputs": [ { - "cell_type": "markdown", - "metadata": { - "id": "6Qu7Iesl0p54" - }, - "source": [ - "# Status\n", - "\n", - "![YOLOv5 CI](https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg)\n", - "\n", - "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.\n" - ] + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[34m\u001b[1msegment/predict: \u001b[0mweights=['yolov5s-seg.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/predict-seg, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False, vid_stride=1, retina_masks=False\n", + "YOLOv5 🚀 v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "\n", + "Downloading https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt to yolov5s-seg.pt...\n", + "100% 14.9M/14.9M [00:01<00:00, 12.0MB/s]\n", + "\n", + "Fusing layers... \n", + "YOLOv5s-seg summary: 224 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\n", + "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 18.2ms\n", + "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 1 tie, 13.4ms\n", + "Speed: 0.5ms pre-process, 15.8ms inference, 18.5ms NMS per image at shape (1, 3, 640, 640)\n", + "Results saved to \u001b[1mruns/predict-seg/exp\u001b[0m\n" + ] + } + ], + "source": [ + "!python segment/predict.py --weights yolov5s-seg.pt --img 640 --conf 0.25 --source data/images\n", + "# display.Image(filename='runs/predict-seg/exp/zidane.jpg', width=600)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "hkAzDWJ7cWTr" + }, + "source": [ + "        \n", + "" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "0eq1SMWl6Sfn" + }, + "source": [ + "# 2. Validate\n", + "Validate a model's accuracy on the [COCO](https://cocodataset.org/#home) dataset's `val` or `test` splits. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag." 
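+        "\n",
+        "As a minimal sketch (same weights and dataset as the validation cells below), results by class could be printed with:\n",
+        "```shell\n",
+        "python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 --half --verbose  # report per-class metrics\n",
+        "```"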
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" }, + "id": "WQPtK1QYVaD_", + "outputId": "9d751d8c-bee8-4339-cf30-9854ca530449" + }, + "outputs": [ { - "cell_type": "markdown", - "metadata": { - "id": "IEijrePND_2I" - }, - "source": [ - "# Appendix\n", - "\n", - "Additional content below." - ] + "name": "stdout", + "output_type": "stream", + "text": [ + "Downloading https://github.com/ultralytics/assets/releases/download/v0.0.0/coco2017labels-segments.zip ...\n", + "Downloading http://images.cocodataset.org/zips/val2017.zip ...\n", + "######################################################################## 100.0%\n", + "######################################################################## 100.0%\n" + ] + } + ], + "source": [ + "# Download COCO val\n", + "!bash data/scripts/get_coco.sh --val --segments # download (780M - 5000 images)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" }, + "id": "X58w8JLpMnjH", + "outputId": "a140d67a-02da-479e-9ddb-7d54bf9e407a" + }, + "outputs": [ { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "GMusP4OAxFu6" - }, - "outputs": [], - "source": [ - "# YOLOv5 PyTorch HUB Inference (DetectionModels only)\n", - "import torch\n", - "\n", - "model = torch.hub.load('ultralytics/yolov5', 'yolov5s-seg', force_reload=True, trust_repo=True) # or yolov5n - yolov5x6 or custom\n", - "im = 'https://ultralytics.com/images/zidane.jpg' # file, Path, PIL.Image, OpenCV, nparray, list\n", - "results = model(im) # inference\n", - "results.print() # or .show(), .save(), .crop(), .pandas(), etc." - ] + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[34m\u001b[1msegment/val: \u001b[0mdata=/content/yolov5/data/coco.yaml, weights=['yolov5s-seg.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=val, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val-seg, name=exp, exist_ok=False, half=True, dnn=False\n", + "YOLOv5 🚀 v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "\n", + "Fusing layers... \n", + "YOLOv5s-seg summary: 224 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco/val2017... 4952 images, 48 backgrounds, 0 corrupt: 100% 5000/5000 [00:03<00:00, 1361.31it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: /content/datasets/coco/val2017.cache\n", + " Class Images Instances Box(P R mAP50 mAP50-95) Mask(P R mAP50 mAP50-95): 100% 157/157 [01:54<00:00, 1.37it/s]\n", + " all 5000 36335 0.673 0.517 0.566 0.373 0.672 0.49 0.532 0.319\n", + "Speed: 0.6ms pre-process, 4.4ms inference, 2.9ms NMS per image at shape (32, 3, 640, 640)\n", + "Results saved to \u001b[1mruns/val-seg/exp\u001b[0m\n" + ] } - ], - "metadata": { - "accelerator": "GPU", + ], + "source": [ + "# Validate YOLOv5s-seg on COCO val\n", + "!python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 --half" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ZY2VXXXu74w5" + }, + "source": [ + "# 3. Train\n", + "\n", + "

\n", + "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", + "

\n", + "\n", + "Train a YOLOv5s-seg model on the [COCO128](https://www.kaggle.com/ultralytics/coco128) dataset with `--data coco128-seg.yaml`, starting from pretrained `--weights yolov5s-seg.pt`, or from randomly initialized `--weights '' --cfg yolov5s-seg.yaml`.\n", + "\n", + "- **Pretrained [Models](https://github.com/ultralytics/yolov5/tree/master/models)** are downloaded\n", + "automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\n", + "- **[Datasets](https://github.com/ultralytics/yolov5/tree/master/data)** available for autodownload include: [COCO](https://github.com/ultralytics/yolov5/blob/master/data/coco.yaml), [COCO128](https://github.com/ultralytics/yolov5/blob/master/data/coco128.yaml), [VOC](https://github.com/ultralytics/yolov5/blob/master/data/VOC.yaml), [Argoverse](https://github.com/ultralytics/yolov5/blob/master/data/Argoverse.yaml), [VisDrone](https://github.com/ultralytics/yolov5/blob/master/data/VisDrone.yaml), [GlobalWheat](https://github.com/ultralytics/yolov5/blob/master/data/GlobalWheat2020.yaml), [xView](https://github.com/ultralytics/yolov5/blob/master/data/xView.yaml), [Objects365](https://github.com/ultralytics/yolov5/blob/master/data/Objects365.yaml), [SKU-110K](https://github.com/ultralytics/yolov5/blob/master/data/SKU-110K.yaml).\n", + "- **Training Results** are saved to `runs/train-seg/` with incrementing run directories, i.e. `runs/train-seg/exp2`, `runs/train-seg/exp3` etc.\n", + "

\n", + "\n", + "A **Mosaic Dataloader** is used for training which combines 4 images into 1 mosaic.\n", + "\n", + "## Train on Custom Data with Roboflow 🌟 NEW\n", + "\n", + "[Roboflow](https://roboflow.com/?ref=ultralytics) enables you to easily **organize, label, and prepare** a high quality dataset with your own custom data. Roboflow also makes it easy to establish an active learning pipeline, collaborate with your team on dataset improvement, and integrate directly into your model building workflow with the `roboflow` pip package.\n", + "\n", + "- Custom Training Example: [https://blog.roboflow.com/train-yolov5-instance-segmentation-custom-dataset/](https://blog.roboflow.com/train-yolov5-instance-segmentation-custom-dataset/?ref=ultralytics)\n", + "- Custom Training Notebook: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1JTz7kpmHsg-5qwVz2d2IH3AaenI1tv0N?usp=sharing)\n", + "
\n", + "\n", + "

Label images lightning fast (including with model-assisted labeling)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "i3oKtE4g-aNn" + }, + "outputs": [], + "source": [ + "# @title Select YOLOv5 🚀 logger {run: 'auto'}\n", + "logger = \"Comet\" # @param ['Comet', 'ClearML', 'TensorBoard']\n", + "\n", + "if logger == \"Comet\":\n", + " %pip install -q comet_ml\n", + " import comet_ml\n", + "\n", + " comet_ml.init()\n", + "elif logger == \"ClearML\":\n", + " %pip install -q clearml\n", + " import clearml\n", + "\n", + " clearml.browser_login()\n", + "elif logger == \"TensorBoard\":\n", + " %load_ext tensorboard\n", + " %tensorboard --logdir runs/train" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { "colab": { - "name": "YOLOv5 Segmentation Tutorial", - "provenance": [], - "toc_visible": true - }, - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" + "base_uri": "https://localhost:8080/" }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.12" + "id": "1NcFxRcFdJ_O", + "outputId": "3a3e0cf7-e79c-47a5-c8e7-2d26eeeab988" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[34m\u001b[1msegment/train: \u001b[0mweights=yolov5s-seg.pt, cfg=, data=coco128-seg.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train-seg, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, mask_ratio=4, no_overlap=False\n", + "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", + "YOLOv5 🚀 v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "\n", + "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n", + "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train-seg', view at http://localhost:6006/\n", + "\n", + "Dataset not found ⚠️, missing paths ['/content/datasets/coco128-seg/images/train2017']\n", + "Downloading https://github.com/ultralytics/assets/releases/download/v0.0.0/coco128-seg.zip to coco128-seg.zip...\n", + "100% 6.79M/6.79M [00:01<00:00, 6.73MB/s]\n", + "Dataset download success ✅ (1.9s), saved to \u001b[1m/content/datasets\u001b[0m\n", + "\n", + " from n params module arguments \n", + " 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2] \n", + " 1 -1 1 18560 models.common.Conv [32, 64, 3, 2] \n", + " 2 -1 1 18816 models.common.C3 [64, 64, 1] \n", + " 3 -1 1 73984 models.common.Conv [64, 128, 3, 2] \n", + " 4 -1 2 115712 models.common.C3 [128, 128, 2] \n", + " 5 -1 1 295424 models.common.Conv [128, 256, 3, 2] \n", + " 6 -1 3 625152 
models.common.C3 [256, 256, 3] \n", + " 7 -1 1 1180672 models.common.Conv [256, 512, 3, 2] \n", + " 8 -1 1 1182720 models.common.C3 [512, 512, 1] \n", + " 9 -1 1 656896 models.common.SPPF [512, 512, 5] \n", + " 10 -1 1 131584 models.common.Conv [512, 256, 1, 1] \n", + " 11 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", + " 12 [-1, 6] 1 0 models.common.Concat [1] \n", + " 13 -1 1 361984 models.common.C3 [512, 256, 1, False] \n", + " 14 -1 1 33024 models.common.Conv [256, 128, 1, 1] \n", + " 15 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", + " 16 [-1, 4] 1 0 models.common.Concat [1] \n", + " 17 -1 1 90880 models.common.C3 [256, 128, 1, False] \n", + " 18 -1 1 147712 models.common.Conv [128, 128, 3, 2] \n", + " 19 [-1, 14] 1 0 models.common.Concat [1] \n", + " 20 -1 1 296448 models.common.C3 [256, 256, 1, False] \n", + " 21 -1 1 590336 models.common.Conv [256, 256, 3, 2] \n", + " 22 [-1, 10] 1 0 models.common.Concat [1] \n", + " 23 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n", + " 24 [17, 20, 23] 1 615133 models.yolo.Segment [80, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], 32, 128, [128, 256, 512]]\n", + "Model summary: 225 layers, 7621277 parameters, 7621277 gradients, 26.6 GFLOPs\n", + "\n", + "Transferred 367/367 items from yolov5s-seg.pt\n", + "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n", + "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 60 weight(decay=0.0), 63 weight(decay=0.0005), 63 bias\n", + "\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /content/datasets/coco128-seg/labels/train2017... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<00:00, 1389.59it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /content/datasets/coco128-seg/labels/train2017.cache\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 238.86it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco128-seg/labels/train2017.cache... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00 # 2. paste API key\n", + "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n", + "```\n", + "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://docs.ultralytics.com/yolov5/tutorials/comet_logging_integration). If you'd like to learn more about Comet, head over to our [documentation](https://www.comet.com/docs/v2/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab). Get started by trying out the Comet Colab Notebook:\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", + "\n", + "\n", + "\"Comet" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Lay2WsTjNJzP" + }, + "source": [ + "## ClearML Logging and Automation 🌟 NEW\n", + "\n", + "[ClearML](https://cutt.ly/yolov5-notebook-clearml) is completely integrated into YOLOv5 to track your experimentation, manage dataset versions and even remotely execute training runs. 
To enable ClearML (check cells above):\n",
+        "\n",
+        "- `pip install clearml`\n",
+        "- run `clearml-init` to connect to a ClearML server (**deploy your own [open-source server](https://github.com/allegroai/clearml-server)**, or use our [free hosted server](https://cutt.ly/yolov5-notebook-clearml))\n",
+        "\n",
+        "You'll get all the features expected from an experiment manager: live updates, model upload, experiment comparison, etc., but ClearML also tracks uncommitted changes and installed packages, for example. Thanks to that, ClearML Tasks (which is what we call experiments) are also reproducible on different machines! With only 1 extra line, we can schedule a YOLOv5 training task on a queue to be executed by any number of ClearML Agents (workers).\n",
+        "\n",
+        "You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration) for details!\n",
+        "\n",
+        "\n",
+        "\"ClearML"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "-WPvRbS5Swl6"
+      },
+      "source": [
+        "## Local Logging\n",
+        "\n",
+        "Training results are automatically logged with [TensorBoard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) loggers to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc.\n",
+        "\n",
+        "This directory contains train and val statistics, mosaics, labels, predictions and augmented mosaics, as well as metrics and charts including precision-recall (PR) curves and confusion matrices. \n",
+        "\n",
+        "\"Local\n"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "Zelyeqbyt3GD"
+      },
+      "source": [
+        "# Environments\n",
+        "\n",
+        "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n",
+        "\n",
+        "- **Notebooks** with free GPU: \"Run \"Open \"Open\n",
+        "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/)\n",
+        "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/)\n",
+        "- **Docker Image**. See [Docker Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/docker_image_quickstart_tutorial/) \"Docker\n"
+      ]
+    },
+    {
+      "cell_type": "markdown",
+      "metadata": {
+        "id": "6Qu7Iesl0p54"
+      },
+      "source": [
+        "# Status\n",
+        "\n",
+        "![YOLOv5 CI](https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg)\n",
+        "\n",
+        "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing.
CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "IEijrePND_2I" + }, + "source": [ + "# Appendix\n", + "\n", + "Additional content below." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "GMusP4OAxFu6" + }, + "outputs": [], + "source": [ + "# YOLOv5 PyTorch HUB Inference (DetectionModels only)\n", + "\n", + "model = torch.hub.load(\n", + " \"ultralytics/yolov5\", \"yolov5s-seg\", force_reload=True, trust_repo=True\n", + ") # or yolov5n - yolov5x6 or custom\n", + "im = \"https://ultralytics.com/images/zidane.jpg\" # file, Path, PIL.Image, OpenCV, nparray, list\n", + "results = model(im) # inference\n", + "results.print() # or .show(), .save(), .crop(), .pandas(), etc." + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "name": "YOLOv5 Segmentation Tutorial", + "provenance": [], + "toc_visible": true + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" }, - "nbformat": 4, - "nbformat_minor": 0 + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.12" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } From 7593ee561881328132c149dbc83f7c1f95d50a82 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 20 Aug 2024 16:37:04 +0800 Subject: [PATCH 1933/1976] Bump contributor-assistant/github-action from 2.4.0 to 2.5.1 in /.github/workflows (#13266) * Bump contributor-assistant/github-action in /.github/workflows Bumps [contributor-assistant/github-action](https://github.com/contributor-assistant/github-action) from 2.4.0 to 2.5.1. - [Release notes](https://github.com/contributor-assistant/github-action/releases) - [Commits](https://github.com/contributor-assistant/github-action/compare/v2.4.0...v2.5.1) --- updated-dependencies: - dependency-name: contributor-assistant/github-action dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot]

* Auto-format by https://ultralytics.com/actions

---------

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: UltralyticsAssistant
Co-authored-by: Ultralytics Assistant <135830346+UltralyticsAssistant@users.noreply.github.com>
---
 .github/workflows/cla.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/cla.yml b/.github/workflows/cla.yml
index 004acaf6461a..2fb5bfb8f616 100644
--- a/.github/workflows/cla.yml
+++ b/.github/workflows/cla.yml
@@ -26,7 +26,7 @@ jobs:
     steps:
       - name: CLA Assistant
         if: (github.event.comment.body == 'recheck' || github.event.comment.body == 'I have read the CLA Document and I sign the CLA') || github.event_name == 'pull_request_target'
-        uses: contributor-assistant/github-action@v2.4.0
+        uses: contributor-assistant/github-action@v2.5.1
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # Must be repository secret PAT

From 2070b30399c852ad93035479525b3194cb8050ac Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Tue, 20 Aug 2024 16:47:13 +0800
Subject: [PATCH 1934/1976] Remove Hindi links (#13270)

* Remove Hindi links

* Update tutorial.ipynb

Signed-off-by: Glenn Jocher

---------

Signed-off-by: Glenn Jocher
Co-authored-by: UltralyticsAssistant
---
 tutorial.ipynb | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tutorial.ipynb b/tutorial.ipynb
index cb47d27e7e69..ebc6c0b22d80 100644
--- a/tutorial.ipynb
+++ b/tutorial.ipynb
@@ -24,7 +24,7 @@
     "  \n",
     "  \n",
     "\n",
-    "[中文](https://docs.ultralytics.com/zh/) | [한국어](https://docs.ultralytics.com/ko/) | [日本語](https://docs.ultralytics.com/ja/) | [Русский](https://docs.ultralytics.com/ru/) | [Deutsch](https://docs.ultralytics.com/de/) | [Français](https://docs.ultralytics.com/fr/) | [Español](https://docs.ultralytics.com/es/) | [Português](https://docs.ultralytics.com/pt/) | [हिन्दी](https://docs.ultralytics.com/hi/) | [العربية](https://docs.ultralytics.com/ar/)\n",
+    "[中文](https://docs.ultralytics.com/zh/) | [한국어](https://docs.ultralytics.com/ko/) | [日本語](https://docs.ultralytics.com/ja/) | [Русский](https://docs.ultralytics.com/ru/) | [Deutsch](https://docs.ultralytics.com/de/) | [Français](https://docs.ultralytics.com/fr/) | [Español](https://docs.ultralytics.com/es/) | [Português](https://docs.ultralytics.com/pt/) | [العربية](https://docs.ultralytics.com/ar/)\n",
     "\n",
     "  \"Run\n",
     "  \"Open\n",

From 41b5f45096e101b53fce50e7b641f600265ff32e Mon Sep 17 00:00:00 2001
From: Harsh Dhamecha <62664549+harshdhamecha@users.noreply.github.com>
Date: Sun, 25 Aug 2024 03:10:38 +0530
Subject: [PATCH 1935/1976] Provide option for saving box coordinates in Pascal-VOC format. (#13272)

* Provide option for saving box coordinates in Pascal-VOC format.

- Added save-format argument for saving box coordinates in Pascal-VOC format (Xmin, Ymin, Xmax, Ymax) when save_txt is True: 0 for YOLO and 1 for Pascal-VOC (default 0)

* Auto-format by https://ultralytics.com/actions

---------

Co-authored-by: UltralyticsAssistant
---
 detect.py | 16 ++++++++++++++--
 1 file changed, 14 insertions(+), 2 deletions(-)

diff --git a/detect.py b/detect.py
index 8a25ac235a3c..57d778740273 100644
--- a/detect.py
+++ b/detect.py
@@ -78,6 +78,7 @@ def run(
     device="",  # cuda device, i.e.
0 or 0,1,2,3 or cpu
     view_img=False,  # show results
     save_txt=False,  # save results to *.txt
+    save_format=0,  # save box coordinates in YOLO format or Pascal-VOC format (0 for YOLO and 1 for Pascal-VOC)
     save_csv=False,  # save results in CSV format
     save_conf=False,  # save confidences in --save-txt labels
     save_crop=False,  # save cropped prediction boxes
@@ -260,8 +261,13 @@ def write_to_csv(image_name, prediction, confidence):
                         write_to_csv(p.name, label, confidence_str)

                     if save_txt:  # Write to file
-                        xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
-                        line = (cls, *xywh, conf) if save_conf else (cls, *xywh)  # label format
+                        if save_format == 0:
+                            coords = (
+                                (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()
+                            )  # normalized xywh
+                        else:
+                            coords = (torch.tensor(xyxy).view(1, 4) / gn).view(-1).tolist()  # xyxy
+                        line = (cls, *coords, conf) if save_conf else (cls, *coords)  # label format
                         with open(f"{txt_path}.txt", "a") as f:
                             f.write(("%g " * len(line)).rstrip() % line + "\n")

@@ -369,6 +375,12 @@ def parse_opt():
     parser.add_argument("--device", default="", help="cuda device, i.e. 0 or 0,1,2,3 or cpu")
     parser.add_argument("--view-img", action="store_true", help="show results")
     parser.add_argument("--save-txt", action="store_true", help="save results to *.txt")
+    parser.add_argument(
+        "--save-format",
+        type=int,
+        default=0,
+        help="whether to save box coordinates in YOLO format or Pascal-VOC format when save-txt is True, 0 for YOLO and 1 for Pascal-VOC",
+    )
     parser.add_argument("--save-csv", action="store_true", help="save results in CSV format")
     parser.add_argument("--save-conf", action="store_true", help="save confidences in --save-txt labels")
     parser.add_argument("--save-crop", action="store_true", help="save cropped prediction boxes")

From c07b9a8b8c5d5a9656f091cf9a9e22f47d87eb4f Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Sun, 25 Aug 2024 05:45:33 +0800
Subject: [PATCH 1936/1976] Apply Ruff Docstring reformat (#13278)

* Apply Ruff Docstring reformat

Signed-off-by: Glenn Jocher

* Auto-format by https://ultralytics.com/actions

* Update export.py

Signed-off-by: Glenn Jocher

---------

Signed-off-by: Glenn Jocher
Co-authored-by: UltralyticsAssistant
Co-authored-by: Ultralytics Assistant <135830346+UltralyticsAssistant@users.noreply.github.com>
---
 export.py                              |  4 +++-
 utils/augmentations.py                 |  1 -
 utils/callbacks.py                     |  1 -
 utils/dataloaders.py                   |  7 ++++---
 utils/general.py                       |  2 --
 utils/loggers/__init__.py              |  3 ++-
 utils/loggers/clearml/clearml_utils.py | 14 +++++++-------
 utils/loggers/wandb/wandb_utils.py     | 12 ++++++------
 utils/metrics.py                       |  8 +++-----
 utils/segment/augmentations.py         |  1 -
 utils/segment/general.py               |  3 ---
 utils/triton.py                        |  3 +--
 12 files changed, 26 insertions(+), 33 deletions(-)

diff --git a/export.py b/export.py
index dfb1c06fb5e2..d06e0bf245ac 100644
--- a/export.py
+++ b/export.py
@@ -449,8 +449,10 @@ def transform_fn(data_item):
         Quantization transform function.

         Extracts and preprocess input data from dataloader item for quantization.
- Parameters: + + Args: data_item: Tuple with data item produced by DataLoader during iteration + Returns: input_tensor: Input data for quantization """ diff --git a/utils/augmentations.py b/utils/augmentations.py index 4a6e441d7c45..bdbe07712716 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -156,7 +156,6 @@ def random_perspective( ): # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(0.1, 0.1), scale=(0.9, 1.1), shear=(-10, 10)) # targets = [cls, xyxy] - """Applies random perspective transformation to an image, modifying the image and corresponding labels.""" height = im.shape[0] + border[0] * 2 # shape(h,w,c) width = im.shape[1] + border[1] * 2 diff --git a/utils/callbacks.py b/utils/callbacks.py index 0a0bcbdb2b96..21c587bd74c6 100644 --- a/utils/callbacks.py +++ b/utils/callbacks.py @@ -64,7 +64,6 @@ def run(self, hook, *args, thread=False, **kwargs): thread: (boolean) Run callbacks in daemon thread kwargs: Keyword Arguments to receive from YOLOv5 """ - assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}" for logger in self._callbacks[hook]: if thread: diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 21308f0cedbd..bdeffec465e7 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -1104,7 +1104,8 @@ def extract_boxes(path=DATASETS_DIR / "coco128"): def autosplit(path=DATASETS_DIR / "coco128/images", weights=(0.9, 0.1, 0.0), annotated_only=False): """Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files Usage: from utils.dataloaders import *; autosplit() - Arguments + + Arguments: path: Path to images directory weights: Train, val, test weights (list, tuple) annotated_only: Only use images with an annotated txt file @@ -1183,7 +1184,7 @@ class HUBDatasetStats: """ Class for generating HUB dataset JSON and `-hub` dataset directory. - Arguments + Arguments: path: Path to data.yaml or data.zip (with data.yaml inside data.zip) autodownload: Attempt to download dataset if not found locally @@ -1314,7 +1315,7 @@ class ClassificationDataset(torchvision.datasets.ImageFolder): """ YOLOv5 Classification Dataset. - Arguments + Arguments: root: Dataset path transform: torchvision transforms, used by default album_transform: Albumentations transforms, used if installed diff --git a/utils/general.py b/utils/general.py index e311504b3031..57db68a7ac76 100644 --- a/utils/general.py +++ b/utils/general.py @@ -518,7 +518,6 @@ def check_font(font=FONT, progress=False): def check_dataset(data, autodownload=True): """Validates and/or auto-downloads a dataset, returning its configuration as a dictionary.""" - # Download (optional) extract_dir = "" if isinstance(data, (str, Path)) and (is_zipfile(data) or is_tarfile(data)): @@ -1023,7 +1022,6 @@ def non_max_suppression( Returns: list of detections, on (n,6) tensor per image [xyxy, conf, cls] """ - # Checks assert 0 <= conf_thres <= 1, f"Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0" assert 0 <= iou_thres <= 1, f"Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0" diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 2bd8583d2ade..7051e8da0a29 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -350,7 +350,8 @@ class GenericLogger: """ YOLOv5 General purpose logger for non-task specific logging Usage: from utils.loggers import GenericLogger; logger = GenericLogger(...) 
- Arguments + + Arguments: opt: Run arguments console_logger: Console logger include: loggers to include diff --git a/utils/loggers/clearml/clearml_utils.py b/utils/loggers/clearml/clearml_utils.py index 2b5351ef8533..de4129e08a16 100644 --- a/utils/loggers/clearml/clearml_utils.py +++ b/utils/loggers/clearml/clearml_utils.py @@ -80,7 +80,7 @@ def __init__(self, opt, hyp): - Initialize ClearML Task, this object will capture the experiment - Upload dataset version to ClearML Data if opt.upload_dataset is True - arguments: + Arguments: opt (namespace) -- Commandline arguments for this run hyp (dict) -- Hyperparameters for this run @@ -133,7 +133,7 @@ def log_scalars(self, metrics, epoch): """ Log scalars/metrics to ClearML. - arguments: + Arguments: metrics (dict) Metrics in dict format: {"metrics/mAP": 0.8, ...} epoch (int) iteration number for the current set of metrics """ @@ -145,7 +145,7 @@ def log_model(self, model_path, model_name, epoch=0): """ Log model weights to ClearML. - arguments: + Arguments: model_path (PosixPath or str) Path to the model weights model_name (str) Name of the model visible in ClearML epoch (int) Iteration / epoch of the model weights @@ -158,7 +158,7 @@ def log_summary(self, metrics): """ Log final metrics to a summary table. - arguments: + Arguments: metrics (dict) Metrics in dict format: {"metrics/mAP": 0.8, ...} """ for k, v in metrics.items(): @@ -168,7 +168,7 @@ def log_plot(self, title, plot_path): """ Log image as plot in the plot section of ClearML. - arguments: + Arguments: title (str) Title of the plot plot_path (PosixPath or str) Path to the saved image file """ @@ -183,7 +183,7 @@ def log_debug_samples(self, files, title="Debug Samples"): """ Log files (images) as debug samples in the ClearML task. - arguments: + Arguments: files (List(PosixPath)) a list of file paths in PosixPath format title (str) A title that groups together images with the same values """ @@ -199,7 +199,7 @@ def log_image_with_boxes(self, image_path, boxes, class_names, image, conf_thres """ Draw the bounding boxes on a single image and report the result as a ClearML debug sample. - arguments: + Arguments: image_path (PosixPath) the path the original image file boxes (list): list of scaled predictions in the format - [xmin, ymin, xmax, ymax, confidence, class] class_names (dict): dict containing mapping of class int to class name diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index 930f2c7543af..6a32c8cc7b03 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -49,7 +49,7 @@ def __init__(self, opt, run_id=None, job_type="Training"): - Upload dataset if opt.upload_dataset is True - Setup training processes if job_type is 'Training' - arguments: + Arguments: opt (namespace) -- Commandline arguments for this run run_id (str) -- Run ID of W&B run to be resumed job_type (str) -- To set the job_type for this run @@ -90,7 +90,7 @@ def setup_training(self, opt): - Update data_dict, to contain info of previous run if resumed and the paths of dataset artifact if downloaded - Setup log_dict, initialize bbox_interval - arguments: + Arguments: opt (namespace) -- commandline arguments for this run """ @@ -120,7 +120,7 @@ def log_model(self, path, opt, epoch, fitness_score, best_model=False): """ Log the model checkpoint as W&B artifact. 
- arguments: + Arguments: path (Path) -- Path of directory containing the checkpoints opt (namespace) -- Command line arguments for this run epoch (int) -- Current epoch number @@ -159,7 +159,7 @@ def log(self, log_dict): """ Save the metrics to the logging dictionary. - arguments: + Arguments: log_dict (Dict) -- metrics/media to be logged in current step """ if self.wandb_run: @@ -170,7 +170,7 @@ def end_epoch(self): """ Commit the log_dict, model artifacts and Tables to W&B and flush the log_dict. - arguments: + Arguments: best_result (boolean): Boolean representing if the result of this evaluation is best or not """ if self.wandb_run: @@ -197,7 +197,7 @@ def finish_run(self): @contextmanager def all_logging_disabled(highest_level=logging.CRITICAL): - """source - https://gist.github.com/simon-weber/7853144 + """Source - https://gist.github.com/simon-weber/7853144 A context manager that will prevent any logging messages triggered during the body from being processed. :param highest_level: the maximum logging level in use. This would only need to be changed if a custom level greater than CRITICAL is defined. diff --git a/utils/metrics.py b/utils/metrics.py index 385fdc471748..9acc38591f96 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -41,7 +41,6 @@ def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir=".", names # Returns The average precision as computed in py-faster-rcnn. """ - # Sort by objectness i = np.argsort(-conf) tp, conf, pred_cls = tp[i], conf[i], pred_cls[i] @@ -103,7 +102,6 @@ def compute_ap(recall, precision): # Returns Average precision, precision curve, recall curve """ - # Append sentinel values to beginning and end mrec = np.concatenate(([0.0], recall, [1.0])) mpre = np.concatenate(([1.0], precision, [0.0])) @@ -137,6 +135,7 @@ def process_batch(self, detections, labels): Return intersection-over-union (Jaccard index) of boxes. Both sets of boxes are expected to be in (x1, y1, x2, y2) format. + Arguments: detections (Array[N, 6]), x1, y1, x2, y2, conf, class labels (Array[M, 5]), class, x1, y1, x2, y2 @@ -233,7 +232,6 @@ def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7 Input shapes are box1(1,4) to box2(n,4). """ - # Get the coordinates of bounding boxes if xywh: # transform from xywh to xyxy (x1, y1, w1, h1), (x2, y2, w2, h2) = box1.chunk(4, -1), box2.chunk(4, -1) @@ -279,14 +277,15 @@ def box_iou(box1, box2, eps=1e-7): Return intersection-over-union (Jaccard index) of boxes. Both sets of boxes are expected to be in (x1, y1, x2, y2) format. 
+ Arguments: box1 (Tensor[N, 4]) box2 (Tensor[M, 4]) + Returns: iou (Tensor[N, M]): the NxM matrix containing the pairwise IoU values for every element in boxes1 and boxes2 """ - # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2) (a1, a2), (b1, b2) = box1.unsqueeze(1).chunk(2, 2), box2.unsqueeze(0).chunk(2, 2) inter = (torch.min(a2, b2) - torch.max(a1, b1)).clamp(0).prod(2) @@ -304,7 +303,6 @@ def bbox_ioa(box1, box2, eps=1e-7): box2: np.array of shape(nx4) returns: np.array of shape(n) """ - # Get the coordinates of bounding boxes b1_x1, b1_y1, b1_x2, b1_y2 = box1 b2_x1, b2_y1, b2_x2, b2_y2 = box2.T diff --git a/utils/segment/augmentations.py b/utils/segment/augmentations.py index d7dd8aec6691..2e1dca1198b0 100644 --- a/utils/segment/augmentations.py +++ b/utils/segment/augmentations.py @@ -29,7 +29,6 @@ def random_perspective( ): # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10)) # targets = [cls, xyxy] - """Applies random perspective, rotation, scale, shear, and translation augmentations to an image and targets.""" height = im.shape[0] + border[0] * 2 # shape(h,w,c) width = im.shape[1] + border[1] * 2 diff --git a/utils/segment/general.py b/utils/segment/general.py index 2f65d60238dd..0793470a95e4 100644 --- a/utils/segment/general.py +++ b/utils/segment/general.py @@ -14,7 +14,6 @@ def crop_mask(masks, boxes): - masks should be a size [n, h, w] tensor of masks - boxes should be a size [n, 4] tensor of bbox coords in relative point form """ - n, h, w = masks.shape x1, y1, x2, y2 = torch.chunk(boxes[:, :, None], 4, 1) # x1 shape(1,1,n) r = torch.arange(w, device=masks.device, dtype=x1.dtype)[None, None, :] # rows shape(1,w,1) @@ -33,7 +32,6 @@ def process_mask_upsample(protos, masks_in, bboxes, shape): return: h, w, n """ - c, mh, mw = protos.shape # CHW masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) masks = F.interpolate(masks[None], shape, mode="bilinear", align_corners=False)[0] # CHW @@ -51,7 +49,6 @@ def process_mask(protos, masks_in, bboxes, shape, upsample=False): return: h, w, n """ - c, mh, mw = protos.shape # CHW ih, iw = shape masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) # CHW diff --git a/utils/triton.py b/utils/triton.py index 3d529ec88a07..2fee42815517 100644 --- a/utils/triton.py +++ b/utils/triton.py @@ -17,10 +17,9 @@ class TritonRemoteModel: def __init__(self, url: str): """ - Keyword arguments: + Keyword Arguments: url: Fully qualified address of the Triton server - for e.g. 
grpc://localhost:8000 """ - parsed_url = urlparse(url) if parsed_url.scheme == "grpc": from tritonclient.grpc import InferenceServerClient, InferInput From 427cfc4f735d83de5e25c8013ce74443686a8a3f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 26 Aug 2024 01:32:35 +0800 Subject: [PATCH 1937/1976] Update merge-main-into-prs.yml (#13282) Signed-off-by: Glenn Jocher --- .github/workflows/merge-main-into-prs.yml | 61 ++++++++++++++--------- 1 file changed, 38 insertions(+), 23 deletions(-) diff --git a/.github/workflows/merge-main-into-prs.yml b/.github/workflows/merge-main-into-prs.yml index 2cd4b028c8b5..eae8e0024b34 100644 --- a/.github/workflows/merge-main-into-prs.yml +++ b/.github/workflows/merge-main-into-prs.yml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics YOLO 🚀, AGPL-3.0 license # Automatically merges repository 'main' branch into all open PRs to keep them up-to-date # Action runs on updates to main branch so when one PR merges to main all others update @@ -6,10 +6,9 @@ name: Merge main into PRs on: workflow_dispatch: - push: - branches: - - main - - master + # push: + # branches: + # - ${{ github.event.repository.default_branch }} jobs: Merge: @@ -22,35 +21,51 @@ jobs: fetch-depth: 0 - uses: actions/setup-python@v5 with: - python-version: "3.11" - cache: "pip" # caching pip dependencies + python-version: "3.x" + cache: "pip" - name: Install requirements run: | pip install pygithub - - name: Merge main into PRs + - name: Merge default branch into PRs shell: python run: | from github import Github import os - # Authenticate with the GitHub Token g = Github(os.getenv('GITHUB_TOKEN')) - - # Get the repository dynamically repo = g.get_repo(os.getenv('GITHUB_REPOSITORY')) - - # List all open pull requests - open_pulls = repo.get_pulls(state='open', sort='created') - - for pr in open_pulls: - # Compare PR head with main to see if it's behind + + # Fetch the default branch name + default_branch_name = repo.default_branch + default_branch = repo.get_branch(default_branch_name) + + for pr in repo.get_pulls(state='open', sort='created'): try: - # Merge main into the PR branch - success = pr.update_branch() - assert success, "Branch update failed" - print(f"Merged 'master' into PR #{pr.number} ({pr.head.ref}) successfully.") + # Get full names for repositories and branches + base_repo_name = repo.full_name + head_repo_name = pr.head.repo.full_name + base_branch_name = pr.base.ref + head_branch_name = pr.head.ref + + # Check if PR is behind the default branch + comparison = repo.compare(default_branch.commit.sha, pr.head.sha) + + if comparison.behind_by > 0: + print(f"⚠️ PR #{pr.number} ({head_repo_name}:{head_branch_name} -> {base_repo_name}:{base_branch_name}) is behind {default_branch_name} by {comparison.behind_by} commit(s).") + + # Attempt to update the branch + try: + success = pr.update_branch() + assert success, "Branch update failed" + print(f"✅ Successfully merged '{default_branch_name}' into PR #{pr.number} ({head_repo_name}:{head_branch_name} -> {base_repo_name}:{base_branch_name}).") + except Exception as update_error: + print(f"❌ Could not update PR #{pr.number} ({head_repo_name}:{head_branch_name} -> {base_repo_name}:{base_branch_name}): {update_error}") + print(" This might be due to branch protection rules or insufficient permissions.") + else: + print(f"✅ PR #{pr.number} ({head_repo_name}:{head_branch_name} -> {base_repo_name}:{base_branch_name}) is up to date with {default_branch_name}.") except Exception as e: - print(f"Could not merge 
'master' into PR #{pr.number} ({pr.head.ref}): {e}") + print(f"❌ Could not process PR #{pr.number}: {e}") + env: - GITHUB_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }} + GITHUB_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }} GITHUB_REPOSITORY: ${{ github.repository }} From b40781b3e356f27c89270e3d1b4e182c9ee0664f Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 26 Aug 2024 08:37:25 +0800 Subject: [PATCH 1938/1976] Update README.md (#13283) * Update README.md Signed-off-by: Glenn Jocher * Auto-format by https://ultralytics.com/actions --------- Signed-off-by: Glenn Jocher Co-authored-by: UltralyticsAssistant --- export.py | 4 ++-- hubconf.py | 2 +- models/tf.py | 2 +- segment/val.py | 2 +- utils/dataloaders.py | 6 +++--- utils/loggers/__init__.py | 2 +- utils/loggers/clearml/clearml_utils.py | 2 +- utils/loggers/wandb/wandb_utils.py | 4 ++-- utils/loss.py | 2 +- utils/metrics.py | 2 +- utils/plots.py | 2 +- utils/segment/general.py | 12 ++++++------ utils/segment/metrics.py | 6 +++--- utils/torch_utils.py | 4 ++-- utils/triton.py | 2 +- 15 files changed, 27 insertions(+), 27 deletions(-) diff --git a/export.py b/export.py index d06e0bf245ac..5bc9238eb05b 100644 --- a/export.py +++ b/export.py @@ -1,6 +1,6 @@ # Ultralytics YOLOv5 🚀, AGPL-3.0 license """ -Export a YOLOv5 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit +Export a YOLOv5 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit. Format | `export.py --include` | Model --- | --- | --- @@ -141,7 +141,7 @@ def forward(self, x): def export_formats(): - """ + r""" Returns a DataFrame of supported YOLOv5 model export formats and their properties. Returns: diff --git a/hubconf.py b/hubconf.py index 98e399421809..e7ca62b045c4 100644 --- a/hubconf.py +++ b/hubconf.py @@ -1,6 +1,6 @@ # Ultralytics YOLOv5 🚀, AGPL-3.0 license """ -PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5 +PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5. Usage: import torch diff --git a/models/tf.py b/models/tf.py index 9884ec3db355..d79e48610bf7 100644 --- a/models/tf.py +++ b/models/tf.py @@ -1,7 +1,7 @@ # Ultralytics YOLOv5 🚀, AGPL-3.0 license """ TensorFlow, Keras and TFLite versions of YOLOv5 -Authored by https://github.com/zldrobit in PR https://github.com/ultralytics/yolov5/pull/1127 +Authored by https://github.com/zldrobit in PR https://github.com/ultralytics/yolov5/pull/1127. Usage: $ python models/tf.py --weights yolov5s.pt diff --git a/segment/val.py b/segment/val.py index ab8a66a90c34..60a7fe7cba2e 100644 --- a/segment/val.py +++ b/segment/val.py @@ -121,7 +121,7 @@ def process_batch(detections, labels, iouv, pred_masks=None, gt_masks=None, over detections (array[N, 6]), x1, y1, x2, y2, conf, class labels (array[M, 5]), class, x1, y1, x2, y2 Returns: - correct (array[N, 10]), for 10 IoU levels + correct (array[N, 10]), for 10 IoU levels. """ if masks: if overlap: diff --git a/utils/dataloaders.py b/utils/dataloaders.py index bdeffec465e7..0593e52d7875 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -93,7 +93,7 @@ def exif_size(img): def exif_transpose(image): """ Transpose a PIL image accordingly if it has an EXIF Orientation tag. - Inplace version of https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py exif_transpose() + Inplace version of https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py exif_transpose(). :param image: The image to transpose. :return: An image. 
@@ -316,7 +316,7 @@ def __next__(self): class LoadImages: - """YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4`""" + """YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4`.""" def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None, vid_stride=1): """Initializes YOLOv5 loader for images/videos, supporting glob patterns, directories, and lists of paths.""" @@ -1103,7 +1103,7 @@ def extract_boxes(path=DATASETS_DIR / "coco128"): def autosplit(path=DATASETS_DIR / "coco128/images", weights=(0.9, 0.1, 0.0), annotated_only=False): """Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files - Usage: from utils.dataloaders import *; autosplit() + Usage: from utils.dataloaders import *; autosplit(). Arguments: path: Path to images directory diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 7051e8da0a29..a37ae0f8a9e9 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -349,7 +349,7 @@ def on_params_update(self, params: dict): class GenericLogger: """ YOLOv5 General purpose logger for non-task specific logging - Usage: from utils.loggers import GenericLogger; logger = GenericLogger(...) + Usage: from utils.loggers import GenericLogger; logger = GenericLogger(...). Arguments: opt: Run arguments diff --git a/utils/loggers/clearml/clearml_utils.py b/utils/loggers/clearml/clearml_utils.py index de4129e08a16..fc19c8cfe22a 100644 --- a/utils/loggers/clearml/clearml_utils.py +++ b/utils/loggers/clearml/clearml_utils.py @@ -78,7 +78,7 @@ class ClearmlLogger: def __init__(self, opt, hyp): """ - Initialize ClearML Task, this object will capture the experiment - - Upload dataset version to ClearML Data if opt.upload_dataset is True + - Upload dataset version to ClearML Data if opt.upload_dataset is True. Arguments: opt (namespace) -- Commandline arguments for this run diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index 6a32c8cc7b03..9883d5738afa 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -47,7 +47,7 @@ def __init__(self, opt, run_id=None, job_type="Training"): """ - Initialize WandbLogger instance - Upload dataset if opt.upload_dataset is True - - Setup training processes if job_type is 'Training' + - Setup training processes if job_type is 'Training'. Arguments: opt (namespace) -- Commandline arguments for this run @@ -88,7 +88,7 @@ def setup_training(self, opt): Setup the necessary processes for training YOLO models: - Attempt to download model checkpoint and dataset artifacts if opt.resume stats with WANDB_ARTIFACT_PREFIX - Update data_dict, to contain info of previous run if resumed and the paths of dataset artifact if downloaded - - Setup log_dict, initialize bbox_interval + - Setup log_dict, initialize bbox_interval. Arguments: opt (namespace) -- commandline arguments for this run diff --git a/utils/loss.py b/utils/loss.py index e8f148e77c74..7dc2ad3d2102 100644 --- a/utils/loss.py +++ b/utils/loss.py @@ -9,7 +9,7 @@ def smooth_BCE(eps=0.1): - """Returns label smoothing BCE targets for reducing overfitting; pos: `1.0 - 0.5*eps`, neg: `0.5*eps`. For details see https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441""" + """Returns label smoothing BCE targets for reducing overfitting; pos: `1.0 - 0.5*eps`, neg: `0.5*eps`. 
For details see https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441.""" return 1.0 - 0.5 * eps, 0.5 * eps diff --git a/utils/metrics.py b/utils/metrics.py index 9acc38591f96..d7a5820bcddf 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -100,7 +100,7 @@ def compute_ap(recall, precision): recall: The recall curve (list) precision: The precision curve (list) # Returns - Average precision, precision curve, recall curve + Average precision, precision curve, recall curve. """ # Append sentinel values to beginning and end mrec = np.concatenate(([0.0], recall, [1.0])) diff --git a/utils/plots.py b/utils/plots.py index 9bec34a159fb..89ed1bb9f25f 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -81,7 +81,7 @@ def feature_visualization(x, module_type, stage, n=32, save_dir=Path("runs/detec module_type: Module type stage: Module stage within model n: Maximum number of feature maps to plot - save_dir: Directory to save results + save_dir: Directory to save results. """ if ("Detect" not in module_type) and ( "Segment" not in module_type diff --git a/utils/segment/general.py b/utils/segment/general.py index 0793470a95e4..c9dfaaabe4d5 100644 --- a/utils/segment/general.py +++ b/utils/segment/general.py @@ -28,7 +28,7 @@ def process_mask_upsample(protos, masks_in, bboxes, shape): protos: [mask_dim, mask_h, mask_w] masks_in: [n, mask_dim], n is number of masks after nms bboxes: [n, 4], n is number of masks after nms - shape: input_image_size, (h, w) + shape: input_image_size, (h, w). return: h, w, n """ @@ -45,7 +45,7 @@ def process_mask(protos, masks_in, bboxes, shape, upsample=False): proto_out: [mask_dim, mask_h, mask_w] out_masks: [n, mask_dim], n is number of masks after nms bboxes: [n, 4], n is number of masks after nms - shape:input_image_size, (h, w) + shape:input_image_size, (h, w). return: h, w, n """ @@ -71,7 +71,7 @@ def process_mask_native(protos, masks_in, bboxes, shape): protos: [mask_dim, mask_h, mask_w] masks_in: [n, mask_dim], n is number of masks after nms bboxes: [n, 4], n is number of masks after nms - shape: input_image_size, (h, w) + shape: input_image_size, (h, w). return: h, w, n """ @@ -92,7 +92,7 @@ def scale_image(im1_shape, masks, im0_shape, ratio_pad=None): """ img1_shape: model input shape, [h, w] img0_shape: origin pic shape, [h, w, 3] - masks: [h, w, num] + masks: [h, w, num]. """ # Rescale coordinates (xyxy) from im1_shape to im0_shape if ratio_pad is None: # calculate from im0_shape @@ -120,7 +120,7 @@ def mask_iou(mask1, mask2, eps=1e-7): """ mask1: [N, n] m1 means number of predicted objects mask2: [M, n] m2 means number of gt objects - Note: n means image_w x image_h + Note: n means image_w x image_h. return: masks iou, [N, M] """ @@ -133,7 +133,7 @@ def masks_iou(mask1, mask2, eps=1e-7): """ mask1: [N, n] m1 means number of predicted objects mask2: [N, n] m2 means number of gt objects - Note: n means image_w x image_h + Note: n means image_w x image_h. 
return: masks iou, (N, ) """ diff --git a/utils/segment/metrics.py b/utils/segment/metrics.py index 6f57dec132e2..52ceb64eac93 100644 --- a/utils/segment/metrics.py +++ b/utils/segment/metrics.py @@ -127,7 +127,7 @@ def mean_results(self): return (self.mp, self.mr, self.map50, self.map) def class_result(self, i): - """Class-aware result, return p[i], r[i], ap50[i], ap[i]""" + """Class-aware result, return p[i], r[i], ap50[i], ap[i].""" return (self.p[i], self.r[i], self.ap50[i], self.ap[i]) def get_maps(self, nc): @@ -140,7 +140,7 @@ def update(self, results): """ Args: - results: tuple(p, r, ap, f1, ap_class) + results: tuple(p, r, ap, f1, ap_class). """ p, r, all_ap, f1, ap_class_index = results self.p = p @@ -163,7 +163,7 @@ def __init__(self) -> None: def update(self, results): """ Args: - results: Dict{'boxes': Dict{}, 'masks': Dict{}} + results: Dict{'boxes': Dict{}, 'masks': Dict{}}. """ self.metric_box.update(list(results["boxes"].values())) self.metric_mask.update(list(results["masks"].values())) diff --git a/utils/torch_utils.py b/utils/torch_utils.py index d15f1f73f6c3..af3d046498c7 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -161,7 +161,7 @@ def profile(input, ops, n=10, device=None): input = torch.randn(16, 3, 640, 640) m1 = lambda x: x * torch.sigmoid(x) m2 = nn.SiLU() - profile(input, [m1, m2], n=100) # profile over 100 iterations + profile(input, [m1, m2], n=100) # profile over 100 iterations. """ results = [] if not isinstance(device, torch.device): @@ -449,7 +449,7 @@ def __call__(self, epoch, fitness): class ModelEMA: """Updated Exponential Moving Average (EMA) from https://github.com/rwightman/pytorch-image-models Keeps a moving average of everything in the model state_dict (parameters and buffers) - For EMA details see https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage + For EMA details see https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage. """ def __init__(self, model, decay=0.9999, tau=2000, updates=0): diff --git a/utils/triton.py b/utils/triton.py index 2fee42815517..3230ecd8e615 100644 --- a/utils/triton.py +++ b/utils/triton.py @@ -18,7 +18,7 @@ class TritonRemoteModel: def __init__(self, url: str): """ Keyword Arguments: - url: Fully qualified address of the Triton server - for e.g. grpc://localhost:8000 + url: Fully qualified address of the Triton server, e.g. grpc://localhost:8000.
""" parsed_url = urlparse(url) if parsed_url.scheme == "grpc": From c5ffbbf1c3f69c8e7acf2ec5106fb8edad33c3de Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 26 Aug 2024 15:46:32 +0200 Subject: [PATCH 1939/1976] Add https://www.reddit.com/r/Ultralytics/ badge (#13284) * Refactor code for speed and clarity * Update README.zh-CN.md Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher Co-authored-by: UltralyticsAssistant --- .github/workflows/merge-main-into-prs.yml | 4 +- README.md | 2 +- README.zh-CN.md | 1 + export.py | 2 + models/common.py | 78 +++++++++++++++-------- models/tf.py | 62 ++++++++++++------ models/yolo.py | 15 +++-- utils/__init__.py | 3 +- utils/activations.py | 8 +++ utils/augmentations.py | 12 ++-- utils/dataloaders.py | 11 +++- utils/general.py | 9 ++- utils/loggers/__init__.py | 3 +- utils/loss.py | 11 +++- utils/metrics.py | 3 +- utils/plots.py | 3 +- utils/segment/dataloaders.py | 2 + utils/segment/loss.py | 3 +- utils/segment/metrics.py | 2 + utils/torch_utils.py | 3 +- 20 files changed, 164 insertions(+), 73 deletions(-) diff --git a/.github/workflows/merge-main-into-prs.yml b/.github/workflows/merge-main-into-prs.yml index eae8e0024b34..05a0363a74b8 100644 --- a/.github/workflows/merge-main-into-prs.yml +++ b/.github/workflows/merge-main-into-prs.yml @@ -34,11 +34,11 @@ jobs: g = Github(os.getenv('GITHUB_TOKEN')) repo = g.get_repo(os.getenv('GITHUB_REPOSITORY')) - + # Fetch the default branch name default_branch_name = repo.default_branch default_branch = repo.get_branch(default_branch_name) - + for pr in repo.get_pulls(state='open', sort='created'): try: # Get full names for repositories and branches diff --git a/README.md b/README.md index ac8b63af87cf..88729597e4cd 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,7 @@ YOLOv5 CI YOLOv5 Citation Docker Pulls - Discord Ultralytics Forums + Discord Ultralytics Forums Ultralytics Reddit
Run on Gradient Open In Colab diff --git a/README.zh-CN.md b/README.zh-CN.md index bb45872a0577..530fed5c13d1 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -10,6 +10,7 @@ YOLOv5 CI YOLOv5 Citation Docker Pulls + Discord Ultralytics Forums Ultralytics Reddit
Run on Gradient Open In Colab diff --git a/export.py b/export.py index 5bc9238eb05b..2c697eca1c96 100644 --- a/export.py +++ b/export.py @@ -91,6 +91,8 @@ class iOSModel(torch.nn.Module): + """An iOS-compatible wrapper for YOLOv5 models that normalizes input images based on their dimensions.""" + def __init__(self, model, im): """ Initializes an iOS compatible model with normalization based on image dimensions. diff --git a/models/common.py b/models/common.py index 1e0ffdd3abdb..8ad53d5d11b6 100644 --- a/models/common.py +++ b/models/common.py @@ -71,7 +71,8 @@ def autopad(k, p=None, d=1): class Conv(nn.Module): - # Standard convolution with args(ch_in, ch_out, kernel, stride, padding, groups, dilation, activation) + """Applies a convolution, batch normalization, and activation function to an input tensor in a neural network.""" + default_act = nn.SiLU() # default activation def __init__(self, c1, c2, k=1, s=1, p=None, g=1, d=1, act=True): @@ -91,7 +92,8 @@ def forward_fuse(self, x): class DWConv(Conv): - # Depth-wise convolution + """Implements a depth-wise convolution layer with optional activation for efficient spatial filtering.""" + def __init__(self, c1, c2, k=1, s=1, d=1, act=True): """Initializes a depth-wise convolution layer with optional activation; args: input channels (c1), output channels (c2), kernel size (k), stride (s), dilation (d), and activation flag (act). @@ -100,7 +102,8 @@ def __init__(self, c1, c2, k=1, s=1, d=1, act=True): class DWConvTranspose2d(nn.ConvTranspose2d): - # Depth-wise transpose convolution + """A depth-wise transpose convolutional layer for upsampling in neural networks, particularly in YOLOv5 models.""" + def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0): """Initializes a depth-wise transpose convolutional layer for YOLOv5; args: input channels (c1), output channels (c2), kernel size (k), stride (s), input padding (p1), output padding (p2). @@ -109,7 +112,8 @@ def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0): class TransformerLayer(nn.Module): - # Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance) + """Transformer layer with multihead attention and linear layers, optimized by removing LayerNorm.""" + def __init__(self, c, num_heads): """ Initializes a transformer layer, sans LayerNorm for performance, with multihead attention and linear layers. @@ -132,7 +136,8 @@ def forward(self, x): class TransformerBlock(nn.Module): - # Vision Transformer https://arxiv.org/abs/2010.11929 + """A Transformer block for vision tasks with convolution, position embeddings, and Transformer layers.""" + def __init__(self, c1, c2, num_heads, num_layers): """Initializes a Transformer block for vision tasks, adapting dimensions if necessary and stacking specified layers. @@ -157,7 +162,8 @@ def forward(self, x): class Bottleneck(nn.Module): - # Standard bottleneck + """A bottleneck layer with optional shortcut and group convolution for efficient feature extraction.""" + def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): """Initializes a standard bottleneck layer with optional shortcut and group convolution, supporting channel expansion. 
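The `Conv` docstring added above describes YOLOv5's basic building block: a convolution followed by batch normalization and an activation (SiLU by default). As a rough illustration only — not the actual `Conv` class, with hypothetical names and channel counts, and without the weight-fusing path — the pattern looks like this:

```python
import torch
import torch.nn as nn


class ConvBNAct(nn.Module):
    """Minimal conv -> BN -> SiLU block mirroring the pattern the Conv docstring describes (illustrative only)."""

    def __init__(self, c1, c2, k=1, s=1):
        super().__init__()
        # bias=False because the following BatchNorm supplies its own learnable shift
        self.conv = nn.Conv2d(c1, c2, k, s, padding=k // 2, bias=False)
        self.bn = nn.BatchNorm2d(c2)
        self.act = nn.SiLU()  # the default activation noted in the docstring

    def forward(self, x):
        return self.act(self.bn(self.conv(x)))


x = torch.randn(1, 3, 64, 64)
print(ConvBNAct(3, 16, k=3, s=2)(x).shape)  # torch.Size([1, 16, 32, 32])
```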
@@ -176,7 +182,8 @@ def forward(self, x): class BottleneckCSP(nn.Module): - # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks + """CSP bottleneck layer for feature extraction with cross-stage partial connections and optional shortcuts.""" + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): """Initializes CSP bottleneck with optional shortcuts; args: ch_in, ch_out, number of repeats, shortcut bool, groups, expansion. @@ -201,7 +208,8 @@ def forward(self, x): class CrossConv(nn.Module): - # Cross Convolution Downsample + """Implements a cross convolution layer with downsampling, expansion, and optional shortcut.""" + def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False): """ Initializes CrossConv with downsampling, expanding, and optionally shortcutting; `c1` input, `c2` output @@ -221,7 +229,8 @@ def forward(self, x): class C3(nn.Module): - # CSP Bottleneck with 3 convolutions + """Implements a CSP Bottleneck module with three convolutions for enhanced feature extraction in neural networks.""" + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): """Initializes C3 module with options for channel count, bottleneck repetition, shortcut usage, group convolutions, and expansion. @@ -239,7 +248,8 @@ def forward(self, x): class C3x(C3): - # C3 module with cross-convolutions + """Extends the C3 module with cross-convolutions for enhanced feature extraction in neural networks.""" + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): """Initializes C3x module with cross-convolutions, extending C3 with customizable channel dimensions, groups, and expansion. @@ -250,7 +260,8 @@ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): class C3TR(C3): - # C3 module with TransformerBlock() + """C3 module with TransformerBlock for enhanced feature extraction in object detection models.""" + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): """Initializes C3 module with TransformerBlock for enhanced feature extraction, accepts channel sizes, shortcut config, group, and expansion. @@ -261,7 +272,8 @@ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): class C3SPP(C3): - # C3 module with SPP() + """Extends the C3 module with an SPP layer for enhanced spatial feature extraction and customizable channels.""" + def __init__(self, c1, c2, k=(5, 9, 13), n=1, shortcut=True, g=1, e=0.5): """Initializes a C3 module with SPP layer for advanced spatial feature extraction, given channel sizes, kernel sizes, shortcut, group, and expansion ratio. 
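All of the C3 variants documented above share the cross-stage partial idea: split the input into two branches, send only one through a stack of bottlenecks, then concatenate and fuse with a 1x1 convolution. A stripped-down sketch of that structure — plain `nn.Conv2d` stands in for YOLOv5's `Conv` blocks, and the `TinyC3`/`TinyBottleneck` names are illustrative — so the shapes stay easy to follow:

```python
import torch
import torch.nn as nn


class TinyBottleneck(nn.Module):
    """Residual bottleneck: 1x1 reduce -> 3x3 restore, with an identity shortcut (illustrative sketch)."""

    def __init__(self, c, e=0.5):
        super().__init__()
        c_ = int(c * e)  # hidden channels from the expansion ratio `e`
        self.cv1 = nn.Conv2d(c, c_, 1)
        self.cv2 = nn.Conv2d(c_, c, 3, padding=1)

    def forward(self, x):
        return x + self.cv2(self.cv1(x))  # shortcut add; shapes match by construction


class TinyC3(nn.Module):
    """Cross-stage partial split: one branch through n bottlenecks, one 1x1 bypass, then concat and fuse."""

    def __init__(self, c, n=1):
        super().__init__()
        c_ = c // 2
        self.cv1 = nn.Conv2d(c, c_, 1)  # branch entering the bottleneck stack
        self.cv2 = nn.Conv2d(c, c_, 1)  # bypass branch
        self.m = nn.Sequential(*(TinyBottleneck(c_) for _ in range(n)))
        self.cv3 = nn.Conv2d(2 * c_, c, 1)  # fuse the concatenated branches

    def forward(self, x):
        return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), dim=1))


print(TinyC3(64, n=2)(torch.randn(1, 64, 32, 32)).shape)  # torch.Size([1, 64, 32, 32])
```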
@@ -272,7 +284,8 @@ def __init__(self, c1, c2, k=(5, 9, 13), n=1, shortcut=True, g=1, e=0.5): class C3Ghost(C3): - # C3 module with GhostBottleneck() + """Implements a C3 module with Ghost Bottlenecks for efficient feature extraction in YOLOv5.""" + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): """Initializes YOLOv5's C3 module with Ghost Bottlenecks for efficient feature extraction.""" super().__init__(c1, c2, n, shortcut, g, e) @@ -281,7 +294,8 @@ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): class SPP(nn.Module): - # Spatial Pyramid Pooling (SPP) layer https://arxiv.org/abs/1406.4729 + """Implements Spatial Pyramid Pooling (SPP) for feature extraction, ref: https://arxiv.org/abs/1406.4729.""" + def __init__(self, c1, c2, k=(5, 9, 13)): """Initializes SPP layer with Spatial Pyramid Pooling, ref: https://arxiv.org/abs/1406.4729, args: c1 (input channels), c2 (output channels), k (kernel sizes).""" super().__init__() @@ -301,7 +315,8 @@ def forward(self, x): class SPPF(nn.Module): - # Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher + """Implements a fast Spatial Pyramid Pooling (SPPF) layer for efficient feature extraction in YOLOv5 models.""" + def __init__(self, c1, c2, k=5): """ Initializes YOLOv5 SPPF layer with given channels and kernel size for YOLOv5 model, combining convolution and @@ -326,7 +341,8 @@ def forward(self, x): class Focus(nn.Module): - # Focus wh information into c-space + """Focuses spatial information into channel space using slicing and convolution for efficient feature extraction.""" + def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): """Initializes Focus module to concentrate width-height info into channel space with configurable convolution parameters. @@ -342,7 +358,8 @@ def forward(self, x): class GhostConv(nn.Module): - # Ghost Convolution https://github.com/huawei-noah/ghostnet + """Implements Ghost Convolution for efficient feature extraction, see https://github.com/huawei-noah/ghostnet.""" + def __init__(self, c1, c2, k=1, s=1, g=1, act=True): """Initializes GhostConv with in/out channels, kernel size, stride, groups, and activation; halves out channels for efficiency. @@ -359,7 +376,8 @@ def forward(self, x): class GhostBottleneck(nn.Module): - # Ghost Bottleneck https://github.com/huawei-noah/ghostnet + """Efficient bottleneck layer using Ghost Convolutions, see https://github.com/huawei-noah/ghostnet.""" + def __init__(self, c1, c2, k=3, s=1): """Initializes GhostBottleneck with ch_in `c1`, ch_out `c2`, kernel size `k`, stride `s`; see https://github.com/huawei-noah/ghostnet.""" super().__init__() @@ -379,7 +397,8 @@ def forward(self, x): class Contract(nn.Module): - # Contract width-height into channels, i.e. x(1,64,80,80) to x(1,256,40,40) + """Contracts spatial dimensions into channel dimensions for efficient processing in neural networks.""" + def __init__(self, gain=2): """Initializes a layer to contract spatial dimensions (width-height) into channels, e.g., input shape (1,64,80,80) to (1,256,40,40). @@ -399,7 +418,8 @@ def forward(self, x): class Expand(nn.Module): - # Expand channels into width-height, i.e. 
x(1,64,80,80) to x(1,16,160,160) + """Expands spatial dimensions by redistributing channels, e.g., from (1,64,80,80) to (1,16,160,160).""" + def __init__(self, gain=2): """ Initializes the Expand module to increase spatial dimensions by redistributing channels, with an optional gain @@ -422,7 +442,8 @@ def forward(self, x): class Concat(nn.Module): - # Concatenate a list of tensors along dimension + """Concatenates tensors along a specified dimension for efficient tensor manipulation in neural networks.""" + def __init__(self, dimension=1): """Initializes a Concat module to concatenate tensors along a specified dimension.""" super().__init__() @@ -436,7 +457,8 @@ def forward(self, x): class DetectMultiBackend(nn.Module): - # YOLOv5 MultiBackend class for python inference on various backends + """YOLOv5 MultiBackend class for inference on various backends including PyTorch, ONNX, TensorRT, and more.""" + def __init__(self, weights="yolov5s.pt", device=torch.device("cpu"), dnn=False, data=None, fp16=False, fuse=True): """Initializes DetectMultiBackend with support for various inference backends, including PyTorch and ONNX.""" # PyTorch: weights = *.pt @@ -778,7 +800,8 @@ def _load_metadata(f=Path("path/to/meta.yaml")): class AutoShape(nn.Module): - # YOLOv5 input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS + """AutoShape class for robust YOLOv5 inference with preprocessing, NMS, and support for various input formats.""" + conf = 0.25 # NMS confidence threshold iou = 0.45 # NMS IoU threshold agnostic = False # NMS class-agnostic @@ -889,7 +912,8 @@ def forward(self, ims, size=640, augment=False, profile=False): class Detections: - # YOLOv5 detections class for inference results + """Manages YOLOv5 detection results with methods for visualization, saving, cropping, and exporting detections.""" + def __init__(self, ims, pred, files, times=(0, 0, 0), names=None, shape=None): """Initializes the YOLOv5 Detections class with image info, predictions, filenames, timing and normalization.""" super().__init__() @@ -1047,7 +1071,8 @@ def __repr__(self): class Proto(nn.Module): - # YOLOv5 mask Proto module for segmentation models + """YOLOv5 mask Proto module for segmentation models, performing convolutions and upsampling on input tensors.""" + def __init__(self, c1, c_=256, c2=32): """Initializes YOLOv5 Proto module for segmentation with input, proto, and mask channels configuration.""" super().__init__() @@ -1062,7 +1087,8 @@ def forward(self, x): class Classify(nn.Module): - # YOLOv5 classification head, i.e. 
x(b,c1,20,20) to x(b,c2) + """YOLOv5 classification head with convolution, pooling, and dropout layers for channel transformation.""" + def __init__( self, c1, c2, k=1, s=1, p=None, g=1, dropout_p=0.0 ): # ch_in, ch_out, kernel, stride, padding, groups, dropout probability diff --git a/models/tf.py b/models/tf.py index d79e48610bf7..59bb7e0f558d 100644 --- a/models/tf.py +++ b/models/tf.py @@ -49,7 +49,8 @@ class TFBN(keras.layers.Layer): - # TensorFlow BatchNormalization wrapper + """TensorFlow BatchNormalization wrapper for initializing with optional pretrained weights.""" + def __init__(self, w=None): """Initializes a TensorFlow BatchNormalization layer with optional pretrained weights.""" super().__init__() @@ -67,7 +68,8 @@ def call(self, inputs): class TFPad(keras.layers.Layer): - # Pad inputs in spatial dimensions 1 and 2 + """Pads input tensors in spatial dimensions 1 and 2 with specified integer or tuple padding values.""" + def __init__(self, pad): """ Initializes a padding layer for spatial dimensions 1 and 2 with specified padding, supporting both int and tuple @@ -87,7 +89,8 @@ def call(self, inputs): class TFConv(keras.layers.Layer): - # Standard convolution + """Implements a standard convolutional layer with optional batch normalization and activation for TensorFlow.""" + def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None): """ Initializes a standard convolution layer with optional batch normalization and activation; supports only @@ -118,7 +121,8 @@ def call(self, inputs): class TFDWConv(keras.layers.Layer): - # Depthwise convolution + """Initializes a depthwise convolution layer with optional batch normalization and activation for TensorFlow.""" + def __init__(self, c1, c2, k=1, s=1, p=None, act=True, w=None): """ Initializes a depthwise convolution layer with optional batch normalization and activation for TensorFlow @@ -147,7 +151,8 @@ def call(self, inputs): class TFDWConvTranspose2d(keras.layers.Layer): - # Depthwise ConvTranspose2d + """Implements a depthwise ConvTranspose2D layer for TensorFlow with specific settings.""" + def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0, w=None): """ Initializes depthwise ConvTranspose2D layer with specific channel, kernel, stride, and padding settings. 
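One detail behind the `TFPad` docstring worth making concrete: `models/tf.py` keeps tensors in Keras NHWC layout, so spatial padding applies to axes 1 and 2 rather than the NCHW axes 2 and 3 that the PyTorch modules assume. A small numpy illustration (shapes chosen arbitrarily):

```python
import numpy as np

x = np.zeros((1, 4, 4, 8))  # NHWC: batch, height, width, channels
padded = np.pad(x, [(0, 0), (1, 1), (1, 1), (0, 0)])  # pad H and W, like TFPad with pad=1
print(padded.shape)  # (1, 6, 6, 8) -- spatial dims grew; batch and channels untouched
```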
@@ -179,7 +184,8 @@ def call(self, inputs): class TFFocus(keras.layers.Layer): - # Focus wh information into c-space + """Focuses spatial information into channel space using pixel shuffling and convolution for TensorFlow models.""" + def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None): """ Initializes TFFocus layer to focus width and height information into channel space with custom convolution @@ -201,7 +207,8 @@ def call(self, inputs): class TFBottleneck(keras.layers.Layer): - # Standard bottleneck + """Implements a TensorFlow bottleneck layer with optional shortcut connections for efficient feature extraction.""" + def __init__(self, c1, c2, shortcut=True, g=1, e=0.5, w=None): """ Initializes a standard bottleneck layer for TensorFlow models, expanding and contracting channels with optional @@ -223,7 +230,8 @@ def call(self, inputs): class TFCrossConv(keras.layers.Layer): - # Cross Convolution + """Implements a cross convolutional layer with optional expansion, grouping, and shortcut for TensorFlow.""" + def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False, w=None): """Initializes cross convolution layer with optional expansion, grouping, and shortcut addition capabilities.""" super().__init__() @@ -238,7 +246,8 @@ def call(self, inputs): class TFConv2d(keras.layers.Layer): - # Substitution for PyTorch nn.Conv2D + """Implements a TensorFlow 2D convolution layer, mimicking PyTorch's nn.Conv2D for specified filters and stride.""" + def __init__(self, c1, c2, k, s=1, g=1, bias=True, w=None): """Initializes a TensorFlow 2D convolution layer, mimicking PyTorch's nn.Conv2D functionality for given filter sizes and stride. @@ -261,7 +270,8 @@ def call(self, inputs): class TFBottleneckCSP(keras.layers.Layer): - # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks + """Implements a CSP bottleneck layer for TensorFlow models to enhance gradient flow and efficiency.""" + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None): """ Initializes CSP bottleneck layer with specified channel sizes, count, shortcut option, groups, and expansion @@ -289,7 +299,8 @@ def call(self, inputs): class TFC3(keras.layers.Layer): - # CSP Bottleneck with 3 convolutions + """CSP bottleneck layer with 3 convolutions for TensorFlow, supporting optional shortcuts and group convolutions.""" + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None): """ Initializes CSP Bottleneck with 3 convolutions, supporting optional shortcuts and group convolutions. @@ -313,7 +324,8 @@ def call(self, inputs): class TFC3x(keras.layers.Layer): - # 3 module with cross-convolutions + """A TensorFlow layer for enhanced feature extraction using cross-convolutions in object detection models.""" + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None): """ Initializes layer with cross-convolutions for enhanced feature extraction in object detection models. 
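The "pixel shuffling" that the `TFFocus` docstring mentions is the same space-to-depth slicing used by `Focus` in `models/common.py`: every 2x2 spatial block is redistributed into four times the channels before the convolution runs. A quick sketch, written with PyTorch NCHW indexing for brevity (TFFocus performs the NHWC equivalent):

```python
import torch

x = torch.arange(16.0).reshape(1, 1, 4, 4)
# gather the four interleaved sub-grids and stack them on the channel axis
y = torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1)
print(x.shape, "->", y.shape)  # torch.Size([1, 1, 4, 4]) -> torch.Size([1, 4, 2, 2])
```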
@@ -335,7 +347,8 @@ def call(self, inputs): class TFSPP(keras.layers.Layer): - # Spatial pyramid pooling layer used in YOLOv3-SPP + """Implements spatial pyramid pooling for YOLOv3-SPP with specific channels and kernel sizes.""" + def __init__(self, c1, c2, k=(5, 9, 13), w=None): """Initializes a YOLOv3-SPP layer with specific input/output channels and kernel sizes for pooling.""" super().__init__() @@ -351,7 +364,8 @@ def call(self, inputs): class TFSPPF(keras.layers.Layer): - # Spatial pyramid pooling-Fast layer + """Implements a fast spatial pyramid pooling layer for TensorFlow with optimized feature extraction.""" + def __init__(self, c1, c2, k=5, w=None): """Initializes a fast spatial pyramid pooling layer with customizable in/out channels, kernel size, and weights. @@ -373,7 +387,8 @@ def call(self, inputs): class TFDetect(keras.layers.Layer): - # TF YOLOv5 Detect layer + """Implements YOLOv5 object detection layer in TensorFlow for predicting bounding boxes and class probabilities.""" + def __init__(self, nc=80, anchors=(), ch=(), imgsz=(640, 640), w=None): """Initializes YOLOv5 detection layer for TensorFlow with configurable classes, anchors, channels, and image size. @@ -427,7 +442,8 @@ def _make_grid(nx=20, ny=20): class TFSegment(TFDetect): - # YOLOv5 Segment head for segmentation models + """YOLOv5 segmentation head for TensorFlow, combining detection and segmentation.""" + def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), imgsz=(640, 640), w=None): """Initializes YOLOv5 Segment head with specified channel depths, anchors, and input size for segmentation models. @@ -450,6 +466,8 @@ def call(self, x): class TFProto(keras.layers.Layer): + """Implements convolutional and upsampling layers for feature extraction in YOLOv5 segmentation.""" + def __init__(self, c1, c_=256, c2=32, w=None): """Initializes TFProto layer with convolutional and upsampling layers for feature extraction and transformation. @@ -466,7 +484,8 @@ def call(self, inputs): class TFUpsample(keras.layers.Layer): - # TF version of torch.nn.Upsample() + """Implements a TensorFlow upsampling layer with specified size, scale factor, and interpolation mode.""" + def __init__(self, size, scale_factor, mode, w=None): """ Initializes a TensorFlow upsampling layer with specified size, scale_factor, and mode, ensuring scale_factor is @@ -488,7 +507,8 @@ def call(self, inputs): class TFConcat(keras.layers.Layer): - # TF version of torch.concat() + """Implements TensorFlow's version of torch.concat() for concatenating tensors along the last dimension.""" + def __init__(self, dimension=1, w=None): """Initializes a TensorFlow layer for NCHW to NHWC concatenation, requiring dimension=1.""" super().__init__() @@ -581,7 +601,8 @@ def parse_model(d, ch, model, imgsz): class TFModel: - # TF YOLOv5 model + """Implements YOLOv5 model in TensorFlow, supporting TensorFlow, Keras, and TFLite formats for object detection.""" + def __init__(self, cfg="yolov5s.yaml", ch=3, nc=None, model=None, imgsz=(640, 640)): """Initializes TF YOLOv5 model with specified configuration, channels, classes, model instance, and input size. 
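The `TFSPP`/`TFSPPF` docstrings above rest on one identity worth spelling out: chaining stride-1 k=5 max-pools reproduces the receptive fields of SPP's parallel k=5/9/13 pools at lower cost, because max-pooling pads with negative infinity and the max of maxes over overlapping windows equals the max over their union. A quick check, written with PyTorch pooling for brevity:

```python
import torch
import torch.nn as nn

x = torch.randn(1, 8, 40, 40)
p5 = nn.MaxPool2d(kernel_size=5, stride=1, padding=2)

y1 = p5(x)   # one k=5 pool
y2 = p5(y1)  # two chained k=5 pools cover a 9x9 window
y3 = p5(y2)  # three chained k=5 pools cover a 13x13 window

spp = torch.cat([x, y1, nn.MaxPool2d(9, 1, 4)(x), nn.MaxPool2d(13, 1, 6)(x)], 1)
sppf = torch.cat([x, y1, y2, y3], 1)
print(torch.equal(spp, sppf))  # True: identical output, fewer large pooling windows
```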
@@ -653,7 +674,8 @@ def _xywh2xyxy(xywh): class AgnosticNMS(keras.layers.Layer): - # TF Agnostic NMS + """Performs agnostic non-maximum suppression (NMS) on detected objects using IoU and confidence thresholds.""" + def call(self, input, topk_all, iou_thres, conf_thres): """Performs agnostic NMS on input tensors using given thresholds and top-K selection.""" return tf.map_fn( diff --git a/models/yolo.py b/models/yolo.py index d89c5da018de..c0dd946e0356 100644 --- a/models/yolo.py +++ b/models/yolo.py @@ -70,7 +70,8 @@ class Detect(nn.Module): - # YOLOv5 Detect head for detection models + """YOLOv5 Detect head for processing input tensors and generating detection outputs in object detection models.""" + stride = None  # strides computed during build dynamic = False  # force grid reconstruction export = False  # export mode @@ -127,7 +128,8 @@ def _make_grid(self, nx=20, ny=20, i=0, torch_1_10=check_version(torch.__version class Segment(Detect): - # YOLOv5 Segment head for segmentation models + """YOLOv5 Segment head for segmentation models, extending Detect with mask and prototype layers.""" + def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), inplace=True): """Initializes YOLOv5 Segment head with options for mask count, protos, and channel adjustments.""" super().__init__(nc, anchors, ch, inplace) @@ -214,7 +216,8 @@ def _apply(self, fn): class DetectionModel(BaseModel): - # YOLOv5 detection model + """YOLOv5 detection model class for object detection tasks, supporting custom configurations and anchors.""" + def __init__(self, cfg="yolov5s.yaml", ch=3, nc=None, anchors=None): """Initializes YOLOv5 model with configuration file, input channels, number of classes, and custom anchors.""" super().__init__() @@ -332,14 +335,16 @@ def _initialize_biases(self, cf=None): class SegmentationModel(DetectionModel): - # YOLOv5 segmentation model + """YOLOv5 segmentation model for object detection and segmentation tasks with configurable parameters.""" + def __init__(self, cfg="yolov5s-seg.yaml", ch=3, nc=None, anchors=None): """Initializes a YOLOv5 segmentation model with configurable params: cfg (str) for configuration, ch (int) for channels, nc (int) for num classes, anchors (list).""" super().__init__(cfg, ch, nc, anchors) class ClassificationModel(BaseModel): - # YOLOv5 classification model + """YOLOv5 classification model for image classification tasks, initialized with a config file or detection model.""" + def __init__(self, cfg=None, model=None, nc=1000, cutoff=10): """Initializes YOLOv5 model with config file `cfg`, input channels `ch`, number of classes `nc`, and `cutoff` index. diff --git a/utils/__init__.py b/utils/__init__.py index c7ece49fae10..185afd6964d7 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -12,7 +12,8 @@ def emojis(str=""): class TryExcept(contextlib.ContextDecorator): - # YOLOv5 TryExcept class.
Usage: @TryExcept() decorator or 'with TryExcept():' context manager + """A context manager and decorator for error handling that prints an optional message with emojis on exception.""" + def __init__(self, msg=""): """Initializes TryExcept with an optional message, used as a decorator or context manager for error handling.""" self.msg = msg diff --git a/utils/activations.py b/utils/activations.py index 47f0a998024e..44bec8cd14b5 100644 --- a/utils/activations.py +++ b/utils/activations.py @@ -7,6 +7,8 @@ class SiLU(nn.Module): + """Applies the Sigmoid-weighted Linear Unit (SiLU) activation function, also known as Swish.""" + @staticmethod def forward(x): """ @@ -18,6 +20,8 @@ def forward(x): class Hardswish(nn.Module): + """Applies the Hardswish activation function, which is efficient for mobile and embedded devices.""" + @staticmethod def forward(x): """ @@ -38,7 +42,11 @@ def forward(x): class MemoryEfficientMish(nn.Module): + """Efficiently applies the Mish activation function using custom autograd for reduced memory usage.""" + class F(torch.autograd.Function): + """Implements a custom autograd function for memory-efficient Mish activation.""" + @staticmethod def forward(ctx, x): """Applies the Mish activation function, a smooth ReLU alternative, to the input tensor `x`.""" diff --git a/utils/augmentations.py b/utils/augmentations.py index bdbe07712716..af4c4057cba1 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -18,7 +18,8 @@ class Albumentations: - # YOLOv5 Albumentations class (optional, only used if package is installed) + """Provides optional data augmentation for YOLOv5 using Albumentations library if installed.""" + def __init__(self, size=640): """Initializes Albumentations class for optional data augmentation in YOLOv5 with specified input size.""" self.transform = None @@ -378,7 +379,8 @@ def classify_transforms(size=224): class LetterBox: - # YOLOv5 LetterBox class for image preprocessing, i.e. T.Compose([LetterBox(size), ToTensor()]) + """Resizes and pads images to specified dimensions while maintaining aspect ratio for YOLOv5 preprocessing.""" + def __init__(self, size=(640, 640), auto=False, stride=32): """Initializes a LetterBox object for YOLOv5 image preprocessing with optional auto sizing and stride adjustment. @@ -405,7 +407,8 @@ def __call__(self, im): class CenterCrop: - # YOLOv5 CenterCrop class for image preprocessing, i.e. T.Compose([CenterCrop(size), ToTensor()]) + """Applies center crop to an image, resizing it to the specified size while maintaining aspect ratio.""" + def __init__(self, size=640): """Initializes CenterCrop for image preprocessing, accepting single int or tuple for size, defaults to 640.""" super().__init__() @@ -424,7 +427,8 @@ def __call__(self, im): class ToTensor: - # YOLOv5 ToTensor class for image preprocessing, i.e. 
T.Compose([LetterBox(size), ToTensor()]) + """Converts BGR np.array image from HWC to RGB CHW format, normalizes to [0, 1], and supports FP16 if half=True.""" + def __init__(self, half=False): """Initializes ToTensor for YOLOv5 image preprocessing, with optional half precision (half=True for FP16).""" super().__init__() diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 0593e52d7875..61358eb9fb90 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -131,6 +131,8 @@ def seed_worker(worker_id): # Inherit from DistributedSampler and override iterator # https://github.com/pytorch/pytorch/blob/master/torch/utils/data/distributed.py class SmartDistributedSampler(distributed.DistributedSampler): + """A distributed sampler ensuring deterministic shuffling and balanced data distribution across GPUs.""" + def __iter__(self): """Yields indices for distributed data sampling, shuffled deterministically based on epoch and seed.""" g = torch.Generator() @@ -259,7 +261,8 @@ def __iter__(self): class LoadScreenshots: - # YOLOv5 screenshot dataloader, i.e. `python detect.py --source "screen 0 100 100 512 256"` + """Loads and processes screenshots for YOLOv5 detection from specified screen regions using mss.""" + def __init__(self, source, img_size=640, stride=32, auto=True, transforms=None): """ Initializes a screenshot dataloader for YOLOv5 with specified source region, image size, stride, auto, and @@ -428,7 +431,8 @@ def __len__(self): class LoadStreams: - # YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams` + """Loads and processes video streams for YOLOv5, supporting various sources including YouTube and IP cameras.""" + def __init__(self, sources="file.streams", img_size=640, stride=32, auto=True, transforms=None, vid_stride=1): """Initializes a stream loader for processing video streams with YOLOv5, supporting various sources including YouTube. @@ -531,7 +535,8 @@ def img2label_paths(img_paths): class LoadImagesAndLabels(Dataset): - # YOLOv5 train_loader/val_loader, loads images and labels for training and validation + """Loads images and their corresponding labels for training and validation in YOLOv5.""" + cache_version = 0.6 # dataset labels *.cache version rand_interp_methods = [cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_LANCZOS4] diff --git a/utils/general.py b/utils/general.py index 57db68a7ac76..8c0b2fcd3ef7 100644 --- a/utils/general.py +++ b/utils/general.py @@ -188,7 +188,8 @@ def user_config_dir(dir="Ultralytics", env_var="YOLOV5_CONFIG_DIR"): class Profile(contextlib.ContextDecorator): - # YOLOv5 Profile class. Usage: @Profile() decorator or 'with Profile():' context manager + """Context manager and decorator for profiling code execution time, with optional CUDA synchronization.""" + def __init__(self, t=0.0, device: torch.device = None): """Initializes a profiling context for YOLOv5 with optional timing threshold and device specification.""" self.t = t @@ -213,7 +214,8 @@ def time(self): class Timeout(contextlib.ContextDecorator): - # YOLOv5 Timeout class. 
Usage: @Timeout(seconds) decorator or 'with Timeout(seconds):' context manager + """Enforces a timeout on code execution, raising TimeoutError if the specified duration is exceeded.""" + def __init__(self, seconds, *, timeout_msg="", suppress_timeout_errors=True): """Initializes a timeout context/decorator with defined seconds, optional message, and error suppression.""" self.seconds = int(seconds) @@ -239,7 +241,8 @@ def __exit__(self, exc_type, exc_val, exc_tb): class WorkingDirectory(contextlib.ContextDecorator): - # Usage: @WorkingDirectory(dir) decorator or 'with WorkingDirectory(dir):' context manager + """Context manager/decorator to temporarily change the working directory within a 'with' statement or decorator.""" + def __init__(self, new_dir): """Initializes a context manager/decorator to temporarily change the working directory.""" self.dir = new_dir # new dir diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index a37ae0f8a9e9..92b4bcb05c58 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -76,7 +76,8 @@ def _json_default(value): class Loggers: - # YOLOv5 Loggers class + """Initializes and manages various logging utilities for tracking YOLOv5 training and validation metrics.""" + def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, include=LOGGERS): """Initializes loggers for YOLOv5 training and validation metrics, paths, and options.""" self.save_dir = save_dir diff --git a/utils/loss.py b/utils/loss.py index 7dc2ad3d2102..fd5912f49182 100644 --- a/utils/loss.py +++ b/utils/loss.py @@ -14,7 +14,8 @@ def smooth_BCE(eps=0.1): class BCEBlurWithLogitsLoss(nn.Module): - # BCEwithLogitLoss() with reduced missing label effects. + """Modified BCEWithLogitsLoss to reduce missing label effects in YOLOv5 training with optional alpha smoothing.""" + def __init__(self, alpha=0.05): """Initializes a modified BCEWithLogitsLoss with reduced missing label effects, taking optional alpha smoothing parameter. @@ -37,7 +38,8 @@ def forward(self, pred, true): class FocalLoss(nn.Module): - # Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) + """Applies focal loss to address class imbalance by modifying BCEWithLogitsLoss with gamma and alpha parameters.""" + def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): """Initializes FocalLoss with specified loss function, gamma, and alpha values; modifies loss reduction to 'none'. @@ -71,7 +73,8 @@ def forward(self, pred, true): class QFocalLoss(nn.Module): - # Wraps Quality focal loss around existing loss_fcn(), i.e. 
criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) + """Implements Quality Focal Loss to address class imbalance by modulating loss based on prediction confidence.""" + def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): """Initializes Quality Focal Loss with given loss function, gamma, alpha; modifies reduction to 'none'.""" super().__init__() @@ -101,6 +104,8 @@ def forward(self, pred, true): class ComputeLoss: + """Computes the total loss for YOLOv5 model predictions, including classification, box, and objectness losses.""" + sort_obj_iou = False # Compute losses diff --git a/utils/metrics.py b/utils/metrics.py index d7a5820bcddf..85c1a70ccbef 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -122,7 +122,8 @@ def compute_ap(recall, precision): class ConfusionMatrix: - # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix + """Generates and visualizes a confusion matrix for evaluating object detection classification performance.""" + def __init__(self, nc, conf=0.25, iou_thres=0.45): """Initializes ConfusionMatrix with given number of classes, confidence, and IoU threshold.""" self.matrix = np.zeros((nc + 1, nc + 1)) diff --git a/utils/plots.py b/utils/plots.py index 89ed1bb9f25f..e899ea4c15e7 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -29,7 +29,8 @@ class Colors: - # Ultralytics color palette https://ultralytics.com/ + """Provides an RGB color palette derived from Ultralytics color scheme for visualization tasks.""" + def __init__(self): """ Initializes the Colors class with a palette derived from Ultralytics color scheme, converting hex codes to RGB. diff --git a/utils/segment/dataloaders.py b/utils/segment/dataloaders.py index c2be5f0dfe9a..5f5666c38803 100644 --- a/utils/segment/dataloaders.py +++ b/utils/segment/dataloaders.py @@ -83,6 +83,8 @@ def create_dataloader( class LoadImagesAndLabelsAndMasks(LoadImagesAndLabels): # for training/testing + """Loads images, labels, and segmentation masks for training and testing YOLO models with augmentation support.""" + def __init__( self, path, diff --git a/utils/segment/loss.py b/utils/segment/loss.py index d4bc9d3aed54..b3e76ae00d9c 100644 --- a/utils/segment/loss.py +++ b/utils/segment/loss.py @@ -12,7 +12,8 @@ class ComputeLoss: - # Compute losses + """Computes the YOLOv5 model's loss components including classification, objectness, box, and mask losses.""" + def __init__(self, model, autobalance=False, overlap=False): """Initializes the compute loss function for YOLOv5 models with options for autobalancing and overlap handling. diff --git a/utils/segment/metrics.py b/utils/segment/metrics.py index 52ceb64eac93..091b5b16a5fa 100644 --- a/utils/segment/metrics.py +++ b/utils/segment/metrics.py @@ -54,6 +54,8 @@ def ap_per_class_box_and_mask( class Metric: + """Computes performance metrics like precision, recall, F1 score, and average precision for model evaluation.""" + def __init__(self) -> None: """Initializes performance metric attributes for precision, recall, F1 score, average precision, and class indices. 
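The modulation that the `FocalLoss` and `QFocalLoss` docstrings above describe reduces to a few lines. A minimal functional sketch — `focal_bce` is an illustrative name, the gamma/alpha defaults are copied from the docstring, and mean reduction is hard-coded, whereas the real class wraps an arbitrary `loss_fcn` and preserves its reduction mode:

```python
import torch
import torch.nn as nn


def focal_bce(pred, true, gamma=1.5, alpha=0.25):
    """Focal modulation of BCE-with-logits that down-weights easy, well-classified examples (sketch)."""
    bce = nn.BCEWithLogitsLoss(reduction="none")(pred, true)  # per-element loss
    p = torch.sigmoid(pred)
    p_t = true * p + (1 - true) * (1 - p)  # probability assigned to the correct class
    alpha_t = true * alpha + (1 - true) * (1 - alpha)
    return (alpha_t * (1.0 - p_t) ** gamma * bce).mean()  # (1 - p_t)**gamma shrinks easy-example loss


pred = torch.randn(4, 80)  # raw logits
true = torch.randint(0, 2, (4, 80)).float()
print(float(focal_bce(pred, true)))
```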
diff --git a/utils/torch_utils.py b/utils/torch_utils.py index af3d046498c7..8bf6585bca0f 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -420,7 +420,8 @@ def smart_resume(ckpt, optimizer, ema=None, weights="yolov5s.pt", epochs=300, re class EarlyStopping: - # YOLOv5 simple early stopper + """Implements early stopping to halt training when no improvement is observed for a specified number of epochs.""" + def __init__(self, patience=30): """Initializes simple early stopping mechanism for YOLOv5, with adjustable patience for non-improving epochs.""" self.best_fitness = 0.0 # i.e. mAP From e8a30cf820b273afbbf1c73a3f1088471a84fade Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 28 Aug 2024 12:31:50 +0200 Subject: [PATCH 1940/1976] Fix YOLOv8 links (#13285) Co-authored-by: UltralyticsAssistant --- README.md | 2 +- README.zh-CN.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 88729597e4cd..2c9146900ba0 100644 --- a/README.md +++ b/README.md @@ -56,7 +56,7 @@ pip install ultralytics ``` diff --git a/README.zh-CN.md b/README.zh-CN.md index 530fed5c13d1..315f087ffbc4 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -54,7 +54,7 @@ pip install ultralytics ``` From f44db0100518c65023095a47e71518c12aa1db90 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 1 Sep 2024 17:35:08 +0200 Subject: [PATCH 1941/1976] PyUpgrade 3.8 updates (#13289) * PyUpgrade 3.8 updates Signed-off-by: Glenn Jocher * Auto-format by https://ultralytics.com/actions --------- Signed-off-by: Glenn Jocher Co-authored-by: UltralyticsAssistant --- classify/predict.py | 2 +- detect.py | 2 +- segment/predict.py | 2 +- utils/metrics.py | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/classify/predict.py b/classify/predict.py index 33140e9b56ca..d77b4af33007 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -147,7 +147,7 @@ def run( save_path = str(save_dir / p.name) # im.jpg txt_path = str(save_dir / "labels" / p.stem) + ("" if dataset.mode == "image" else f"_{frame}") # im.txt - s += "%gx%g " % im.shape[2:] # print string + s += "{:g}x{:g} ".format(*im.shape[2:]) # print string annotator = Annotator(im0, example=str(names), pil=True) # Print results diff --git a/detect.py b/detect.py index 57d778740273..f404a250345d 100644 --- a/detect.py +++ b/detect.py @@ -237,7 +237,7 @@ def write_to_csv(image_name, prediction, confidence): p = Path(p) # to Path save_path = str(save_dir / p.name) # im.jpg txt_path = str(save_dir / "labels" / p.stem) + ("" if dataset.mode == "image" else f"_{frame}") # im.txt - s += "%gx%g " % im.shape[2:] # print string + s += "{:g}x{:g} ".format(*im.shape[2:]) # print string gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh imc = im0.copy() if save_crop else im0 # for save_crop annotator = Annotator(im0, line_width=line_thickness, example=str(names)) diff --git a/segment/predict.py b/segment/predict.py index 0bccaaaae9f7..40f7623b38ed 100644 --- a/segment/predict.py +++ b/segment/predict.py @@ -164,7 +164,7 @@ def run( p = Path(p) # to Path save_path = str(save_dir / p.name) # im.jpg txt_path = str(save_dir / "labels" / p.stem) + ("" if dataset.mode == "image" else f"_{frame}") # im.txt - s += "%gx%g " % im.shape[2:] # print string + s += "{:g}x{:g} ".format(*im.shape[2:]) # print string imc = im0.copy() if save_crop else im0 # for save_crop annotator = Annotator(im0, line_width=line_thickness, example=str(names)) if len(det): diff --git a/utils/metrics.py b/utils/metrics.py index 
85c1a70ccbef..e8dc9df4c168 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -347,7 +347,7 @@ def plot_pr_curve(px, py, ap, save_dir=Path("pr_curve.png"), names=()): else: ax.plot(px, py, linewidth=1, color="grey") # plot(recall, precision) - ax.plot(px, py.mean(1), linewidth=3, color="blue", label="all classes %.3f mAP@0.5" % ap[:, 0].mean()) + ax.plot(px, py.mean(1), linewidth=3, color="blue", label=f"all classes {ap[:, 0].mean():.3f} mAP@0.5") ax.set_xlabel("Recall") ax.set_ylabel("Precision") ax.set_xlim(0, 1) From 4d39325f8a5b3e226774f952c58de3a3f161d583 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 4 Sep 2024 10:39:26 +0200 Subject: [PATCH 1942/1976] Bump slackapi/slack-github-action from 1.26.0 to 1.27.0 in /.github/workflows (#13290) Bump slackapi/slack-github-action in /.github/workflows Bumps [slackapi/slack-github-action](https://github.com/slackapi/slack-github-action) from 1.26.0 to 1.27.0. - [Release notes](https://github.com/slackapi/slack-github-action/releases) - [Commits](https://github.com/slackapi/slack-github-action/compare/v1.26.0...v1.27.0) --- updated-dependencies: - dependency-name: slackapi/slack-github-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci-testing.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 639d0449c281..278cd9dd652c 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -142,7 +142,7 @@ jobs: steps: - name: Check for failure and notify if: (needs.Benchmarks.result == 'failure' || needs.Tests.result == 'failure' || needs.Benchmarks.result == 'cancelled' || needs.Tests.result == 'cancelled') && github.repository == 'ultralytics/yolov5' && (github.event_name == 'schedule' || github.event_name == 'push') - uses: slackapi/slack-github-action@v1.26.0 + uses: slackapi/slack-github-action@v1.27.0 with: payload: | {"text": " GitHub Actions error for ${{ github.workflow }} ❌\n\n\n*Repository:* https://github.com/${{ github.repository }}\n*Action:* https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\n*Author:* ${{ github.actor }}\n*Event:* ${{ github.event_name }}\n"} From 12b577c8d33d3a36e954cb3a9eca5fa55428563c Mon Sep 17 00:00:00 2001 From: Ultralytics Assistant <135830346+UltralyticsAssistant@users.noreply.github.com> Date: Fri, 6 Sep 2024 03:53:45 +0800 Subject: [PATCH 1943/1976] Ultralytics Code Refactor https://ultralytics.com/actions (#13301) * Refactor code for speed and clarity * Auto-format by https://ultralytics.com/actions * Update README.md Signed-off-by: Glenn Jocher * Update README.zh-CN.md Signed-off-by: Glenn Jocher * Auto-format by https://ultralytics.com/actions * Update README.md Signed-off-by: Glenn Jocher * Auto-format by https://ultralytics.com/actions --------- Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- CONTRIBUTING.md | 4 ++-- README.md | 20 ++++++++++---------- README.zh-CN.md | 20 ++++++++++---------- export.py | 17 +++-------------- utils/flask_rest_api/README.md | 2 +- utils/loggers/clearml/README.md | 14 +++++++------- 6 files changed, 33 insertions(+), 44 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 556c554a2514..7b9c1cd64724 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -64,10 
+64,10 @@ When asking a question, people will be better able to provide help if you provid - ✅ **Complete** – Provide **all** parts someone else needs to reproduce your problem in the question itself - ✅ **Reproducible** – Test the code you're about to provide to make sure it reproduces the problem -In addition to the above requirements, for [Ultralytics](https://ultralytics.com/) to provide assistance your code should be: +In addition to the above requirements, for [Ultralytics](https://www.ultralytics.com/) to provide assistance your code should be: - ✅ **Current** – Verify that your code is up-to-date with the current GitHub [master](https://github.com/ultralytics/yolov5/tree/master), and if necessary `git pull` or `git clone` a new copy to ensure your problem has not already been resolved by previous commits. -- ✅ **Unmodified** – Your problem must be reproducible without any modifications to the codebase in this repository. [Ultralytics](https://ultralytics.com/) does not provide support for custom code ⚠️. +- ✅ **Unmodified** – Your problem must be reproducible without any modifications to the codebase in this repository. [Ultralytics](https://www.ultralytics.com/) does not provide support for custom code ⚠️. If you believe your problem meets all of the above criteria, please close this issue and raise a new one using the 🐛 **Bug Report** [template](https://github.com/ultralytics/yolov5/issues/new/choose) and provide a [minimum reproducible example](https://docs.ultralytics.com/help/minimum_reproducible_example/) to help us better understand and diagnose your problem. diff --git a/README.md b/README.md index 2c9146900ba0..4e3709540e1b 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@

-[中文](https://docs.ultralytics.com/zh/) | [한국어](https://docs.ultralytics.com/ko/) | [日本語](https://docs.ultralytics.com/ja/) | [Русский](https://docs.ultralytics.com/ru/) | [Deutsch](https://docs.ultralytics.com/de/) | [Français](https://docs.ultralytics.com/fr/) | [Español](https://docs.ultralytics.com/es/) | [Português](https://docs.ultralytics.com/pt/) | [Türkçe](https://docs.ultralytics.com/tr/) | [Tiếng Việt](https://docs.ultralytics.com/vi/) | [العربية](https://docs.ultralytics.com/ar/) +[中文](https://docs.ultralytics.com/zh) | [한국어](https://docs.ultralytics.com/ko) | [日本語](https://docs.ultralytics.com/ja) | [Русский](https://docs.ultralytics.com/ru) | [Deutsch](https://docs.ultralytics.com/de) | [Français](https://docs.ultralytics.com/fr) | [Español](https://docs.ultralytics.com/es) | [Português](https://docs.ultralytics.com/pt) | [Türkçe](https://docs.ultralytics.com/tr) | [Tiếng Việt](https://docs.ultralytics.com/vi) | [العربية](https://docs.ultralytics.com/ar)
YOLOv5 CI @@ -22,7 +22,7 @@ YOLOv5 🚀 is the world's most loved vision AI, representing Docs for details, raise an issue on GitHub for support, and join our Discord community for questions and discussions! -To request an Enterprise License please complete the form at [Ultralytics Licensing](https://ultralytics.com/license). +To request an Enterprise License please complete the form at [Ultralytics Licensing](https://www.ultralytics.com/license).
Ultralytics GitHub @@ -181,13 +181,13 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml -
-| Roboflow | ClearML ⭐ NEW | Comet ⭐ NEW | Neural Magic ⭐ NEW | -| :--------------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------: | -| Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) | Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!) | Free forever, [Comet](https://bit.ly/yolov5-readme-comet2) lets you save YOLOv5 models, resume training, and interactively visualise and debug predictions | Run YOLOv5 inference up to 6x faster with [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic) | +| Roboflow | ClearML ⭐ NEW | Comet ⭐ NEW | Neural Magic ⭐ NEW | +| :--------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------: | +| Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) | Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://clear.ml/) (open-source!) | Free forever, [Comet](https://bit.ly/yolov5-readme-comet2) lets you save YOLOv5 models, resume training, and interactively visualise and debug predictions | Run YOLOv5 inference up to 6x faster with [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic) | ##
Ultralytics HUB
-Experience seamless AI with [Ultralytics HUB](https://ultralytics.com/hub) ⭐, the all-in-one solution for data visualization, YOLOv5 and YOLOv8 🚀 model training and deployment, without any coding. Transform images into actionable insights and bring your AI visions to life with ease using our cutting-edge platform and user-friendly [Ultralytics App](https://ultralytics.com/app_install). Start your journey for **Free** now! +Experience seamless AI with [Ultralytics HUB](https://www.ultralytics.com/hub) ⭐, the all-in-one solution for data visualization, YOLOv5 and YOLOv8 🚀 model training and deployment, without any coding. Transform images into actionable insights and bring your AI visions to life with ease using our cutting-edge platform and user-friendly [Ultralytics App](https://www.ultralytics.com/app-install). Start your journey for **Free** now! @@ -430,7 +430,7 @@ Get started in seconds with our verified environments. Click each icon below for ##
Contribute
-We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible. Please see our [Contributing Guide](https://docs.ultralytics.com/help/contributing/) to get started, and fill out the [YOLOv5 Survey](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experiences. Thank you to all our contributors! +We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible. Please see our [Contributing Guide](https://docs.ultralytics.com/help/contributing/) to get started, and fill out the [YOLOv5 Survey](https://www.ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experiences. Thank you to all our contributors! @@ -441,12 +441,12 @@ We love your input! We want to make contributing to YOLOv5 as easy and transpare Ultralytics offers two licensing options to accommodate diverse use cases: -- **AGPL-3.0 License**: This [OSI-approved](https://opensource.org/licenses/) open-source license is ideal for students and enthusiasts, promoting open collaboration and knowledge sharing. See the [LICENSE](https://github.com/ultralytics/yolov5/blob/master/LICENSE) file for more details. -- **Enterprise License**: Designed for commercial use, this license permits seamless integration of Ultralytics software and AI models into commercial goods and services, bypassing the open-source requirements of AGPL-3.0. If your scenario involves embedding our solutions into a commercial offering, reach out through [Ultralytics Licensing](https://ultralytics.com/license). +- **AGPL-3.0 License**: This [OSI-approved](https://opensource.org/license) open-source license is ideal for students and enthusiasts, promoting open collaboration and knowledge sharing. See the [LICENSE](https://github.com/ultralytics/yolov5/blob/master/LICENSE) file for more details. +- **Enterprise License**: Designed for commercial use, this license permits seamless integration of Ultralytics software and AI models into commercial goods and services, bypassing the open-source requirements of AGPL-3.0. If your scenario involves embedding our solutions into a commercial offering, reach out through [Ultralytics Licensing](https://www.ultralytics.com/license). ##
Contact
-For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues), and join our [Discord](https://ultralytics.com/discord) community for questions and discussions! +For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues), and join our [Discord](https://discord.com/invite/ultralytics) community for questions and discussions!
diff --git a/README.zh-CN.md b/README.zh-CN.md index 315f087ffbc4..f1dc961ee392 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -4,7 +4,7 @@

-[中文](https://docs.ultralytics.com/zh/) | [한국어](https://docs.ultralytics.com/ko/) | [日本語](https://docs.ultralytics.com/ja/) | [Русский](https://docs.ultralytics.com/ru/) | [Deutsch](https://docs.ultralytics.com/de/) | [Français](https://docs.ultralytics.com/fr/) | [Español](https://docs.ultralytics.com/es/) | [Português](https://docs.ultralytics.com/pt/) | [Türkçe](https://docs.ultralytics.com/tr/) | [Tiếng Việt](https://docs.ultralytics.com/vi/) | [العربية](https://docs.ultralytics.com/ar/) +[中文](https://docs.ultralytics.com/zh) | [한국어](https://docs.ultralytics.com/ko) | [日本語](https://docs.ultralytics.com/ja) | [Русский](https://docs.ultralytics.com/ru) | [Deutsch](https://docs.ultralytics.com/de) | [Français](https://docs.ultralytics.com/fr) | [Español](https://docs.ultralytics.com/es) | [Português](https://docs.ultralytics.com/pt) | [Türkçe](https://docs.ultralytics.com/tr) | [Tiếng Việt](https://docs.ultralytics.com/vi) | [العربية](https://docs.ultralytics.com/ar)
YOLOv5 CI @@ -22,7 +22,7 @@ YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表文档 了解详细信息,在 GitHub 上提交问题以获得支持,并加入我们的 Discord 社区进行问题和讨论! -如需申请企业许可,请在 [Ultralytics Licensing](https://ultralytics.com/license) 处填写表格 +如需申请企业许可,请在 [Ultralytics Licensing](https://www.ultralytics.com/license) 处填写表格
Ultralytics GitHub @@ -180,13 +180,13 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml -
-| Roboflow | ClearML ⭐ 新 | Comet ⭐ 新 | Neural Magic ⭐ 新 | -| :--------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------: | -| 将您的自定义数据集进行标注并直接导出到 YOLOv5 以进行训练 [Roboflow](https://roboflow.com/?ref=ultralytics) | 自动跟踪、可视化甚至远程训练 YOLOv5 [ClearML](https://cutt.ly/yolov5-readme-clearml)(开源!) | 永远免费,[Comet](https://bit.ly/yolov5-readme-comet2)可让您保存 YOLOv5 模型、恢复训练以及交互式可视化和调试预测 | 使用 [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic),运行 YOLOv5 推理的速度最高可提高6倍 | +| Roboflow | ClearML ⭐ 新 | Comet ⭐ 新 | Neural Magic ⭐ 新 | +| :--------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------: | +| 将您的自定义数据集进行标注并直接导出到 YOLOv5 以进行训练 [Roboflow](https://roboflow.com/?ref=ultralytics) | 自动跟踪、可视化甚至远程训练 YOLOv5 [ClearML](https://clear.ml/)(开源!) | 永远免费,[Comet](https://bit.ly/yolov5-readme-comet2)可让您保存 YOLOv5 模型、恢复训练以及交互式可视化和调试预测 | 使用 [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic),运行 YOLOv5 推理的速度最高可提高6倍 | ##
Ultralytics HUB
-[Ultralytics HUB](https://ultralytics.com/hub) 是我们的⭐**新的**用于可视化数据集、训练 YOLOv5 🚀 模型并以无缝体验部署到现实世界的无代码解决方案。现在开始 **免费** 使用他! +[Ultralytics HUB](https://www.ultralytics.com/hub) 是我们的⭐**新的**用于可视化数据集、训练 YOLOv5 🚀 模型并以无缝体验部署到现实世界的无代码解决方案。现在开始 **免费** 使用他! @@ -430,7 +430,7 @@ python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --inclu ##
贡献
-我们喜欢您的意见或建议!我们希望尽可能简单和透明地为 YOLOv5 做出贡献。请看我们的 [投稿指南](https://docs.ultralytics.com/help/contributing/),并填写 [YOLOv5调查](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) 向我们发送您的体验反馈。感谢我们所有的贡献者! +我们喜欢您的意见或建议!我们希望尽可能简单和透明地为 YOLOv5 做出贡献。请看我们的 [投稿指南](https://docs.ultralytics.com/help/contributing/),并填写 [YOLOv5调查](https://www.ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) 向我们发送您的体验反馈。感谢我们所有的贡献者! @@ -441,12 +441,12 @@ python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --inclu Ultralytics 提供两种许可证选项以适应各种使用场景: -- **AGPL-3.0 许可证**:这个[OSI 批准](https://opensource.org/licenses/)的开源许可证非常适合学生和爱好者,可以推动开放的协作和知识分享。请查看[LICENSE](https://github.com/ultralytics/yolov5/blob/master/LICENSE) 文件以了解更多细节。 -- **企业许可证**:专为商业用途设计,该许可证允许将 Ultralytics 的软件和 AI 模型无缝集成到商业产品和服务中,从而绕过 AGPL-3.0 的开源要求。如果您的场景涉及将我们的解决方案嵌入到商业产品中,请通过 [Ultralytics Licensing](https://ultralytics.com/license)与我们联系。 +- **AGPL-3.0 许可证**:这个[OSI 批准](https://opensource.org/license)的开源许可证非常适合学生和爱好者,可以推动开放的协作和知识分享。请查看[LICENSE](https://github.com/ultralytics/yolov5/blob/master/LICENSE) 文件以了解更多细节。 +- **企业许可证**:专为商业用途设计,该许可证允许将 Ultralytics 的软件和 AI 模型无缝集成到商业产品和服务中,从而绕过 AGPL-3.0 的开源要求。如果您的场景涉及将我们的解决方案嵌入到商业产品中,请通过 [Ultralytics Licensing](https://www.ultralytics.com/license)与我们联系。 ##
联系方式
-对于 Ultralytics 的错误报告和功能请求,请访问 [GitHub Issues](https://github.com/ultralytics/yolov5/issues),并加入我们的 [Discord](https://ultralytics.com/discord) 社区进行问题和讨论! +对于 Ultralytics 的错误报告和功能请求,请访问 [GitHub Issues](https://github.com/ultralytics/yolov5/issues),并加入我们的 [Discord](https://discord.com/invite/ultralytics) 社区进行问题和讨论!
diff --git a/export.py b/export.py index 2c697eca1c96..9c6873939f7b 100644 --- a/export.py +++ b/export.py @@ -566,11 +566,7 @@ def export_coreml(model, im, file, int8, half, nms, mlmodel, prefix=colorstr("Co else: f = file.with_suffix(".mlpackage") convert_to = "mlprogram" - if half: - precision = ct.precision.FLOAT16 - else: - precision = ct.precision.FLOAT32 - + precision = ct.precision.FLOAT16 if half else ct.precision.FLOAT32 if nms: model = iOSModel(model, im) ts = torch.jit.trace(model, im, strict=False) # TorchScript model @@ -1138,11 +1134,7 @@ def pipeline_coreml(model, im, file, names, y, mlmodel, prefix=colorstr("CoreML import coremltools as ct from PIL import Image - if mlmodel: - f = file.with_suffix(".mlmodel") # filename - else: - f = file.with_suffix(".mlpackage") # filename - + f = file.with_suffix(".mlmodel") if mlmodel else file.with_suffix(".mlpackage") print(f"{prefix} starting pipeline with coremltools {ct.__version__}...") batch_size, ch, h, w = list(im.shape) # BCHW t = time.time() @@ -1186,10 +1178,7 @@ def pipeline_coreml(model, im, file, names, y, mlmodel, prefix=colorstr("CoreML # Model from spec weights_dir = None - if mlmodel: - weights_dir = None - else: - weights_dir = str(f / "Data/com.apple.CoreML/weights") + weights_dir = None if mlmodel else str(f / "Data/com.apple.CoreML/weights") model = ct.models.MLModel(spec, weights_dir=weights_dir) # 3. Create NMS protobuf diff --git a/utils/flask_rest_api/README.md b/utils/flask_rest_api/README.md index b18a3011cf32..d3ffaa2069ac 100644 --- a/utils/flask_rest_api/README.md +++ b/utils/flask_rest_api/README.md @@ -4,7 +4,7 @@ ## Requirements -[Flask](https://palletsprojects.com/p/flask/) is required. Install with: +[Flask](https://palletsprojects.com/projects/flask/) is required. Install with: ```shell $ pip install Flask diff --git a/utils/loggers/clearml/README.md b/utils/loggers/clearml/README.md index a61e3025445f..cf5f787b1a5d 100644 --- a/utils/loggers/clearml/README.md +++ b/utils/loggers/clearml/README.md @@ -4,7 +4,7 @@ ## About ClearML -[ClearML](https://cutt.ly/yolov5-tutorial-clearml) is an [open-source](https://github.com/allegroai/clearml) toolbox designed to save you time ⏱️. +[ClearML](https://clear.ml) is an [open-source](https://github.com/allegroai/clearml) toolbox designed to save you time ⏱️. 🔨 Track every YOLOv5 training run in the experiment manager @@ -18,13 +18,13 @@ And so much more. It's up to you how many of these tools you want to use, you can stick to the experiment manager, or chain them all together into an impressive pipeline! -![ClearML scalars dashboard](https://github.com/thepycoder/clearml_screenshots/raw/main/experiment_manager_with_compare.gif) +![ClearML scalars dashboard](https://raw.githubusercontent.com/thepycoder/clearml_screenshots/main/experiment_manager_with_compare.gif) ## 🦾 Setting Things Up To keep track of your experiments and/or data, ClearML needs to communicate to a server. You have 2 options to get one: -Either sign up for free to the [ClearML Hosted Service](https://cutt.ly/yolov5-tutorial-clearml) or you can set up your own server, see [here](https://clear.ml/docs/latest/docs/deploying_clearml/clearml_server). Even the server is open-source, so even if you're dealing with sensitive data, you should be good to go! +Either sign up for free to the [ClearML Hosted Service](https://clear.ml) or you can set up your own server, see [here](https://clear.ml/docs/latest/docs/deploying_clearml/clearml_server). 
Even the server is open-source, so even if you're dealing with sensitive data, you should be good to go! 1. Install the `clearml` python package: @@ -85,7 +85,7 @@ There even more we can do with all of this information, like hyperparameter opti Versioning your data separately from your code is generally a good idea and makes it easy to acquire the latest version too. This repository supports supplying a dataset version ID, and it will make sure to get the data if it's not there yet. Next to that, this workflow also saves the used dataset ID as part of the task parameters, so you will always know for sure which data was used in which experiment! -![ClearML Dataset Interface](https://github.com/thepycoder/clearml_screenshots/raw/main/clearml_data.gif) +![ClearML Dataset Interface](https://raw.githubusercontent.com/thepycoder/clearml_screenshots/main/clearml_data.gif) ### Prepare Your Dataset @@ -163,13 +163,13 @@ pip install optuna python utils/loggers/clearml/hpo.py ``` -![HPO](https://github.com/thepycoder/clearml_screenshots/raw/main/hpo.png) +![HPO](https://raw.githubusercontent.com/thepycoder/clearml_screenshots/main/hpo.png) ## 🤯 Remote Execution (advanced) Running HPO locally is really handy, but what if we want to run our experiments on a remote machine instead? Maybe you have access to a very powerful GPU machine on-site, or you have some budget to use cloud GPUs. This is where the ClearML Agent comes into play. Check out what the agent can do here: -- [YouTube video](https://youtu.be/MX3BrXnaULs) +- [YouTube video](https://www.youtube.com/watch?v=MX3BrXnaULs&feature=youtu.be) - [Documentation](https://clear.ml/docs/latest/docs/clearml_agent) In short: every experiment tracked by the experiment manager contains enough information to reproduce it on a different machine (installed packages, uncommitted changes etc.). So a ClearML agent does just that: it listens to a queue for incoming tasks and when it finds one, it recreates the environment and runs it while still reporting scalars, plots etc. to the experiment manager. @@ -190,7 +190,7 @@ With our agent running, we can give it some work. 
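As a concrete sketch of the remote-execution flow the ClearML README above describes, the snippet below shows how a tracked task can hand itself off to an agent queue. This is a minimal illustration, not part of the patch: the project, task, and queue names are assumptions, and it presumes a reachable ClearML server with a `clearml-agent` listening on that queue.

```python
from clearml import Task  # pip install clearml

# Register this run with the ClearML experiment manager.
task = Task.init(project_name="YOLOv5", task_name="train-yolov5s")

# Stop executing locally and enqueue the task; an agent polling the
# "default" queue recreates the environment (installed packages,
# uncommitted changes) and runs it, still reporting scalars and plots.
task.execute_remotely(queue_name="default", exit_process=True)

# Code below this line runs on the agent's machine, not locally.
```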
Remember from the HPO section ⏳ Enqueue the task to any of the queues by right-clicking it -![Enqueue a task from the UI](https://github.com/thepycoder/clearml_screenshots/raw/main/enqueue.gif) +![Enqueue a task from the UI](https://raw.githubusercontent.com/thepycoder/clearml_screenshots/main/enqueue.gif) ### Executing A Task Remotely From f7322921acec565c1296a47305db6de3b5a593f7 Mon Sep 17 00:00:00 2001 From: Ultralytics Assistant <135830346+UltralyticsAssistant@users.noreply.github.com> Date: Thu, 12 Sep 2024 20:05:58 +0200 Subject: [PATCH 1944/1976] Delete .github/workflows/greetings.yml (#13309) * Delete greetings.yml * Update format.yml Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher --- .github/workflows/format.yml | 41 +++++++++++++++++++++ .github/workflows/greetings.yml | 65 --------------------------------- 2 files changed, 41 insertions(+), 65 deletions(-) delete mode 100644 .github/workflows/greetings.yml diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml index b326e778da64..3357e30dacc3 100644 --- a/.github/workflows/format.yml +++ b/.github/workflows/format.yml @@ -27,3 +27,44 @@ jobs: summary: true # print PR summary with GPT4o (requires 'openai_api_key') openai_azure_api_key: ${{ secrets.OPENAI_AZURE_API_KEY }} openai_azure_endpoint: ${{ secrets.OPENAI_AZURE_ENDPOINT }} + first_issue_response: | + 👋 Hello @${{ github.actor }}, thank you for your interest in YOLOv5 🚀! Please visit our ⭐️ [Tutorials](https://docs.ultralytics.com/yolov5/) to get started, where you can find quickstart guides for simple tasks like [Custom Data Training](https://docs.ultralytics.com/yolov5/tutorials/train_custom_data/) all the way to advanced concepts like [Hyperparameter Evolution](https://docs.ultralytics.com/yolov5/tutorials/hyperparameter_evolution/). + + If this is a 🐛 Bug Report, please provide a **minimum reproducible example** to help us debug it. + + If this is a custom training ❓ Question, please provide as much information as possible, including dataset image examples and training logs, and verify you are following our [Tips for Best Training Results](https://docs.ultralytics.com/guides/model-training-tips//). + + ## Requirements + + [**Python>=3.8.0**](https://www.python.org/) with all [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) installed including [**PyTorch>=1.8**](https://pytorch.org/get-started/locally/). To get started: + ```bash + git clone https://github.com/ultralytics/yolov5 # clone + cd yolov5 + pip install -r requirements.txt # install + ``` + + ## Environments + + YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled): + + - **Notebooks** with free GPU: Run on Gradient Open In Colab Open In Kaggle + - **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/) + - **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/) + - **Docker Image**. 
See [Docker Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/docker_image_quickstart_tutorial/) Docker Pulls + + ## Status + + YOLOv5 CI + + If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on macOS, Windows, and Ubuntu every 24 hours and on every commit. + + ## Introducing YOLOv8 🚀 + + We're excited to announce the launch of our latest state-of-the-art (SOTA) object detection model for 2023 - [YOLOv8](https://github.com/ultralytics/ultralytics) 🚀! + + Designed to be fast, accurate, and easy to use, YOLOv8 is an ideal choice for a wide range of object detection, image segmentation and image classification tasks. With YOLOv8, you'll be able to quickly and accurately detect objects in real-time, streamline your workflows, and achieve new levels of accuracy in your projects. + + Check out our [YOLOv8 Docs](https://docs.ultralytics.com/) for details and get started with: + ```bash + pip install ultralytics + ``` diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml deleted file mode 100644 index 212211d2445e..000000000000 --- a/.github/workflows/greetings.yml +++ /dev/null @@ -1,65 +0,0 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license - -name: Greetings - -on: - pull_request_target: - types: [opened] - issues: - types: [opened] - -jobs: - greeting: - runs-on: ubuntu-latest - steps: - - uses: actions/first-interaction@v1 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - pr-message: | - 👋 Hello @${{ github.actor }}, thank you for submitting a YOLOv5 🚀 PR! To allow your work to be integrated as seamlessly as possible, we advise you to: - - - ✅ Verify your PR is **up-to-date** with `ultralytics/yolov5` `master` branch. If your PR is behind you can update your code by clicking the 'Update branch' button or by running `git pull` and `git merge master` locally. - - ✅ Verify all YOLOv5 Continuous Integration (CI) **checks are passing**. - - ✅ Reduce changes to the absolute **minimum** required for your bug fix or feature addition. _"It is not daily increase but daily decrease, hack away the unessential. The closer to the source, the less wastage there is."_ — Bruce Lee - - issue-message: | - 👋 Hello @${{ github.actor }}, thank you for your interest in YOLOv5 🚀! Please visit our ⭐️ [Tutorials](https://docs.ultralytics.com/yolov5/) to get started, where you can find quickstart guides for simple tasks like [Custom Data Training](https://docs.ultralytics.com/yolov5/tutorials/train_custom_data/) all the way to advanced concepts like [Hyperparameter Evolution](https://docs.ultralytics.com/yolov5/tutorials/hyperparameter_evolution/). - - If this is a 🐛 Bug Report, please provide a **minimum reproducible example** to help us debug it. - - If this is a custom training ❓ Question, please provide as much information as possible, including dataset image examples and training logs, and verify you are following our [Tips for Best Training Results](https://docs.ultralytics.com/guides/model-training-tips//). 
- - ## Requirements - - [**Python>=3.8.0**](https://www.python.org/) with all [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) installed including [**PyTorch>=1.8**](https://pytorch.org/get-started/locally/). To get started: - ```bash - git clone https://github.com/ultralytics/yolov5 # clone - cd yolov5 - pip install -r requirements.txt # install - ``` - - ## Environments - - YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled): - - - **Notebooks** with free GPU: Run on Gradient Open In Colab Open In Kaggle - - **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/) - - **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/) - - **Docker Image**. See [Docker Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/docker_image_quickstart_tutorial/) Docker Pulls - - ## Status - - YOLOv5 CI - - If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on macOS, Windows, and Ubuntu every 24 hours and on every commit. - - ## Introducing YOLOv8 🚀 - - We're excited to announce the launch of our latest state-of-the-art (SOTA) object detection model for 2023 - [YOLOv8](https://github.com/ultralytics/ultralytics) 🚀! - - Designed to be fast, accurate, and easy to use, YOLOv8 is an ideal choice for a wide range of object detection, image segmentation and image classification tasks. With YOLOv8, you'll be able to quickly and accurately detect objects in real-time, streamline your workflows, and achieve new levels of accuracy in your projects. 
- - Check out our [YOLOv8 Docs](https://docs.ultralytics.com/) for details and get started with: - ```bash - pip install ultralytics - ``` From c5bb4087aa8bb7d0289cc558821fbe260aa33fa8 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 20 Sep 2024 14:28:29 +0200 Subject: [PATCH 1945/1976] [Snyk] Fix for 2 vulnerabilities (#13322) * fix: requirements.txt to reduce vulnerabilities The following vulnerabilities are fixed by pinning transitive dependencies: - https://snyk.io/vuln/SNYK-PYTHON-TQDM-6807582 - https://snyk.io/vuln/SNYK-PYTHON-ZIPP-7430899 * Update requirements.txt Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher Co-authored-by: snyk-bot --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index e10fd0cfd7d9..8fcb345365e5 100644 --- a/requirements.txt +++ b/requirements.txt @@ -14,7 +14,7 @@ scipy>=1.4.1 thop>=0.1.1 # FLOPs computation torch>=1.8.0 # see https://pytorch.org/get-started/locally (recommended) torchvision>=0.9.0 -tqdm>=4.64.0 +tqdm>=4.66.3 ultralytics>=8.2.34 # https://ultralytics.com # protobuf<=3.20.1 # https://github.com/ultralytics/yolov5/issues/8012 From b163ff8d257fe4bff0322a1379decacbf71bb0c1 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 20 Sep 2024 14:29:10 +0200 Subject: [PATCH 1946/1976] [Snyk] Fix for 17 vulnerabilities (#13323) * fix: requirements.txt to reduce vulnerabilities The following vulnerabilities are fixed by pinning transitive dependencies: - https://snyk.io/vuln/SNYK-PYTHON-FONTTOOLS-6133203 - https://snyk.io/vuln/SNYK-PYTHON-NUMPY-2321964 - https://snyk.io/vuln/SNYK-PYTHON-NUMPY-2321966 - https://snyk.io/vuln/SNYK-PYTHON-NUMPY-2321970 - https://snyk.io/vuln/SNYK-PYTHON-PILLOW-5918878 - https://snyk.io/vuln/SNYK-PYTHON-PILLOW-6043904 - https://snyk.io/vuln/SNYK-PYTHON-PILLOW-6182918 - https://snyk.io/vuln/SNYK-PYTHON-PILLOW-6219984 - https://snyk.io/vuln/SNYK-PYTHON-PILLOW-6219986 - https://snyk.io/vuln/SNYK-PYTHON-PILLOW-6514866 - https://snyk.io/vuln/SNYK-PYTHON-REQUESTS-6928867 - https://snyk.io/vuln/SNYK-PYTHON-SETUPTOOLS-3180412 - https://snyk.io/vuln/SNYK-PYTHON-SETUPTOOLS-7448482 - https://snyk.io/vuln/SNYK-PYTHON-TORCH-6619806 - https://snyk.io/vuln/SNYK-PYTHON-TORCH-6649934 - https://snyk.io/vuln/SNYK-PYTHON-URLLIB3-7267250 - https://snyk.io/vuln/SNYK-PYTHON-WHEEL-3180413 * Update requirements.txt Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher Co-authored-by: snyk-bot --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 8fcb345365e5..dcd23bf5310c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -9,7 +9,7 @@ opencv-python>=4.1.1 pillow>=10.3.0 psutil # system resources PyYAML>=5.3.1 -requests>=2.32.0 +requests>=2.32.2 scipy>=1.4.1 thop>=0.1.1 # FLOPs computation torch>=1.8.0 # see https://pytorch.org/get-started/locally (recommended) From 907bef2f63771a352346d0b4f068946d7f961f0c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 12:52:36 +0200 Subject: [PATCH 1947/1976] Bump contributor-assistant/github-action from 2.5.1 to 2.6.1 in /.github/workflows (#13337) Bump contributor-assistant/github-action in /.github/workflows Bumps [contributor-assistant/github-action](https://github.com/contributor-assistant/github-action) from 2.5.1 to 2.6.1. 
- [Release notes](https://github.com/contributor-assistant/github-action/releases) - [Commits](https://github.com/contributor-assistant/github-action/compare/v2.5.1...v2.6.1) --- updated-dependencies: - dependency-name: contributor-assistant/github-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/cla.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/cla.yml b/.github/workflows/cla.yml index 2fb5bfb8f616..f4d26c4c35e5 100644 --- a/.github/workflows/cla.yml +++ b/.github/workflows/cla.yml @@ -26,7 +26,7 @@ jobs: steps: - name: CLA Assistant if: (github.event.comment.body == 'recheck' || github.event.comment.body == 'I have read the CLA Document and I sign the CLA') || github.event_name == 'pull_request_target' - uses: contributor-assistant/github-action@v2.5.1 + uses: contributor-assistant/github-action@v2.6.1 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # Must be repository secret PAT From ce15409721762b0549834a943d3bcacd44a8c1a3 Mon Sep 17 00:00:00 2001 From: Ultralytics Assistant <135830346+UltralyticsAssistant@users.noreply.github.com> Date: Sat, 5 Oct 2024 14:10:12 +0200 Subject: [PATCH 1948/1976] Ultralytics Code Refactor https://ultralytics.com/actions (#13344) Refactor code for speed and clarity --- README.md | 40 ++++++++++++++++----------------- README.zh-CN.md | 38 +++++++++++++++---------------- utils/loggers/clearml/README.md | 4 ++-- 3 files changed, 41 insertions(+), 41 deletions(-) diff --git a/README.md b/README.md index 4e3709540e1b..4208c1e9bc3a 100644 --- a/README.md +++ b/README.md @@ -47,7 +47,7 @@ To request an Enterprise License please complete the form at [Ultralytics Licens We are thrilled to announce the launch of Ultralytics YOLOv8 🚀, our NEW cutting-edge, state-of-the-art (SOTA) model released at **[https://github.com/ultralytics/ultralytics](https://github.com/ultralytics/ultralytics)**. YOLOv8 is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of object detection, image segmentation and image classification tasks. -See the [YOLOv8 Docs](https://docs.ultralytics.com) for details and get started with: +See the [YOLOv8 Docs](https://docs.ultralytics.com/) for details and get started with: [![PyPI version](https://badge.fury.io/py/ultralytics.svg)](https://badge.fury.io/py/ultralytics) [![Downloads](https://static.pepy.tech/badge/ultralytics)](https://pepy.tech/project/ultralytics) @@ -62,7 +62,7 @@ pip install ultralytics ##
Documentation
-See the [YOLOv5 Docs](https://docs.ultralytics.com/yolov5) for full documentation on training, testing and deployment. See below for quickstart examples. +See the [YOLOv5 Docs](https://docs.ultralytics.com/yolov5/) for full documentation on training, testing and deployment. See below for quickstart examples.
Install @@ -80,7 +80,7 @@ pip install -r requirements.txt # install
Inference -YOLOv5 [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading) inference. [Models](https://github.com/ultralytics/yolov5/tree/master/models) download automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). +YOLOv5 [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading/) inference. [Models](https://github.com/ultralytics/yolov5/tree/master/models) download automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). ```python import torch @@ -123,7 +123,7 @@ python detect.py --weights yolov5s.pt --source 0 #
Training -The commands below reproduce YOLOv5 [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) results. [Models](https://github.com/ultralytics/yolov5/tree/master/models) and [datasets](https://github.com/ultralytics/yolov5/tree/master/data) download automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). Training times for YOLOv5n/s/m/l/x are 1/2/4/6/8 days on a V100 GPU ([Multi-GPU](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training) times faster). Use the largest `--batch-size` possible, or pass `--batch-size -1` for YOLOv5 [AutoBatch](https://github.com/ultralytics/yolov5/pull/5092). Batch sizes shown for V100-16GB. +The commands below reproduce YOLOv5 [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) results. [Models](https://github.com/ultralytics/yolov5/tree/master/models) and [datasets](https://github.com/ultralytics/yolov5/tree/master/data) download automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). Training times for YOLOv5n/s/m/l/x are 1/2/4/6/8 days on a V100 GPU ([Multi-GPU](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training/) times faster). Use the largest `--batch-size` possible, or pass `--batch-size -1` for YOLOv5 [AutoBatch](https://github.com/ultralytics/yolov5/pull/5092). Batch sizes shown for V100-16GB. ```bash python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml --batch-size 128 @@ -140,22 +140,22 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml -
Tutorials -- [Train Custom Data](https://docs.ultralytics.com/yolov5/tutorials/train_custom_data) 🚀 RECOMMENDED +- [Train Custom Data](https://docs.ultralytics.com/yolov5/tutorials/train_custom_data/) 🚀 RECOMMENDED - [Tips for Best Training Results](https://docs.ultralytics.com/guides/model-training-tips/) ☘️ -- [Multi-GPU Training](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training) -- [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading) 🌟 NEW -- [TFLite, ONNX, CoreML, TensorRT Export](https://docs.ultralytics.com/yolov5/tutorials/model_export) 🚀 -- [NVIDIA Jetson platform Deployment](https://docs.ultralytics.com/yolov5/tutorials/running_on_jetson_nano) 🌟 NEW -- [Test-Time Augmentation (TTA)](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation) -- [Model Ensembling](https://docs.ultralytics.com/yolov5/tutorials/model_ensembling) -- [Model Pruning/Sparsity](https://docs.ultralytics.com/yolov5/tutorials/model_pruning_and_sparsity) -- [Hyperparameter Evolution](https://docs.ultralytics.com/yolov5/tutorials/hyperparameter_evolution) -- [Transfer Learning with Frozen Layers](https://docs.ultralytics.com/yolov5/tutorials/transfer_learning_with_frozen_layers) -- [Architecture Summary](https://docs.ultralytics.com/yolov5/tutorials/architecture_description) 🌟 NEW -- [Roboflow for Datasets, Labeling, and Active Learning](https://docs.ultralytics.com/yolov5/tutorials/roboflow_datasets_integration) -- [ClearML Logging](https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration) 🌟 NEW -- [YOLOv5 with Neural Magic's Deepsparse](https://docs.ultralytics.com/yolov5/tutorials/neural_magic_pruning_quantization) 🌟 NEW -- [Comet Logging](https://docs.ultralytics.com/yolov5/tutorials/comet_logging_integration) 🌟 NEW +- [Multi-GPU Training](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training/) +- [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading/) 🌟 NEW +- [TFLite, ONNX, CoreML, TensorRT Export](https://docs.ultralytics.com/yolov5/tutorials/model_export/) 🚀 +- [NVIDIA Jetson platform Deployment](https://docs.ultralytics.com/yolov5/tutorials/running_on_jetson_nano/) 🌟 NEW +- [Test-Time Augmentation (TTA)](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation/) +- [Model Ensembling](https://docs.ultralytics.com/yolov5/tutorials/model_ensembling/) +- [Model Pruning/Sparsity](https://docs.ultralytics.com/yolov5/tutorials/model_pruning_and_sparsity/) +- [Hyperparameter Evolution](https://docs.ultralytics.com/yolov5/tutorials/hyperparameter_evolution/) +- [Transfer Learning with Frozen Layers](https://docs.ultralytics.com/yolov5/tutorials/transfer_learning_with_frozen_layers/) +- [Architecture Summary](https://docs.ultralytics.com/yolov5/tutorials/architecture_description/) 🌟 NEW +- [Roboflow for Datasets, Labeling, and Active Learning](https://docs.ultralytics.com/yolov5/tutorials/roboflow_datasets_integration/) +- [ClearML Logging](https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration/) 🌟 NEW +- [YOLOv5 with Neural Magic's Deepsparse](https://docs.ultralytics.com/yolov5/tutorials/neural_magic_pruning_quantization/) 🌟 NEW +- [Comet Logging](https://docs.ultralytics.com/yolov5/tutorials/comet_logging_integration/) 🌟 NEW
@@ -234,7 +234,7 @@ YOLOv5 has been designed to be super easy to get started and simple to learn. We - All checkpoints are trained to 300 epochs with default settings. Nano and Small models use [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml) hyps, all others use [hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml). - **mAPval** values are for single-model single-scale on [COCO val2017](http://cocodataset.org) dataset.
Reproduce by `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` - **Speed** averaged over COCO val images using a [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) instance. NMS times (~1 ms/img) not included.
Reproduce by `python val.py --data coco.yaml --img 640 --task speed --batch 1` -- **TTA** [Test Time Augmentation](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation) includes reflection and scale augmentations.
Reproduce by `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment` +- **TTA** [Test Time Augmentation](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation/) includes reflection and scale augmentations.
Reproduce by `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`
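For readers who prefer the Python API over val.py, a minimal PyTorch Hub sketch of the same TTA setting follows. It assumes network access to download the `yolov5s` weights and the sample image; `augment=True` mirrors the `--augment` flag, and `size=1536` matches the reproduce command above.

```python
import torch

# Load a pretrained YOLOv5s model from PyTorch Hub (downloads weights on first use).
model = torch.hub.load("ultralytics/yolov5", "yolov5s")

# Run inference with Test-Time Augmentation (reflection and scale augments).
results = model("https://ultralytics.com/images/zidane.jpg", size=1536, augment=True)
results.print()  # summary of detections and speeds
```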
diff --git a/README.zh-CN.md b/README.zh-CN.md index f1dc961ee392..1c772416b4dc 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -45,7 +45,7 @@ YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表

- +

@@ -10,7 +10,7 @@ YOLOv5 CI YOLOv5 Citation Docker Pulls - Discord Ultralytics Forums Ultralytics Reddit + Discord Ultralytics Forums Ultralytics Reddit
Run on Gradient Open In Colab @@ -18,9 +18,9 @@

-YOLOv5 🚀 is the world's most loved vision AI, representing Ultralytics open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development. +YOLOv5 🚀 is the world's most loved vision AI, representing Ultralytics open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development. -We hope that the resources here will help you get the most out of YOLOv5. Please browse the YOLOv5 Docs for details, raise an issue on GitHub for support, and join our Discord community for questions and discussions! +We hope that the resources here will help you get the most out of YOLOv5. Please browse the YOLOv5 Docs for details, raise an issue on GitHub for support, and join our Discord community for questions and discussions! To request an Enterprise License please complete the form at [Ultralytics Licensing](https://www.ultralytics.com/license). @@ -37,7 +37,7 @@ To request an Enterprise License please complete the form at [Ultralytics Licens Ultralytics BiliBili - Ultralytics Discord + Ultralytics Discord
@@ -56,7 +56,7 @@ pip install ultralytics ``` @@ -162,7 +162,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - ##
Integrations

- +

@@ -171,7 +171,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - - + @@ -189,7 +189,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - Experience seamless AI with [Ultralytics HUB](https://www.ultralytics.com/hub) ⭐, the all-in-one solution for data visualization, YOLOv5 and YOLOv8 🚀 model training and deployment, without any coding. Transform images into actionable insights and bring your AI visions to life with ease using our cutting-edge platform and user-friendly [Ultralytics App](https://www.ultralytics.com/app-install). Start your journey for **Free** now! - + ##
Why YOLOv5
@@ -246,7 +246,7 @@ Our new YOLOv5 [release v7.0](https://github.com/ultralytics/yolov5/releases/v7. Segmentation Checkpoints
- +
@@ -462,7 +462,7 @@ For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https:/ Ultralytics BiliBili - Ultralytics Discord + Ultralytics Discord
[tta]: https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation diff --git a/README.zh-CN.md b/README.zh-CN.md index 1c772416b4dc..65c21c685a98 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -1,6 +1,6 @@

- +

@@ -10,7 +10,7 @@ YOLOv5 CI YOLOv5 Citation Docker Pulls - Discord Ultralytics Forums Ultralytics Reddit + Discord Ultralytics Forums Ultralytics Reddit
Run on Gradient Open In Colab @@ -18,9 +18,9 @@

-YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表 Ultralytics 对未来视觉 AI 方法的开源研究,结合在数千小时的研究和开发中积累的经验教训和最佳实践。 +YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表 Ultralytics 对未来视觉 AI 方法的开源研究,结合在数千小时的研究和开发中积累的经验教训和最佳实践。 -我们希望这里的资源能帮助您充分利用 YOLOv5。请浏览 YOLOv5 文档 了解详细信息,在 GitHub 上提交问题以获得支持,并加入我们的 Discord 社区进行问题和讨论! +我们希望这里的资源能帮助您充分利用 YOLOv5。请浏览 YOLOv5 文档 了解详细信息,在 GitHub 上提交问题以获得支持,并加入我们的 Discord 社区进行问题和讨论! 如需申请企业许可,请在 [Ultralytics Licensing](https://www.ultralytics.com/license) 处填写表格 @@ -37,7 +37,7 @@ YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表 Ultralytics BiliBili - Ultralytics Discord + Ultralytics Discord
@@ -54,7 +54,7 @@ pip install ultralytics ``` @@ -161,7 +161,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - ##
模块集成

- +

@@ -170,7 +170,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - - + @@ -188,7 +188,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - [Ultralytics HUB](https://www.ultralytics.com/hub) 是我们的⭐**新的**用于可视化数据集、训练 YOLOv5 🚀 模型并以无缝体验部署到现实世界的无代码解决方案。现在开始 **免费** 使用他! - + ##
为什么选择 YOLOv5
@@ -247,7 +247,7 @@ YOLOv5 超级容易上手,简单易学。我们优先考虑现实世界的结
- +
@@ -462,7 +462,7 @@ Ultralytics 提供两种许可证选项以适应各种使用场景: Ultralytics BiliBili - Ultralytics Discord + Ultralytics Discord
[tta]: https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation From a3555241574a08e35b5f3b0138c09802ec4ed28a Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 11 Oct 2024 15:07:07 +0200 Subject: [PATCH 1950/1976] Update codeql-analysis.yml (#13353) Signed-off-by: Glenn Jocher --- .github/workflows/codeql-analysis.yml | 36 ++++++++------------------- 1 file changed, 11 insertions(+), 25 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 77054fd0d7c5..e6e3e85d3ced 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -1,26 +1,26 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license -# This action runs GitHub's industry-leading static analysis engine, CodeQL, against a repository's source code to find security vulnerabilities. -# https://github.com/github/codeql-action +# Ultralytics YOLO 🚀, AGPL-3.0 license name: "CodeQL" on: schedule: - - cron: "0 0 1 * *" # Runs at 00:00 UTC on the 1st of every month + - cron: "0 0 1 * *" workflow_dispatch: jobs: analyze: name: Analyze - runs-on: ubuntu-latest + runs-on: ${{ 'ubuntu-latest' }} + permissions: + actions: read + contents: read + security-events: write strategy: fail-fast: false matrix: language: ["python"] - # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ] - # Learn more: - # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed + # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ] steps: - name: Checkout repository @@ -34,23 +34,9 @@ jobs: # If you wish to specify custom queries, you can do so here or in a config file. # By default, queries listed here will override any specified in a config file. # Prefix the list here with "+" to use these queries and those in the config file. - # queries: ./path/to/local/query, your-org/your-repo/queries@main - - # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). - # If this step fails, then you should remove it and run the build manually (see below) - - name: Autobuild - uses: github/codeql-action/autobuild@v3 - - # ℹ️ Command-line programs to run using the OS shell. 
- # 📚 https://git.io/JvXDl - - # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines - # and modify them (or add more) to build your code if your project - # uses a compiled language - - #- run: | - # make bootstrap - # make release + # queries: security-extended,security-and-quality - name: Perform CodeQL Analysis uses: github/codeql-action/analyze@v3 + with: + category: "/language:${{matrix.language}}" From abdfbd68d1c1fe58b6f5c7d258f1433d40894254 Mon Sep 17 00:00:00 2001 From: Ultralytics Assistant <135830346+UltralyticsAssistant@users.noreply.github.com> Date: Tue, 15 Oct 2024 16:03:37 +0200 Subject: [PATCH 1951/1976] Ultralytics Code Refactor https://ultralytics.com/actions (#13361) Refactor code for speed and clarity --- .github/workflows/cla.yml | 2 +- .github/workflows/format.yml | 5 ++--- .github/workflows/merge-main-into-prs.yml | 2 +- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/.github/workflows/cla.yml b/.github/workflows/cla.yml index f4d26c4c35e5..f725f5683d1b 100644 --- a/.github/workflows/cla.yml +++ b/.github/workflows/cla.yml @@ -30,7 +30,7 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # Must be repository secret PAT - PERSONAL_ACCESS_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }} + PERSONAL_ACCESS_TOKEN: ${{ secrets._GITHUB_TOKEN }} with: path-to-signatures: "signatures/version1/cla.json" path-to-document: "https://docs.ultralytics.com/help/CLA" # CLA document diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml index 3357e30dacc3..c9152948af90 100644 --- a/.github/workflows/format.yml +++ b/.github/workflows/format.yml @@ -18,15 +18,14 @@ jobs: - name: Run Ultralytics Formatting uses: ultralytics/actions@main with: - token: ${{ secrets.PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }} # note GITHUB_TOKEN automatically generated + token: ${{ secrets._GITHUB_TOKEN }} # note GITHUB_TOKEN automatically generated labels: true # autolabel issues and PRs python: true # format Python code and docstrings prettier: true # format YAML, JSON, Markdown and CSS spelling: true # check spelling links: false # check broken links summary: true # print PR summary with GPT4o (requires 'openai_api_key') - openai_azure_api_key: ${{ secrets.OPENAI_AZURE_API_KEY }} - openai_azure_endpoint: ${{ secrets.OPENAI_AZURE_ENDPOINT }} + openai_api_key: ${{ secrets.OPENAI_API_KEY }} first_issue_response: | 👋 Hello @${{ github.actor }}, thank you for your interest in YOLOv5 🚀! Please visit our ⭐️ [Tutorials](https://docs.ultralytics.com/yolov5/) to get started, where you can find quickstart guides for simple tasks like [Custom Data Training](https://docs.ultralytics.com/yolov5/tutorials/train_custom_data/) all the way to advanced concepts like [Hyperparameter Evolution](https://docs.ultralytics.com/yolov5/tutorials/hyperparameter_evolution/). 
diff --git a/.github/workflows/merge-main-into-prs.yml b/.github/workflows/merge-main-into-prs.yml index 05a0363a74b8..29c0f9328f4f 100644 --- a/.github/workflows/merge-main-into-prs.yml +++ b/.github/workflows/merge-main-into-prs.yml @@ -67,5 +67,5 @@ jobs: print(f"❌ Could not process PR #{pr.number}: {e}") env: - GITHUB_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN || secrets.GITHUB_TOKEN }} + GITHUB_TOKEN: ${{ secrets._GITHUB_TOKEN }} GITHUB_REPOSITORY: ${{ github.repository }} From 94a6245617adf3df848e4879458f29208c50fa1f Mon Sep 17 00:00:00 2001 From: imyhxy Date: Wed, 16 Oct 2024 00:22:02 +0800 Subject: [PATCH 1952/1976] Fix bug when enable `--quad` training option (#13355) * fix: quad training * fix: quad training in segmentation --- utils/dataloaders.py | 1 + utils/segment/dataloaders.py | 1 + 2 files changed, 2 insertions(+) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 61358eb9fb90..00e98168ffbd 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -210,6 +210,7 @@ def create_dataloader( shuffle=shuffle and sampler is None, num_workers=nw, sampler=sampler, + drop_last=quad, pin_memory=PIN_MEMORY, collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn, worker_init_fn=seed_worker, diff --git a/utils/segment/dataloaders.py b/utils/segment/dataloaders.py index 5f5666c38803..f6507649491f 100644 --- a/utils/segment/dataloaders.py +++ b/utils/segment/dataloaders.py @@ -75,6 +75,7 @@ def create_dataloader( shuffle=shuffle and sampler is None, num_workers=nw, sampler=sampler, + drop_last=quad, pin_memory=True, collate_fn=LoadImagesAndLabelsAndMasks.collate_fn4 if quad else LoadImagesAndLabelsAndMasks.collate_fn, worker_init_fn=seed_worker, From 79b7336f0dcf2916ca0d81cefd4e64085214b608 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 17 Oct 2024 19:14:18 +0200 Subject: [PATCH 1953/1976] Update Integrations table (#13365) * Update Integrations table Signed-off-by: Glenn Jocher * Update README.md Signed-off-by: Glenn Jocher * Update README.zh-CN.md Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher --- README.md | 32 +++++++++++++++++--------------- README.zh-CN.md | 34 ++++++++++++++++++---------------- 2 files changed, 35 insertions(+), 31 deletions(-) diff --git a/README.md b/README.md index 4cb814590eb5..d0d6262e03e0 100644 --- a/README.md +++ b/README.md @@ -161,29 +161,31 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - ##
Integrations
+Our key integrations with leading AI platforms extend the functionality of Ultralytics' offerings, enhancing tasks like dataset labeling, training, visualization, and model management. Discover how Ultralytics, in collaboration with [W&B](https://docs.wandb.ai/guides/integrations/ultralytics/), [Comet](https://bit.ly/yolov8-readme-comet), [Roboflow](https://roboflow.com/?ref=ultralytics) and [OpenVINO](https://docs.ultralytics.com/integrations/openvino/), can optimize your AI workflow. +
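Returning to the `--quad` dataloader fix in the patch above: `collate_fn4` assembles samples in groups of four, so a ragged final batch breaks it, and passing `drop_last=quad` discards that remainder. The toy sketch below reproduces the failure mode with a stand-in collate function — it illustrates the constraint only and is not YOLOv5's actual `collate_fn4`.

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

dataset = TensorDataset(torch.arange(10).float())  # 10 samples -> final batch of 2 is ragged

def collate4(batch):
    """Stand-in for a quad collate: only valid when the batch length is a multiple of 4."""
    assert len(batch) % 4 == 0, "quad collate requires batches divisible by 4"
    return torch.stack([b[0] for b in batch]).view(-1, 4)

# Without drop_last, iterating hits the final 2-sample batch and collate4 raises:
# loader = DataLoader(dataset, batch_size=4, collate_fn=collate4)
# With drop_last=True (i.e. drop_last=quad in the patch), the remainder is skipped:
loader = DataLoader(dataset, batch_size=4, collate_fn=collate4, drop_last=True)
for x in loader:
    print(x.shape)  # torch.Size([1, 4]) for each full batch
```

Dropping a handful of trailing samples per epoch is a standard trade-off for collate functions with shape constraints, which is why the patch applies `drop_last` only when `quad` is enabled.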
- - + +Ultralytics active learning integrations

- - - - - - - - - + + Ultralytics HUB logo + space + + ClearML logo + space + + Comet ML logo + space - + NeuralMagic logo
-| Roboflow | ClearML ⭐ NEW | Comet ⭐ NEW | Neural Magic ⭐ NEW | -| :--------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------: | -| Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) | Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://clear.ml/) (open-source!) | Free forever, [Comet](https://bit.ly/yolov5-readme-comet2) lets you save YOLOv5 models, resume training, and interactively visualise and debug predictions | Run YOLOv5 inference up to 6x faster with [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic) | +| Ultralytics HUB 🚀 | W&B | Comet ⭐ NEW | Neural Magic | +| :----------------------------------------------------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------: | +| Streamline YOLO workflows: Label, train, and deploy effortlessly with [Ultralytics HUB](https://ultralytics.com/hub). Try now! | Track experiments, hyperparameters, and results with [Weights & Biases](https://docs.wandb.ai/guides/integrations/ultralytics/) | Free forever, [Comet](https://bit.ly/yolov5-readme-comet) lets you save YOLOv5 models, resume training, and interactively visualize and debug predictions | Run YOLO11 inference up to 6x faster with [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic) | ##
Ultralytics HUB
diff --git a/README.zh-CN.md b/README.zh-CN.md index 65c21c685a98..9bdbbf2caa1f 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -158,31 +158,33 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml -
-##
模块集成
+##
集成
+ +我们与领先的 AI 平台的关键集成扩展了 Ultralytics 产品的功能,提升了数据集标注、训练、可视化和模型管理等任务。探索 Ultralytics 如何通过与 [W&B](https://docs.wandb.ai/guides/integrations/ultralytics/)、[Comet](https://bit.ly/yolov8-readme-comet)、[Roboflow](https://roboflow.com/?ref=ultralytics) 和 [OpenVINO](https://docs.ultralytics.com/integrations/openvino/) 的合作,优化您的 AI 工作流程。
- - + +Ultralytics active learning integrations

- - - - - - - - - + + Ultralytics HUB logo + space + + W&B logo + space + + Comet ML logo + space - + NeuralMagic logo
-| Roboflow | ClearML ⭐ 新 | Comet ⭐ 新 | Neural Magic ⭐ 新 | -| :--------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------: | -| 将您的自定义数据集进行标注并直接导出到 YOLOv5 以进行训练 [Roboflow](https://roboflow.com/?ref=ultralytics) | 自动跟踪、可视化甚至远程训练 YOLOv5 [ClearML](https://clear.ml/)(开源!) | 永远免费,[Comet](https://bit.ly/yolov5-readme-comet2)可让您保存 YOLOv5 模型、恢复训练以及交互式可视化和调试预测 | 使用 [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic),运行 YOLOv5 推理的速度最高可提高6倍 | +| Ultralytics HUB 🚀 | W&B | Comet ⭐ 全新 | Neural Magic | +| :------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------: | +| 简化 YOLO 工作流程:通过 [Ultralytics HUB](https://ultralytics.com/hub) 轻松标注、训练和部署。立即试用! | 使用 [Weights & Biases](https://docs.wandb.ai/guides/integrations/ultralytics/) 跟踪实验、超参数和结果 | 永久免费,[Comet](https://bit.ly/yolov5-readme-comet) 允许您保存 YOLO11 模型、恢复训练,并交互式地可视化和调试预测结果 | 使用 [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic) 运行 YOLO11 推理,速度提升至 6 倍 | ##
Ultralytics HUB
From bcd88eaf595f0e7275bad7da542b37d42ab111d3 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Thu, 17 Oct 2024 19:32:33 +0200 Subject: [PATCH 1954/1976] Update README.md (#13366) * Update README.md Signed-off-by: Glenn Jocher * Update README.zh-CN.md Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher --- README.md | 2 +- README.zh-CN.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index d0d6262e03e0..bb4a38b9ed2a 100644 --- a/README.md +++ b/README.md @@ -173,7 +173,7 @@ Our key integrations with leading AI platforms extend the functionality of Ultra Ultralytics HUB logo space - + ClearML logo space diff --git a/README.zh-CN.md b/README.zh-CN.md index 9bdbbf2caa1f..cdb5214c1639 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -172,7 +172,7 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - Ultralytics HUB logo space - + W&B logo space From 24ee28010fbf597ec796e6e471429cde21040f90 Mon Sep 17 00:00:00 2001 From: Ultralytics Assistant <135830346+UltralyticsAssistant@users.noreply.github.com> Date: Sat, 19 Oct 2024 17:46:11 +0200 Subject: [PATCH 1955/1976] Ultralytics Refactor https://ultralytics.com/actions (#13368) * Refactor code for speed and clarity * Auto-format by https://ultralytics.com/actions --- .github/workflows/format.yml | 2 +- README.md | 10 +++++----- README.zh-CN.md | 10 +++++----- classify/tutorial.ipynb | 4 ++-- data/coco128-seg.yaml | 2 +- data/coco128.yaml | 2 +- segment/tutorial.ipynb | 6 +++--- tutorial.ipynb | 6 +++--- 8 files changed, 21 insertions(+), 21 deletions(-) diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml index c9152948af90..9f2f78e636dd 100644 --- a/.github/workflows/format.yml +++ b/.github/workflows/format.yml @@ -46,7 +46,7 @@ jobs: YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled): - - **Notebooks** with free GPU: Run on Gradient Open In Colab Open In Kaggle + - **Notebooks** with free GPU: Run on Gradient Open In Colab Open In Kaggle - **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/) - **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/) - **Docker Image**. See [Docker Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/docker_image_quickstart_tutorial/) Docker Pulls diff --git a/README.md b/README.md index bb4a38b9ed2a..d39f824a4f2f 100644 --- a/README.md +++ b/README.md @@ -14,7 +14,7 @@
Run on Gradient Open In Colab - Open In Kaggle + Open In Kaggle
@@ -183,9 +183,9 @@ Our key integrations with leading AI platforms extend the functionality of Ultra NeuralMagic logo -| Ultralytics HUB 🚀 | W&B | Comet ⭐ NEW | Neural Magic | -| :----------------------------------------------------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------: | -| Streamline YOLO workflows: Label, train, and deploy effortlessly with [Ultralytics HUB](https://ultralytics.com/hub). Try now! | Track experiments, hyperparameters, and results with [Weights & Biases](https://docs.wandb.ai/guides/integrations/ultralytics/) | Free forever, [Comet](https://bit.ly/yolov5-readme-comet) lets you save YOLOv5 models, resume training, and interactively visualize and debug predictions | Run YOLO11 inference up to 6x faster with [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic) | +| Ultralytics HUB 🚀 | W&B | Comet ⭐ NEW | Neural Magic | +| :--------------------------------------------------------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------: | +| Streamline YOLO workflows: Label, train, and deploy effortlessly with [Ultralytics HUB](https://www.ultralytics.com/hub). Try now! | Track experiments, hyperparameters, and results with [Weights & Biases](https://docs.wandb.ai/guides/integrations/ultralytics/) | Free forever, [Comet](https://bit.ly/yolov5-readme-comet) lets you save YOLOv5 models, resume training, and interactively visualize and debug predictions | Run YOLO11 inference up to 6x faster with [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic) | ##
Ultralytics HUB
@@ -417,7 +417,7 @@ Get started in seconds with our verified environments. Click each icon below for - + diff --git a/README.zh-CN.md b/README.zh-CN.md index cdb5214c1639..8ef7f74c9d8b 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -14,7 +14,7 @@
Run on Gradient Open In Colab - Open In Kaggle + Open In Kaggle
@@ -182,9 +182,9 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - NeuralMagic logo -| Ultralytics HUB 🚀 | W&B | Comet ⭐ 全新 | Neural Magic | -| :------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------: | -| 简化 YOLO 工作流程:通过 [Ultralytics HUB](https://ultralytics.com/hub) 轻松标注、训练和部署。立即试用! | 使用 [Weights & Biases](https://docs.wandb.ai/guides/integrations/ultralytics/) 跟踪实验、超参数和结果 | 永久免费,[Comet](https://bit.ly/yolov5-readme-comet) 允许您保存 YOLO11 模型、恢复训练,并交互式地可视化和调试预测结果 | 使用 [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic) 运行 YOLO11 推理,速度提升至 6 倍 | +| Ultralytics HUB 🚀 | W&B | Comet ⭐ 全新 | Neural Magic | +| :----------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------: | +| 简化 YOLO 工作流程:通过 [Ultralytics HUB](https://www.ultralytics.com/hub) 轻松标注、训练和部署。立即试用! | 使用 [Weights & Biases](https://docs.wandb.ai/guides/integrations/ultralytics/) 跟踪实验、超参数和结果 | 永久免费,[Comet](https://bit.ly/yolov5-readme-comet) 允许您保存 YOLO11 模型、恢复训练,并交互式地可视化和调试预测结果 | 使用 [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic) 运行 YOLO11 推理,速度提升至 6 倍 | ##
Ultralytics HUB
@@ -417,7 +417,7 @@ python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --inclu - + diff --git a/classify/tutorial.ipynb b/classify/tutorial.ipynb index e3bfbf674d31..c547a29a9c9c 100644 --- a/classify/tutorial.ipynb +++ b/classify/tutorial.ipynb @@ -15,7 +15,7 @@ "
\n", "
\"Run\n", " \"Open\n", - " \"Open\n", + " \"Open\n", "
\n", "\n", "This YOLOv5 🚀 notebook by Ultralytics presents simple train, validate and predict examples to help start your AI adventure.
See GitHub for community support or contact us for professional support.\n", @@ -1410,7 +1410,7 @@ "\n", "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n", "\n", - "- **Notebooks** with free GPU: \"Run \"Open \"Open\n", + "- **Notebooks** with free GPU: \"Run \"Open \"Open\n", "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/)\n", "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/)\n", "- **Docker Image**. See [Docker Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/docker_image_quickstart_tutorial/) \"Docker\n" diff --git a/data/coco128-seg.yaml b/data/coco128-seg.yaml index aea711c98396..5b00f52a9c9b 100644 --- a/data/coco128-seg.yaml +++ b/data/coco128-seg.yaml @@ -1,5 +1,5 @@ # Ultralytics YOLOv5 🚀, AGPL-3.0 license -# COCO128-seg dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics +# COCO128-seg dataset https://www.kaggle.com/datasets/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics # Example usage: python train.py --data coco128.yaml # parent # ├── yolov5 diff --git a/data/coco128.yaml b/data/coco128.yaml index 2ed35c06ea7e..cf3d27b87313 100644 --- a/data/coco128.yaml +++ b/data/coco128.yaml @@ -1,5 +1,5 @@ # Ultralytics YOLOv5 🚀, AGPL-3.0 license -# COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics +# COCO128 dataset https://www.kaggle.com/datasets/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics # Example usage: python train.py --data coco128.yaml # parent # ├── yolov5 diff --git a/segment/tutorial.ipynb b/segment/tutorial.ipynb index 56ea50500e66..bb5c1f99689e 100644 --- a/segment/tutorial.ipynb +++ b/segment/tutorial.ipynb @@ -15,7 +15,7 @@ "
\n", " \"Run\n", " \"Open\n", - " \"Open\n", + " \"Open\n", "
\n", "\n", "This YOLOv5 🚀 notebook by Ultralytics presents simple train, validate and predict examples to help start your AI adventure.
See GitHub for community support or contact us for professional support.\n", @@ -222,7 +222,7 @@ "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", "

\n", "\n", - "Train a YOLOv5s-seg model on the [COCO128](https://www.kaggle.com/ultralytics/coco128) dataset with `--data coco128-seg.yaml`, starting from pretrained `--weights yolov5s-seg.pt`, or from randomly initialized `--weights '' --cfg yolov5s-seg.yaml`.\n", + "Train a YOLOv5s-seg model on the [COCO128](https://www.kaggle.com/datasets/ultralytics/coco128) dataset with `--data coco128-seg.yaml`, starting from pretrained `--weights yolov5s-seg.pt`, or from randomly initialized `--weights '' --cfg yolov5s-seg.yaml`.\n", "\n", "- **Pretrained [Models](https://github.com/ultralytics/yolov5/tree/master/models)** are downloaded\n", "automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\n", @@ -523,7 +523,7 @@ "\n", "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n", "\n", - "- **Notebooks** with free GPU: \"Run \"Open \"Open\n", + "- **Notebooks** with free GPU: \"Run \"Open \"Open\n", "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/)\n", "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/)\n", "- **Docker Image**. See [Docker Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/docker_image_quickstart_tutorial/) \"Docker\n" diff --git a/tutorial.ipynb b/tutorial.ipynb index ebc6c0b22d80..b383deb7e917 100644 --- a/tutorial.ipynb +++ b/tutorial.ipynb @@ -28,7 +28,7 @@ "\n", " \"Run\n", " \"Open\n", - " \"Open\n", + " \"Open\n", "\n", "This YOLOv5 🚀 notebook by Ultralytics presents simple train, validate and predict examples to help start your AI adventure.
We hope that the resources in this notebook will help you get the most out of YOLOv5. Please browse the YOLOv5 Docs for details, raise an issue on GitHub for support, and join our Discord community for questions and discussions!\n", "\n", @@ -257,7 +257,7 @@ "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", "

\n", "\n", - "Train a YOLOv5s model on the [COCO128](https://www.kaggle.com/ultralytics/coco128) dataset with `--data coco128.yaml`, starting from pretrained `--weights yolov5s.pt`, or from randomly initialized `--weights '' --cfg yolov5s.yaml`.\n", + "Train a YOLOv5s model on the [COCO128](https://www.kaggle.com/datasets/ultralytics/coco128) dataset with `--data coco128.yaml`, starting from pretrained `--weights yolov5s.pt`, or from randomly initialized `--weights '' --cfg yolov5s.yaml`.\n", "\n", "- **Pretrained [Models](https://github.com/ultralytics/yolov5/tree/master/models)** are downloaded\n", "automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\n", @@ -553,7 +553,7 @@ "\n", "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n", "\n", - "- **Notebooks** with free GPU: \"Run \"Open \"Open\n", + "- **Notebooks** with free GPU: \"Run \"Open \"Open\n", "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/google_cloud_quickstart_tutorial/)\n", "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/aws_quickstart_tutorial/)\n", "- **Docker Image**. See [Docker Quickstart Guide](https://docs.ultralytics.com/yolov5/environments/docker_image_quickstart_tutorial/) \"Docker\n" From 2f74455adc74a587c9e9d5a6e45df880fce8ea3e Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 22 Oct 2024 19:22:45 +0200 Subject: [PATCH 1956/1976] Update README.md with YOLO11 section (#13374) * Update README with YOLO11 * Update README with YOLO11 --------- Co-authored-by: UltralyticsAssistant --- README.md | 14 +++++++------- README.zh-CN.md | 32 ++++++++++++++++---------------- 2 files changed, 23 insertions(+), 23 deletions(-) diff --git a/README.md b/README.md index d39f824a4f2f..79c9e6debff0 100644 --- a/README.md +++ b/README.md @@ -43,11 +43,11 @@ To request an Enterprise License please complete the form at [Ultralytics Licens
-##
YOLOv8 🚀 NEW
+##
YOLO11 🚀 NEW
-We are thrilled to announce the launch of Ultralytics YOLOv8 🚀, our NEW cutting-edge, state-of-the-art (SOTA) model released at **[https://github.com/ultralytics/ultralytics](https://github.com/ultralytics/ultralytics)**. YOLOv8 is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of object detection, image segmentation and image classification tasks. +We are excited to unveil the launch of Ultralytics YOLO11 🚀, the latest advancement in our state-of-the-art (SOTA) vision models! Available now at **[GitHub](https://github.com/ultralytics/ultralytics)**, YOLO11 builds on our legacy of speed, precision, and ease of use. Whether you're tackling object detection, image segmentation, or image classification, YOLO11 delivers the performance and versatility needed to excel in diverse applications. -See the [YOLOv8 Docs](https://docs.ultralytics.com/) for details and get started with: +Get started today and unlock the full potential of YOLO11! Visit the [Ultralytics Docs](https://docs.ultralytics.com/) for comprehensive guides and resources: [![PyPI version](https://badge.fury.io/py/ultralytics.svg)](https://badge.fury.io/py/ultralytics) [![Downloads](https://static.pepy.tech/badge/ultralytics)](https://pepy.tech/project/ultralytics) @@ -57,7 +57,7 @@ pip install ultralytics
- +
##
Documentation
@@ -152,9 +152,9 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml - - [Hyperparameter Evolution](https://docs.ultralytics.com/yolov5/tutorials/hyperparameter_evolution/) - [Transfer Learning with Frozen Layers](https://docs.ultralytics.com/yolov5/tutorials/transfer_learning_with_frozen_layers/) - [Architecture Summary](https://docs.ultralytics.com/yolov5/tutorials/architecture_description/) 🌟 NEW -- [Roboflow for Datasets, Labeling, and Active Learning](https://docs.ultralytics.com/yolov5/tutorials/roboflow_datasets_integration/) -- [ClearML Logging](https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration/) 🌟 NEW -- [YOLOv5 with Neural Magic's Deepsparse](https://docs.ultralytics.com/yolov5/tutorials/neural_magic_pruning_quantization/) 🌟 NEW +- [Ultralytics HUB to train and deploy YOLO](https://www.ultralytics.com/hub) 🚀 RECOMMENDED +- [ClearML Logging](https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration/) +- [YOLOv5 with Neural Magic's Deepsparse](https://docs.ultralytics.com/yolov5/tutorials/neural_magic_pruning_quantization/) - [Comet Logging](https://docs.ultralytics.com/yolov5/tutorials/comet_logging_integration/) 🌟 NEW
diff --git a/README.zh-CN.md b/README.zh-CN.md index 8ef7f74c9d8b..6615daeff955 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -41,21 +41,21 @@ YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表YOLOv8 🚀 新品 +##
YOLO11 🚀 全新发布
-我们很高兴宣布 Ultralytics YOLOv8 🚀 的发布,这是我们新推出的领先水平、最先进的(SOTA)模型,发布于 **[https://github.com/ultralytics/ultralytics](https://github.com/ultralytics/ultralytics)**。 YOLOv8 旨在快速、准确且易于使用,使其成为广泛的物体检测、图像分割和图像分类任务的极佳选择。 +我们很高兴宣布推出 Ultralytics YOLO11 🚀,这是我们最先进视觉模型的最新进展!现已在 **[GitHub](https://github.com/ultralytics/ultralytics)** 上发布。YOLO11 在速度、精度和易用性方面进一步提升,无论是处理目标检测、图像分割还是图像分类任务,YOLO11 都具备出色的性能和多功能性,助您在各种应用中脱颖而出。 -请查看 [YOLOv8 文档](https://docs.ultralytics.com/)了解详细信息,并开始使用: +立即开始,解锁 YOLO11 的全部潜力!访问 [Ultralytics 文档](https://docs.ultralytics.com/) 获取全面的指南和资源: [![PyPI 版本](https://badge.fury.io/py/ultralytics.svg)](https://badge.fury.io/py/ultralytics) [![下载量](https://static.pepy.tech/badge/ultralytics)](https://pepy.tech/project/ultralytics) -```commandline +```bash pip install ultralytics ```
##
文档
@@ -139,22 +139,22 @@ python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml -
教程 -- [训练自定义数据](https://docs.ultralytics.com/yolov5/tutorials/train_custom_data/) 🚀 推荐 -- [获得最佳训练结果的技巧](https://docs.ultralytics.com/guides/model-training-tips/) ☘️ +- [自定义数据训练](https://docs.ultralytics.com/yolov5/tutorials/train_custom_data/) 🚀 **推荐** +- [最佳训练效果的提示](https://docs.ultralytics.com/guides/model-training-tips/) ☘️ - [多GPU训练](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training/) -- [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading/) 🌟 新 -- [TFLite,ONNX,CoreML,TensorRT导出](https://docs.ultralytics.com/yolov5/tutorials/model_export/) 🚀 -- [NVIDIA Jetson平台部署](https://docs.ultralytics.com/yolov5/tutorials/running_on_jetson_nano/) 🌟 新 +- [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading/) 🌟 **全新** +- [TFLite, ONNX, CoreML, TensorRT 导出](https://docs.ultralytics.com/yolov5/tutorials/model_export/) 🚀 +- [NVIDIA Jetson 平台部署](https://docs.ultralytics.com/yolov5/tutorials/running_on_jetson_nano/) 🌟 **全新** - [测试时增强 (TTA)](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation/) - [模型集成](https://docs.ultralytics.com/yolov5/tutorials/model_ensembling/) -- [模型剪枝/稀疏](https://docs.ultralytics.com/yolov5/tutorials/model_pruning_and_sparsity/) +- [模型剪枝/稀疏化](https://docs.ultralytics.com/yolov5/tutorials/model_pruning_and_sparsity/) - [超参数进化](https://docs.ultralytics.com/yolov5/tutorials/hyperparameter_evolution/) - [冻结层的迁移学习](https://docs.ultralytics.com/yolov5/tutorials/transfer_learning_with_frozen_layers/) -- [架构概述](https://docs.ultralytics.com/yolov5/tutorials/architecture_description/) 🌟 新 -- [Roboflow用于数据集、标注和主动学习](https://docs.ultralytics.com/yolov5/tutorials/roboflow_datasets_integration/) -- [ClearML日志记录](https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration/) 🌟 新 -- [使用Neural Magic的Deepsparse的YOLOv5](https://docs.ultralytics.com/yolov5/tutorials/neural_magic_pruning_quantization/) 🌟 新 -- [Comet日志记录](https://docs.ultralytics.com/yolov5/tutorials/comet_logging_integration/) 🌟 新 +- [架构概述](https://docs.ultralytics.com/yolov5/tutorials/architecture_description/) 🌟 **全新** +- [使用 Ultralytics HUB 进行 YOLO 训练和部署](https://www.ultralytics.com/hub) 🚀 **推荐** +- [ClearML 日志记录](https://docs.ultralytics.com/yolov5/tutorials/clearml_logging_integration/) +- [与 Neural Magic 的 Deepsparse 集成的 YOLOv5](https://docs.ultralytics.com/yolov5/tutorials/neural_magic_pruning_quantization/) +- [Comet 日志记录](https://docs.ultralytics.com/yolov5/tutorials/comet_logging_integration/) 🌟 **全新**
From d4e4c351815b49c2baadb561de8d42a84d43e365 Mon Sep 17 00:00:00 2001 From: Paula Derrenger <107626595+pderrenger@users.noreply.github.com> Date: Thu, 7 Nov 2024 16:19:24 +0100 Subject: [PATCH 1957/1976] Fix PyPI downloads links (#13403) Refactor code for speed and clarity Co-authored-by: UltralyticsAssistant --- README.md | 2 +- README.zh-CN.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 79c9e6debff0..9658bef91587 100644 --- a/README.md +++ b/README.md @@ -49,7 +49,7 @@ We are excited to unveil the launch of Ultralytics YOLO11 🚀, the latest advan Get started today and unlock the full potential of YOLO11! Visit the [Ultralytics Docs](https://docs.ultralytics.com/) for comprehensive guides and resources: -[![PyPI version](https://badge.fury.io/py/ultralytics.svg)](https://badge.fury.io/py/ultralytics) [![Downloads](https://static.pepy.tech/badge/ultralytics)](https://pepy.tech/project/ultralytics) +[![PyPI version](https://badge.fury.io/py/ultralytics.svg)](https://badge.fury.io/py/ultralytics) [![Downloads](https://static.pepy.tech/badge/ultralytics)](https://www.pepy.tech/projects/ultralytics) ```bash pip install ultralytics diff --git a/README.zh-CN.md b/README.zh-CN.md index 6615daeff955..d1eeeef081da 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -47,7 +47,7 @@ YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表YOLOv5 CI If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on macOS, Windows, and Ubuntu every 24 hours and on every commit. - - ## Introducing YOLOv8 🚀 - - We're excited to announce the launch of our latest state-of-the-art (SOTA) object detection model for 2023 - [YOLOv8](https://github.com/ultralytics/ultralytics) 🚀! - - Designed to be fast, accurate, and easy to use, YOLOv8 is an ideal choice for a wide range of object detection, image segmentation and image classification tasks. With YOLOv8, you'll be able to quickly and accurately detect objects in real-time, streamline your workflows, and achieve new levels of accuracy in your projects. 
- - Check out our [YOLOv8 Docs](https://docs.ultralytics.com/) for details and get started with:
- ```bash
- pip install ultralytics
- ```

From 1435a8eed6b16d125e7808c81969a0c879d6b8a0 Mon Sep 17 00:00:00 2001
From: imyhxy
Date: Sat, 9 Nov 2024 06:24:30 +0800
Subject: [PATCH 1961/1976] Add timing cache to accelerate subsequent `.engine` export (#13386)

* fix: typos

* feat: enable timing cache for engine export

* Auto-format by https://ultralytics.com/actions

---------

Co-authored-by: UltralyticsAssistant
---
 export.py | 22 +++++++++++++++++++---
 train.py  |  6 +++---
 2 files changed, 22 insertions(+), 6 deletions(-)

diff --git a/export.py b/export.py
index 9c6873939f7b..f5dc0684e7a1 100644
--- a/export.py
+++ b/export.py
@@ -593,7 +593,9 @@ def export_coreml(model, im, file, int8, half, nms, mlmodel, prefix=colorstr("Co

 @try_export
-def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose=False, prefix=colorstr("TensorRT:")):
+def export_engine(
+    model, im, file, half, dynamic, simplify, workspace=4, verbose=False, cache="", prefix=colorstr("TensorRT:")
+):
     """
     Export a YOLOv5 model to TensorRT engine format, requiring GPU and TensorRT>=7.0.0.
@@ -606,6 +608,7 @@ def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose
         simplify (bool): Set to True to simplify the model during export.
         workspace (int): Workspace size in GB (default is 4).
         verbose (bool): Set to True for verbose logging output.
+        cache (str): Path to save the TensorRT timing cache.
         prefix (str): Log message prefix.

     Returns:
@@ -660,6 +663,11 @@ def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose
         config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace << 30)
     else:  # TensorRT versions 7, 8
         config.max_workspace_size = workspace * 1 << 30
+    if cache:  # enable timing cache
+        Path(cache).parent.mkdir(parents=True, exist_ok=True)
+        buf = Path(cache).read_bytes() if Path(cache).exists() else b""
+        timing_cache = config.create_timing_cache(buf)
+        config.set_timing_cache(timing_cache, ignore_mismatch=True)
     flag = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
     network = builder.create_network(flag)
     parser = trt.OnnxParser(network, logger)
@@ -688,6 +696,9 @@ def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose
     build = builder.build_serialized_network if is_trt10 else builder.build_engine
     with build(network, config) as engine, open(f, "wb") as t:
         t.write(engine if is_trt10 else engine.serialize())
+    if cache:  # save timing cache
+        with open(cache, "wb") as c:
+            c.write(config.get_timing_cache().serialize())
     return f, None
@@ -1277,6 +1288,7 @@ def run(
     int8=False,  # CoreML/TF INT8 quantization
     per_tensor=False,  # TF per tensor quantization
     dynamic=False,  # ONNX/TF/TensorRT: dynamic axes
+    cache="",  # TensorRT: timing cache path
     simplify=False,  # ONNX: simplify model
     mlmodel=False,  # CoreML: Export in *.mlmodel format
     opset=12,  # ONNX: opset version
@@ -1306,6 +1318,7 @@ def run(
         int8 (bool): Apply INT8 quantization for CoreML or TensorFlow models. Default is False.
         per_tensor (bool): Apply per tensor quantization for TensorFlow models. Default is False.
         dynamic (bool): Enable dynamic axes for ONNX, TensorFlow, or TensorRT exports. Default is False.
+        cache (str): TensorRT timing cache path. Default is an empty string.
         simplify (bool): Simplify the ONNX model during export. Default is False.
         opset (int): ONNX opset version. Default is 12.
         verbose (bool): Enable verbose logging for TensorRT export.
Default is False. @@ -1341,6 +1354,7 @@ def run( int8=False, per_tensor=False, dynamic=False, + cache="", simplify=False, opset=12, verbose=False, @@ -1378,7 +1392,8 @@ def run( # Input gs = int(max(model.stride)) # grid size (max stride) imgsz = [check_img_size(x, gs) for x in imgsz] # verify img_size are gs-multiples - im = torch.zeros(batch_size, 3, *imgsz).to(device) # image size(1,3,320,192) BCHW iDetection + ch = next(model.parameters()).size(1) # require input image channels + im = torch.zeros(batch_size, ch, *imgsz).to(device) # image size(1,3,320,192) BCHW iDetection # Update model model.eval() @@ -1402,7 +1417,7 @@ def run( if jit: # TorchScript f[0], _ = export_torchscript(model, im, file, optimize) if engine: # TensorRT required before ONNX - f[1], _ = export_engine(model, im, file, half, dynamic, simplify, workspace, verbose) + f[1], _ = export_engine(model, im, file, half, dynamic, simplify, workspace, verbose, cache) if onnx or xml: # OpenVINO requires ONNX f[2], _ = export_onnx(model, im, file, opset, dynamic, simplify) if xml: # OpenVINO @@ -1497,6 +1512,7 @@ def parse_opt(known=False): parser.add_argument("--int8", action="store_true", help="CoreML/TF/OpenVINO INT8 quantization") parser.add_argument("--per-tensor", action="store_true", help="TF per-tensor quantization") parser.add_argument("--dynamic", action="store_true", help="ONNX/TF/TensorRT: dynamic axes") + parser.add_argument("--cache", type=str, default="", help="TensorRT: timing cache file path") parser.add_argument("--simplify", action="store_true", help="ONNX: simplify model") parser.add_argument("--mlmodel", action="store_true", help="CoreML: Export in *.mlmodel format") parser.add_argument("--opset", type=int, default=17, help="ONNX: opset version") diff --git a/train.py b/train.py index b4395d7e8d15..da3d28c9705f 100644 --- a/train.py +++ b/train.py @@ -717,10 +717,10 @@ def main(opt, callbacks=Callbacks()): "perspective": (True, 0.0, 0.001), # image perspective (+/- fraction), range 0-0.001 "flipud": (True, 0.0, 1.0), # image flip up-down (probability) "fliplr": (True, 0.0, 1.0), # image flip left-right (probability) - "mosaic": (True, 0.0, 1.0), # image mixup (probability) + "mosaic": (True, 0.0, 1.0), # image mosaic (probability) "mixup": (True, 0.0, 1.0), # image mixup (probability) - "copy_paste": (True, 0.0, 1.0), - } # segment copy-paste (probability) + "copy_paste": (True, 0.0, 1.0), # segment copy-paste (probability) + } # GA configs pop_size = 50 From 7a5722738c6fa0d3d129172a8f0ff11b74c96302 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 19 Nov 2024 10:51:47 +0100 Subject: [PATCH 1962/1976] Bump slackapi/slack-github-action from 1.27.0 to 2.0.0 in /.github/workflows (#13417) Bump slackapi/slack-github-action in /.github/workflows Bumps [slackapi/slack-github-action](https://github.com/slackapi/slack-github-action) from 1.27.0 to 2.0.0. - [Release notes](https://github.com/slackapi/slack-github-action/releases) - [Commits](https://github.com/slackapi/slack-github-action/compare/v1.27.0...v2.0.0) --- updated-dependencies: - dependency-name: slackapi/slack-github-action dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci-testing.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 278cd9dd652c..17773e57a3cb 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -142,7 +142,7 @@ jobs: steps: - name: Check for failure and notify if: (needs.Benchmarks.result == 'failure' || needs.Tests.result == 'failure' || needs.Benchmarks.result == 'cancelled' || needs.Tests.result == 'cancelled') && github.repository == 'ultralytics/yolov5' && (github.event_name == 'schedule' || github.event_name == 'push') - uses: slackapi/slack-github-action@v1.27.0 + uses: slackapi/slack-github-action@v2.0.0 with: payload: | {"text": " GitHub Actions error for ${{ github.workflow }} ❌\n\n\n*Repository:* https://github.com/${{ github.repository }}\n*Action:* https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\n*Author:* ${{ github.actor }}\n*Event:* ${{ github.event_name }}\n"} From b968b2d70ca1e752fb19bde0c5fde5049e96afb8 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Tue, 19 Nov 2024 10:54:23 +0100 Subject: [PATCH 1963/1976] Update ci-testing.yml (#13421) Signed-off-by: Glenn Jocher --- .github/workflows/ci-testing.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 17773e57a3cb..89cc8e9a4aeb 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -137,14 +137,14 @@ jobs: Summary: runs-on: ubuntu-latest - needs: [Benchmarks, Tests] # Add job names that you want to check for failure - if: always() # This ensures the job runs even if previous jobs fail + needs: [Benchmarks, Tests] + if: always() steps: - name: Check for failure and notify if: (needs.Benchmarks.result == 'failure' || needs.Tests.result == 'failure' || needs.Benchmarks.result == 'cancelled' || needs.Tests.result == 'cancelled') && github.repository == 'ultralytics/yolov5' && (github.event_name == 'schedule' || github.event_name == 'push') uses: slackapi/slack-github-action@v2.0.0 with: + webhook-type: incoming-webhook + webhook: ${{ secrets.SLACK_WEBHOOK_URL_YOLO }} payload: | - {"text": " GitHub Actions error for ${{ github.workflow }} ❌\n\n\n*Repository:* https://github.com/${{ github.repository }}\n*Action:* https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\n*Author:* ${{ github.actor }}\n*Event:* ${{ github.event_name }}\n"} - env: - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL_YOLO }} + text: " GitHub Actions error for ${{ github.workflow }} ❌\n\n\n*Repository:* https://github.com/${{ github.repository }}\n*Action:* https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}\n*Author:* ${{ github.actor }}\n*Event:* ${{ github.event_name }}\n" From 81ac034a862ff56e64dc525d041d78d168904799 Mon Sep 17 00:00:00 2001 From: zzzer <48149018+zhixuwei@users.noreply.github.com> Date: Thu, 21 Nov 2024 23:33:14 +0800 Subject: [PATCH 1964/1976] Fix possible image drawing issues (#13423) * Update plots.py Fixed non-meaningful labels in the top right (row 1, column 4) plots when batch size is larger than max_subplots. 
Signed-off-by: zzzer <48149018+zhixuwei@users.noreply.github.com> * Update plots.py Signed-off-by: Glenn Jocher * Auto-format by https://ultralytics.com/actions --------- Signed-off-by: zzzer <48149018+zhixuwei@users.noreply.github.com> Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher Co-authored-by: UltralyticsAssistant --- utils/plots.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/plots.py b/utils/plots.py index e899ea4c15e7..3493fa95d133 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -183,7 +183,7 @@ def plot_images(images, targets, paths=None, fname="images.jpg", names=None): # Annotate fs = int((h + w) * ns * 0.01) # font size annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True, example=names) - for i in range(i + 1): + for i in range(min(bs, i + 1)): x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders if paths: From 4c904ac7f91c3fa2a4aaf3c58cd2bca39cdf7195 Mon Sep 17 00:00:00 2001 From: zzzer <48149018+zhixuwei@users.noreply.github.com> Date: Sat, 23 Nov 2024 23:01:47 +0800 Subject: [PATCH 1965/1976] Fix possible image drawing issues (#13426) * Update plots.py Fixed non-meaningful labels in the top right (row 1, column 4) plots when batch size is larger than max_subplots. Signed-off-by: zzzer <48149018+zhixuwei@users.noreply.github.com> * Update plots.py Signed-off-by: Glenn Jocher * Auto-format by https://ultralytics.com/actions * Simple loop conditions Simple loop conditions Signed-off-by: zzzer <48149018+zhixuwei@users.noreply.github.com> --------- Signed-off-by: zzzer <48149018+zhixuwei@users.noreply.github.com> Signed-off-by: Glenn Jocher Co-authored-by: Glenn Jocher Co-authored-by: UltralyticsAssistant --- utils/plots.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/plots.py b/utils/plots.py index 3493fa95d133..44d1035104a9 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -183,7 +183,7 @@ def plot_images(images, targets, paths=None, fname="images.jpg", names=None): # Annotate fs = int((h + w) * ns * 0.01) # font size annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True, example=names) - for i in range(min(bs, i + 1)): + for i in range(bs): x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders if paths: From 882c35fc43a4d7175ff24e8e20b0ec0636d75f49 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 25 Nov 2024 11:56:38 +0100 Subject: [PATCH 1966/1976] Delete .github/workflows/codeql-analysis.yml (#13431) Signed-off-by: Glenn Jocher --- .github/workflows/codeql-analysis.yml | 42 --------------------------- 1 file changed, 42 deletions(-) delete mode 100644 .github/workflows/codeql-analysis.yml diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml deleted file mode 100644 index e6e3e85d3ced..000000000000 --- a/.github/workflows/codeql-analysis.yml +++ /dev/null @@ -1,42 +0,0 @@ -# Ultralytics YOLO 🚀, AGPL-3.0 license - -name: "CodeQL" - -on: - schedule: - - cron: "0 0 1 * *" - workflow_dispatch: - -jobs: - analyze: - name: Analyze - runs-on: ${{ 'ubuntu-latest' }} - permissions: - actions: read - contents: read - security-events: write - - strategy: - fail-fast: false - matrix: - language: ["python"] - # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ] - - steps: - - name: Checkout repository - uses: 
actions/checkout@v4 - - # Initializes the CodeQL tools for scanning. - - name: Initialize CodeQL - uses: github/codeql-action/init@v3 - with: - languages: ${{ matrix.language }} - # If you wish to specify custom queries, you can do so here or in a config file. - # By default, queries listed here will override any specified in a config file. - # Prefix the list here with "+" to use these queries and those in the config file. - # queries: security-extended,security-and-quality - - - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v3 - with: - category: "/language:${{matrix.language}}" From e62a31b601641c5b967226b6b4c44aaee23f99ae Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 7 Dec 2024 22:59:35 +0100 Subject: [PATCH 1967/1976] Update format.yml (#13451) Signed-off-by: Glenn Jocher --- .github/workflows/format.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml index d1935e40afc0..1672a51691d3 100644 --- a/.github/workflows/format.yml +++ b/.github/workflows/format.yml @@ -7,7 +7,7 @@ name: Ultralytics Actions on: issues: types: [opened] - pull_request_target: + pull_request: branches: [main, master] types: [opened, closed, synchronize, review_requested] From 0797106a98d18a145d46e57c44147fc2d5afd7a0 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 25 Dec 2024 20:44:32 +0100 Subject: [PATCH 1968/1976] Update links.yml (#13470) * Update links.yml Signed-off-by: Glenn Jocher * Update links.yml Signed-off-by: Glenn Jocher --------- Signed-off-by: Glenn Jocher --- .github/workflows/links.yml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.github/workflows/links.yml b/.github/workflows/links.yml index 49fde8395643..64f266929308 100644 --- a/.github/workflows/links.yml +++ b/.github/workflows/links.yml @@ -23,9 +23,7 @@ jobs: - name: Download and install lychee run: | LYCHEE_URL=$(curl -s https://api.github.com/repos/lycheeverse/lychee/releases/latest | grep "browser_download_url" | grep "x86_64-unknown-linux-gnu.tar.gz" | cut -d '"' -f 4) - curl -L $LYCHEE_URL -o lychee.tar.gz - tar xzf lychee.tar.gz - sudo mv lychee /usr/local/bin + curl -L $LYCHEE_URL | tar xz -C /usr/local/bin - name: Test Markdown and HTML links with retry uses: ultralytics/actions/retry@main From 915ce21ebc558b74ffc5e4c288f7f6cd4482c2b1 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Wed, 1 Jan 2025 15:47:35 +0100 Subject: [PATCH 1969/1976] Update format.yml (#13479) Signed-off-by: Glenn Jocher --- .github/workflows/format.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml index 1672a51691d3..b88eebcba45e 100644 --- a/.github/workflows/format.yml +++ b/.github/workflows/format.yml @@ -18,7 +18,7 @@ jobs: - name: Run Ultralytics Formatting uses: ultralytics/actions@main with: - token: ${{ secrets._GITHUB_TOKEN }} # note GITHUB_TOKEN automatically generated + token: ${{ secrets._GITHUB_TOKEN || secrets.GITHUB_TOKEN}} labels: true # autolabel issues and PRs python: true # format Python code and docstrings prettier: true # format YAML, JSON, Markdown and CSS From f003c3df9ec2b7dd8825edf21c775c95f1f45a2b Mon Sep 17 00:00:00 2001 From: Ali Ghanbari Date: Thu, 2 Jan 2025 00:16:59 +0330 Subject: [PATCH 1970/1976] Update detect.py (Fix save-csv: Ensure header is written to CSV) (#13472) This commit resolves an issue where the save-csv command did not write the CSV header. 
The code now correctly saves the header in the CSV file. Signed-off-by: Ali Ghanbari Co-authored-by: Glenn Jocher --- detect.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/detect.py b/detect.py index f404a250345d..ac18f3c3641d 100644 --- a/detect.py +++ b/detect.py @@ -219,9 +219,10 @@ def run( def write_to_csv(image_name, prediction, confidence): """Writes prediction data for an image to a CSV file, appending if the file exists.""" data = {"Image Name": image_name, "Prediction": prediction, "Confidence": confidence} + file_exists = os.path.isfile(csv_path) with open(csv_path, mode="a", newline="") as f: writer = csv.DictWriter(f, fieldnames=data.keys()) - if not csv_path.is_file(): + if not file_exists: writer.writeheader() writer.writerow(data) From 6981c2742af9030d7f30c0dc3f5c0bcf32588339 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Mon, 6 Jan 2025 21:25:12 +0100 Subject: [PATCH 1971/1976] Ultralytics Refactor https://ultralytics.com/actions (#13484) Refactor code for speed and clarity Co-authored-by: UltralyticsAssistant --- .github/workflows/format.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml index b88eebcba45e..b9b9107032f3 100644 --- a/.github/workflows/format.yml +++ b/.github/workflows/format.yml @@ -18,7 +18,7 @@ jobs: - name: Run Ultralytics Formatting uses: ultralytics/actions@main with: - token: ${{ secrets._GITHUB_TOKEN || secrets.GITHUB_TOKEN}} + token: ${{ secrets._GITHUB_TOKEN || secrets.GITHUB_TOKEN }} labels: true # autolabel issues and PRs python: true # format Python code and docstrings prettier: true # format YAML, JSON, Markdown and CSS From 86fd1ab270cb2f7e53ee7412cd4a0650bf4bcc51 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Fri, 10 Jan 2025 01:42:04 +0100 Subject: [PATCH 1972/1976] Apply `ruff==0.9.0` formatting (#13486) * Refactor code for speed and clarity * Auto-format by https://ultralytics.com/actions --------- Co-authored-by: UltralyticsAssistant --- README.md | 4 ++-- README.zh-CN.md | 4 ++-- classify/predict.py | 2 +- classify/train.py | 16 ++++++++-------- detect.py | 2 +- export.py | 4 ++-- segment/predict.py | 2 +- segment/train.py | 12 ++++++------ train.py | 12 ++++++------ utils/augmentations.py | 10 +--------- utils/dataloaders.py | 13 +++++-------- utils/general.py | 9 ++++----- utils/loggers/clearml/clearml_utils.py | 8 +++----- utils/loggers/comet/__init__.py | 4 +--- utils/loggers/wandb/wandb_utils.py | 2 +- utils/loss.py | 7 +------ utils/segment/augmentations.py | 10 +--------- utils/segment/dataloaders.py | 5 ++--- utils/segment/loss.py | 3 +-- utils/torch_utils.py | 8 ++++---- 20 files changed, 53 insertions(+), 84 deletions(-) diff --git a/README.md b/README.md index 9658bef91587..46b2a833e613 100644 --- a/README.md +++ b/README.md @@ -208,7 +208,7 @@ YOLOv5 has been designed to be super easy to get started and simple to learn. We Figure Notes - **COCO AP val** denotes mAP@0.5:0.95 metric measured on the 5000-image [COCO val2017](http://cocodataset.org) dataset over various inference sizes from 256 to 1536. -- **GPU Speed** measures average inference time per image on [COCO val2017](http://cocodataset.org) dataset using a [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) V100 instance at batch-size 32. +- **GPU Speed** measures average inference time per image on [COCO val2017](http://cocodataset.org) dataset using a [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p4/) V100 instance at batch-size 32. 
- **EfficientDet** data from [google/automl](https://github.com/google/automl) at batch size 8. - **Reproduce** by `python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt` @@ -235,7 +235,7 @@ YOLOv5 has been designed to be super easy to get started and simple to learn. We - All checkpoints are trained to 300 epochs with default settings. Nano and Small models use [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml) hyps, all others use [hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml). - **mAPval** values are for single-model single-scale on [COCO val2017](http://cocodataset.org) dataset.
Reproduce by `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` -- **Speed** averaged over COCO val images using a [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) instance. NMS times (~1 ms/img) not included.
Reproduce by `python val.py --data coco.yaml --img 640 --task speed --batch 1` +- **Speed** averaged over COCO val images using a [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p4/) instance. NMS times (~1 ms/img) not included.
Reproduce by `python val.py --data coco.yaml --img 640 --task speed --batch 1` - **TTA** [Test Time Augmentation](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation/) includes reflection and scale augmentations.
Reproduce by `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`
diff --git a/README.zh-CN.md b/README.zh-CN.md index d1eeeef081da..b76c66d76fdc 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -207,7 +207,7 @@ YOLOv5 超级容易上手,简单易学。我们优先考虑现实世界的结 图表笔记 - **COCO AP val** 表示 mAP@0.5:0.95 指标,在 [COCO val2017](http://cocodataset.org) 数据集的 5000 张图像上测得, 图像包含 256 到 1536 各种推理大小。 -- **显卡推理速度** 为在 [COCO val2017](http://cocodataset.org) 数据集上的平均推理时间,使用 [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) V100实例,batchsize 为 32 。 +- **显卡推理速度** 为在 [COCO val2017](http://cocodataset.org) 数据集上的平均推理时间,使用 [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p4/) V100实例,batchsize 为 32 。 - **EfficientDet** 数据来自 [google/automl](https://github.com/google/automl) , batchsize 为32。 - **复现命令** 为 `python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt` @@ -234,7 +234,7 @@ YOLOv5 超级容易上手,简单易学。我们优先考虑现实世界的结 - 所有模型都使用默认配置,训练 300 epochs。n和s模型使用 [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml) ,其他模型都使用 [hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml) 。 - \*\*mAPval\*\*在单模型单尺度上计算,数据集使用 [COCO val2017](http://cocodataset.org) 。
复现命令 `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` -- **推理速度**在 COCO val 图像总体时间上进行平均得到,测试环境使用[AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/)实例。 NMS 时间 (大约 1 ms/img) 不包括在内。
复现命令 `python val.py --data coco.yaml --img 640 --task speed --batch 1` +- **推理速度**在 COCO val 图像总体时间上进行平均得到,测试环境使用[AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p4/)实例。 NMS 时间 (大约 1 ms/img) 不包括在内。
复现命令 `python val.py --data coco.yaml --img 640 --task speed --batch 1` - **TTA** [测试时数据增强](https://docs.ultralytics.com/yolov5/tutorials/test_time_augmentation/) 包括反射和尺度变换。
复现命令 `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment`
diff --git a/classify/predict.py b/classify/predict.py index d77b4af33007..ddc67ca8019f 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -192,7 +192,7 @@ def run( vid_writer[i].write(im0) # Print time (inference-only) - LOGGER.info(f"{s}{dt[1].dt * 1E3:.1f}ms") + LOGGER.info(f"{s}{dt[1].dt * 1e3:.1f}ms") # Print results t = tuple(x.t / seen * 1e3 for x in dt) # speeds per image diff --git a/classify/train.py b/classify/train.py index 9c12a66c326f..e56db8ce4b08 100644 --- a/classify/train.py +++ b/classify/train.py @@ -201,10 +201,10 @@ def lf(x): scaler = amp.GradScaler(enabled=cuda) val = test_dir.stem # 'val' or 'test' LOGGER.info( - f'Image sizes {imgsz} train, {imgsz} test\n' - f'Using {nw * WORLD_SIZE} dataloader workers\n' + f"Image sizes {imgsz} train, {imgsz} test\n" + f"Using {nw * WORLD_SIZE} dataloader workers\n" f"Logging results to {colorstr('bold', save_dir)}\n" - f'Starting {opt.model} training on {data} dataset with {nc} classes for {epochs} epochs...\n\n' + f"Starting {opt.model} training on {data} dataset with {nc} classes for {epochs} epochs...\n\n" f"{'Epoch':>10}{'GPU_mem':>10}{'train_loss':>12}{f'{val}_loss':>12}{'top1_acc':>12}{'top5_acc':>12}" ) for epoch in range(epochs): # loop over the dataset multiple times @@ -290,13 +290,13 @@ def lf(x): # Train complete if RANK in {-1, 0} and final_epoch: LOGGER.info( - f'\nTraining complete ({(time.time() - t0) / 3600:.3f} hours)' + f"\nTraining complete ({(time.time() - t0) / 3600:.3f} hours)" f"\nResults saved to {colorstr('bold', save_dir)}" - f'\nPredict: python classify/predict.py --weights {best} --source im.jpg' - f'\nValidate: python classify/val.py --weights {best} --data {data_dir}' - f'\nExport: python export.py --weights {best} --include onnx' + f"\nPredict: python classify/predict.py --weights {best} --source im.jpg" + f"\nValidate: python classify/val.py --weights {best} --data {data_dir}" + f"\nExport: python export.py --weights {best} --include onnx" f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{best}')" - f'\nVisualize: https://netron.app\n' + f"\nVisualize: https://netron.app\n" ) # Plot examples diff --git a/detect.py b/detect.py index ac18f3c3641d..6bc666dccf9b 100644 --- a/detect.py +++ b/detect.py @@ -309,7 +309,7 @@ def write_to_csv(image_name, prediction, confidence): vid_writer[i].write(im0) # Print time (inference-only) - LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms") + LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1e3:.1f}ms") # Print results t = tuple(x.t / seen * 1e3 for x in dt) # speeds per image diff --git a/export.py b/export.py index f5dc0684e7a1..f617c755100f 100644 --- a/export.py +++ b/export.py @@ -1471,12 +1471,12 @@ def run( else "" ) LOGGER.info( - f'\nExport complete ({time.time() - t:.1f}s)' + f"\nExport complete ({time.time() - t:.1f}s)" f"\nResults saved to {colorstr('bold', file.parent.resolve())}" f"\nDetect: python {dir / ('detect.py' if det else 'predict.py')} --weights {f[-1]} {h}" f"\nValidate: python {dir / 'val.py'} --weights {f[-1]} {h}" f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{f[-1]}') {s}" - f'\nVisualize: https://netron.app' + f"\nVisualize: https://netron.app" ) return f # return list of exported files/dirs diff --git a/segment/predict.py b/segment/predict.py index 40f7623b38ed..1dd892a1962a 100644 --- a/segment/predict.py +++ b/segment/predict.py @@ -245,7 +245,7 @@ def run( vid_writer[i].write(im0) # Print time (inference-only) - 
LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms") + LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1e3:.1f}ms") # Print results t = tuple(x.t / seen * 1e3 for x in dt) # speeds per image diff --git a/segment/train.py b/segment/train.py index 379fed0b2f14..f68249d92301 100644 --- a/segment/train.py +++ b/segment/train.py @@ -325,10 +325,10 @@ def lf(x): compute_loss = ComputeLoss(model, overlap=overlap) # init loss class # callbacks.run('on_train_start') LOGGER.info( - f'Image sizes {imgsz} train, {imgsz} val\n' - f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n' + f"Image sizes {imgsz} train, {imgsz} val\n" + f"Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n" f"Logging results to {colorstr('bold', save_dir)}\n" - f'Starting training for {epochs} epochs...' + f"Starting training for {epochs} epochs..." ) for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------ # callbacks.run('on_train_epoch_start') @@ -405,7 +405,7 @@ def lf(x): # Log if RANK in {-1, 0}: mloss = (mloss * i + loss_items) / (i + 1) # update mean losses - mem = f"{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G" # (GB) + mem = f"{torch.cuda.memory_reserved() / 1e9 if torch.cuda.is_available() else 0:.3g}G" # (GB) pbar.set_description( ("%11s" * 2 + "%11.4g" * 6) % (f"{epoch}/{epochs - 1}", mem, *mloss, targets.shape[0], imgs.shape[-1]) @@ -740,9 +740,9 @@ def main(opt, callbacks=Callbacks()): # Plot results plot_evolve(evolve_csv) LOGGER.info( - f'Hyperparameter evolution finished {opt.evolve} generations\n' + f"Hyperparameter evolution finished {opt.evolve} generations\n" f"Results saved to {colorstr('bold', save_dir)}\n" - f'Usage example: $ python train.py --hyp {evolve_yaml}' + f"Usage example: $ python train.py --hyp {evolve_yaml}" ) diff --git a/train.py b/train.py index da3d28c9705f..fec0a239c0af 100644 --- a/train.py +++ b/train.py @@ -357,10 +357,10 @@ def lf(x): compute_loss = ComputeLoss(model) # init loss class callbacks.run("on_train_start") LOGGER.info( - f'Image sizes {imgsz} train, {imgsz} val\n' - f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n' + f"Image sizes {imgsz} train, {imgsz} val\n" + f"Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n" f"Logging results to {colorstr('bold', save_dir)}\n" - f'Starting training for {epochs} epochs...' + f"Starting training for {epochs} epochs..." 
) for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------ callbacks.run("on_train_epoch_start") @@ -434,7 +434,7 @@ def lf(x): # Log if RANK in {-1, 0}: mloss = (mloss * i + loss_items) / (i + 1) # update mean losses - mem = f"{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G" # (GB) + mem = f"{torch.cuda.memory_reserved() / 1e9 if torch.cuda.is_available() else 0:.3g}G" # (GB) pbar.set_description( ("%11s" * 2 + "%11.4g" * 5) % (f"{epoch}/{epochs - 1}", mem, *mloss, targets.shape[0], imgs.shape[-1]) @@ -880,9 +880,9 @@ def main(opt, callbacks=Callbacks()): # Plot results plot_evolve(evolve_csv) LOGGER.info( - f'Hyperparameter evolution finished {opt.evolve} generations\n' + f"Hyperparameter evolution finished {opt.evolve} generations\n" f"Results saved to {colorstr('bold', save_dir)}\n" - f'Usage example: $ python train.py --hyp {evolve_yaml}' + f"Usage example: $ python train.py --hyp {evolve_yaml}" ) diff --git a/utils/augmentations.py b/utils/augmentations.py index af4c4057cba1..e051ad66fb8d 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -197,15 +197,7 @@ def random_perspective( else: # affine im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114)) - # Visualize - # import matplotlib.pyplot as plt - # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel() - # ax[0].imshow(im[:, :, ::-1]) # base - # ax[1].imshow(im2[:, :, ::-1]) # warped - - # Transform label coordinates - n = len(targets) - if n: + if n := len(targets): use_segments = any(x.any() for x in segments) and len(segments) == n new = np.zeros((n, 4)) if use_segments: # warp segments diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 00e98168ffbd..fc5da6bff2a3 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -356,8 +356,7 @@ def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None, vi else: self.cap = None assert self.nf > 0, ( - f"No images or videos found in {p}. " - f"Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}" + f"No images or videos found in {p}. Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}" ) def __iter__(self): @@ -713,8 +712,8 @@ def check_cache_ram(self, safety_margin=0.1, prefix=""): cache = mem_required * (1 + safety_margin) < mem.available # to cache or not to cache, that is the question if not cache: LOGGER.info( - f'{prefix}{mem_required / gb:.1f}GB RAM required, ' - f'{mem.available / gb:.1f}/{mem.total / gb:.1f}GB available, ' + f"{prefix}{mem_required / gb:.1f}GB RAM required, " + f"{mem.available / gb:.1f}/{mem.total / gb:.1f}GB available, " f"{'caching images ✅' if cache else 'not caching images ⚠️'}" ) return cache @@ -774,8 +773,7 @@ def __getitem__(self, index): index = self.indices[index] # linear, shuffled, or image_weights hyp = self.hyp - mosaic = self.mosaic and random.random() < hyp["mosaic"] - if mosaic: + if mosaic := self.mosaic and random.random() < hyp["mosaic"]: # Load mosaic img, labels = self.load_mosaic(index) shapes = None @@ -1162,8 +1160,7 @@ def verify_image_label(args): segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in lb] # (cls, xy1...) 
lb = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh) lb = np.array(lb, dtype=np.float32) - nl = len(lb) - if nl: + if nl := len(lb): assert lb.shape[1] == 5, f"labels require 5 columns, {lb.shape[1]} columns detected" assert (lb >= 0).all(), f"negative label values {lb[lb < 0]}" assert (lb[:, 1:] <= 1).all(), f"non-normalized or out of bounds coordinates {lb[:, 1:][lb[:, 1:] > 1]}" diff --git a/utils/general.py b/utils/general.py index 8c0b2fcd3ef7..41cd2032c821 100644 --- a/utils/general.py +++ b/utils/general.py @@ -173,8 +173,7 @@ def user_config_dir(dir="Ultralytics", env_var="YOLOV5_CONFIG_DIR"): """Returns user configuration directory path, preferring environment variable `YOLOV5_CONFIG_DIR` if set, else OS- specific. """ - env = os.getenv(env_var) - if env: + if env := os.getenv(env_var): path = Path(env) # use environment variable else: cfg = {"Windows": "AppData/Roaming", "Linux": ".config", "Darwin": "Library/Application Support"} # 3 OS dirs @@ -496,9 +495,9 @@ def check_file(file, suffix=""): assert Path(file).exists() and Path(file).stat().st_size > 0, f"File download failed: {url}" # check return file elif file.startswith("clearml://"): # ClearML Dataset ID - assert ( - "clearml" in sys.modules - ), "ClearML is not installed, so cannot use ClearML dataset. Try running 'pip install clearml'." + assert "clearml" in sys.modules, ( + "ClearML is not installed, so cannot use ClearML dataset. Try running 'pip install clearml'." + ) return file else: # search files = [] diff --git a/utils/loggers/clearml/clearml_utils.py b/utils/loggers/clearml/clearml_utils.py index fc19c8cfe22a..2ddf3a4711a3 100644 --- a/utils/loggers/clearml/clearml_utils.py +++ b/utils/loggers/clearml/clearml_utils.py @@ -41,11 +41,9 @@ def construct_dataset(clearml_info_string): with open(yaml_filenames[0]) as f: dataset_definition = yaml.safe_load(f) - assert set( - dataset_definition.keys() - ).issuperset( - {"train", "test", "val", "nc", "names"} - ), "The right keys were not found in the yaml file, make sure it at least has the following keys: ('train', 'test', 'val', 'nc', 'names')" + assert set(dataset_definition.keys()).issuperset({"train", "test", "val", "nc", "names"}), ( + "The right keys were not found in the yaml file, make sure it at least has the following keys: ('train', 'test', 'val', 'nc', 'names')" + ) data_dict = { "train": ( diff --git a/utils/loggers/comet/__init__.py b/utils/loggers/comet/__init__.py index 846dcb42a225..f4940a1a2c67 100644 --- a/utils/loggers/comet/__init__.py +++ b/utils/loggers/comet/__init__.py @@ -88,14 +88,12 @@ def __init__(self, opt, hyp, run_id=None, job_type="Training", **experiment_kwar self.upload_dataset = self.opt.upload_dataset or COMET_UPLOAD_DATASET self.resume = self.opt.resume - # Default parameters to pass to Experiment objects self.default_experiment_kwargs = { "log_code": False, "log_env_gpu": True, "log_env_cpu": True, "project_name": COMET_PROJECT_NAME, - } - self.default_experiment_kwargs.update(experiment_kwargs) + } | experiment_kwargs self.experiment = self._get_experiment(self.comet_mode, run_id) self.experiment.set_name(self.opt.name) diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index 9883d5738afa..102228a67176 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -18,7 +18,7 @@ RANK = int(os.getenv("RANK", -1)) DEPRECATION_WARNING = ( f"{colorstr('wandb')}: WARNING ⚠️ wandb is deprecated and will be removed in a future release. 
" - f'See supported integrations at https://github.com/ultralytics/yolov5#integrations.' + f"See supported integrations at https://github.com/ultralytics/yolov5#integrations." ) try: diff --git a/utils/loss.py b/utils/loss.py index fd5912f49182..c2201833970a 100644 --- a/utils/loss.py +++ b/utils/loss.py @@ -148,8 +148,7 @@ def __call__(self, p, targets): # predictions, targets b, a, gj, gi = indices[i] # image, anchor, gridy, gridx tobj = torch.zeros(pi.shape[:4], dtype=pi.dtype, device=self.device) # target obj - n = b.shape[0] # number of targets - if n: + if n := b.shape[0]: # pxy, pwh, _, pcls = pi[b, a, gj, gi].tensor_split((2, 4, 5), dim=1) # faster, requires torch 1.8.0 pxy, pwh, _, pcls = pi[b, a, gj, gi].split((2, 2, 1, self.nc), 1) # target-subset of predictions @@ -175,10 +174,6 @@ def __call__(self, p, targets): # predictions, targets t[range(n), tcls[i]] = self.cp lcls += self.BCEcls(pcls, t) # BCE - # Append targets to text file - # with open('targets.txt', 'a') as file: - # [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)] - obji = self.BCEobj(pi[..., 4], tobj) lobj += obji * self.balance[i] # obj loss if self.autobalance: diff --git a/utils/segment/augmentations.py b/utils/segment/augmentations.py index 2e1dca1198b0..302e41379999 100644 --- a/utils/segment/augmentations.py +++ b/utils/segment/augmentations.py @@ -69,16 +69,8 @@ def random_perspective( else: # affine im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114)) - # Visualize - # import matplotlib.pyplot as plt - # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel() - # ax[0].imshow(im[:, :, ::-1]) # base - # ax[1].imshow(im2[:, :, ::-1]) # warped - - # Transform label coordinates - n = len(targets) new_segments = [] - if n: + if n := len(targets): new = np.zeros((n, 4)) segments = resample_segments(segments) # upsample for i, segment in enumerate(segments): diff --git a/utils/segment/dataloaders.py b/utils/segment/dataloaders.py index f6507649491f..ee6e6186d140 100644 --- a/utils/segment/dataloaders.py +++ b/utils/segment/dataloaders.py @@ -132,9 +132,7 @@ def __getitem__(self, index): index = self.indices[index] # linear, shuffled, or image_weights hyp = self.hyp - mosaic = self.mosaic and random.random() < hyp["mosaic"] - masks = [] - if mosaic: + if mosaic := self.mosaic and random.random() < hyp["mosaic"]: # Load mosaic img, labels, segments = self.load_mosaic(index) shapes = None @@ -180,6 +178,7 @@ def __getitem__(self, index): ) nl = len(labels) # number of labels + masks = [] if nl: labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1e-3) if self.overlap: diff --git a/utils/segment/loss.py b/utils/segment/loss.py index b3e76ae00d9c..f63b262b5a66 100644 --- a/utils/segment/loss.py +++ b/utils/segment/loss.py @@ -61,8 +61,7 @@ def __call__(self, preds, targets, masks): # predictions, targets, model b, a, gj, gi = indices[i] # image, anchor, gridy, gridx tobj = torch.zeros(pi.shape[:4], dtype=pi.dtype, device=self.device) # target obj - n = b.shape[0] # number of targets - if n: + if n := b.shape[0]: pxy, pwh, _, pcls, pmask = pi[b, a, gj, gi].split((2, 2, 1, self.nc, nm), 1) # subset of predictions # Box regression diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 8bf6585bca0f..29877faa6ce3 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -121,9 +121,9 @@ def select_device(device="", batch_size=0, newline=True): os.environ["CUDA_VISIBLE_DEVICES"] = "-1" # force 
torch.cuda.is_available() = False elif device: # non-cpu device requested os.environ["CUDA_VISIBLE_DEVICES"] = device # set environment variable - must be before assert is_available() - assert torch.cuda.is_available() and torch.cuda.device_count() >= len( - device.replace(",", "") - ), f"Invalid CUDA '--device {device}' requested, use '--device cpu' or pass valid CUDA device(s)" + assert torch.cuda.is_available() and torch.cuda.device_count() >= len(device.replace(",", "")), ( + f"Invalid CUDA '--device {device}' requested, use '--device cpu' or pass valid CUDA device(s)" + ) if not cpu and not mps and torch.cuda.is_available(): # prefer GPU if available devices = device.split(",") if device else "0" # range(torch.cuda.device_count()) # i.e. 0,1,6,7 @@ -380,7 +380,7 @@ def smart_optimizer(model, name="Adam", lr=0.001, momentum=0.9, decay=1e-5): optimizer.add_param_group({"params": g[1], "weight_decay": 0.0}) # add g1 (BatchNorm2d weights) LOGGER.info( f"{colorstr('optimizer:')} {type(optimizer).__name__}(lr={lr}) with parameter groups " - f'{len(g[1])} weight(decay=0.0), {len(g[0])} weight(decay={decay}), {len(g[2])} bias' + f"{len(g[1])} weight(decay=0.0), {len(g[0])} weight(decay={decay}), {len(g[2])} bias" ) return optimizer From 6420a1db87460d36fd2141a65659093df27c1996 Mon Sep 17 00:00:00 2001 From: Mohammed Yasin <32206511+Y-T-G@users.noreply.github.com> Date: Mon, 13 Jan 2025 02:05:20 +0800 Subject: [PATCH 1973/1976] Fix TFLite Segment inference (#13488) * Fix TFLite Segment inference * Auto-format by https://ultralytics.com --------- Co-authored-by: UltralyticsAssistant --- models/common.py | 3 +++ utils/dataloaders.py | 6 +++--- utils/general.py | 6 +++--- utils/loggers/clearml/clearml_utils.py | 8 +++++--- utils/torch_utils.py | 6 +++--- 5 files changed, 17 insertions(+), 12 deletions(-) diff --git a/models/common.py b/models/common.py index 8ad53d5d11b6..1ae3280f97d8 100644 --- a/models/common.py +++ b/models/common.py @@ -750,6 +750,9 @@ def forward(self, im, augment=False, visualize=False): scale, zero_point = output["quantization"] x = (x.astype(np.float32) - zero_point) * scale # re-scale y.append(x) + if len(y) == 2: # segment with (det, proto) output order reversed + if len(y[1].shape) != 4: + y = list(reversed(y)) # should be y = (1, 116, 8400), (1, 160, 160, 32) y = [x if isinstance(x, np.ndarray) else x.numpy() for x in y] y[0][..., :4] *= [w, h, w, h] # xywh normalized to pixels diff --git a/utils/dataloaders.py b/utils/dataloaders.py index fc5da6bff2a3..08240483339f 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -355,9 +355,9 @@ def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None, vi self._new_video(videos[0]) # new video else: self.cap = None - assert self.nf > 0, ( - f"No images or videos found in {p}. Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}" - ) + assert ( + self.nf > 0 + ), f"No images or videos found in {p}. 
Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}" def __iter__(self): """Initializes iterator by resetting count and returns the iterator object itself.""" diff --git a/utils/general.py b/utils/general.py index 41cd2032c821..db6ddacefd85 100644 --- a/utils/general.py +++ b/utils/general.py @@ -495,9 +495,9 @@ def check_file(file, suffix=""): assert Path(file).exists() and Path(file).stat().st_size > 0, f"File download failed: {url}" # check return file elif file.startswith("clearml://"): # ClearML Dataset ID - assert "clearml" in sys.modules, ( - "ClearML is not installed, so cannot use ClearML dataset. Try running 'pip install clearml'." - ) + assert ( + "clearml" in sys.modules + ), "ClearML is not installed, so cannot use ClearML dataset. Try running 'pip install clearml'." return file else: # search files = [] diff --git a/utils/loggers/clearml/clearml_utils.py b/utils/loggers/clearml/clearml_utils.py index 2ddf3a4711a3..fc19c8cfe22a 100644 --- a/utils/loggers/clearml/clearml_utils.py +++ b/utils/loggers/clearml/clearml_utils.py @@ -41,9 +41,11 @@ def construct_dataset(clearml_info_string): with open(yaml_filenames[0]) as f: dataset_definition = yaml.safe_load(f) - assert set(dataset_definition.keys()).issuperset({"train", "test", "val", "nc", "names"}), ( - "The right keys were not found in the yaml file, make sure it at least has the following keys: ('train', 'test', 'val', 'nc', 'names')" - ) + assert set( + dataset_definition.keys() + ).issuperset( + {"train", "test", "val", "nc", "names"} + ), "The right keys were not found in the yaml file, make sure it at least has the following keys: ('train', 'test', 'val', 'nc', 'names')" data_dict = { "train": ( diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 29877faa6ce3..53e707607915 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -121,9 +121,9 @@ def select_device(device="", batch_size=0, newline=True): os.environ["CUDA_VISIBLE_DEVICES"] = "-1" # force torch.cuda.is_available() = False elif device: # non-cpu device requested os.environ["CUDA_VISIBLE_DEVICES"] = device # set environment variable - must be before assert is_available() - assert torch.cuda.is_available() and torch.cuda.device_count() >= len(device.replace(",", "")), ( - f"Invalid CUDA '--device {device}' requested, use '--device cpu' or pass valid CUDA device(s)" - ) + assert torch.cuda.is_available() and torch.cuda.device_count() >= len( + device.replace(",", "") + ), f"Invalid CUDA '--device {device}' requested, use '--device cpu' or pass valid CUDA device(s)" if not cpu and not mps and torch.cuda.is_available(): # prefer GPU if available devices = device.split(",") if device else "0" # range(torch.cuda.device_count()) # i.e. 
0,1,6,7 From c568972d597ee2fa14525a739d8acb284fc19c87 Mon Sep 17 00:00:00 2001 From: Paula Derrenger <107626595+pderrenger@users.noreply.github.com> Date: Wed, 15 Jan 2025 15:31:19 +0100 Subject: [PATCH 1974/1976] Standardize license headers in Python files (#13490) * Refactor code for speed and clarity * Auto-format by https://ultralytics.com/actions --------- Co-authored-by: Glenn Jocher Co-authored-by: UltralyticsAssistant --- benchmarks.py | 2 +- classify/predict.py | 2 +- classify/train.py | 2 +- classify/val.py | 2 +- detect.py | 2 +- export.py | 2 +- hubconf.py | 2 +- models/__init__.py | 1 + models/common.py | 7 +++---- models/experimental.py | 2 +- models/tf.py | 2 +- models/yolo.py | 2 +- segment/predict.py | 2 +- segment/train.py | 2 +- segment/val.py | 2 +- train.py | 2 +- utils/__init__.py | 2 +- utils/activations.py | 2 +- utils/augmentations.py | 2 +- utils/autoanchor.py | 2 +- utils/autobatch.py | 2 +- utils/aws/__init__.py | 1 + utils/aws/resume.py | 3 ++- utils/callbacks.py | 2 +- utils/dataloaders.py | 8 ++++---- utils/downloads.py | 2 +- utils/flask_rest_api/example_request.py | 2 +- utils/flask_rest_api/restapi.py | 2 +- utils/general.py | 8 ++++---- utils/loggers/__init__.py | 2 +- utils/loggers/clearml/README.md | 2 +- utils/loggers/clearml/__init__.py | 1 + utils/loggers/clearml/clearml_utils.py | 10 ++++------ utils/loggers/clearml/hpo.py | 2 +- utils/loggers/comet/__init__.py | 2 +- utils/loggers/comet/comet_utils.py | 2 +- utils/loggers/comet/hpo.py | 2 +- utils/loggers/wandb/__init__.py | 1 + utils/loggers/wandb/wandb_utils.py | 2 +- utils/loss.py | 2 +- utils/metrics.py | 2 +- utils/plots.py | 2 +- utils/segment/__init__.py | 1 + utils/segment/augmentations.py | 2 +- utils/segment/dataloaders.py | 2 +- utils/segment/general.py | 2 +- utils/segment/loss.py | 2 +- utils/segment/metrics.py | 2 +- utils/segment/plots.py | 2 +- utils/torch_utils.py | 8 ++++---- utils/triton.py | 2 +- val.py | 2 +- 52 files changed, 67 insertions(+), 64 deletions(-) diff --git a/benchmarks.py b/benchmarks.py index 996b8d438053..45ae55b36f0c 100644 --- a/benchmarks.py +++ b/benchmarks.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """ Run YOLOv5 benchmarks on all supported export formats. diff --git a/classify/predict.py b/classify/predict.py index ddc67ca8019f..59db1330a0df 100644 --- a/classify/predict.py +++ b/classify/predict.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """ Run YOLOv5 classification inference on images, videos, directories, globs, YouTube, webcam, streams, etc. diff --git a/classify/train.py b/classify/train.py index e56db8ce4b08..d454c7187339 100644 --- a/classify/train.py +++ b/classify/train.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """ Train a YOLOv5 classifier model on a classification dataset. diff --git a/classify/val.py b/classify/val.py index 8ce48f0645bf..72bd0e14e2c5 100644 --- a/classify/val.py +++ b/classify/val.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """ Validate a trained YOLOv5 classification model on a classification dataset. 
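For reference, the rank check from #13488 above (condensed to a single condition in the models/common.py hunk below) keys entirely on tensor rank: the detection output is 3-D while the mask-prototype tensor is 4-D. A minimal sketch of the same disambiguation, assuming NumPy arrays with the shapes named in the diff comment; the array names and contents here are placeholders:

import numpy as np

# Placeholder TFLite outputs for a YOLOv5 segment model, deliberately listed in
# the reversed (proto, det) order some exports produce.
det = np.zeros((1, 116, 8400), dtype=np.float32)  # boxes, scores and mask coefficients
proto = np.zeros((1, 160, 160, 32), dtype=np.float32)  # mask prototypes, always 4-D
y = [proto, det]

# Same rule as models/common.py: with exactly two outputs, a non-4-D second
# element means the pair is reversed, so restore (det, proto) order.
if len(y) == 2 and len(y[1].shape) != 4:
    y = list(reversed(y))

assert y[0].shape == (1, 116, 8400) and y[1].shape == (1, 160, 160, 32)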
diff --git a/detect.py b/detect.py
index 6bc666dccf9b..24724a35f618 100644
--- a/detect.py
+++ b/detect.py
@@ -1,4 +1,4 @@
-# Ultralytics YOLOv5 🚀, AGPL-3.0 license
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 """
 Run YOLOv5 detection inference on images, videos, directories, globs, YouTube, webcam, streams, etc.
diff --git a/export.py b/export.py
index f617c755100f..3ecb353b712a 100644
--- a/export.py
+++ b/export.py
@@ -1,4 +1,4 @@
-# Ultralytics YOLOv5 🚀, AGPL-3.0 license
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 """
 Export a YOLOv5 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit.
diff --git a/hubconf.py b/hubconf.py
index e7ca62b045c4..2eeac4eddc87 100644
--- a/hubconf.py
+++ b/hubconf.py
@@ -1,4 +1,4 @@
-# Ultralytics YOLOv5 🚀, AGPL-3.0 license
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 """
 PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5.
diff --git a/models/__init__.py b/models/__init__.py
index e69de29bb2d1..77a19dcf0f80 100644
--- a/models/__init__.py
+++ b/models/__init__.py
@@ -0,0 +1 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
diff --git a/models/common.py b/models/common.py
index 1ae3280f97d8..ea893db4b66f 100644
--- a/models/common.py
+++ b/models/common.py
@@ -1,4 +1,4 @@
-# Ultralytics YOLOv5 🚀, AGPL-3.0 license
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 """Common modules."""

 import ast
@@ -750,9 +750,8 @@ def forward(self, im, augment=False, visualize=False):
                     scale, zero_point = output["quantization"]
                     x = (x.astype(np.float32) - zero_point) * scale  # re-scale
                 y.append(x)
-            if len(y) == 2:  # segment with (det, proto) output order reversed
-                if len(y[1].shape) != 4:
-                    y = list(reversed(y))  # should be y = (1, 116, 8400), (1, 160, 160, 32)
+            if len(y) == 2 and len(y[1].shape) != 4:
+                y = list(reversed(y))
             y = [x if isinstance(x, np.ndarray) else x.numpy() for x in y]
             y[0][..., :4] *= [w, h, w, h]  # xywh normalized to pixels
diff --git a/models/experimental.py b/models/experimental.py
index ab9b0ed23dc3..63d9c461af35 100644
--- a/models/experimental.py
+++ b/models/experimental.py
@@ -1,4 +1,4 @@
-# Ultralytics YOLOv5 🚀, AGPL-3.0 license
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 """Experimental modules."""

 import math
diff --git a/models/tf.py b/models/tf.py
index 59bb7e0f558d..c2cad393e7ea 100644
--- a/models/tf.py
+++ b/models/tf.py
@@ -1,4 +1,4 @@
-# Ultralytics YOLOv5 🚀, AGPL-3.0 license
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 """
 TensorFlow, Keras and TFLite versions of YOLOv5
 Authored by https://github.com/zldrobit in PR https://github.com/ultralytics/yolov5/pull/1127.
diff --git a/models/yolo.py b/models/yolo.py
index c0dd946e0356..13498aced8ee 100644
--- a/models/yolo.py
+++ b/models/yolo.py
@@ -1,4 +1,4 @@
-# Ultralytics YOLOv5 🚀, AGPL-3.0 license
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 """
 YOLO-specific modules.
diff --git a/segment/predict.py b/segment/predict.py
index 1dd892a1962a..e0e4336c1bd1 100644
--- a/segment/predict.py
+++ b/segment/predict.py
@@ -1,4 +1,4 @@
-# Ultralytics YOLOv5 🚀, AGPL-3.0 license
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 """
 Run YOLOv5 segmentation inference on images, videos, directories, streams, etc.
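The header standardization itself is a mechanical find-and-replace across the tree. A hedged sketch of how such a sweep could be scripted (this is an illustration, not the actual Ultralytics Actions implementation; the glob and the in-place rewrite strategy are assumptions):

from pathlib import Path

OLD = "# Ultralytics YOLOv5 🚀, AGPL-3.0 license"
NEW = "# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license"

def update_header(path: Path) -> bool:
    """Rewrite a legacy license header in place; return True if the file changed."""
    text = path.read_text(encoding="utf-8")
    if OLD not in text:
        return False
    path.write_text(text.replace(OLD, NEW), encoding="utf-8")
    return True

changed = sum(update_header(p) for p in Path(".").rglob("*.py"))
print(f"updated {changed} files")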
diff --git a/segment/train.py b/segment/train.py index f68249d92301..815c97ce1d48 100644 --- a/segment/train.py +++ b/segment/train.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """ Train a YOLOv5 segment model on a segment dataset Models and datasets download automatically from the latest YOLOv5 release. diff --git a/segment/val.py b/segment/val.py index 60a7fe7cba2e..edd6a08fa0d9 100644 --- a/segment/val.py +++ b/segment/val.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """ Validate a trained YOLOv5 segment model on a segment dataset. diff --git a/train.py b/train.py index fec0a239c0af..1401ccb969b4 100644 --- a/train.py +++ b/train.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """ Train a YOLOv5 model on a custom dataset. Models and datasets download automatically from the latest YOLOv5 release. diff --git a/utils/__init__.py b/utils/__init__.py index 185afd6964d7..3c43c9b68426 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """utils/initialization.""" import contextlib diff --git a/utils/activations.py b/utils/activations.py index 44bec8cd14b5..4652540db4d8 100644 --- a/utils/activations.py +++ b/utils/activations.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """Activation functions.""" import torch diff --git a/utils/augmentations.py b/utils/augmentations.py index e051ad66fb8d..79e7afc8dbdf 100644 --- a/utils/augmentations.py +++ b/utils/augmentations.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """Image augmentation functions.""" import math diff --git a/utils/autoanchor.py b/utils/autoanchor.py index 00eee2eb776d..1fc4207596bf 100644 --- a/utils/autoanchor.py +++ b/utils/autoanchor.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """AutoAnchor utils.""" import random diff --git a/utils/autobatch.py b/utils/autobatch.py index 08a0de841a98..9d5ea0a94296 100644 --- a/utils/autobatch.py +++ b/utils/autobatch.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """Auto-batch utils.""" from copy import deepcopy diff --git a/utils/aws/__init__.py b/utils/aws/__init__.py index e69de29bb2d1..77a19dcf0f80 100644 --- a/utils/aws/__init__.py +++ b/utils/aws/__init__.py @@ -0,0 +1 @@ +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license diff --git a/utils/aws/resume.py b/utils/aws/resume.py index ea432a1615ca..5b80fd41fb10 100644 --- a/utils/aws/resume.py +++ b/utils/aws/resume.py @@ -1,4 +1,5 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # Resume all interrupted trainings in yolov5/ dir including DDP trainings # Usage: $ python utils/aws/resume.py diff --git a/utils/callbacks.py b/utils/callbacks.py index 21c587bd74c6..1a60928401d3 100644 --- a/utils/callbacks.py +++ b/utils/callbacks.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license 
"""Callback utils.""" import threading diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 08240483339f..7ca5a852415c 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """Dataloaders and dataset utils.""" import contextlib @@ -355,9 +355,9 @@ def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None, vi self._new_video(videos[0]) # new video else: self.cap = None - assert ( - self.nf > 0 - ), f"No images or videos found in {p}. Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}" + assert self.nf > 0, ( + f"No images or videos found in {p}. Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}" + ) def __iter__(self): """Initializes iterator by resetting count and returns the iterator object itself.""" diff --git a/utils/downloads.py b/utils/downloads.py index c7e2273c794e..f51d67aa9dd8 100644 --- a/utils/downloads.py +++ b/utils/downloads.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """Download utils.""" import logging diff --git a/utils/flask_rest_api/example_request.py b/utils/flask_rest_api/example_request.py index 104249002aa3..db88e8040709 100644 --- a/utils/flask_rest_api/example_request.py +++ b/utils/flask_rest_api/example_request.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """Perform test request.""" import pprint diff --git a/utils/flask_rest_api/restapi.py b/utils/flask_rest_api/restapi.py index 7e03d3a6679a..410ae26c5942 100644 --- a/utils/flask_rest_api/restapi.py +++ b/utils/flask_rest_api/restapi.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """Run a Flask REST API exposing one or more YOLOv5s models.""" import argparse diff --git a/utils/general.py b/utils/general.py index db6ddacefd85..89bbc61f4559 100644 --- a/utils/general.py +++ b/utils/general.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """General utils.""" import contextlib @@ -495,9 +495,9 @@ def check_file(file, suffix=""): assert Path(file).exists() and Path(file).stat().st_size > 0, f"File download failed: {url}" # check return file elif file.startswith("clearml://"): # ClearML Dataset ID - assert ( - "clearml" in sys.modules - ), "ClearML is not installed, so cannot use ClearML dataset. Try running 'pip install clearml'." + assert "clearml" in sys.modules, ( + "ClearML is not installed, so cannot use ClearML dataset. Try running 'pip install clearml'." + ) return file else: # search files = [] diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 92b4bcb05c58..9ff5f56f2bec 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """Logging utils.""" import json diff --git a/utils/loggers/clearml/README.md b/utils/loggers/clearml/README.md index 3dbaaa301885..374765dfb67b 100644 --- a/utils/loggers/clearml/README.md +++ b/utils/loggers/clearml/README.md @@ -4,7 +4,7 @@ ## About ClearML -[ClearML](https://clear.ml/) is an [open-source](https://github.com/allegroai/clearml) toolbox designed to save you time ⏱️. 
+[ClearML](https://clear.ml/) is an [open-source](https://github.com/clearml/clearml) toolbox designed to save you time ⏱️. 🔨 Track every YOLOv5 training run in the experiment manager diff --git a/utils/loggers/clearml/__init__.py b/utils/loggers/clearml/__init__.py index e69de29bb2d1..77a19dcf0f80 100644 --- a/utils/loggers/clearml/__init__.py +++ b/utils/loggers/clearml/__init__.py @@ -0,0 +1 @@ +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license diff --git a/utils/loggers/clearml/clearml_utils.py b/utils/loggers/clearml/clearml_utils.py index fc19c8cfe22a..67553bdb4a43 100644 --- a/utils/loggers/clearml/clearml_utils.py +++ b/utils/loggers/clearml/clearml_utils.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """Main Logger class for ClearML experiment tracking.""" import glob @@ -41,11 +41,9 @@ def construct_dataset(clearml_info_string): with open(yaml_filenames[0]) as f: dataset_definition = yaml.safe_load(f) - assert set( - dataset_definition.keys() - ).issuperset( - {"train", "test", "val", "nc", "names"} - ), "The right keys were not found in the yaml file, make sure it at least has the following keys: ('train', 'test', 'val', 'nc', 'names')" + assert set(dataset_definition.keys()).issuperset({"train", "test", "val", "nc", "names"}), ( + "The right keys were not found in the yaml file, make sure it at least has the following keys: ('train', 'test', 'val', 'nc', 'names')" + ) data_dict = { "train": ( diff --git a/utils/loggers/clearml/hpo.py b/utils/loggers/clearml/hpo.py index 5a9be757aac0..099a87fcad76 100644 --- a/utils/loggers/clearml/hpo.py +++ b/utils/loggers/clearml/hpo.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license from clearml import Task diff --git a/utils/loggers/comet/__init__.py b/utils/loggers/comet/__init__.py index f4940a1a2c67..1ad44b9a320c 100644 --- a/utils/loggers/comet/__init__.py +++ b/utils/loggers/comet/__init__.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license import glob import json diff --git a/utils/loggers/comet/comet_utils.py b/utils/loggers/comet/comet_utils.py index cf936ab48453..1dc572cca4c2 100644 --- a/utils/loggers/comet/comet_utils.py +++ b/utils/loggers/comet/comet_utils.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license import logging import os diff --git a/utils/loggers/comet/hpo.py b/utils/loggers/comet/hpo.py index c225ebbd0484..dc171e261516 100644 --- a/utils/loggers/comet/hpo.py +++ b/utils/loggers/comet/hpo.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license import argparse import json diff --git a/utils/loggers/wandb/__init__.py b/utils/loggers/wandb/__init__.py index e69de29bb2d1..77a19dcf0f80 100644 --- a/utils/loggers/wandb/__init__.py +++ b/utils/loggers/wandb/__init__.py @@ -0,0 +1 @@ +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index 102228a67176..83c5ee130fbb 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # WARNING ⚠️ wandb is deprecated and will be 
removed in future release. # See supported integrations at https://github.com/ultralytics/yolov5#integrations diff --git a/utils/loss.py b/utils/loss.py index c2201833970a..35ce6d7924ed 100644 --- a/utils/loss.py +++ b/utils/loss.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """Loss functions.""" import torch diff --git a/utils/metrics.py b/utils/metrics.py index e8dc9df4c168..03013f4e3bde 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """Model validation metrics.""" import math diff --git a/utils/plots.py b/utils/plots.py index 44d1035104a9..f70775f26af0 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """Plotting utils.""" import contextlib diff --git a/utils/segment/__init__.py b/utils/segment/__init__.py index e69de29bb2d1..77a19dcf0f80 100644 --- a/utils/segment/__init__.py +++ b/utils/segment/__init__.py @@ -0,0 +1 @@ +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license diff --git a/utils/segment/augmentations.py b/utils/segment/augmentations.py index 302e41379999..14a81cf7d44f 100644 --- a/utils/segment/augmentations.py +++ b/utils/segment/augmentations.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """Image augmentation functions.""" import math diff --git a/utils/segment/dataloaders.py b/utils/segment/dataloaders.py index ee6e6186d140..2363d7265bed 100644 --- a/utils/segment/dataloaders.py +++ b/utils/segment/dataloaders.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """Dataloaders.""" import os diff --git a/utils/segment/general.py b/utils/segment/general.py index c9dfaaabe4d5..6a71c2551409 100644 --- a/utils/segment/general.py +++ b/utils/segment/general.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license import cv2 import numpy as np diff --git a/utils/segment/loss.py b/utils/segment/loss.py index f63b262b5a66..6ef590620621 100644 --- a/utils/segment/loss.py +++ b/utils/segment/loss.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license import torch import torch.nn as nn diff --git a/utils/segment/metrics.py b/utils/segment/metrics.py index 091b5b16a5fa..3bb7aeec42f3 100644 --- a/utils/segment/metrics.py +++ b/utils/segment/metrics.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """Model validation metrics.""" import numpy as np diff --git a/utils/segment/plots.py b/utils/segment/plots.py index f5b81711cc93..5619e9de946c 100644 --- a/utils/segment/plots.py +++ b/utils/segment/plots.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license import contextlib import math diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 53e707607915..8b3c43b5bdf4 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """PyTorch utils.""" import 
math @@ -121,9 +121,9 @@ def select_device(device="", batch_size=0, newline=True): os.environ["CUDA_VISIBLE_DEVICES"] = "-1" # force torch.cuda.is_available() = False elif device: # non-cpu device requested os.environ["CUDA_VISIBLE_DEVICES"] = device # set environment variable - must be before assert is_available() - assert torch.cuda.is_available() and torch.cuda.device_count() >= len( - device.replace(",", "") - ), f"Invalid CUDA '--device {device}' requested, use '--device cpu' or pass valid CUDA device(s)" + assert torch.cuda.is_available() and torch.cuda.device_count() >= len(device.replace(",", "")), ( + f"Invalid CUDA '--device {device}' requested, use '--device cpu' or pass valid CUDA device(s)" + ) if not cpu and not mps and torch.cuda.is_available(): # prefer GPU if available devices = device.split(",") if device else "0" # range(torch.cuda.device_count()) # i.e. 0,1,6,7 diff --git a/utils/triton.py b/utils/triton.py index 3230ecd8e615..88c0bd7b2a5e 100644 --- a/utils/triton.py +++ b/utils/triton.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """Utils to interact with the Triton Inference Server.""" import typing diff --git a/val.py b/val.py index b8db6122f54a..600353b1c83d 100644 --- a/val.py +++ b/val.py @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license """ Validate a trained YOLOv5 detection model on a detection dataset. From de62f93c210aaaffd710f9afe3b43ab624f7a21b Mon Sep 17 00:00:00 2001 From: Paula Derrenger <107626595+pderrenger@users.noreply.github.com> Date: Wed, 15 Jan 2025 17:53:24 +0100 Subject: [PATCH 1975/1976] Standardize license headers in TOML/YAML files (#13491) Refactor code for speed and clarity Co-authored-by: Glenn Jocher --- .github/ISSUE_TEMPLATE/bug-report.yml | 2 +- .github/ISSUE_TEMPLATE/config.yml | 2 +- .github/ISSUE_TEMPLATE/feature-request.yml | 2 +- .github/ISSUE_TEMPLATE/question.yml | 2 +- .github/dependabot.yml | 3 ++- .github/workflows/ci-testing.yml | 3 ++- .github/workflows/cla.yml | 3 ++- .github/workflows/docker.yml | 3 ++- .github/workflows/format.yml | 3 ++- .github/workflows/links.yml | 3 ++- .github/workflows/merge-main-into-prs.yml | 3 ++- .github/workflows/stale.yml | 2 +- data/Argoverse.yaml | 3 ++- data/GlobalWheat2020.yaml | 3 ++- data/ImageNet.yaml | 3 ++- data/ImageNet10.yaml | 3 ++- data/ImageNet100.yaml | 3 ++- data/ImageNet1000.yaml | 3 ++- data/Objects365.yaml | 3 ++- data/SKU-110K.yaml | 3 ++- data/VOC.yaml | 3 ++- data/VisDrone.yaml | 3 ++- data/coco.yaml | 3 ++- data/coco128-seg.yaml | 3 ++- data/coco128.yaml | 3 ++- data/hyps/hyp.Objects365.yaml | 3 ++- data/hyps/hyp.VOC.yaml | 3 ++- data/hyps/hyp.no-augmentation.yaml | 3 ++- data/hyps/hyp.scratch-high.yaml | 3 ++- data/hyps/hyp.scratch-low.yaml | 3 ++- data/hyps/hyp.scratch-med.yaml | 3 ++- data/xView.yaml | 3 ++- models/hub/anchors.yaml | 3 ++- models/hub/yolov3-spp.yaml | 2 +- models/hub/yolov3-tiny.yaml | 2 +- models/hub/yolov3.yaml | 2 +- models/hub/yolov5-bifpn.yaml | 2 +- models/hub/yolov5-fpn.yaml | 2 +- models/hub/yolov5-p2.yaml | 2 +- models/hub/yolov5-p34.yaml | 2 +- models/hub/yolov5-p6.yaml | 2 +- models/hub/yolov5-p7.yaml | 2 +- models/hub/yolov5-panet.yaml | 2 +- models/hub/yolov5l6.yaml | 2 +- models/hub/yolov5m6.yaml | 2 +- models/hub/yolov5n6.yaml | 2 +- models/hub/yolov5s-LeakyReLU.yaml | 2 +- models/hub/yolov5s-ghost.yaml | 2 +- models/hub/yolov5s-transformer.yaml | 2 +- models/hub/yolov5s6.yaml 
| 2 +- models/hub/yolov5x6.yaml | 2 +- models/segment/yolov5l-seg.yaml | 2 +- models/segment/yolov5m-seg.yaml | 2 +- models/segment/yolov5n-seg.yaml | 2 +- models/segment/yolov5s-seg.yaml | 2 +- models/segment/yolov5x-seg.yaml | 2 +- models/yolov5l.yaml | 2 +- models/yolov5m.yaml | 2 +- models/yolov5n.yaml | 2 +- models/yolov5s.yaml | 2 +- models/yolov5x.yaml | 2 +- pyproject.toml | 2 +- utils/google_app_engine/app.yaml | 2 +- 63 files changed, 91 insertions(+), 63 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml index 76b426a195e4..3bd94176115f 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.yml +++ b/.github/ISSUE_TEMPLATE/bug-report.yml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license name: 🐛 Bug Report # title: " " diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index e23dfcf9c56b..d05d057dc018 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license blank_issues_enabled: true contact_links: diff --git a/.github/ISSUE_TEMPLATE/feature-request.yml b/.github/ISSUE_TEMPLATE/feature-request.yml index 9282e62d2a57..bf201809b44b 100644 --- a/.github/ISSUE_TEMPLATE/feature-request.yml +++ b/.github/ISSUE_TEMPLATE/feature-request.yml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license name: 🚀 Feature Request description: Suggest a YOLOv5 idea diff --git a/.github/ISSUE_TEMPLATE/question.yml b/.github/ISSUE_TEMPLATE/question.yml index 5b62af675e2a..0787bbea9ee3 100644 --- a/.github/ISSUE_TEMPLATE/question.yml +++ b/.github/ISSUE_TEMPLATE/question.yml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license name: ❓ Question description: Ask a YOLOv5 question diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 061d61f1bc24..233db72b026d 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -1,4 +1,5 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # Dependabot for package version updates # https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates diff --git a/.github/workflows/ci-testing.yml b/.github/workflows/ci-testing.yml index 89cc8e9a4aeb..ae24eb11ce7d 100644 --- a/.github/workflows/ci-testing.yml +++ b/.github/workflows/ci-testing.yml @@ -1,4 +1,5 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # YOLOv5 Continuous Integration (CI) GitHub Actions tests name: YOLOv5 CI diff --git a/.github/workflows/cla.yml b/.github/workflows/cla.yml index f725f5683d1b..61b944167537 100644 --- a/.github/workflows/cla.yml +++ b/.github/workflows/cla.yml @@ -1,4 +1,5 @@ -# Ultralytics YOLO 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # Ultralytics Contributor License Agreement (CLA) action https://docs.ultralytics.com/help/CLA # This workflow automatically requests Pull Requests (PR) authors to sign the Ultralytics CLA before PRs can be merged diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 2cd1c13f78b0..b43c2bc280fe 100644 --- a/.github/workflows/docker.yml +++ 
b/.github/workflows/docker.yml @@ -1,4 +1,5 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # Builds ultralytics/yolov5:latest images on DockerHub https://hub.docker.com/r/ultralytics/yolov5 name: Publish Docker Images diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml index b9b9107032f3..10891c87834b 100644 --- a/.github/workflows/format.yml +++ b/.github/workflows/format.yml @@ -1,4 +1,5 @@ -# Ultralytics 🚀 - AGPL-3.0 License https://ultralytics.com/license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # Ultralytics Actions https://github.com/ultralytics/actions # This workflow automatically formats code and documentation in PRs to official Ultralytics standards diff --git a/.github/workflows/links.yml b/.github/workflows/links.yml index 64f266929308..07bf6db74e32 100644 --- a/.github/workflows/links.yml +++ b/.github/workflows/links.yml @@ -1,4 +1,5 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # Continuous Integration (CI) GitHub Actions tests broken link checker using https://github.com/lycheeverse/lychee # Ignores the following status codes to reduce false positives: # - 403(OpenVINO, 'forbidden') diff --git a/.github/workflows/merge-main-into-prs.yml b/.github/workflows/merge-main-into-prs.yml index 29c0f9328f4f..034c6c143597 100644 --- a/.github/workflows/merge-main-into-prs.yml +++ b/.github/workflows/merge-main-into-prs.yml @@ -1,4 +1,5 @@ -# Ultralytics YOLO 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # Automatically merges repository 'main' branch into all open PRs to keep them up-to-date # Action runs on updates to main branch so when one PR merges to main all others update diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 374bc01ab0b7..fda092a44236 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license name: Close stale issues on: diff --git a/data/Argoverse.yaml b/data/Argoverse.yaml index 366552ea4f31..651b6431bbda 100644 --- a/data/Argoverse.yaml +++ b/data/Argoverse.yaml @@ -1,4 +1,5 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ by Argo AI # Example usage: python train.py --data Argoverse.yaml # parent diff --git a/data/GlobalWheat2020.yaml b/data/GlobalWheat2020.yaml index acb88290f7e3..eb25871c6585 100644 --- a/data/GlobalWheat2020.yaml +++ b/data/GlobalWheat2020.yaml @@ -1,4 +1,5 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # Global Wheat 2020 dataset http://www.global-wheat.com/ by University of Saskatchewan # Example usage: python train.py --data GlobalWheat2020.yaml # parent diff --git a/data/ImageNet.yaml b/data/ImageNet.yaml index 979a0e4de337..a3cf694bc580 100644 --- a/data/ImageNet.yaml +++ b/data/ImageNet.yaml @@ -1,4 +1,5 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # ImageNet-1k dataset https://www.image-net.org/index.php by Stanford University # Simplified class names from https://github.com/anishathalye/imagenet-simple-labels # Example usage: python 
classify/train.py --data imagenet diff --git a/data/ImageNet10.yaml b/data/ImageNet10.yaml index 2189def7d457..e50e58888e1d 100644 --- a/data/ImageNet10.yaml +++ b/data/ImageNet10.yaml @@ -1,4 +1,5 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # ImageNet-1k dataset https://www.image-net.org/index.php by Stanford University # Simplified class names from https://github.com/anishathalye/imagenet-simple-labels # Example usage: python classify/train.py --data imagenet diff --git a/data/ImageNet100.yaml b/data/ImageNet100.yaml index 560cdecdbae9..e3891bcb4cef 100644 --- a/data/ImageNet100.yaml +++ b/data/ImageNet100.yaml @@ -1,4 +1,5 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # ImageNet-1k dataset https://www.image-net.org/index.php by Stanford University # Simplified class names from https://github.com/anishathalye/imagenet-simple-labels # Example usage: python classify/train.py --data imagenet diff --git a/data/ImageNet1000.yaml b/data/ImageNet1000.yaml index aa17e9e059cf..8943d3312641 100644 --- a/data/ImageNet1000.yaml +++ b/data/ImageNet1000.yaml @@ -1,4 +1,5 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # ImageNet-1k dataset https://www.image-net.org/index.php by Stanford University # Simplified class names from https://github.com/anishathalye/imagenet-simple-labels # Example usage: python classify/train.py --data imagenet diff --git a/data/Objects365.yaml b/data/Objects365.yaml index f1f0a1ae4891..248b6c7750f3 100644 --- a/data/Objects365.yaml +++ b/data/Objects365.yaml @@ -1,4 +1,5 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # Objects365 dataset https://www.objects365.org/ by Megvii # Example usage: python train.py --data Objects365.yaml # parent diff --git a/data/SKU-110K.yaml b/data/SKU-110K.yaml index b012bec3128b..695b89cd4375 100644 --- a/data/SKU-110K.yaml +++ b/data/SKU-110K.yaml @@ -1,4 +1,5 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # SKU-110K retail items dataset https://github.com/eg4000/SKU110K_CVPR19 by Trax Retail # Example usage: python train.py --data SKU-110K.yaml # parent diff --git a/data/VOC.yaml b/data/VOC.yaml index 227d91d76e10..9dad47777a07 100644 --- a/data/VOC.yaml +++ b/data/VOC.yaml @@ -1,4 +1,5 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC by University of Oxford # Example usage: python train.py --data VOC.yaml # parent diff --git a/data/VisDrone.yaml b/data/VisDrone.yaml index 20ff1d39cdd8..637433b5098e 100644 --- a/data/VisDrone.yaml +++ b/data/VisDrone.yaml @@ -1,4 +1,5 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # VisDrone2019-DET dataset https://github.com/VisDrone/VisDrone-Dataset by Tianjin University # Example usage: python train.py --data VisDrone.yaml # parent diff --git a/data/coco.yaml b/data/coco.yaml index 816efa5cf180..7f872e8ca6de 100644 --- a/data/coco.yaml +++ b/data/coco.yaml @@ -1,4 +1,5 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # COCO 2017 dataset http://cocodataset.org by Microsoft # Example usage: python train.py --data 
coco.yaml # parent diff --git a/data/coco128-seg.yaml b/data/coco128-seg.yaml index 5b00f52a9c9b..fa618d87e705 100644 --- a/data/coco128-seg.yaml +++ b/data/coco128-seg.yaml @@ -1,4 +1,5 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # COCO128-seg dataset https://www.kaggle.com/datasets/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics # Example usage: python train.py --data coco128.yaml # parent diff --git a/data/coco128.yaml b/data/coco128.yaml index cf3d27b87313..e81fb1ff4bf0 100644 --- a/data/coco128.yaml +++ b/data/coco128.yaml @@ -1,4 +1,5 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # COCO128 dataset https://www.kaggle.com/datasets/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics # Example usage: python train.py --data coco128.yaml # parent diff --git a/data/hyps/hyp.Objects365.yaml b/data/hyps/hyp.Objects365.yaml index 7a6c507c73bf..7b26a053bba4 100644 --- a/data/hyps/hyp.Objects365.yaml +++ b/data/hyps/hyp.Objects365.yaml @@ -1,4 +1,5 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # Hyperparameters for Objects365 training # python train.py --weights yolov5m.pt --data Objects365.yaml --evolve # See Hyperparameter Evolution tutorial for details https://github.com/ultralytics/yolov5#tutorials diff --git a/data/hyps/hyp.VOC.yaml b/data/hyps/hyp.VOC.yaml index c04c63e21ea0..378bc4030932 100644 --- a/data/hyps/hyp.VOC.yaml +++ b/data/hyps/hyp.VOC.yaml @@ -1,4 +1,5 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # Hyperparameters for VOC training # python train.py --batch 128 --weights yolov5m6.pt --data VOC.yaml --epochs 50 --img 512 --hyp hyp.scratch-med.yaml --evolve # See Hyperparameter Evolution tutorial for details https://github.com/ultralytics/yolov5#tutorials diff --git a/data/hyps/hyp.no-augmentation.yaml b/data/hyps/hyp.no-augmentation.yaml index adc360bb83f8..08378961f51b 100644 --- a/data/hyps/hyp.no-augmentation.yaml +++ b/data/hyps/hyp.no-augmentation.yaml @@ -1,4 +1,5 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # Hyperparameters when using Albumentations frameworks # python train.py --hyp hyp.no-augmentation.yaml # See https://github.com/ultralytics/yolov5/pull/3882 for YOLOv5 + Albumentations Usage examples diff --git a/data/hyps/hyp.scratch-high.yaml b/data/hyps/hyp.scratch-high.yaml index 3e913e36df16..74536c2973dc 100644 --- a/data/hyps/hyp.scratch-high.yaml +++ b/data/hyps/hyp.scratch-high.yaml @@ -1,4 +1,5 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # Hyperparameters for high-augmentation COCO training from scratch # python train.py --batch 32 --cfg yolov5m6.yaml --weights '' --data coco.yaml --img 1280 --epochs 300 # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials diff --git a/data/hyps/hyp.scratch-low.yaml b/data/hyps/hyp.scratch-low.yaml index ff0d1e7ff1ac..e89b3ba4e7c1 100644 --- a/data/hyps/hyp.scratch-low.yaml +++ b/data/hyps/hyp.scratch-low.yaml @@ -1,4 +1,5 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # Hyperparameters for low-augmentation COCO training from scratch # python train.py --batch 
64 --cfg yolov5n6.yaml --weights '' --data coco.yaml --img 640 --epochs 300 --linear # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials diff --git a/data/hyps/hyp.scratch-med.yaml b/data/hyps/hyp.scratch-med.yaml index c2fba1fc2b80..7dfd2f306062 100644 --- a/data/hyps/hyp.scratch-med.yaml +++ b/data/hyps/hyp.scratch-med.yaml @@ -1,4 +1,5 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # Hyperparameters for medium-augmentation COCO training from scratch # python train.py --batch 32 --cfg yolov5m6.yaml --weights '' --data coco.yaml --img 1280 --epochs 300 # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials diff --git a/data/xView.yaml b/data/xView.yaml index 407159831e7c..6bea7637e816 100644 --- a/data/xView.yaml +++ b/data/xView.yaml @@ -1,4 +1,5 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # DIUx xView 2018 Challenge https://challenge.xviewdataset.org by U.S. National Geospatial-Intelligence Agency (NGA) # -------- DOWNLOAD DATA MANUALLY and jar xf val_images.zip to 'datasets/xView' before running train command! -------- # Example usage: python train.py --data xView.yaml diff --git a/models/hub/anchors.yaml b/models/hub/anchors.yaml index c8089311b267..0f3e288e16da 100644 --- a/models/hub/anchors.yaml +++ b/models/hub/anchors.yaml @@ -1,4 +1,5 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license + # Default anchors for COCO data # P5 ------------------------------------------------------------------------------------------------------------------- diff --git a/models/hub/yolov3-spp.yaml b/models/hub/yolov3-spp.yaml index 0e073667bf70..34c2d517c6d0 100644 --- a/models/hub/yolov3-spp.yaml +++ b/models/hub/yolov3-spp.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov3-tiny.yaml b/models/hub/yolov3-tiny.yaml index 0a74fff715f8..f1861012ea27 100644 --- a/models/hub/yolov3-tiny.yaml +++ b/models/hub/yolov3-tiny.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov3.yaml b/models/hub/yolov3.yaml index ce4a980c8200..15cb68a83c80 100644 --- a/models/hub/yolov3.yaml +++ b/models/hub/yolov3.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5-bifpn.yaml b/models/hub/yolov5-bifpn.yaml index bf05e434c6a2..fba3fe5f7d6e 100644 --- a/models/hub/yolov5-bifpn.yaml +++ b/models/hub/yolov5-bifpn.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5-fpn.yaml b/models/hub/yolov5-fpn.yaml index dcfdd14a7d2c..4411d1cc0373 100644 --- a/models/hub/yolov5-fpn.yaml +++ b/models/hub/yolov5-fpn.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5-p2.yaml b/models/hub/yolov5-p2.yaml index 2626e734835e..e47d39e4eb4b 
100644 --- a/models/hub/yolov5-p2.yaml +++ b/models/hub/yolov5-p2.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5-p34.yaml b/models/hub/yolov5-p34.yaml index fba35ec10a1e..17e46f7bdc61 100644 --- a/models/hub/yolov5-p34.yaml +++ b/models/hub/yolov5-p34.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5-p6.yaml b/models/hub/yolov5-p6.yaml index c997df2db505..dbc1ae4d0b53 100644 --- a/models/hub/yolov5-p6.yaml +++ b/models/hub/yolov5-p6.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5-p7.yaml b/models/hub/yolov5-p7.yaml index 14e6ce05d791..2c1706992e60 100644 --- a/models/hub/yolov5-p7.yaml +++ b/models/hub/yolov5-p7.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5-panet.yaml b/models/hub/yolov5-panet.yaml index f0857f92d53a..68a717566134 100644 --- a/models/hub/yolov5-panet.yaml +++ b/models/hub/yolov5-panet.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5l6.yaml b/models/hub/yolov5l6.yaml index 05501a9d134b..223f681bf7bd 100644 --- a/models/hub/yolov5l6.yaml +++ b/models/hub/yolov5l6.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5m6.yaml b/models/hub/yolov5m6.yaml index 1512e2b6e08d..6878d8996080 100644 --- a/models/hub/yolov5m6.yaml +++ b/models/hub/yolov5m6.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5n6.yaml b/models/hub/yolov5n6.yaml index 11350413ecf8..0d454c9ca31f 100644 --- a/models/hub/yolov5n6.yaml +++ b/models/hub/yolov5n6.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5s-LeakyReLU.yaml b/models/hub/yolov5s-LeakyReLU.yaml index 6e9d4a8820e2..61d6d33176f4 100644 --- a/models/hub/yolov5s-LeakyReLU.yaml +++ b/models/hub/yolov5s-LeakyReLU.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5s-ghost.yaml b/models/hub/yolov5s-ghost.yaml index cc4336948760..53695ae48abc 100644 --- a/models/hub/yolov5s-ghost.yaml +++ b/models/hub/yolov5s-ghost.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5s-transformer.yaml b/models/hub/yolov5s-transformer.yaml index 1b2d62c5a3fe..213e4dac1364 100644 --- a/models/hub/yolov5s-transformer.yaml +++ b/models/hub/yolov5s-transformer.yaml @@ -1,4 +1,4 @@ -# 
Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5s6.yaml b/models/hub/yolov5s6.yaml index 2a4c1162575b..6e69964a95a1 100644 --- a/models/hub/yolov5s6.yaml +++ b/models/hub/yolov5s6.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git a/models/hub/yolov5x6.yaml b/models/hub/yolov5x6.yaml index 0c8f29e600c3..33a8525f109d 100644 --- a/models/hub/yolov5x6.yaml +++ b/models/hub/yolov5x6.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git a/models/segment/yolov5l-seg.yaml b/models/segment/yolov5l-seg.yaml index de430f4fbdf3..824e8aec2248 100644 --- a/models/segment/yolov5l-seg.yaml +++ b/models/segment/yolov5l-seg.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git a/models/segment/yolov5m-seg.yaml b/models/segment/yolov5m-seg.yaml index 288577778252..c3c1e668af46 100644 --- a/models/segment/yolov5m-seg.yaml +++ b/models/segment/yolov5m-seg.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git a/models/segment/yolov5n-seg.yaml b/models/segment/yolov5n-seg.yaml index faf5228fd3ef..2461e4160f23 100644 --- a/models/segment/yolov5n-seg.yaml +++ b/models/segment/yolov5n-seg.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git a/models/segment/yolov5s-seg.yaml b/models/segment/yolov5s-seg.yaml index a199f1d82388..fac7664a3544 100644 --- a/models/segment/yolov5s-seg.yaml +++ b/models/segment/yolov5s-seg.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git a/models/segment/yolov5x-seg.yaml b/models/segment/yolov5x-seg.yaml index 75f426386e3a..d3c457a6db57 100644 --- a/models/segment/yolov5x-seg.yaml +++ b/models/segment/yolov5x-seg.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git a/models/yolov5l.yaml b/models/yolov5l.yaml index 7cac7ead20aa..c6c878a10ea5 100644 --- a/models/yolov5l.yaml +++ b/models/yolov5l.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git a/models/yolov5m.yaml b/models/yolov5m.yaml index 820e6070499c..41d9c223a2ac 100644 --- a/models/yolov5m.yaml +++ b/models/yolov5m.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of classes diff --git a/models/yolov5n.yaml b/models/yolov5n.yaml index d3b84ace2b76..588674923d90 100644 --- a/models/yolov5n.yaml +++ b/models/yolov5n.yaml @@ -1,4 +1,4 @@ -# Ultralytics YOLOv5 🚀, AGPL-3.0 license +# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license # Parameters nc: 80 # number of 
classes
diff --git a/models/yolov5s.yaml b/models/yolov5s.yaml
index 090cb67c2bec..11ff79001899 100644
--- a/models/yolov5s.yaml
+++ b/models/yolov5s.yaml
@@ -1,4 +1,4 @@
-# Ultralytics YOLOv5 🚀, AGPL-3.0 license
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

 # Parameters
 nc: 80 # number of classes
diff --git a/models/yolov5x.yaml b/models/yolov5x.yaml
index 8c1a6be1b7a8..817b4f911849 100644
--- a/models/yolov5x.yaml
+++ b/models/yolov5x.yaml
@@ -1,4 +1,4 @@
-# Ultralytics YOLOv5 🚀, AGPL-3.0 license
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

 # Parameters
 nc: 80 # number of classes
diff --git a/pyproject.toml b/pyproject.toml
index 2bcf6592988d..9680857af1d7 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,4 +1,4 @@
-# Ultralytics YOLOv5 🚀, AGPL-3.0 license
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

 # Overview:
 # This pyproject.toml file manages the build, packaging, and distribution of the Ultralytics library.
diff --git a/utils/google_app_engine/app.yaml b/utils/google_app_engine/app.yaml
index 4c1751f55429..6fb9d5f9dbb2 100644
--- a/utils/google_app_engine/app.yaml
+++ b/utils/google_app_engine/app.yaml
@@ -1,4 +1,4 @@
-# Ultralytics YOLOv5 🚀, AGPL-3.0 license
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

 runtime: custom
 env: flex

From 5cdad8922c83b0ed49a0173cd1a8b0739acbb336 Mon Sep 17 00:00:00 2001
From: Glenn Jocher
Date: Thu, 30 Jan 2025 17:42:46 +0100
Subject: [PATCH 1976/1976] Update links.yml (#13503)

Signed-off-by: Glenn Jocher
---
 .github/workflows/links.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/links.yml b/.github/workflows/links.yml
index 07bf6db74e32..2da3c066f3d7 100644
--- a/.github/workflows/links.yml
+++ b/.github/workflows/links.yml
@@ -44,7 +44,7 @@ jobs:
           --github-token ${{ secrets.GITHUB_TOKEN }} \
           --header "User-Agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.6478.183 Safari/537.36" \
           './**/*.md' \
-          './**/*.html'
+          './**/*.html' | tee -a $GITHUB_STEP_SUMMARY

       - name: Test Markdown, HTML, YAML, Python and Notebook links with retry
         if: github.event_name == 'workflow_dispatch'
@@ -69,4 +69,4 @@
           './**/*.yml' \
           './**/*.yaml' \
           './**/*.py' \
-          './**/*.ipynb'
+          './**/*.ipynb' | tee -a $GITHUB_STEP_SUMMARY
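The '| tee -a $GITHUB_STEP_SUMMARY' additions in #13503 publish the lychee output on the job's summary page while keeping it in the raw log, since tee writes to both destinations. The same runner-provided file can be appended from any step; a small Python sketch, assuming it runs inside GitHub Actions where GITHUB_STEP_SUMMARY is set (the heading text is illustrative):

import os

# GITHUB_STEP_SUMMARY is set by the Actions runner to a per-step file whose
# Markdown contents are rendered on the job's summary page after the run.
with open(os.environ["GITHUB_STEP_SUMMARY"], "a", encoding="utf-8") as f:
    f.write("### Link check\n\nAll Markdown and HTML links resolved.\n")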