Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

MMDet MaskRCNN ResNet50/SwinTransformer Decouple #3281

Merged
Changes from 1 commit
Commits
Show all changes
72 commits
Select commit Hold shift + click to select a range
fa0d462
migrate mmdet maskrcnn modules
eugene123tw Apr 5, 2024
763ce7d
style reformat
eugene123tw Apr 5, 2024
5ace6b0
style reformat
eugene123tw Apr 5, 2024
b2b6e0c
style reformat
eugene123tw Apr 5, 2024
a305c7c
ignore mypy, ruff errors
eugene123tw Apr 8, 2024
6b682b7
skip mypy error
eugene123tw Apr 8, 2024
727cf2b
update
eugene123tw Apr 8, 2024
22f8f81
fix loss
eugene123tw Apr 8, 2024
08d766f
add maskrcnn
eugene123tw Apr 8, 2024
873a101
update import
eugene123tw Apr 8, 2024
bfca5f0
update import
eugene123tw Apr 8, 2024
47f3744
add necks
eugene123tw Apr 8, 2024
7bb7f39
update
eugene123tw Apr 8, 2024
d37f8a1
update
eugene123tw Apr 8, 2024
abd1f21
add cross-entropy loss
eugene123tw Apr 9, 2024
4978171
style changes
eugene123tw Apr 9, 2024
330f721
mypy changes and style changes
eugene123tw Apr 10, 2024
92dc0bf
update style
eugene123tw Apr 11, 2024
87a2415
Merge branch 'develop' into eugene/CVS-137823-mmdet-maskrcnn-decouple
eugene123tw Apr 11, 2024
c4dec94
remove box structures
eugene123tw Apr 11, 2024
5723ec2
add resnet
eugene123tw Apr 11, 2024
47de4f1
update
eugene123tw Apr 11, 2024
f5a51da
modify resnet
eugene123tw Apr 15, 2024
63f46c4
add annotation
eugene123tw Apr 15, 2024
46b37d5
style changes
eugene123tw Apr 15, 2024
cef1e24
update
eugene123tw Apr 15, 2024
655baea
fix all mypy issues
eugene123tw Apr 15, 2024
b1ed150
fix mypy issues
eugene123tw Apr 15, 2024
27b6a4a
style changes
eugene123tw Apr 16, 2024
e87cfa9
remove unused losses
eugene123tw Apr 16, 2024
c2c2394
remove focal_loss_pb
eugene123tw Apr 16, 2024
edd85e0
fix all ruff and mypy issues
eugene123tw Apr 16, 2024
742b6fd
fix conflicts
eugene123tw Apr 16, 2024
194a6c2
style change
eugene123tw Apr 16, 2024
c1938de
update
eugene123tw Apr 16, 2024
734a459
update license
eugene123tw Apr 16, 2024
93fcd79
update
eugene123tw Apr 16, 2024
1d0926d
remove duplicates
eugene123tw Apr 17, 2024
1238d28
remove as F
eugene123tw Apr 17, 2024
55d77f5
remove as F
eugene123tw Apr 17, 2024
1598929
remove mmdet mask structures
eugene123tw Apr 17, 2024
498b750
remove duplicates
eugene123tw Apr 17, 2024
a0f52de
style changes
eugene123tw Apr 17, 2024
549d0ef
add new test
eugene123tw Apr 17, 2024
0a074ae
test style change
eugene123tw Apr 17, 2024
9106bc1
fix test
eugene123tw Apr 17, 2024
8197825
Merge branch 'develop' into eugene/CVS-137823-mmdet-maskrcnn-decouple
eugene123tw Apr 17, 2024
07cd25e
change device for unit test
eugene123tw Apr 17, 2024
ab07675
add deployment files
eugene123tw Apr 17, 2024
70717ce
remove deployment from inst-seg
eugene123tw Apr 17, 2024
e5027d9
update deployment
eugene123tw Apr 18, 2024
03e26d3
add mmdeploy maskrcnn opset
eugene123tw Apr 18, 2024
bbeaa32
fix linter
eugene123tw Apr 18, 2024
478158a
update test
eugene123tw Apr 18, 2024
052a582
update test
eugene123tw Apr 18, 2024
28cea6f
update test
eugene123tw Apr 18, 2024
ed7275e
Merge branch 'develop' into eugene/CVS-137823-mmdet-maskrcnn-decouple
eugene123tw Apr 22, 2024
04cd223
replace mmcv.cnn module
eugene123tw Apr 22, 2024
11ae260
remove upsample building
eugene123tw Apr 22, 2024
d8a78ca
remove upsample building
eugene123tw Apr 22, 2024
34ea7d7
use batch_nms from otx
eugene123tw Apr 22, 2024
617e1ac
add swintransformer
eugene123tw Apr 22, 2024
5a6737f
add transformers
eugene123tw Apr 22, 2024
fe34145
add swin transformer
eugene123tw Apr 23, 2024
4dd8bdc
style changes
eugene123tw Apr 23, 2024
a6de9b4
merge upstream
eugene123tw Apr 23, 2024
1b199a0
solve conflicts
eugene123tw Apr 23, 2024
c332430
update instance_segmentation/maskrcnn.py
eugene123tw Apr 23, 2024
6d92199
update nms
eugene123tw Apr 23, 2024
4d75001
fix xai
eugene123tw Apr 24, 2024
7b4b43e
change rotate detection recipe
eugene123tw Apr 24, 2024
0b2a326
fix swint recipe
eugene123tw Apr 24, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
fix all mypy issues
  • Loading branch information
eugene123tw committed Apr 15, 2024

Verified

This commit was created on GitHub.com and signed with GitHub’s verified signature. The key has expired.
commit 655baeab699b305bb5de0fe0a856feaf3cf7c040
Original file line number Diff line number Diff line change
@@ -171,7 +171,7 @@ def predict_by_feat(
cls_scores: tuple[Tensor],
bbox_preds: tuple[Tensor],
batch_img_metas: list[dict],
rcnn_test_cfg: ConfigDict | None = None,
rcnn_test_cfg: ConfigDict,
rescale: bool = False,
) -> InstanceList:
"""Transform a batch of output features extracted from the head into bbox results.
@@ -224,8 +224,8 @@ def _predict_by_feat_single(
cls_score: Tensor,
bbox_pred: Tensor,
img_meta: dict,
rcnn_test_cfg: ConfigDict,
rescale: bool = False,
rcnn_test_cfg: ConfigDict | None = None,
) -> InstanceData:
"""Transform a single image's features extracted from the head into bbox results.

@@ -278,13 +278,13 @@ def _predict_by_feat_single(
if rescale and bboxes.size(0) > 0:
assert img_meta.get("scale_factor") is not None
scale_factor = [1 / s for s in img_meta["scale_factor"]]
bboxes = scale_boxes(bboxes, scale_factor)
bboxes = scale_boxes(bboxes, scale_factor) # type: ignore

# Get the inside tensor when `bboxes` is a box type
box_dim = bboxes.size(-1)
bboxes = bboxes.view(num_rois, -1)

det_bboxes, det_labels = multiclass_nms(
det_bboxes, det_labels = multiclass_nms( # type: ignore
bboxes,
scores,
rcnn_test_cfg.score_thr,
Original file line number Diff line number Diff line change
@@ -37,7 +37,7 @@ def __init__(
*args,
**kwargs,
) -> None:
super().__init__(*args, init_cfg=init_cfg, **kwargs)
super().__init__(*args, init_cfg=init_cfg, **kwargs) # type: ignore
assert num_shared_convs + num_shared_fcs + num_cls_convs + num_cls_fcs + num_reg_convs + num_reg_fcs > 0
if num_cls_convs > 0 or num_reg_convs > 0:
assert num_shared_fcs == 0
@@ -137,12 +137,11 @@ def _add_conv_fc_branch(
last_layer_dim = self.fc_out_channels
return branch_convs, branch_fcs, last_layer_dim

def forward(self, x: tuple[Tensor]) -> tuple:
def forward(self, x: Tensor) -> tuple:
"""Forward features from the upstream network.

Args:
x (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
x (Tensor): Features from the upstream network, each is a 4D-tensor.

Returns:
tuple: A tuple of classification scores and bbox prediction.
@@ -173,7 +172,7 @@ def forward(self, x: tuple[Tensor]) -> tuple:
@MODELS.register_module()
class Shared2FCBBoxHead(ConvFCBBoxHead):
def __init__(self, fc_out_channels: int = 1024, *args, **kwargs) -> None:
super().__init__(
super().__init__( # type: ignore
num_shared_convs=0,
num_shared_fcs=2,
num_cls_convs=0,
Original file line number Diff line number Diff line change
@@ -9,10 +9,10 @@
from torch import Tensor

from otx.algo.instance_segmentation.mmdet.models.utils import (
ConfigType,
InstanceList,
OptConfigType,
MultiConfig,
OptInstanceList,
OptMultiConfig,
images_to_levels,
multi_apply,
unmap,
@@ -80,16 +80,16 @@ class AnchorHead(BaseDenseHead):
def __init__(
self,
num_classes: int,
in_channels: tuple[int, ...],
in_channels: int,
anchor_generator: dict,
bbox_coder: dict,
loss_cls: dict,
loss_bbox: dict,
train_cfg: ConfigType,
test_cfg: ConfigType,
init_cfg: MultiConfig,
feat_channels: int = 256,
reg_decoded_bbox: bool = False,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
init_cfg: OptMultiConfig = None,
) -> None:
super().__init__(init_cfg=init_cfg)
self.in_channels = in_channels
@@ -142,7 +142,7 @@ def forward(self, x: tuple[Tensor]) -> tuple[list[Tensor]]:
scale levels, each is a 4D-tensor, the channels number \
is num_base_priors * 4.
"""
return multi_apply(self.forward_single, x)
return multi_apply(self.forward_single, x) # type: ignore

def get_anchors(
self,
@@ -389,10 +389,6 @@ def get_targets(
bbox_targets_list = images_to_levels(all_bbox_targets, num_level_anchors)
bbox_weights_list = images_to_levels(all_bbox_weights, num_level_anchors)
res = (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, avg_factor)
if return_sampling_results:
res = (*res, sampling_results_list)
for i, r in enumerate(rest_results): # user-added return values
rest_results[i] = images_to_levels(r, num_level_anchors)

return res + tuple(rest_results)

Original file line number Diff line number Diff line change
@@ -3,7 +3,7 @@
# Copyright (c) OpenMMLab. All rights reserved.
from __future__ import annotations

from abc import ABCMeta, abstractmethod
from abc import ABCMeta
from typing import TYPE_CHECKING

# TODO(Eugene): replace mmcv.batched_nms with torchvision
@@ -77,10 +77,6 @@ def init_weights(self) -> None:
if hasattr(m, "conv_offset"):
constant_init(m.conv_offset, 0)

@abstractmethod
def loss_by_feat(self, **kwargs) -> dict:
"""Calculate the loss based on the features extracted by the detection head."""

def loss_and_predict(
self,
x: tuple[Tensor],
@@ -142,8 +138,8 @@ def predict_by_feat(
self,
cls_scores: list[Tensor],
bbox_preds: list[Tensor],
batch_img_metas: list[dict],
score_factors: list[Tensor] | None = None,
batch_img_metas: list[dict] | None = None,
cfg: ConfigDict | None = None,
rescale: bool = False,
with_nms: bool = True,
Original file line number Diff line number Diff line change
@@ -23,7 +23,6 @@
from otx.algo.instance_segmentation.mmdet.structures.bbox import (
empty_box_as,
get_box_wh,
scale_boxes,
)

from .anchor_head import AnchorHead
@@ -266,12 +265,8 @@ def _bbox_post_process(
raise RuntimeError(msg)

if rescale:
if img_meta.get("scale_factor") is None:
msg = "scale_factor is required when rescale is True"
raise ValueError(msg)

scale_factor = [1 / s for s in img_meta["scale_factor"]]
results.bboxes = scale_boxes(results.bboxes, scale_factor)
msg = "Rescale is not implemented in RPNHead"
raise NotImplementedError

# filter small size bboxes
if cfg.get("min_bbox_size", -1) >= 0:
Original file line number Diff line number Diff line change
@@ -95,7 +95,7 @@ def predict(self, batch_inputs: Tensor, batch_data_samples: SampleList) -> Sampl
"""Predict results from a batch of inputs and data samples with post-processing."""

@abstractmethod
def _forward(self, batch_inputs: Tensor, batch_data_samples: OptSampleList = None) -> tuple:
def _forward(self, batch_inputs: Tensor, batch_data_samples: SampleList) -> tuple:
"""Network forward process.

Usually includes backbone, neck and head forward without any post-
Original file line number Diff line number Diff line change
@@ -30,57 +30,54 @@ class TwoStageDetector(BaseDetector):
def __init__(
self,
backbone: ConfigType,
neck: OptConfigType = None,
rpn_head: OptConfigType = None,
roi_head: OptConfigType = None,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
neck: ConfigType,
rpn_head: ConfigType,
roi_head: ConfigType,
train_cfg: ConfigType,
test_cfg: ConfigType,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None,
) -> None:
super().__init__(data_preprocessor=data_preprocessor, init_cfg=init_cfg)
self.backbone = MODELS.build(backbone)

if neck is not None:
if neck.type != FPN.__name__:
msg = f"neck type must be {FPN.__name__}, but got {neck.type}"
raise ValueError(msg)
# pop out type for FPN
neck.pop("type")
self.neck = FPN(**neck)

if rpn_head is not None:
rpn_train_cfg = train_cfg.rpn if train_cfg is not None else None
rpn_head_ = rpn_head.copy()
rpn_head_.update(train_cfg=rpn_train_cfg, test_cfg=test_cfg.rpn)
rpn_head_num_classes = rpn_head_.get("num_classes", None)
if rpn_head_num_classes is None:
rpn_head_.update(num_classes=1)
elif rpn_head_num_classes != 1:
warnings.warn(
"The `num_classes` should be 1 in RPN, but get "
f"{rpn_head_num_classes}, please set "
"rpn_head.num_classes = 1 in your config file.",
)
rpn_head_.update(num_classes=1)
if rpn_head_.type != RPNHead.__name__:
msg = f"rpn_head type must be {RPNHead.__name__}, but got {rpn_head_.type}"
raise ValueError(msg)
# pop out type for RPNHead
rpn_head_.pop("type")
self.rpn_head = RPNHead(**rpn_head_)

if roi_head is not None:
# update train and test cfg here for now
rcnn_train_cfg = train_cfg.rcnn if train_cfg is not None else None
roi_head.update(train_cfg=rcnn_train_cfg)
roi_head.update(test_cfg=test_cfg.rcnn)
if roi_head.type != CustomRoIHead.__name__:
msg = f"roi_head type must be {CustomRoIHead.__name__}, but got {roi_head.type}"
raise ValueError(msg)
# pop out type for RoIHead
roi_head.pop("type")
self.roi_head = CustomRoIHead(**roi_head)
if neck["type"] != FPN.__name__:
msg = f"neck type must be {FPN.__name__}, but got {neck['type']}"
raise ValueError(msg)
# pop out type for FPN
neck.pop("type")
self.neck = FPN(**neck)

rpn_train_cfg = train_cfg["rpn"]
rpn_head_ = rpn_head.copy()
rpn_head_.update(train_cfg=rpn_train_cfg, test_cfg=test_cfg["rpn"])
rpn_head_num_classes = rpn_head_.get("num_classes", None)
if rpn_head_num_classes is None:
rpn_head_.update(num_classes=1)
elif rpn_head_num_classes != 1:
warnings.warn(
"The `num_classes` should be 1 in RPN, but get "
f"{rpn_head_num_classes}, please set "
"rpn_head.num_classes = 1 in your config file.",
)
rpn_head_.update(num_classes=1)
if rpn_head_["type"] != RPNHead.__name__:
msg = f"rpn_head type must be {RPNHead.__name__}, but got {rpn_head_['type']}"
raise ValueError(msg)
# pop out type for RPNHead
rpn_head_.pop("type")
self.rpn_head = RPNHead(**rpn_head_)

# update train and test cfg here for now
rcnn_train_cfg = train_cfg["rcnn"]
roi_head.update(train_cfg=rcnn_train_cfg)
roi_head.update(test_cfg=test_cfg["rcnn"])
if roi_head["type"] != CustomRoIHead.__name__:
msg = f"roi_head type must be {CustomRoIHead.__name__}, but got {roi_head['type']}"
raise ValueError(msg)
# pop out type for RoIHead
roi_head.pop("type")
self.roi_head = CustomRoIHead(**roi_head)

self.train_cfg = train_cfg
self.test_cfg = test_cfg
@@ -158,7 +155,7 @@ def _forward(self, batch_inputs: Tensor, batch_data_samples: SampleList) -> tupl
raise ValueError(msg)
rpn_results_list = [data_sample.proposals for data_sample in batch_data_samples]
roi_outs = self.roi_head.forward(x, rpn_results_list, batch_data_samples)
return results + (roi_outs,)
return (*results, roi_outs)

def loss(self, batch_inputs: Tensor, batch_data_samples: SampleList) -> dict:
"""Calculate losses from a batch of inputs and data samples.
@@ -175,11 +172,11 @@ def loss(self, batch_inputs: Tensor, batch_data_samples: SampleList) -> dict:
"""
x = self.extract_feat(batch_inputs)

losses = dict()
losses = {}

# RPN forward and loss
if self.with_rpn:
proposal_cfg = self.train_cfg.get("rpn_proposal", self.test_cfg.rpn)
proposal_cfg = self.train_cfg.get("rpn_proposal", self.test_cfg["rpn"])
rpn_data_samples = copy.deepcopy(batch_data_samples)
# set cat_id of gt_labels to 0 in RPN
for data_sample in rpn_data_samples:
Original file line number Diff line number Diff line change
@@ -1,7 +1,8 @@
"""The original source code is from mmdet. Please refer to https://github.com/open-mmlab/mmdetection/."""

# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Tuple, Union

from __future__ import annotations

import torch

@@ -19,10 +20,10 @@ def multiclass_nms(
score_thr: float,
nms_cfg: ConfigType,
max_num: int = -1,
score_factors: Optional[Tensor] = None,
score_factors: Tensor | None = None,
return_inds: bool = False,
box_dim: int = 4,
) -> Union[Tuple[Tensor, Tensor, Tensor], Tuple[Tensor, Tensor]]:
) -> tuple[Tensor, Tensor, Tensor] | tuple[Tensor, Tensor]:
"""NMS for multi-class bboxes.

Args:
@@ -91,8 +92,7 @@ def multiclass_nms(
dets = torch.cat([bboxes, scores[:, None]], -1)
if return_inds:
return dets, labels, inds
else:
return dets, labels
return dets, labels

dets, keep = batched_nms(bboxes, scores, labels, nms_cfg)

@@ -102,5 +102,4 @@ def multiclass_nms(

if return_inds:
return dets, labels[keep], inds[keep]
else:
return dets, labels[keep]
return dets, labels[keep]
Loading