MMDet MaskRCNN ResNet50/SwinTransformer Decouple #3281

Merged

Changes from 1 commit (72 commits total)
fa0d462
migrate mmdet maskrcnn modules
eugene123tw Apr 5, 2024
763ce7d
style reformat
eugene123tw Apr 5, 2024
5ace6b0
style reformat
eugene123tw Apr 5, 2024
b2b6e0c
style reformat
eugene123tw Apr 5, 2024
a305c7c
ignore mypy, ruff errors
eugene123tw Apr 8, 2024
6b682b7
skip mypy error
eugene123tw Apr 8, 2024
727cf2b
update
eugene123tw Apr 8, 2024
22f8f81
fix loss
eugene123tw Apr 8, 2024
08d766f
add maskrcnn
eugene123tw Apr 8, 2024
873a101
update import
eugene123tw Apr 8, 2024
bfca5f0
update import
eugene123tw Apr 8, 2024
47f3744
add necks
eugene123tw Apr 8, 2024
7bb7f39
update
eugene123tw Apr 8, 2024
d37f8a1
update
eugene123tw Apr 8, 2024
abd1f21
add cross-entropy loss
eugene123tw Apr 9, 2024
4978171
style changes
eugene123tw Apr 9, 2024
330f721
mypy changes and style changes
eugene123tw Apr 10, 2024
92dc0bf
update style
eugene123tw Apr 11, 2024
87a2415
Merge branch 'develop' into eugene/CVS-137823-mmdet-maskrcnn-decouple
eugene123tw Apr 11, 2024
c4dec94
remove box structures
eugene123tw Apr 11, 2024
5723ec2
add resnet
eugene123tw Apr 11, 2024
47de4f1
update
eugene123tw Apr 11, 2024
f5a51da
modify resnet
eugene123tw Apr 15, 2024
63f46c4
add annotation
eugene123tw Apr 15, 2024
46b37d5
style changes
eugene123tw Apr 15, 2024
cef1e24
update
eugene123tw Apr 15, 2024
655baea
fix all mypy issues
eugene123tw Apr 15, 2024
b1ed150
fix mypy issues
eugene123tw Apr 15, 2024
27b6a4a
style changes
eugene123tw Apr 16, 2024
e87cfa9
remove unused losses
eugene123tw Apr 16, 2024
c2c2394
remove focal_loss_pb
eugene123tw Apr 16, 2024
edd85e0
fix all ruff and mypy issues
eugene123tw Apr 16, 2024
742b6fd
fix conflicts
eugene123tw Apr 16, 2024
194a6c2
style change
eugene123tw Apr 16, 2024
c1938de
update
eugene123tw Apr 16, 2024
734a459
update license
eugene123tw Apr 16, 2024
93fcd79
update
eugene123tw Apr 16, 2024
1d0926d
remove duplicates
eugene123tw Apr 17, 2024
1238d28
remove as F
eugene123tw Apr 17, 2024
55d77f5
remove as F
eugene123tw Apr 17, 2024
1598929
remove mmdet mask structures
eugene123tw Apr 17, 2024
498b750
remove duplicates
eugene123tw Apr 17, 2024
a0f52de
style changes
eugene123tw Apr 17, 2024
549d0ef
add new test
eugene123tw Apr 17, 2024
0a074ae
test style change
eugene123tw Apr 17, 2024
9106bc1
fix test
eugene123tw Apr 17, 2024
8197825
Merge branch 'develop' into eugene/CVS-137823-mmdet-maskrcnn-decouple
eugene123tw Apr 17, 2024
07cd25e
change device for unit test
eugene123tw Apr 17, 2024
ab07675
add deployment files
eugene123tw Apr 17, 2024
70717ce
remove deployment from inst-seg
eugene123tw Apr 17, 2024
e5027d9
update deployment
eugene123tw Apr 18, 2024
03e26d3
add mmdeploy maskrcnn opset
eugene123tw Apr 18, 2024
bbeaa32
fix linter
eugene123tw Apr 18, 2024
478158a
update test
eugene123tw Apr 18, 2024
052a582
update test
eugene123tw Apr 18, 2024
28cea6f
update test
eugene123tw Apr 18, 2024
ed7275e
Merge branch 'develop' into eugene/CVS-137823-mmdet-maskrcnn-decouple
eugene123tw Apr 22, 2024
04cd223
replace mmcv.cnn module
eugene123tw Apr 22, 2024
11ae260
remove upsample building
eugene123tw Apr 22, 2024
d8a78ca
remove upsample building
eugene123tw Apr 22, 2024
34ea7d7
use batch_nms from otx
eugene123tw Apr 22, 2024
617e1ac
add swintransformer
eugene123tw Apr 22, 2024
5a6737f
add transformers
eugene123tw Apr 22, 2024
fe34145
add swin transformer
eugene123tw Apr 23, 2024
4dd8bdc
style changes
eugene123tw Apr 23, 2024
a6de9b4
merge upstream
eugene123tw Apr 23, 2024
1b199a0
solve conflicts
eugene123tw Apr 23, 2024
c332430
update instance_segmentation/maskrcnn.py
eugene123tw Apr 23, 2024
6d92199
update nms
eugene123tw Apr 23, 2024
4d75001
fix xai
eugene123tw Apr 24, 2024
7b4b43e
change rotate detection recipe
eugene123tw Apr 24, 2024
0b2a326
fix swint recipe
eugene123tw Apr 24, 2024
style changes
eugene123tw committed Apr 23, 2024
Verified (signed with GitHub's verified signature; the key has expired)

commit 4dd8bdc508feed043ce10c234838a11afdd9bbc2
File 1 of 2 (the decoupled Swin Transformer backbone module):

@@ -29,6 +29,8 @@
 from otx.algo.modules.norm import build_norm_layer
 from otx.algo.modules.transformer import FFN
 
+# ruff: noqa: PLR0913
+
 
 class WindowMSA(BaseModule):
     """Window based multi-head self-attention (W-MSA) module with relative position bias.
@@ -170,7 +172,7 @@ def __init__(
         qk_scale: float | None = None,
         attn_drop_rate: float = 0,
         proj_drop_rate: float = 0,
-        dropout_layer: dict = dict(type="DropPath", drop_prob=0.0),
+        dropout_layer: dict | None = None,
         init_cfg: None = None,
     ):
         super().__init__(init_cfg)
@@ -192,6 +194,7 @@ def __init__(
             init_cfg=None,
         )
 
+        dropout_layer = {"type": "DropPath", "drop_prob": 0.0} if dropout_layer is None else dropout_layer
         _dropout_layer = deepcopy(dropout_layer)
         dropout_type = _dropout_layer.pop("type")
         if dropout_type != "DropPath":
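This hunk shows the commit's recurring refactor: a mutable `dict` default becomes `None` in the signature, with the real default materialized inside the body. That is the standard fix for Python's shared-mutable-default pitfall (ruff flags it as B006). A minimal, self-contained sketch of the failure mode and the fix (function names are illustrative):

def risky(cfg: dict = {"drop_prob": 0.0}):  # one dict object shared by every call
    cfg["drop_prob"] += 0.1
    return cfg["drop_prob"]

def safe(cfg: dict | None = None):  # the pattern adopted in this commit
    cfg = {"drop_prob": 0.0} if cfg is None else cfg
    cfg["drop_prob"] += 0.1
    return cfg["drop_prob"]

print(risky(), risky())  # 0.1 0.2 -- state leaks between calls
print(safe(), safe())    # 0.1 0.1 -- fresh default on every call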
@@ -340,8 +343,8 @@ def __init__(
         drop_rate: float = 0.0,
         attn_drop_rate: float = 0.0,
         drop_path_rate: float = 0.0,
-        act_cfg: dict = dict(type="GELU"),
-        norm_cfg: dict = dict(type="LN"),
+        act_cfg: dict | None = None,
+        norm_cfg: dict | None = None,
         with_cp: bool = False,
         init_cfg: None = None,
     ):
@@ -350,6 +353,9 @@ def __init__(
         self.init_cfg = init_cfg
         self.with_cp = with_cp
 
+        act_cfg = act_cfg if act_cfg is not None else {"type": "GELU"}
+        norm_cfg = norm_cfg if norm_cfg is not None else {"type": "LN"}
+
         self.norm1 = build_norm_layer(norm_cfg, embed_dims)[1]
         self.attn = ShiftWindowMSA(
             embed_dims=embed_dims,
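`build_norm_layer(norm_cfg, embed_dims)[1]` relies on the builder returning a pair with the module in slot 1. A simplified stand-in, assuming the (name, module) convention carried over from mmcv (the real builder is `otx.algo.modules.norm.build_norm_layer`):

import torch.nn as nn

def build_norm_layer(cfg: dict, num_features: int) -> tuple[str, nn.Module]:
    # Returns (name, module), mirroring the mmcv-style contract.
    if cfg["type"] == "LN":
        return "ln", nn.LayerNorm(num_features)
    raise NotImplementedError(cfg["type"])

norm1 = build_norm_layer({"type": "LN"}, 96)[1]  # [1] keeps the module, drops the name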
@@ -360,7 +366,7 @@ def __init__(
             qk_scale=qk_scale,
             attn_drop_rate=attn_drop_rate,
             proj_drop_rate=drop_rate,
-            dropout_layer=dict(type="DropPath", drop_prob=drop_path_rate),
+            dropout_layer={"type": "DropPath", "drop_prob": drop_path_rate},
             init_cfg=None,
         )
 
@@ -370,7 +376,7 @@ def __init__(
             feedforward_channels=feedforward_channels,
             num_fcs=2,
             ffn_drop=drop_rate,
-            dropout_layer=dict(type="DropPath", drop_prob=drop_path_rate),
+            dropout_layer={"type": "DropPath", "drop_prob": drop_path_rate},
             act_cfg=act_cfg,
             add_identity=True,
             init_cfg=None,
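The `dict(type=...)` to `{"type": ...}` rewrites in the last two hunks are behavior-preserving style changes: a dict literal is equivalent but skips the name lookup and call, and it is what ruff's C408 rule ("unnecessary dict call") prefers, presumably the motivation here:

a = dict(type="DropPath", drop_prob=0.1)    # form flagged by ruff C408
b = {"type": "DropPath", "drop_prob": 0.1}  # preferred literal form
assert a == b                               # identical contents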
@@ -436,13 +442,16 @@ def __init__(
         attn_drop_rate: float = 0.0,
         drop_path_rate: list[float] | float = 0.0,
         downsample: BaseModule | None = None,
-        act_cfg: dict = dict(type="GELU"),
-        norm_cfg: dict = dict(type="LN"),
+        act_cfg: dict | None = None,
+        norm_cfg: dict | None = None,
         with_cp: bool = False,
         init_cfg: None = None,
     ):
         super().__init__(init_cfg=init_cfg)
 
+        act_cfg = act_cfg if act_cfg is not None else {"type": "GELU"}
+        norm_cfg = norm_cfg if norm_cfg is not None else {"type": "LN"}
+
         if isinstance(drop_path_rate, list):
             drop_path_rates = drop_path_rate
             if len(drop_path_rates) != depth:
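The length check above implies `drop_path_rate` is either a scalar or an explicit per-block list that must match `depth`. In typical Swin implementations the scalar case is expanded into a linearly increasing stochastic-depth schedule; an illustrative sketch (the expansion code itself is outside this hunk):

import torch

depth, drop_path_rate = 6, 0.1
drop_path_rates = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
assert len(drop_path_rates) == depth  # mirrors the validation in the diff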
@@ -561,14 +570,16 @@ def __init__(
         drop_rate: float = 0.0,
         attn_drop_rate: float = 0.0,
         drop_path_rate: float = 0.1,
-        act_cfg: dict = dict(type="GELU"),
-        norm_cfg: dict = dict(type="LN"),
+        act_cfg: dict | None = None,
+        norm_cfg: dict | None = None,
         with_cp: bool = False,
         pretrained: str | None = None,
         convert_weights: bool = False,
         frozen_stages: int = -1,
         init_cfg: dict | None = None,
     ):
+        act_cfg = act_cfg if act_cfg is not None else {"type": "GELU"}
+        norm_cfg = norm_cfg if norm_cfg is not None else {"type": "LN"}
         self.convert_weights = convert_weights
         self.frozen_stages = frozen_stages
         if isinstance(pretrain_img_size, int):
File 2 of 2 (a patch embedding/merging module):

@@ -236,10 +236,11 @@ def __init__(
         padding: int | tuple | str = "corner",
         dilation: int | tuple = 1,
         bias: bool = False,
-        norm_cfg: ConfigDict | dict | None = dict(type="LN"),
+        norm_cfg: ConfigDict | dict | None = None,
         init_cfg: ConfigDict | dict | None = None,
     ) -> None:
         super().__init__(init_cfg=init_cfg)
+        norm_cfg = norm_cfg if norm_cfg is not None else {"type": "LN"}
         self.in_channels = in_channels
         self.out_channels = out_channels
         stride = stride if stride else kernel_size
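Given the `kernel_size`, the `"corner"` padding, and `stride` falling back to `kernel_size`, this second file looks like a Swin-style patch embedding/merging layer, which downsamples by folding each 2x2 spatial neighborhood into the channel dimension. An illustrative sketch of that operation (not the OTX implementation):

import torch

x = torch.randn(1, 8, 8, 96)  # (B, H, W, C)
b, h, w, c = x.shape
x = x.reshape(b, h // 2, 2, w // 2, 2, c)
x = x.permute(0, 1, 3, 2, 4, 5).reshape(b, h // 2, w // 2, 4 * c)  # (B, H/2, W/2, 4C)
# A Linear(4 * c, 2 * c) projection typically follows: resolution halves, channels double.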