fix linter
eugene123tw committed Apr 18, 2024
1 parent 03e26d3 · commit bbeaa32
Showing 5 changed files with 45 additions and 18 deletions.
@@ -7,6 +7,7 @@
"""MMDet BBox Head."""
from __future__ import annotations

+import warnings
from typing import TYPE_CHECKING

import torch
@@ -324,7 +325,7 @@ def _predict_by_feat_single(
@FUNCTION_REWRITER.register_rewriter(
"otx.algo.instance_segmentation.mmdet.models.custom_roi_head.CustomConvFCBBoxHead.forward",
)
-def bbox_head__forward(self, x):
+def bbox_head__forward(self: BBoxHead, x: Tensor) -> tuple[Tensor]:
"""Rewrite `forward` for default backend.
This function uses the specific `forward` function for the BBoxHead
@@ -343,7 +344,7 @@ def bbox_head__forward(self, x):
ctx = FUNCTION_REWRITER.get_context()

@mark("bbox_head_forward", inputs=["bbox_feats"], outputs=["cls_score", "bbox_pred"])
-def __forward(self, x):
+def __forward(self: BBoxHead, x: Tensor) -> tuple[Tensor]:
return ctx.origin_func(self, x)

return __forward(self, x)
@@ -385,9 +386,11 @@ def bbox_head__predict_by_feat(
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
"""
+warnings.warn(f"rescale: {rescale} is not supported in ONNX export. Ignored.", stacklevel=2)
ctx = FUNCTION_REWRITER.get_context()
if rois.ndim != 3:
-raise ValueError("Only support export two stage model to ONNX with batch dimension.")
+msg = "Only support export two stage model to ONNX with batch dimension."
+raise ValueError(msg)

img_shape = batch_img_metas[0]["img_shape"]
if self.custom_cls_channels:
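For context on the rewriter boilerplate in this file, the mmdeploy pattern it relies on can be summarised with a minimal, hypothetical sketch: the target path my_pkg.MyHead.forward is made up, and only FUNCTION_REWRITER, get_context and origin_func come from the code above. During export, calls to the named function are redirected to the decorated replacement, which can still reach the original implementation through the rewriter context.

from mmdeploy.core.rewriters import FUNCTION_REWRITER


@FUNCTION_REWRITER.register_rewriter("my_pkg.MyHead.forward")  # hypothetical target
def my_head__forward(self, x):
    """Export-friendly replacement called instead of MyHead.forward during deploy."""
    ctx = FUNCTION_REWRITER.get_context()
    # A real rewrite would simplify dynamic shapes or Python-side control flow here;
    # this sketch just falls through to the original forward.
    return ctx.origin_func(self, x)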
@@ -691,7 +691,7 @@ def standard_roi_head__predict_mask(
batch_img_metas: list[dict],
results_list: list[Tensor],
rescale: bool = False,
-) -> list[Tensor]:
+) -> tuple[Tensor, Tensor, Tensor]:
"""Forward the mask head and predict detection results on the features of the upstream network.
Args:
@@ -729,7 +729,7 @@ def standard_roi_head__predict_mask(
mask_results = self._mask_forward(x, mask_rois)
mask_preds = mask_results["mask_preds"]
num_det = det_bboxes.shape[1]
-segm_results = self.mask_head.predict_by_feat(
+segm_results: Tensor = self.mask_head.predict_by_feat(
mask_preds,
results_list,
batch_img_metas,
@@ -32,6 +32,8 @@
get_box_wh,
)

+# ruff: noqa: PLW2901

if TYPE_CHECKING:
from mmengine.config import ConfigDict

@@ -201,8 +203,8 @@ def _predict_by_feat_single(
raise RuntimeError(msg)

reg_dim = self.bbox_coder.encode_size
-bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, reg_dim) # noqa: PLW2901
-cls_score = cls_score.permute(1, 2, 0).reshape(-1, self.cls_out_channels) # noqa: PLW2901
+bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, reg_dim)
+cls_score = cls_score.permute(1, 2, 0).reshape(-1, self.cls_out_channels)
scores = cls_score.sigmoid() if self.use_sigmoid_cls else cls_score.softmax(-1)[:, :-1]

scores = torch.squeeze(scores)
@@ -212,8 +214,8 @@
ranked_scores, rank_inds = scores.sort(descending=True)
topk_inds = rank_inds[:nms_pre]
scores = ranked_scores[:nms_pre]
-bbox_pred = bbox_pred[topk_inds, :] # noqa: PLW2901
-priors = priors[topk_inds] # noqa: PLW2901
+bbox_pred = bbox_pred[topk_inds, :]
+priors = priors[topk_inds]

mlvl_bbox_preds.append(bbox_pred)
mlvl_valid_priors.append(priors)
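The pre-NMS top-k step touched above can also be shown in isolation. This is a standalone sketch with toy tensors (the shapes and the nms_pre value are assumptions, not the OTX configuration): only the nms_pre highest-scoring candidates of a feature level are kept before box decoding and NMS.

import torch

nms_pre = 1000
scores = torch.rand(5000)        # one score per prior on a single feature level
bbox_pred = torch.rand(5000, 4)  # encoded box deltas for the same priors
priors = torch.rand(5000, 4)     # the priors themselves

if 0 < nms_pre < scores.shape[0]:
    ranked_scores, rank_inds = scores.sort(descending=True)
    topk_inds = rank_inds[:nms_pre]
    scores = ranked_scores[:nms_pre]
    bbox_pred = bbox_pred[topk_inds, :]
    priors = priors[topk_inds]

print(scores.shape, bbox_pred.shape, priors.shape)  # 1000 candidates remain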
@@ -7,6 +7,7 @@
"""MMDet FCNMaskHead."""
from __future__ import annotations

+import warnings
from typing import TYPE_CHECKING

import numpy as np
@@ -441,7 +442,7 @@ def fcn_mask_head__predict_by_feat(
rcnn_test_cfg: ConfigDict,
rescale: bool = False,
activate_map: bool = False,
-) -> list[Tensor]:
+) -> Tensor:
"""Transform a batch of output features extracted from the head into mask results.
Args:
@@ -467,6 +468,9 @@
(num_instances, ).
- masks (Tensor): Has a shape (num_instances, H, W).
"""
+warnings.warn(f"rescale: {rescale} is not supported in deploy mode", stacklevel=2)
+warnings.warn(f"activate_map: {activate_map} is not supported in deploy mode", stacklevel=2)

ctx = FUNCTION_REWRITER.get_context()
ori_shape = batch_img_metas[0]["img_shape"]
dets, det_labels = results_list
@@ -528,22 +532,23 @@ def _do_paste_mask_ops(
# this has more operations but is faster on COCO-scale dataset.
device = masks.device
if skip_empty:
-x0_int, y0_int = torch.clamp(boxes.min(dim=0).values.floor()[:2] - 1, min=0).to(dtype=torch.int32)
+box_values, _ = boxes.min(dim=0)
+x0_int, y0_int = torch.clamp(box_values.floor()[:2] - 1, min=0).to(dtype=torch.int32)
x1_int = torch.clamp(boxes[:, 2].max().ceil() + 1, max=img_w).to(dtype=torch.int32)
y1_int = torch.clamp(boxes[:, 3].max().ceil() + 1, max=img_h).to(dtype=torch.int32)
else:
x0_int, y0_int = 0, 0
x1_int, y1_int = img_w, img_h
x0, y0, x1, y1 = torch.split(boxes, 1, dim=1) # each is Nx1

-N = masks.shape[0]
+num_preds = masks.shape[0]

img_y = torch.arange(y0_int, y1_int, device=device).to(torch.float32) + 0.5
img_x = torch.arange(x0_int, x1_int, device=device).to(torch.float32) + 0.5
img_y = (img_y - y0) / (y1 - y0) * 2 - 1
img_x = (img_x - x0) / (x1 - x0) * 2 - 1
-gx = img_x[:, None, :].expand(N, img_y.size(1), img_x.size(1))
-gy = img_y[:, :, None].expand(N, img_y.size(1), img_x.size(1))
+gx = img_x[:, None, :].expand(num_preds, img_y.size(1), img_x.size(1))
+gy = img_y[:, :, None].expand(num_preds, img_y.size(1), img_x.size(1))
grid = torch.stack([gx, gy], dim=3)

img_masks = torch.nn.functional.grid_sample(masks.to(dtype=torch.float32), grid, align_corners=False)
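The _do_paste_mask_ops edits above are pure renames, but the technique they sit in is worth spelling out: pixel centres of the output canvas are normalised to [-1, 1] relative to each box, so a single grid_sample call resizes every low-resolution instance mask onto its box region. A minimal, self-contained sketch with toy shapes (the mask size, box coordinates and image size are assumptions):

import torch
import torch.nn.functional as F

masks = torch.rand(2, 1, 28, 28)                   # two low-resolution instance masks
boxes = torch.tensor([[4.0, 6.0, 60.0, 50.0],
                      [10.0, 12.0, 40.0, 44.0]])   # x0, y0, x1, y1 in image coords
img_h, img_w = 64, 64

x0, y0, x1, y1 = torch.split(boxes, 1, dim=1)      # each is Nx1
img_y = torch.arange(0, img_h, dtype=torch.float32) + 0.5
img_x = torch.arange(0, img_w, dtype=torch.float32) + 0.5
img_y = (img_y - y0) / (y1 - y0) * 2 - 1           # N x img_h, in [-1, 1] inside each box
img_x = (img_x - x0) / (x1 - x0) * 2 - 1           # N x img_w

num_preds = masks.shape[0]
gx = img_x[:, None, :].expand(num_preds, img_y.size(1), img_x.size(1))
gy = img_y[:, :, None].expand(num_preds, img_y.size(1), img_x.size(1))
grid = torch.stack([gx, gy], dim=3)                # N x img_h x img_w x 2

img_masks = F.grid_sample(masks, grid, align_corners=False)
print(img_masks.shape)                             # torch.Size([2, 1, 64, 64])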
@@ -17,6 +17,8 @@

from .base_roi_extractor import BaseRoIExtractor

+# ruff: noqa: ARG004


@MODELS.register_module()
class SingleRoIExtractor(BaseRoIExtractor):
@@ -121,6 +123,7 @@ def forward(self, feats: tuple[Tensor], rois: Tensor, roi_scale_factor: float |

if is_mmdeploy_enabled():
from mmdeploy.core.rewriters import FUNCTION_REWRITER
+from torch import Graph
from torch.autograd import Function

class SingleRoIExtractorOpenVINO(Function):
@@ -136,12 +139,26 @@ def __init__(self) -> None:
super().__init__()

@staticmethod
-def forward(g, output_size, featmap_strides, sample_num, rois, *feats):
+def forward(
+    g: Graph,
+    output_size: int,
+    featmap_strides: int,
+    sample_num: int,
+    rois: torch.Value,
+    *feats: tuple[torch.Value],
+) -> Tensor:
"""Run forward."""
return SingleRoIExtractorOpenVINO.origin_output

@staticmethod
-def symbolic(g, output_size, featmap_strides, sample_num, rois, *feats):
+def symbolic(
+    g: Graph,
+    output_size: int,
+    featmap_strides: list[int],
+    sample_num: int,
+    rois: torch.Value,
+    *feats: tuple[torch.Value],
+) -> Graph:
"""Symbolic function for creating onnx op."""
from torch.onnx.symbolic_opset10 import _slice

@@ -180,10 +197,10 @@ def single_roi_extractor__forward__openvino(
ctx = FUNCTION_REWRITER.get_context()

# Adding original output to SingleRoIExtractorOpenVINO.
-state = torch._C._get_tracing_state()
+state = torch._C._get_tracing_state() # noqa: SLF001
origin_output = ctx.origin_func(self, feats, rois, roi_scale_factor)
SingleRoIExtractorOpenVINO.origin_output = origin_output
-torch._C._set_tracing_state(state)
+torch._C._set_tracing_state(state) # noqa: SLF001

output_size = self.roi_layers[0].output_size[0]
featmap_strides = self.featmap_strides
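Finally, the SingleRoIExtractorOpenVINO annotations above build on the generic torch.autograd.Function plus symbolic() export pattern, sketched below with a made-up AddOne op (nothing OpenVINO-specific): forward supplies the value the tracer sees, while symbolic tells the ONNX exporter which graph node to emit in its place. In the file above, this mechanism appears to be used so that symbolic() can emit the OpenVINO ROI-extractor op while forward() simply replays the previously captured original output.

import torch
from torch.autograd import Function


class AddOne(Function):
    """Toy custom op: adds one in eager/tracing mode, exported as a plain ONNX Add node."""

    @staticmethod
    def forward(ctx, x):
        # Value produced while tracing or exporting (and in normal eager execution).
        return x + 1

    @staticmethod
    def symbolic(g, x):
        # Node emitted into the ONNX graph in place of the Python forward().
        one = g.op("Constant", value_t=torch.tensor(1.0))
        return g.op("Add", x, one)


class Wrapper(torch.nn.Module):
    def forward(self, x):
        return AddOne.apply(x)


if __name__ == "__main__":
    torch.onnx.export(Wrapper(), torch.rand(2, 3), "add_one.onnx")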